
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 04 Mar 2016 11:15:49
Message-Id: 1457090131.47a90382973671498d5d2d5e308bf5985467506d.mpagano@gentoo
commit: 47a90382973671498d5d2d5e308bf5985467506d
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 4 11:15:31 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 4 11:15:31 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=47a90382

Linux patch 4.4.4

0000_README | 4 +
1003_linux-4.4.4.patch | 13326 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 13330 insertions(+)

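For anyone reproducing this update by hand: the 0000_README entries below are applied in numerical order on top of a vanilla kernel tree, with each 1000-series stable patch advancing the tree by one point release. A minimal sketch using GNU patch (the starting tree name is an illustrative assumption, and this is not the genpatches/ebuild tooling itself):

  cd linux-4.4.3                                  # tree already patched up to 4.4.3
  patch -p1 --dry-run < 1003_linux-4.4.4.patch    # verify the patch applies cleanly first
  patch -p1 < 1003_linux-4.4.4.patch              # 4.4.3 -> 4.4.4
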
diff --git a/0000_README b/0000_README
index 91631f2..08bdc40 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-4.4.3.patch
From: http://www.kernel.org
Desc: Linux 4.4.3

+Patch: 1003_linux-4.4.4.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.4.4.patch b/1003_linux-4.4.4.patch
new file mode 100644
index 0000000..62b4415
--- /dev/null
+++ b/1003_linux-4.4.4.patch
@@ -0,0 +1,13326 @@
+diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
+index c477af086e65..686a64bba775 100644
+--- a/Documentation/filesystems/efivarfs.txt
++++ b/Documentation/filesystems/efivarfs.txt
+@@ -14,3 +14,10 @@ filesystem.
+ efivarfs is typically mounted like this,
+
+ mount -t efivarfs none /sys/firmware/efi/efivars
++
++Due to the presence of numerous firmware bugs where removing non-standard
++UEFI variables causes the system firmware to fail to POST, efivarfs
++files that are not well-known standardized variables are created
++as immutable files. This doesn't prevent removal - "chattr -i" will work -
++but it does prevent this kind of failure from being accomplished
++accidentally.
+diff --git a/Makefile b/Makefile
+index 802be10c40c5..344bc6f27ea1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
+index 258b0e5ad332..68b6092349d7 100644
+--- a/arch/arc/include/asm/irqflags-arcv2.h
++++ b/arch/arc/include/asm/irqflags-arcv2.h
+@@ -22,6 +22,7 @@
+ #define AUX_IRQ_CTRL 0x00E
+ #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
+ #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
++#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+ #define AUX_IRQ_PRIORITY 0x206
+ #define ICAUSE 0x40a
+ #define AUX_IRQ_SELECT 0x40b
+@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+ }
+
++static inline void arc_softirq_trigger(int irq)
++{
++ write_aux_reg(AUX_IRQ_HINT, irq);
++}
++
++static inline void arc_softirq_clear(int irq)
++{
++ write_aux_reg(AUX_IRQ_HINT, 0);
++}
++
+ #else
+
+ .macro IRQ_DISABLE scratch
+diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
+index cbfec79137bf..c1264607bbff 100644
+--- a/arch/arc/kernel/entry-arcv2.S
++++ b/arch/arc/kernel/entry-arcv2.S
+@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
+ VECTOR handle_interrupt ; (16) Timer0
+ VECTOR handle_interrupt ; unused (Timer1)
+ VECTOR handle_interrupt ; unused (WDT)
+-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt ; (23) End of fixed IRQs
++VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
++VECTOR handle_interrupt ; (20) perf Interrupt
++VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
++VECTOR handle_interrupt ; unused
++VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
+
+ .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
+ VECTOR handle_interrupt
+@@ -211,7 +212,11 @@ debug_marker_syscall:
+ ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
+ ; entry was via Exception in DS which got preempted in kernel).
+ ;
+-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
++; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
++;
++; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
++; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
++
+ .Lintr_ret_to_delay_slot:
+ debug_marker_ds:
+
+@@ -222,18 +227,23 @@ debug_marker_ds:
+ ld r2, [sp, PT_ret]
+ ld r3, [sp, PT_status32]
+
++ ; STAT32 for Int return created from scratch
++ ; (No delay dlot, disable Further intr in trampoline)
++
+ bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
+ st r0, [sp, PT_status32]
+
+ mov r1, .Lintr_ret_to_delay_slot_2
+ st r1, [sp, PT_ret]
+
++ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
+ st r2, [sp, 0]
+ st r3, [sp, 4]
+
+ b .Lisr_ret_fast_path
+
+ .Lintr_ret_to_delay_slot_2:
++ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
+ sub sp, sp, SZ_PT_REGS
+ st r9, [sp, -4]
+
+@@ -243,11 +253,19 @@ debug_marker_ds:
+ ld r9, [sp, 4]
+ sr r9, [erstatus]
+
++ ; restore AUX_USER_SP if returning to U mode
++ bbit0 r9, STATUS_U_BIT, 1f
++ ld r9, [sp, PT_sp]
++ sr r9, [AUX_USER_SP]
++
++1:
+ ld r9, [sp, 8]
+ sr r9, [erbta]
+
+ ld r9, [sp, -4]
+ add sp, sp, SZ_PT_REGS
++
++ ; return from pure kernel mode to delay slot
+ rtie
+
+ END(ret_from_exception)
+diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
+index bd237acdf4f2..30d806ce0c78 100644
+--- a/arch/arc/kernel/mcip.c
++++ b/arch/arc/kernel/mcip.c
+@@ -11,9 +11,12 @@
+ #include <linux/smp.h>
+ #include <linux/irq.h>
+ #include <linux/spinlock.h>
++#include <asm/irqflags-arcv2.h>
+ #include <asm/mcip.h>
+ #include <asm/setup.h>
+
++#define SOFTIRQ_IRQ 21
++
+ static char smp_cpuinfo_buf[128];
+ static int idu_detected;
+
+@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
+ static void mcip_setup_per_cpu(int cpu)
+ {
+ smp_ipi_irq_setup(cpu, IPI_IRQ);
++ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+ }
+
+ static void mcip_ipi_send(int cpu)
+@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
+ unsigned long flags;
+ int ipi_was_pending;
+
++ /* ARConnect can only send IPI to others */
++ if (unlikely(cpu == raw_smp_processor_id())) {
++ arc_softirq_trigger(SOFTIRQ_IRQ);
++ return;
++ }
++
+ /*
+ * NOTE: We must spin here if the other cpu hasn't yet
+ * serviced a previous message. This can burn lots
+@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
+ unsigned long flags;
+ unsigned int __maybe_unused copy;
+
++ if (unlikely(irq == SOFTIRQ_IRQ)) {
++ arc_softirq_clear(irq);
++ return;
++ }
++
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ /* Who sent the IPI */
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index 259c0ca9c99a..ddbb361267d8 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -162,10 +162,9 @@ choice
+ mobile SoCs in the Kona family of chips (e.g. bcm28155,
+ bcm11351, etc...)
+
+- config DEBUG_BCM63XX
++ config DEBUG_BCM63XX_UART
+ bool "Kernel low-level debugging on BCM63XX UART"
+ depends on ARCH_BCM_63XX
+- select DEBUG_UART_BCM63XX
+
+ config DEBUG_BERLIN_UART
+ bool "Marvell Berlin SoC Debug UART"
+@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
+ default "debug/vf.S" if DEBUG_VF_UART
+ default "debug/vt8500.S" if DEBUG_VT8500_UART0
+ default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+- default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
++ default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
+ default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ default "mach/debug-macro.S"
+
+@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
+ ARCH_IOP33X || ARCH_IXP4XX || \
+ ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+
+-# Compatibility options for BCM63xx
+-config DEBUG_UART_BCM63XX
+- def_bool ARCH_BCM_63XX
+-
+ config DEBUG_UART_PHYS
+ hex "Physical base address of debug UART"
+ default 0x00100a00 if DEBUG_NETX_UART
+@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
+ default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
+- default 0xfffe8600 if DEBUG_UART_BCM63XX
++ default 0xfffe8600 if DEBUG_BCM63XX_UART
+ default 0xfffff700 if ARCH_IOP33X
+ depends on ARCH_EP93XX || \
+ DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
+ DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
+ DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
+ DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
+ DEBUG_AT91_UART
+
+@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
+ default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0xfc40ab00 if DEBUG_BRCMSTB_UART
+ default 0xfc705000 if DEBUG_ZTE_ZX
+- default 0xfcfe8600 if DEBUG_UART_BCM63XX
++ default 0xfcfe8600 if DEBUG_BCM63XX_UART
+ default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ default 0xfd000000 if ARCH_SPEAR13XX
+ default 0xfd012000 if ARCH_MV78XX0
+@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
+ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
+ DEBUG_NETX_UART || \
+ DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+
+ config DEBUG_UART_8250_SHIFT
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 1afe24629d1f..b0c912feaa2f 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -90,7 +90,7 @@
+ #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
+ #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
+ #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
+-#define PIN_PA15 14
++#define PIN_PA15 15
+ #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
+ #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
+ #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index 68ee3ce17b82..b4c6d99364f1 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -16,7 +16,7 @@
+
+ extern struct smp_operations psci_smp_ops;
+
+-#ifdef CONFIG_ARM_PSCI
++#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
+ bool psci_smp_available(void);
+ #else
+ static inline bool psci_smp_available(void) { return false; }
+diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
+index 0375c8caa061..9408a994cc91 100644
+--- a/arch/arm/include/asm/xen/page-coherent.h
++++ b/arch/arm/include/asm/xen/page-coherent.h
+@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
++ unsigned long page_pfn = page_to_xen_pfn(page);
++ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
++ unsigned long compound_pages =
++ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
++ bool local = (page_pfn <= dev_pfn) &&
++ (dev_pfn - page_pfn < compound_pages);
++
+ /*
+- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+- * multiple Xen page, it's not possible to have a mix of local and
+- * foreign Xen page. So if the first xen_pfn == mfn the page is local
+- * otherwise it's a foreign page grant-mapped in dom0. If the page is
+- * local we can safely call the native dma_ops function, otherwise we
+- * call the xen specific function.
++ * Dom0 is mapped 1:1, while the Linux page can span across
++ * multiple Xen pages, it's not possible for it to contain a
++ * mix of local and foreign Xen pages. So if the first xen_pfn
++ * == mfn the page is local otherwise it's a foreign page
++ * grant-mapped in dom0. If the page is local we can safely
++ * call the native dma_ops function, otherwise we call the xen
++ * specific function.
+ */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
+index 7b76ce01c21d..8633c703546a 100644
+--- a/arch/arm/mach-omap2/gpmc-onenand.c
++++ b/arch/arm/mach-omap2/gpmc-onenand.c
+@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
+
+ static void set_onenand_cfg(void __iomem *onenand_base)
+ {
+- u32 reg;
++ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
+ reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
+ ONENAND_SYS_CFG1_BL_16;
+ if (onenand_flags & ONENAND_FLAG_SYNCREAD)
+@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ else
+ reg &= ~ONENAND_SYS_CFG1_VHF;
++
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+ }
+
+@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
+ }
+ }
+
++ onenand_async.sync_write = true;
+ omap2_onenand_calc_async_timings(&t);
+
+ ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index cd822d8454c0..b6c90e5006e4 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils)
+ endif
+
+ KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
++KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
+ KBUILD_AFLAGS += $(lseinstr)
+
+ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 2046c0230224..21ed7150fec3 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -33,7 +33,7 @@
+ #define PAGE_SHIFT 16
+ #endif
+ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+-#define PAGE_MASK (~(PAGE_SIZE - 1))
++#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+ /*
+ * This is used for calculating the real page sizes
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 8957f15e21ec..18826aa15a7c 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ static inline pte_t pte_mkyoung(pte_t pte)
+ {
+ pte_val(pte) |= _PAGE_ACCESSED;
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ else
+@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ else
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 6499d93ae68d..47bc45a67e9b 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ if ((config_enabled(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+- (regs->regs[2] == __NR_syscall)) {
++ (regs->regs[2] == __NR_syscall))
+ i++;
+- n++;
+- }
+
+ while (n--)
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
+diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
+index bf9f1a77f0e5..a2631a52ca99 100644
+--- a/arch/mips/loongson64/loongson-3/hpet.c
++++ b/arch/mips/loongson64/loongson-3/hpet.c
+@@ -13,6 +13,9 @@
+ #define SMBUS_PCI_REG64 0x64
+ #define SMBUS_PCI_REGB4 0xb4
+
++#define HPET_MIN_CYCLES 64
++#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
++
+ static DEFINE_SPINLOCK(hpet_lock);
+ DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+
+@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
+ cnt += delta;
+ hpet_write(HPET_T0_CMP, cnt);
+
+- res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
+- return res;
++ res = (int)(cnt - hpet_read(HPET_COUNTER));
++
++ return res < HPET_MIN_CYCLES ? -ETIME : 0;
+ }
+
+ static irqreturn_t hpet_irq_handler(int irq, void *data)
+@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, HPET_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+- cd->min_delta_ns = 5000;
++ cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
+
+ clockevents_register_device(cd);
+ setup_irq(HPET_T0_IRQ, &hpet_irq);
+diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
+index 1a4738a8f2d3..509832a9836c 100644
+--- a/arch/mips/loongson64/loongson-3/smp.c
++++ b/arch/mips/loongson64/loongson-3/smp.c
+@@ -30,13 +30,13 @@
+ #include "smp.h"
+
+ DEFINE_PER_CPU(int, cpu_state);
+-DEFINE_PER_CPU(uint32_t, core0_c0count);
+
+ static void *ipi_set0_regs[16];
+ static void *ipi_clear0_regs[16];
+ static void *ipi_status0_regs[16];
+ static void *ipi_en0_regs[16];
+ static void *ipi_mailbox_buf[16];
++static uint32_t core0_c0count[NR_CPUS];
+
+ /* read a 32bit value from ipi register */
+ #define loongson3_ipi_read32(addr) readl(addr)
+@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
+ if (action & SMP_ASK_C0COUNT) {
+ BUG_ON(cpu != 0);
+ c0count = read_c0_count();
+- for (i = 1; i < num_possible_cpus(); i++)
+- per_cpu(core0_c0count, i) = c0count;
++ c0count = c0count ? c0count : 1;
++ for (i = 1; i < nr_cpu_ids; i++)
++ core0_c0count[i] = c0count;
++ __wbflush(); /* Let others see the result ASAP */
+ }
+ }
+
+-#define MAX_LOOPS 1111
++#define MAX_LOOPS 800
+ /*
+ * SMP init and finish on secondary CPUs
+ */
+@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+
+ i = 0;
+- __this_cpu_write(core0_c0count, 0);
++ core0_c0count[cpu] = 0;
+ loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
+- while (!__this_cpu_read(core0_c0count)) {
++ while (!core0_c0count[cpu]) {
+ i++;
+ cpu_relax();
+ }
+
+ if (i > MAX_LOOPS)
+ i = MAX_LOOPS;
+- initcount = __this_cpu_read(core0_c0count) + i;
++ if (cpu_data[cpu].package)
++ initcount = core0_c0count[cpu] + i;
++ else /* Local access is faster for loops */
++ initcount = core0_c0count[cpu] + i/2;
++
+ write_c0_count(initcount);
+ }
+
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 32e0be27673f..29f73e00253d 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+ #endif
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_rixi) {
+ #ifdef _PAGE_NO_EXEC_SHIFT
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index f69ecaa7ce33..52c1e273f8cd 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
+ eeh_pcid_put(dev);
+ if (driver->err_handler &&
+ driver->err_handler->error_detected &&
+- driver->err_handler->slot_reset &&
+- driver->err_handler->resume)
++ driver->err_handler->slot_reset)
+ return NULL;
+ }
+
+diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
+index 2559b16da525..17d9dcd29d45 100644
+--- a/arch/s390/include/asm/fpu/internal.h
++++ b/arch/s390/include/asm/fpu/internal.h
+@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
+ fpregs->pad = 0;
++ fpregs->fpc = fpu->fpc;
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+ else
+@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+
+ static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
++ fpu->fpc = fpregs->fpc;
+ if (MACHINE_HAS_VX)
+ convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+ else
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index efaac2c3bb77..e9a983f40a24 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
+ struct kvm_s390_sie_block *sie_block;
+ unsigned int host_acrs[NUM_ACRS];
+ struct fpu host_fpregs;
+- struct fpu guest_fpregs;
+ struct kvm_s390_local_interrupt local_int;
+ struct hrtimer ckc_timer;
+ struct kvm_s390_pgm_info pgm;
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 9cd248f637c7..dc6c9c604543 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -181,6 +181,7 @@ int main(void)
+ OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
+ OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
+ OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
++ OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
+ OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
+ OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
+ OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 66c94417c0ba..4af60374eba0 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
+
+ /* Restore high gprs from signal stack */
+ if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+- sizeof(&sregs_ext->gprs_high)))
++ sizeof(sregs_ext->gprs_high)))
+ return -EFAULT;
+ for (i = 0; i < NUM_GPRS; i++)
+ *(__u32 *)&regs->gprs[i] = gprs_high[i];
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 846589281b04..a08d0afd5ff6 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+-/*
+- * Backs up the current FP/VX register save area on a particular
+- * destination. Used to switch between different register save
+- * areas.
+- */
+-static inline void save_fpu_to(struct fpu *dst)
+-{
+- dst->fpc = current->thread.fpu.fpc;
+- dst->regs = current->thread.fpu.regs;
+-}
+-
+-/*
+- * Switches the FP/VX register save area from which to lazy
+- * restore register contents.
+- */
+-static inline void load_fpu_from(struct fpu *from)
+-{
+- current->thread.fpu.fpc = from->fpc;
+- current->thread.fpu.regs = from->regs;
+-}
+-
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ /* Save host register state */
+ save_fpu_regs();
+- save_fpu_to(&vcpu->arch.host_fpregs);
+-
+- if (test_kvm_facility(vcpu->kvm, 129)) {
+- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+- /*
+- * Use the register save area in the SIE-control block
+- * for register restore and save in kvm_arch_vcpu_put()
+- */
+- current->thread.fpu.vxrs =
+- (__vector128 *)&vcpu->run->s.regs.vrs;
+- } else
+- load_fpu_from(&vcpu->arch.guest_fpregs);
++ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
++ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+
++ /* Depending on MACHINE_HAS_VX, data stored to vrs either
++ * has vector register or floating point register format.
++ */
++ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
++ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ if (test_fp_ctl(current->thread.fpu.fpc))
+ /* User space provided an invalid FPC, let's clear it */
+ current->thread.fpu.fpc = 0;
+@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ gmap_disable(vcpu->arch.gmap);
+
++ /* Save guest register state */
+ save_fpu_regs();
++ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+
+- if (test_kvm_facility(vcpu->kvm, 129))
+- /*
+- * kvm_arch_vcpu_load() set up the register save area to
+- * the &vcpu->run->s.regs.vrs and, thus, the vector registers
+- * are already saved. Only the floating-point control must be
+- * copied.
+- */
+- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+- else
+- save_fpu_to(&vcpu->arch.guest_fpregs);
+- load_fpu_from(&vcpu->arch.host_fpregs);
++ /* Restore host register state */
++ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
++ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ restore_access_regs(vcpu->arch.host_acrs);
+@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+ memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+ vcpu->arch.sie_block->gcr[0] = 0xE0UL;
+ vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+- vcpu->arch.guest_fpregs.fpc = 0;
+- asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
++ /* make sure the new fpc will be lazily loaded */
++ save_fpu_regs();
++ current->thread.fpu.fpc = 0;
+ vcpu->arch.sie_block->gbea = 1;
+ vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ vcpu->arch.local_int.wq = &vcpu->wq;
+ vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+
+- /*
+- * Allocate a save area for floating-point registers. If the vector
+- * extension is available, register contents are saved in the SIE
+- * control block. The allocated save area is still required in
+- * particular places, for example, in kvm_s390_vcpu_store_status().
+- */
+- vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
+- GFP_KERNEL);
+- if (!vcpu->arch.guest_fpregs.fprs) {
+- rc = -ENOMEM;
+- goto out_free_sie_block;
+- }
+-
+ rc = kvm_vcpu_init(vcpu, kvm, id);
+ if (rc)
+ goto out_free_sie_block;
+@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
++ /* make sure the new values will be lazily loaded */
++ save_fpu_regs();
+ if (test_fp_ctl(fpu->fpc))
+ return -EINVAL;
+- memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+- vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+- save_fpu_regs();
+- load_fpu_from(&vcpu->arch.guest_fpregs);
++ current->thread.fpu.fpc = fpu->fpc;
++ if (MACHINE_HAS_VX)
++ convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
++ else
++ memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ return 0;
+ }
+
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+- memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+- fpu->fpc = vcpu->arch.guest_fpregs.fpc;
++ /* make sure we have the latest values */
++ save_fpu_regs();
++ if (MACHINE_HAS_VX)
++ convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
++ else
++ memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
++ fpu->fpc = current->thread.fpu.fpc;
+ return 0;
+ }
+
+@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
+ {
+ unsigned char archmode = 1;
++ freg_t fprs[NUM_FPRS];
+ unsigned int px;
+ u64 clkcomp;
+ int rc;
+
++ px = kvm_s390_get_prefix(vcpu);
+ if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
+ if (write_guest_abs(vcpu, 163, &archmode, 1))
+ return -EFAULT;
+- gpa = SAVE_AREA_BASE;
++ gpa = 0;
+ } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
+ if (write_guest_real(vcpu, 163, &archmode, 1))
+ return -EFAULT;
+- gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
++ gpa = px;
++ } else
++ gpa -= __LC_FPREGS_SAVE_AREA;
++
++ /* manually convert vector registers if necessary */
++ if (MACHINE_HAS_VX) {
++ convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
++ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++ fprs, 128);
++ } else {
++ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++ vcpu->run->s.regs.vrs, 128);
+ }
+- rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
+- vcpu->arch.guest_fpregs.fprs, 128);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
+ vcpu->run->s.regs.gprs, 128);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
++ rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
+ &vcpu->arch.sie_block->gpsw, 16);
+- px = kvm_s390_get_prefix(vcpu);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
++ rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
+ &px, 4);
+- rc |= write_guest_abs(vcpu,
+- gpa + offsetof(struct save_area, fp_ctrl_reg),
+- &vcpu->arch.guest_fpregs.fpc, 4);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
++ rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
++ &vcpu->run->s.regs.fpc, 4);
++ rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
+ &vcpu->arch.sie_block->todpr, 4);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
+ &vcpu->arch.sie_block->cputm, 8);
+ clkcomp = vcpu->arch.sie_block->ckc >> 8;
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
+ &clkcomp, 8);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
+ &vcpu->run->s.regs.acrs, 64);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
+ &vcpu->arch.sie_block->gcr, 128);
+ return rc ? -EFAULT : 0;
+ }
+@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ * it into the save area
+ */
+ save_fpu_regs();
+- if (test_kvm_facility(vcpu->kvm, 129)) {
+- /*
+- * If the vector extension is available, the vector registers
+- * which overlaps with floating-point registers are saved in
+- * the SIE-control block. Hence, extract the floating-point
+- * registers and the FPC value and store them in the
+- * guest_fpregs structure.
+- */
+- vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
+- convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
+- current->thread.fpu.vxrs);
+- } else
+- save_fpu_to(&vcpu->arch.guest_fpregs);
++ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ save_access_regs(vcpu->run->s.regs.acrs);
+
+ return kvm_s390_store_status_unloaded(vcpu, addr);
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88864e8..18c8b819b0aa 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ int i;
+
+ /* Normalize entries to being relative to the start of the section */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn += i;
++ p->fixup += i + 4;
++ }
+ sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ /* Denormalize all entries */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn -= i;
++ p->fixup -= i + 4;
++ }
+ }
+
+ #ifdef CONFIG_MODULES
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb27a3a..c690c8e16a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -413,7 +413,7 @@ out:
+
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+- int ret;
++ long ret;
+
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 47f1ff056a54..22a358ef1b0c 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
+ {
+ int pid, n, status;
+
++ fflush(stdout);
++
+ pid = fork();
+ if (pid == 0)
+ ptrace_child();
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 6a1ae3751e82..15cfebaa7688 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
+ * Interrupts are off on entry.
+ */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++ ASM_CLAC /* Do this early to minimize exposure */
+ SWAPGS
+
+ /*
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 881b4768644a..e7de5c9a4fbd 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
+
+ #define __ARCH_HAS_DO_SOFTIRQ
+
++struct irq_desc;
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
+ extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+-extern void irq_force_complete_move(int);
++extern void irq_force_complete_move(struct irq_desc *desc);
+ #endif
+
+ #ifdef CONFIG_HAVE_KVM
+@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+ extern void (*x86_platform_ipi_callback)(void);
+ extern void native_init_IRQ(void);
+
+-struct irq_desc;
+ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+
+ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index f25321894ad2..fdb0fbfb1197 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
+ {
+ int pin, ioapic, irq, irq_entry;
+ const struct cpumask *mask;
++ struct irq_desc *desc;
+ struct irq_data *idata;
+ struct irq_chip *chip;
+
+@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
+ if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
+ continue;
+
+- idata = irq_get_irq_data(irq);
++ desc = irq_to_desc(irq);
++ raw_spin_lock_irq(&desc->lock);
++ idata = irq_desc_get_irq_data(desc);
+
+ /*
+ * Honour affinities which have been set in early boot
+@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
+ /* Might be lapic_chip for irq 0 */
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(idata, mask, false);
++ raw_spin_unlock_irq(&desc->lock);
+ }
+ }
+ #endif
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 861bc59c8f25..a35f6b5473f4 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -30,7 +30,7 @@ struct apic_chip_data {
+
+ struct irq_domain *x86_vector_domain;
+ static DEFINE_RAW_SPINLOCK(vector_lock);
+-static cpumask_var_t vector_cpumask;
++static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
+ static struct irq_chip lapic_controller;
+ #ifdef CONFIG_X86_IO_APIC
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+- int cpu, err;
++ int cpu, vector;
+
+- if (d->move_in_progress)
++ /*
++ * If there is still a move in progress or the previous move has not
++ * been cleaned up completely, tell the caller to come back later.
++ */
++ if (d->move_in_progress ||
++ cpumask_intersects(d->old_domain, cpu_online_mask))
+ return -EBUSY;
+
+ /* Only try and allocate irqs on cpus that are present */
+- err = -ENOSPC;
+ cpumask_clear(d->old_domain);
++ cpumask_clear(searched_cpumask);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+- int new_cpu, vector, offset;
++ int new_cpu, offset;
+
++ /* Get the possible target cpus for @mask/@cpu from the apic */
+ apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+
++ /*
++ * Clear the offline cpus from @vector_cpumask for searching
++ * and verify whether the result overlaps with @mask. If true,
++ * then the call to apic->cpu_mask_to_apicid_and() will
++ * succeed as well. If not, no point in trying to find a
++ * vector in this mask.
++ */
++ cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
++ if (!cpumask_intersects(vector_searchmask, mask))
++ goto next_cpu;
++
+ if (cpumask_subset(vector_cpumask, d->domain)) {
+- err = 0;
+ if (cpumask_equal(vector_cpumask, d->domain))
+- break;
++ goto success;
+ /*
+- * New cpumask using the vector is a proper subset of
+- * the current in use mask. So cleanup the vector
+- * allocation for the members that are not used anymore.
++ * Mark the cpus which are not longer in the mask for
++ * cleanup.
+ */
+- cpumask_andnot(d->old_domain, d->domain,
+- vector_cpumask);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- cpumask_and(d->domain, d->domain, vector_cpumask);
+- break;
++ cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
++ vector = d->cfg.vector;
++ goto update;
+ }
+
+ vector = current_vector;
+@@ -156,45 +168,60 @@ next:
+ vector = FIRST_EXTERNAL_VECTOR + offset;
+ }
+
+- if (unlikely(current_vector == vector)) {
+- cpumask_or(d->old_domain, d->old_domain,
+- vector_cpumask);
+- cpumask_andnot(vector_cpumask, mask, d->old_domain);
+- cpu = cpumask_first_and(vector_cpumask,
+- cpu_online_mask);
+- continue;
+- }
++ /* If the search wrapped around, try the next cpu */
++ if (unlikely(current_vector == vector))
++ goto next_cpu;
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
++ for_each_cpu(new_cpu, vector_searchmask) {
+ if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
+ goto next;
+ }
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+- if (d->cfg.vector) {
++ /* Schedule the old vector for cleanup on all cpus */
++ if (d->cfg.vector)
+ cpumask_copy(d->old_domain, d->domain);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- }
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
++ for_each_cpu(new_cpu, vector_searchmask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+- d->cfg.vector = vector;
+- cpumask_copy(d->domain, vector_cpumask);
+- err = 0;
+- break;
+- }
++ goto update;
+
+- if (!err) {
+- /* cache destination APIC IDs into cfg->dest_apicid */
+- err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+- &d->cfg.dest_apicid);
++next_cpu:
++ /*
++ * We exclude the current @vector_cpumask from the requested
++ * @mask and try again with the next online cpu in the
++ * result. We cannot modify @mask, so we use @vector_cpumask
++ * as a temporary buffer here as it will be reassigned when
++ * calling apic->vector_allocation_domain() above.
++ */
++ cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
++ cpumask_andnot(vector_cpumask, mask, searched_cpumask);
++ cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
++ continue;
+ }
++ return -ENOSPC;
+
+- return err;
++update:
++ /*
++ * Exclude offline cpus from the cleanup mask and set the
++ * move_in_progress flag when the result is not empty.
++ */
++ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
++ d->move_in_progress = !cpumask_empty(d->old_domain);
++ d->cfg.vector = vector;
++ cpumask_copy(d->domain, vector_cpumask);
++success:
++ /*
++ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
++ * as we already established, that mask & d->domain & cpu_online_mask
++ * is not empty.
++ */
++ BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
++ &d->cfg.dest_apicid));
++ return 0;
+ }
+
+ static int assign_irq_vector(int irq, struct apic_chip_data *data,
+@@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
+ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ {
+ struct irq_desc *desc;
+- unsigned long flags;
+ int cpu, vector;
+
+- raw_spin_lock_irqsave(&vector_lock, flags);
+ BUG_ON(!data->cfg.vector);
+
+ vector = data->cfg.vector;
+@@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ data->cfg.vector = 0;
+ cpumask_clear(data->domain);
+
+- if (likely(!data->move_in_progress)) {
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
++ /*
++ * If move is in progress or the old_domain mask is not empty,
++ * i.e. the cleanup IPI has not been processed yet, we need to remove
++ * the old references to desc from all cpus vector tables.
++ */
++ if (!data->move_in_progress && cpumask_empty(data->old_domain))
+ return;
+- }
+
+ desc = irq_to_desc(irq);
+ for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
+@@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ }
+ }
+ data->move_in_progress = 0;
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
+ }
+
+ void init_irq_alloc_info(struct irq_alloc_info *info,
+@@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
+ static void x86_vector_free_irqs(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+ {
++ struct apic_chip_data *apic_data;
+ struct irq_data *irq_data;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+ if (irq_data && irq_data->chip_data) {
++ raw_spin_lock_irqsave(&vector_lock, flags);
+ clear_irq_vector(virq + i, irq_data->chip_data);
+- free_apic_chip_data(irq_data->chip_data);
++ apic_data = irq_data->chip_data;
++ irq_domain_reset_irq_data(irq_data);
++ raw_spin_unlock_irqrestore(&vector_lock, flags);
++ free_apic_chip_data(apic_data);
+ #ifdef CONFIG_X86_IO_APIC
+ if (virq + i < nr_legacy_irqs())
+ legacy_irq_data[virq + i] = NULL;
+ #endif
+- irq_domain_reset_irq_data(irq_data);
+ }
+ }
+ }
+@@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
+ arch_init_htirq_domain(x86_vector_domain);
+
+ BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+
+ return arch_early_ioapic_init();
+ }
+@@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, data, dest);
+- if (err) {
+- if (assign_irq_vector(irq, data,
+- irq_data_get_affinity_mask(irq_data)))
+- pr_err("Failed to recover vector for irq %d\n", irq);
+- return err;
+- }
+-
+- return IRQ_SET_MASK_OK;
++ return err ? err : IRQ_SET_MASK_OK;
+ }
+
+ static struct irq_chip lapic_controller = {
+@@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
+ #ifdef CONFIG_SMP
+ static void __send_cleanup_vector(struct apic_chip_data *data)
+ {
+- cpumask_var_t cleanup_mask;
+-
+- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+- unsigned int i;
+-
+- for_each_cpu_and(i, data->old_domain, cpu_online_mask)
+- apic->send_IPI_mask(cpumask_of(i),
+- IRQ_MOVE_CLEANUP_VECTOR);
+- } else {
+- cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
+- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+- free_cpumask_var(cleanup_mask);
+- }
++ raw_spin_lock(&vector_lock);
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+ data->move_in_progress = 0;
++ if (!cpumask_empty(data->old_domain))
++ apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
++ raw_spin_unlock(&vector_lock);
+ }
+
+ void send_cleanup_vector(struct irq_cfg *cfg)
+@@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ goto unlock;
+
+ /*
+- * Check if the irq migration is in progress. If so, we
+- * haven't received the cleanup request yet for this irq.
++ * Nothing to cleanup if irq migration is in progress
++ * or this cpu is not set in the cleanup mask.
+ */
+- if (data->move_in_progress)
++ if (data->move_in_progress ||
++ !cpumask_test_cpu(me, data->old_domain))
+ goto unlock;
+
++ /*
++ * We have two cases to handle here:
++ * 1) vector is unchanged but the target mask got reduced
++ * 2) vector and the target mask has changed
++ *
++ * #1 is obvious, but in #2 we have two vectors with the same
++ * irq descriptor: the old and the new vector. So we need to
++ * make sure that we only cleanup the old vector. The new
++ * vector has the current @vector number in the config and
++ * this cpu is part of the target mask. We better leave that
++ * one alone.
++ */
+ if (vector == data->cfg.vector &&
+ cpumask_test_cpu(me, data->domain))
+ goto unlock;
+@@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ goto unlock;
+ }
+ __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++ cpumask_clear_cpu(me, data->old_domain);
+ unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+@@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
+ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+ }
+
+-void irq_force_complete_move(int irq)
++/*
++ * Called with @desc->lock held and interrupts disabled.
++ */
++void irq_force_complete_move(struct irq_desc *desc)
+ {
+- struct irq_cfg *cfg = irq_cfg(irq);
++ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
++ struct apic_chip_data *data = apic_chip_data(irqdata);
++ struct irq_cfg *cfg = data ? &data->cfg : NULL;
+
+- if (cfg)
+- __irq_complete_move(cfg, cfg->vector);
++ if (!cfg)
++ return;
++
++ __irq_complete_move(cfg, cfg->vector);
++
++ /*
++ * This is tricky. If the cleanup of @data->old_domain has not been
++ * done yet, then the following setaffinity call will fail with
++ * -EBUSY. This can leave the interrupt in a stale state.
++ *
++ * The cleanup cannot make progress because we hold @desc->lock. So in
++ * case @data->old_domain is not yet cleaned up, we need to drop the
++ * lock and acquire it again. @desc cannot go away, because the
++ * hotplug code holds the sparse irq lock.
++ */
++ raw_spin_lock(&vector_lock);
++ /* Clean out all offline cpus (including ourself) first. */
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
++ while (!cpumask_empty(data->old_domain)) {
++ raw_spin_unlock(&vector_lock);
++ raw_spin_unlock(&desc->lock);
++ cpu_relax();
++ raw_spin_lock(&desc->lock);
++ /*
++ * Reevaluate apic_chip_data. It might have been cleared after
++ * we dropped @desc->lock.
++ */
++ data = apic_chip_data(irqdata);
++ if (!data)
++ return;
++ raw_spin_lock(&vector_lock);
++ }
++ raw_spin_unlock(&vector_lock);
+ }
+ #endif
+
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index f8062aaf5df9..61521dc19c10 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -462,7 +462,7 @@ void fixup_irqs(void)
+ * non intr-remapping case, we can't wait till this interrupt
+ * arrives at this cpu before completing the irq move.
+ */
+- irq_force_complete_move(irq);
++ irq_force_complete_move(desc);
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ break_affinity = 1;
+@@ -470,6 +470,15 @@ void fixup_irqs(void)
+ }
+
+ chip = irq_data_get_irq_chip(data);
++ /*
++ * The interrupt descriptor might have been cleaned up
++ * already, but it is not yet removed from the radix tree
++ */
++ if (!chip) {
++ raw_spin_unlock(&desc->lock);
++ continue;
++ }
++
+ if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
+ chip->irq_mask(data);
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 1505587d06e9..b9b09fec173b 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ u16 sel;
+
+ la = seg_base(ctxt, addr.seg) + addr.ea;
+- *linear = la;
+ *max_size = 0;
+ switch (mode) {
+ case X86EMUL_MODE_PROT64:
++ *linear = la;
+ if (is_noncanonical_address(la))
+ goto bad;
+
+@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ goto bad;
+ break;
+ default:
++ *linear = la = (u32)la;
+ usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+ addr.seg);
+ if (!usable)
+@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ if (size > *max_size)
+ goto bad;
+ }
+- la &= (u32)-1;
+ break;
+ }
+ if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 3058a22a658d..7be8a251363e 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ return ret;
+
+ kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
+- walker->ptes[level] = pte;
++ walker->ptes[level - 1] = pte;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9a2ed8904513..6ef3856aab4b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ }
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index b2fd67da1701..ef05755a1900 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ break;
+ }
+
+- if (regno > nr_registers) {
++ if (regno >= nr_registers) {
+ WARN_ONCE(1, "decoded an instruction with an invalid register");
+ return -EINVAL;
+ }
+diff --git a/block/bio.c b/block/bio.c
+index 4f184d938942..d4d144363250 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+- * don't copy into a random user address space, just free.
++ * don't copy into a random user address space, just free
++ * and return -EINTR so user space doesn't expect any data.
+ */
+- if (current->mm && bio_data_dir(bio) == READ)
++ if (!current->mm)
++ ret = -EINTR;
++ else if (bio_data_dir(bio) == READ)
+ ret = bio_copy_to_iter(bio, bmd->iter);
+ if (bmd->is_our_pages)
+ bio_free_pages(bio);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 3405f7a41e25..5fdac394207a 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = {
+ * as brightness control does not work.
+ */
+ {
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
++ .callback = video_disable_backlight_sysfs_if,
++ .ident = "Toshiba Portege R700",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
++ },
++ },
++ {
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Portege R830",
+@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
+ },
+ },
++ {
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
++ .callback = video_disable_backlight_sysfs_if,
++ .ident = "Toshiba Satellite R830",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
++ },
++ },
+ /*
+ * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
+ * but the IDs actually follow the Device ID Scheme.
1500 +diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
1501 +index aa45d4802707..11d8209e6e5d 100644
1502 +--- a/drivers/acpi/nfit.c
1503 ++++ b/drivers/acpi/nfit.c
1504 +@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
1505 + nfit_mem->bdw = NULL;
1506 + }
1507 +
1508 +-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1509 ++static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
1510 + struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
1511 + {
1512 + u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
1513 + struct nfit_memdev *nfit_memdev;
1514 + struct nfit_flush *nfit_flush;
1515 +- struct nfit_dcr *nfit_dcr;
1516 + struct nfit_bdw *nfit_bdw;
1517 + struct nfit_idt *nfit_idt;
1518 + u16 idt_idx, range_index;
1519 +
1520 +- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1521 +- if (nfit_dcr->dcr->region_index != dcr)
1522 +- continue;
1523 +- nfit_mem->dcr = nfit_dcr->dcr;
1524 +- break;
1525 +- }
1526 +-
1527 +- if (!nfit_mem->dcr) {
1528 +- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
1529 +- spa->range_index, __to_nfit_memdev(nfit_mem)
1530 +- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
1531 +- return -ENODEV;
1532 +- }
1533 +-
1534 +- /*
1535 +- * We've found enough to create an nvdimm, optionally
1536 +- * find an associated BDW
1537 +- */
1538 +- list_add(&nfit_mem->list, &acpi_desc->dimms);
1539 +-
1540 + list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
1541 + if (nfit_bdw->bdw->region_index != dcr)
1542 + continue;
1543 +@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1544 + }
1545 +
1546 + if (!nfit_mem->bdw)
1547 +- return 0;
1548 ++ return;
1549 +
1550 + nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
1551 +
1552 + if (!nfit_mem->spa_bdw)
1553 +- return 0;
1554 ++ return;
1555 +
1556 + range_index = nfit_mem->spa_bdw->range_index;
1557 + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1558 +@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1559 + }
1560 + break;
1561 + }
1562 +-
1563 +- return 0;
1564 + }
1565 +
1566 + static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1567 +@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1568 + struct nfit_mem *nfit_mem, *found;
1569 + struct nfit_memdev *nfit_memdev;
1570 + int type = nfit_spa_type(spa);
1571 +- u16 dcr;
1572 +
1573 + switch (type) {
1574 + case NFIT_SPA_DCR:
1575 +@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1576 + }
1577 +
1578 + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1579 +- int rc;
1580 ++ struct nfit_dcr *nfit_dcr;
1581 ++ u32 device_handle;
1582 ++ u16 dcr;
1583 +
1584 + if (nfit_memdev->memdev->range_index != spa->range_index)
1585 + continue;
1586 + found = NULL;
1587 + dcr = nfit_memdev->memdev->region_index;
1588 ++ device_handle = nfit_memdev->memdev->device_handle;
1589 + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1590 +- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
1591 ++ if (__to_nfit_memdev(nfit_mem)->device_handle
1592 ++ == device_handle) {
1593 + found = nfit_mem;
1594 + break;
1595 + }
1596 +@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1597 + if (!nfit_mem)
1598 + return -ENOMEM;
1599 + INIT_LIST_HEAD(&nfit_mem->list);
1600 ++ list_add(&nfit_mem->list, &acpi_desc->dimms);
1601 ++ }
1602 ++
1603 ++ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1604 ++ if (nfit_dcr->dcr->region_index != dcr)
1605 ++ continue;
1606 ++ /*
1607 ++ * Record the control region for the dimm. For
1608 ++ * the ACPI 6.1 case, where there are separate
1609 ++ * control regions for the pmem vs blk
1610 ++ * interfaces, be sure to record the extended
1611 ++ * blk details.
1612 ++ */
1613 ++ if (!nfit_mem->dcr)
1614 ++ nfit_mem->dcr = nfit_dcr->dcr;
1615 ++ else if (nfit_mem->dcr->windows == 0
1616 ++ && nfit_dcr->dcr->windows)
1617 ++ nfit_mem->dcr = nfit_dcr->dcr;
1618 ++ break;
1619 ++ }
1620 ++
1621 ++ if (dcr && !nfit_mem->dcr) {
1622 ++ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
1623 ++ spa->range_index, dcr);
1624 ++ return -ENODEV;
1625 + }
1626 +
1627 + if (type == NFIT_SPA_DCR) {
1628 +@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1629 + nfit_mem->idt_dcr = nfit_idt->idt;
1630 + break;
1631 + }
1632 ++ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
1633 + } else {
1634 + /*
1635 + * A single dimm may belong to multiple SPA-PM
1636 +@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1637 + */
1638 + nfit_mem->memdev_pmem = nfit_memdev->memdev;
1639 + }
1640 +-
1641 +- if (found)
1642 +- continue;
1643 +-
1644 +- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
1645 +- if (rc)
1646 +- return rc;
1647 + }
1648 +
1649 + return 0;
1650 +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1651 +index daaf1c4e1e0f..80e55cb0827b 100644
1652 +--- a/drivers/acpi/video_detect.c
1653 ++++ b/drivers/acpi/video_detect.c
1654 +@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
1655 + DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
1656 + },
1657 + },
1658 +- {
1659 +- .callback = video_detect_force_vendor,
1660 +- .ident = "Dell Inspiron 5737",
1661 +- .matches = {
1662 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1663 +- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
1664 +- },
1665 +- },
1666 +
1667 + /*
1668 + * These models have a working acpi_video backlight control, and using
1669 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1670 +index a39e85f9efa9..7d00b7a015ea 100644
1671 +--- a/drivers/android/binder.c
1672 ++++ b/drivers/android/binder.c
1673 +@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
1674 + if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1675 + return -EFAULT;
1676 +
1677 +- ptr += sizeof(void *);
1678 ++ ptr += sizeof(cookie);
1679 + list_for_each_entry(w, &proc->delivered_death, entry) {
1680 + struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
1681 +
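[Note: the binder fix makes the cursor advance by the width of what was actually read: `binder_uintptr_t` is a fixed-width type (64-bit in the common configuration), while `sizeof(void *)` follows the kernel's pointer size and diverges on mixed 32/64-bit setups. Stated as a pattern, with hypothetical names:

    /* Sketch: step a userspace cursor by sizeof(variable), not a guess */
    static int read_cookie(void __user **pp, u64 *out)
    {
            if (get_user(*out, (u64 __user *)*pp))
                    return -EFAULT;
            *pp += sizeof(*out);    /* always 8 bytes, on every arch */
            return 0;
    }
]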
1682 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1683 +index cdf6215a9a22..7dbba387d12a 100644
1684 +--- a/drivers/ata/libata-sff.c
1685 ++++ b/drivers/ata/libata-sff.c
1686 +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
1687 + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1688 + {
1689 + struct ata_port *ap = qc->ap;
1690 +- unsigned long flags;
1691 +
1692 + if (ap->ops->error_handler) {
1693 + if (in_wq) {
1694 +- spin_lock_irqsave(ap->lock, flags);
1695 +-
1696 + /* EH might have kicked in while host lock is
1697 + * released.
1698 + */
1699 +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1700 + } else
1701 + ata_port_freeze(ap);
1702 + }
1703 +-
1704 +- spin_unlock_irqrestore(ap->lock, flags);
1705 + } else {
1706 + if (likely(!(qc->err_mask & AC_ERR_HSM)))
1707 + ata_qc_complete(qc);
1708 +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1709 + }
1710 + } else {
1711 + if (in_wq) {
1712 +- spin_lock_irqsave(ap->lock, flags);
1713 + ata_sff_irq_on(ap);
1714 + ata_qc_complete(qc);
1715 +- spin_unlock_irqrestore(ap->lock, flags);
1716 + } else
1717 + ata_qc_complete(qc);
1718 + }
1719 +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1720 + {
1721 + struct ata_link *link = qc->dev->link;
1722 + struct ata_eh_info *ehi = &link->eh_info;
1723 +- unsigned long flags = 0;
1724 + int poll_next;
1725 +
1726 ++ lockdep_assert_held(ap->lock);
1727 ++
1728 + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1729 +
1730 + /* Make sure ata_sff_qc_issue() does not throw things
1731 +@@ -1112,14 +1106,6 @@ fsm_start:
1732 + }
1733 + }
1734 +
1735 +- /* Send the CDB (atapi) or the first data block (ata pio out).
1736 +- * During the state transition, interrupt handler shouldn't
1737 +- * be invoked before the data transfer is complete and
1738 +- * hsm_task_state is changed. Hence, the following locking.
1739 +- */
1740 +- if (in_wq)
1741 +- spin_lock_irqsave(ap->lock, flags);
1742 +-
1743 + if (qc->tf.protocol == ATA_PROT_PIO) {
1744 + /* PIO data out protocol.
1745 + * send first data block.
1746 +@@ -1135,9 +1121,6 @@ fsm_start:
1747 + /* send CDB */
1748 + atapi_send_cdb(ap, qc);
1749 +
1750 +- if (in_wq)
1751 +- spin_unlock_irqrestore(ap->lock, flags);
1752 +-
1753 + /* if polling, ata_sff_pio_task() handles the rest.
1754 + * otherwise, interrupt handler takes over from here.
1755 + */
1756 +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1757 + u8 status;
1758 + int poll_next;
1759 +
1760 ++ spin_lock_irq(ap->lock);
1761 ++
1762 + BUG_ON(ap->sff_pio_task_link == NULL);
1763 + /* qc can be NULL if timeout occurred */
1764 + qc = ata_qc_from_tag(ap, link->active_tag);
1765 + if (!qc) {
1766 + ap->sff_pio_task_link = NULL;
1767 +- return;
1768 ++ goto out_unlock;
1769 + }
1770 +
1771 + fsm_start:
1772 +@@ -1381,11 +1366,14 @@ fsm_start:
1773 + */
1774 + status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1775 + if (status & ATA_BUSY) {
1776 ++ spin_unlock_irq(ap->lock);
1777 + ata_msleep(ap, 2);
1778 ++ spin_lock_irq(ap->lock);
1779 ++
1780 + status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1781 + if (status & ATA_BUSY) {
1782 + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1783 +- return;
1784 ++ goto out_unlock;
1785 + }
1786 + }
1787 +
1788 +@@ -1402,6 +1390,8 @@ fsm_start:
1789 + */
1790 + if (poll_next)
1791 + goto fsm_start;
1792 ++out_unlock:
1793 ++ spin_unlock_irq(ap->lock);
1794 + }
1795 +
1796 + /**
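[Note: the libata-sff restructuring moves the lock out of ata_sff_hsm_move() into its callers, so the function body has a single locking rule instead of a conditional `in_wq` dance, and lockdep_assert_held() both documents and enforces that rule. The resulting shape, sketched with hypothetical names:

    /* Sketch: callee asserts the rule, caller owns the lock */
    static void hsm_move_sketch(struct ata_port *ap)
    {
            lockdep_assert_held(ap->lock);
            /* ... advance the host state machine ... */
    }

    static void pio_task_sketch(struct ata_port *ap)
    {
            spin_lock_irq(ap->lock);
            hsm_move_sketch(ap);
            /* drop the lock only around sleeps, as done for ata_msleep() */
            spin_unlock_irq(ap->lock);
    }
]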
1797 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1798 +index 92f0ee388f9e..968897108c76 100644
1799 +--- a/drivers/bluetooth/btusb.c
1800 ++++ b/drivers/bluetooth/btusb.c
1801 +@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
1802 + { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
1803 + .driver_info = BTUSB_BCM_PATCHRAM },
1804 +
1805 ++ /* Toshiba Corp - Broadcom based */
1806 ++ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
1807 ++ .driver_info = BTUSB_BCM_PATCHRAM },
1808 ++
1809 + /* Intel Bluetooth USB Bootloader (RAM module) */
1810 + { USB_DEVICE(0x8087, 0x0a5a),
1811 + .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
1812 +diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
1813 +index 2fe37f708dc7..813003d6ce09 100644
1814 +--- a/drivers/clk/samsung/clk-cpu.c
1815 ++++ b/drivers/clk/samsung/clk-cpu.c
1816 +@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1817 + unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
1818 + unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
1819 + unsigned long div0, div1 = 0, mux_reg;
1820 ++ unsigned long flags;
1821 +
1822 + /* find out the divider values to use for clock data */
1823 + while ((cfg_data->prate * 1000) != ndata->new_rate) {
1824 +@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1825 + cfg_data++;
1826 + }
1827 +
1828 +- spin_lock(cpuclk->lock);
1829 ++ spin_lock_irqsave(cpuclk->lock, flags);
1830 +
1831 + /*
1832 + * For the selected PLL clock frequency, get the pre-defined divider
1833 +@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1834 + DIV_MASK_ALL);
1835 + }
1836 +
1837 +- spin_unlock(cpuclk->lock);
1838 ++ spin_unlock_irqrestore(cpuclk->lock, flags);
1839 + return 0;
1840 + }
1841 +
1842 +@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1843 + const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
1844 + unsigned long div = 0, div_mask = DIV_MASK;
1845 + unsigned long mux_reg;
1846 ++ unsigned long flags;
1847 +
1848 + /* find out the divider values to use for clock data */
1849 + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
1850 +@@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1851 + }
1852 + }
1853 +
1854 +- spin_lock(cpuclk->lock);
1855 ++ spin_lock_irqsave(cpuclk->lock, flags);
1856 +
1857 + /* select mout_apll as the alternate parent */
1858 + mux_reg = readl(base + E4210_SRC_CPU);
1859 +@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1860 + }
1861 +
1862 + exynos_set_safe_div(base, div, div_mask);
1863 +- spin_unlock(cpuclk->lock);
1864 ++ spin_unlock_irqrestore(cpuclk->lock, flags);
1865 + return 0;
1866 + }
1867 +
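[Note: plain spin_lock() in the clk-cpu notifiers can deadlock if the same lock is ever taken from interrupt context: an IRQ arriving on a CPU that already holds it spins forever. The _irqsave variants close that window by disabling local interrupts for the critical section:

    /* Sketch: lock that may be shared with interrupt context */
    unsigned long flags;

    spin_lock_irqsave(cpuclk->lock, flags);   /* IRQs off on this CPU */
    /* ... reprogram dividers / mux ... */
    spin_unlock_irqrestore(cpuclk->lock, flags);

spin_lock_irqsave() is also the conservative choice when the caller may itself run with interrupts already disabled, since it restores the previous IRQ state rather than unconditionally re-enabling.]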
1868 +diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
1869 +index 6ee91401918e..4da2af9694a2 100644
1870 +--- a/drivers/clocksource/tcb_clksrc.c
1871 ++++ b/drivers/clocksource/tcb_clksrc.c
1872 +@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
1873 +
1874 + __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
1875 + __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
1876 +- clk_disable(tcd->clk);
1877 ++ if (!clockevent_state_detached(d))
1878 ++ clk_disable(tcd->clk);
1879 +
1880 + return 0;
1881 + }
1882 +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
1883 +index a92e94b40b5b..dfc3bb410b00 100644
1884 +--- a/drivers/clocksource/vt8500_timer.c
1885 ++++ b/drivers/clocksource/vt8500_timer.c
1886 +@@ -50,6 +50,8 @@
1887 +
1888 + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
1889 +
1890 ++#define MIN_OSCR_DELTA 16
1891 ++
1892 + static void __iomem *regbase;
1893 +
1894 + static cycle_t vt8500_timer_read(struct clocksource *cs)
1895 +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
1896 + cpu_relax();
1897 + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
1898 +
1899 +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
1900 ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
1901 + return -ETIME;
1902 +
1903 + writel(1, regbase + TIMER_IER_VAL);
1904 +@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
1905 + pr_err("%s: setup_irq failed for %s\n", __func__,
1906 + clockevent.name);
1907 + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
1908 +- 4, 0xf0000000);
1909 ++ MIN_OSCR_DELTA * 2, 0xf0000000);
1910 + }
1911 +
1912 + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
1913 +diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
1914 +index b260576ddb12..d994b0f652d3 100644
1915 +--- a/drivers/cpufreq/cpufreq_governor.c
1916 ++++ b/drivers/cpufreq/cpufreq_governor.c
1917 +@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
1918 + if (!have_governor_per_policy())
1919 + cdata->gdbs_data = dbs_data;
1920 +
1921 ++ policy->governor_data = dbs_data;
1922 ++
1923 + ret = sysfs_create_group(get_governor_parent_kobj(policy),
1924 + get_sysfs_attr(dbs_data));
1925 + if (ret)
1926 + goto reset_gdbs_data;
1927 +
1928 +- policy->governor_data = dbs_data;
1929 +-
1930 + return 0;
1931 +
1932 + reset_gdbs_data:
1933 ++ policy->governor_data = NULL;
1934 ++
1935 + if (!have_governor_per_policy())
1936 + cdata->gdbs_data = NULL;
1937 + cdata->exit(dbs_data, !policy->governor->initialized);
1938 +@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
1939 + if (!cdbs->shared || cdbs->shared->policy)
1940 + return -EBUSY;
1941 +
1942 +- policy->governor_data = NULL;
1943 + if (!--dbs_data->usage_count) {
1944 + sysfs_remove_group(get_governor_parent_kobj(policy),
1945 + get_sysfs_attr(dbs_data));
1946 +
1947 ++ policy->governor_data = NULL;
1948 ++
1949 + if (!have_governor_per_policy())
1950 + cdata->gdbs_data = NULL;
1951 +
1952 + cdata->exit(dbs_data, policy->governor->initialized == 1);
1953 + kfree(dbs_data);
1954 ++ } else {
1955 ++ policy->governor_data = NULL;
1956 + }
1957 +
1958 + free_common_dbs_info(policy, cdata);
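[Note: the cpufreq_governor reordering removes a window in which the sysfs attributes exist but policy->governor_data is still NULL, so an attribute read racing with init would dereference a NULL pointer. The safe ordering is publish first, unwind on failure (kobj and attr_group below stand in for the real arguments):

    /* Sketch: publish state before exposing it, unpublish before freeing */
    policy->governor_data = dbs_data;
    ret = sysfs_create_group(kobj, attr_group);
    if (ret) {
            policy->governor_data = NULL;
            return ret;
    }
]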
1959 +diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
1960 +index 1d99c97defa9..096377232747 100644
1961 +--- a/drivers/cpufreq/pxa2xx-cpufreq.c
1962 ++++ b/drivers/cpufreq/pxa2xx-cpufreq.c
1963 +@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
1964 + }
1965 + }
1966 + #else
1967 +-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
1968 ++static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
1969 + {
1970 + return 0;
1971 + }
1972 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1973 +index 370c661c7d7b..fa00f3a186da 100644
1974 +--- a/drivers/dma/at_xdmac.c
1975 ++++ b/drivers/dma/at_xdmac.c
1976 +@@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1977 + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1978 + at_xdmac_remove_xfer(atchan, desc);
1979 +
1980 ++ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1981 + clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1982 + spin_unlock_irqrestore(&atchan->lock, flags);
1983 +
1984 +@@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
1985 + atchan = to_at_xdmac_chan(chan);
1986 + at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
1987 + if (at_xdmac_chan_is_cyclic(atchan)) {
1988 ++ if (at_xdmac_chan_is_paused(atchan))
1989 ++ at_xdmac_device_resume(chan);
1990 + at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1991 + at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1992 + at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1993 +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
1994 +index 7067b6ddc1db..4f099ea29f83 100644
1995 +--- a/drivers/dma/dw/core.c
1996 ++++ b/drivers/dma/dw/core.c
1997 +@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
1998 +
1999 + /* Called with dwc->lock held and all DMAC interrupts disabled */
2000 + static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
2001 +- u32 status_err, u32 status_xfer)
2002 ++ u32 status_block, u32 status_err, u32 status_xfer)
2003 + {
2004 + unsigned long flags;
2005 +
2006 +- if (dwc->mask) {
2007 ++ if (status_block & dwc->mask) {
2008 + void (*callback)(void *param);
2009 + void *callback_param;
2010 +
2011 + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
2012 + channel_readl(dwc, LLP));
2013 ++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2014 +
2015 + callback = dwc->cdesc->period_callback;
2016 + callback_param = dwc->cdesc->period_callback_param;
2017 +@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
2018 + channel_writel(dwc, CTL_LO, 0);
2019 + channel_writel(dwc, CTL_HI, 0);
2020 +
2021 ++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2022 + dma_writel(dw, CLEAR.ERROR, dwc->mask);
2023 + dma_writel(dw, CLEAR.XFER, dwc->mask);
2024 +
2025 +@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
2026 +
2027 + spin_unlock_irqrestore(&dwc->lock, flags);
2028 + }
2029 ++
2030 ++ /* Re-enable interrupts */
2031 ++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
2032 + }
2033 +
2034 + /* ------------------------------------------------------------------------- */
2035 +@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
2036 + {
2037 + struct dw_dma *dw = (struct dw_dma *)data;
2038 + struct dw_dma_chan *dwc;
2039 ++ u32 status_block;
2040 + u32 status_xfer;
2041 + u32 status_err;
2042 + int i;
2043 +
2044 ++ status_block = dma_readl(dw, RAW.BLOCK);
2045 + status_xfer = dma_readl(dw, RAW.XFER);
2046 + status_err = dma_readl(dw, RAW.ERROR);
2047 +
2048 +@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
2049 + for (i = 0; i < dw->dma.chancnt; i++) {
2050 + dwc = &dw->chan[i];
2051 + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
2052 +- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
2053 ++ dwc_handle_cyclic(dw, dwc, status_block, status_err,
2054 ++ status_xfer);
2055 + else if (status_err & (1 << i))
2056 + dwc_handle_error(dw, dwc);
2057 + else if (status_xfer & (1 << i))
2058 + dwc_scan_descriptors(dw, dwc);
2059 + }
2060 +
2061 +- /*
2062 +- * Re-enable interrupts.
2063 +- */
2064 ++ /* Re-enable interrupts */
2065 + channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
2066 + channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
2067 + }
2068 +@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
2069 + * softirq handler.
2070 + */
2071 + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
2072 ++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2073 + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
2074 +
2075 + status = dma_readl(dw, STATUS_INT);
2076 +@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
2077 +
2078 + /* Try to recover */
2079 + channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
2080 ++ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
2081 + channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
2082 + channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
2083 + channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
2084 +@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
2085 + dma_writel(dw, CFG, 0);
2086 +
2087 + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
2088 ++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2089 + channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
2090 + channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
2091 + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
2092 +@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
2093 +
2094 + /* Disable interrupts */
2095 + channel_clear_bit(dw, MASK.XFER, dwc->mask);
2096 ++ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
2097 + channel_clear_bit(dw, MASK.ERROR, dwc->mask);
2098 +
2099 + spin_unlock_irqrestore(&dwc->lock, flags);
2100 +@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
2101 + int dw_dma_cyclic_start(struct dma_chan *chan)
2102 + {
2103 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
2104 +- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
2105 ++ struct dw_dma *dw = to_dw_dma(chan->device);
2106 + unsigned long flags;
2107 +
2108 + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
2109 +@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
2110 +
2111 + spin_lock_irqsave(&dwc->lock, flags);
2112 +
2113 +- /* Assert channel is idle */
2114 +- if (dma_readl(dw, CH_EN) & dwc->mask) {
2115 +- dev_err(chan2dev(&dwc->chan),
2116 +- "%s: BUG: Attempted to start non-idle channel\n",
2117 +- __func__);
2118 +- dwc_dump_chan_regs(dwc);
2119 +- spin_unlock_irqrestore(&dwc->lock, flags);
2120 +- return -EBUSY;
2121 +- }
2122 +-
2123 +- dma_writel(dw, CLEAR.ERROR, dwc->mask);
2124 +- dma_writel(dw, CLEAR.XFER, dwc->mask);
2125 ++ /* Enable interrupts to perform cyclic transfer */
2126 ++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
2127 +
2128 +- /* Setup DMAC channel registers */
2129 +- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
2130 +- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
2131 +- channel_writel(dwc, CTL_HI, 0);
2132 +-
2133 +- channel_set_bit(dw, CH_EN, dwc->mask);
2134 ++ dwc_dostart(dwc, dwc->cdesc->desc[0]);
2135 +
2136 + spin_unlock_irqrestore(&dwc->lock, flags);
2137 +
2138 +@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
2139 +
2140 + dwc_chan_disable(dw, dwc);
2141 +
2142 ++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2143 + dma_writel(dw, CLEAR.ERROR, dwc->mask);
2144 + dma_writel(dw, CLEAR.XFER, dwc->mask);
2145 +
2146 +@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2147 + /* Force dma off, just in case */
2148 + dw_dma_off(dw);
2149 +
2150 +- /* Disable BLOCK interrupts as well */
2151 +- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2152 +-
2153 + /* Create a pool of consistent memory blocks for hardware descriptors */
2154 + dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
2155 + sizeof(struct dw_desc), 4, 0);
2156 +diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
2157 +index 592af5f0cf39..53587377e672 100644
2158 +--- a/drivers/edac/edac_device.c
2159 ++++ b/drivers/edac/edac_device.c
2160 +@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
2161 + */
2162 + void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
2163 + {
2164 +- int status;
2165 +-
2166 + if (!edac_dev->edac_check)
2167 + return;
2168 +
2169 +- status = cancel_delayed_work(&edac_dev->work);
2170 +- if (status == 0) {
2171 +- /* workq instance might be running, wait for it */
2172 +- flush_workqueue(edac_workqueue);
2173 +- }
2174 ++ edac_dev->op_state = OP_OFFLINE;
2175 ++
2176 ++ cancel_delayed_work_sync(&edac_dev->work);
2177 ++ flush_workqueue(edac_workqueue);
2178 + }
2179 +
2180 + /*
2181 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
2182 +index 77ecd6a4179a..1b2c2187b347 100644
2183 +--- a/drivers/edac/edac_mc.c
2184 ++++ b/drivers/edac/edac_mc.c
2185 +@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
2186 + */
2187 + static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
2188 + {
2189 +- int status;
2190 +-
2191 +- if (mci->op_state != OP_RUNNING_POLL)
2192 +- return;
2193 +-
2194 +- status = cancel_delayed_work(&mci->work);
2195 +- if (status == 0) {
2196 +- edac_dbg(0, "not canceled, flush the queue\n");
2197 ++ mci->op_state = OP_OFFLINE;
2198 +
2199 +- /* workq instance might be running, wait for it */
2200 +- flush_workqueue(edac_workqueue);
2201 +- }
2202 ++ cancel_delayed_work_sync(&mci->work);
2203 ++ flush_workqueue(edac_workqueue);
2204 + }
2205 +
2206 + /*
2207 +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
2208 +index a75acea0f674..58aed67b7eba 100644
2209 +--- a/drivers/edac/edac_mc_sysfs.c
2210 ++++ b/drivers/edac/edac_mc_sysfs.c
2211 +@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
2212 + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
2213 + const struct attribute_group **groups)
2214 + {
2215 ++ char *name;
2216 + int i, err;
2217 +
2218 + /*
2219 + * The memory controller needs its own bus, in order to avoid
2220 + * namespace conflicts at /sys/bus/edac.
2221 + */
2222 +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
2223 +- if (!mci->bus->name)
2224 ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
2225 ++ if (!name)
2226 + return -ENOMEM;
2227 +
2228 ++ mci->bus->name = name;
2229 ++
2230 + edac_dbg(0, "creating bus %s\n", mci->bus->name);
2231 +
2232 + err = bus_register(mci->bus);
2233 +- if (err < 0)
2234 +- goto fail_free_name;
2235 ++ if (err < 0) {
2236 ++ kfree(name);
2237 ++ return err;
2238 ++ }
2239 +
2240 + /* get the /sys/devices/system/edac subsys reference */
2241 + mci->dev.type = &mci_attr_type;
2242 +@@ -961,8 +966,8 @@ fail_unregister_dimm:
2243 + device_unregister(&mci->dev);
2244 + fail_unregister_bus:
2245 + bus_unregister(mci->bus);
2246 +-fail_free_name:
2247 +- kfree(mci->bus->name);
2248 ++ kfree(name);
2249 ++
2250 + return err;
2251 + }
2252 +
2253 +@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
2254 +
2255 + void edac_unregister_sysfs(struct mem_ctl_info *mci)
2256 + {
2257 ++ const char *name = mci->bus->name;
2258 ++
2259 + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
2260 + device_unregister(&mci->dev);
2261 + bus_unregister(mci->bus);
2262 +- kfree(mci->bus->name);
2263 ++ kfree(name);
2264 + }
2265 +
2266 + static void mc_attr_release(struct device *dev)
2267 +diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
2268 +index 2cf44b4db80c..b4b38603b804 100644
2269 +--- a/drivers/edac/edac_pci.c
2270 ++++ b/drivers/edac/edac_pci.c
2271 +@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
2272 + */
2273 + static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
2274 + {
2275 +- int status;
2276 +-
2277 + edac_dbg(0, "\n");
2278 +
2279 +- status = cancel_delayed_work(&pci->work);
2280 +- if (status == 0)
2281 +- flush_workqueue(edac_workqueue);
2282 ++ pci->op_state = OP_OFFLINE;
2283 ++
2284 ++ cancel_delayed_work_sync(&pci->work);
2285 ++ flush_workqueue(edac_workqueue);
2286 + }
2287 +
2288 + /*
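[Note: all three edac teardown paths get the same treatment. cancel_delayed_work() only removes a pending work item and returns 0 when the handler is mid-flight, whereas cancel_delayed_work_sync() also waits for a running instance. Because these handlers re-arm themselves, the op_state flip must come first so a still-running instance does not queue one more round. The pattern, with hypothetical names:

    /* Sketch: tearing down a self-rearming delayed work */
    static void poll_fn(struct work_struct *work)
    {
            struct my_ctl *ctl = container_of(to_delayed_work(work),
                                              struct my_ctl, work);

            if (ctl->op_state != OP_RUNNING_POLL)
                    return;                 /* teardown: stop re-arming */
            /* ... poll the hardware ... */
            queue_delayed_work(my_wq, &ctl->work, ctl->delay);
    }

    static void teardown(struct my_ctl *ctl)
    {
            ctl->op_state = OP_OFFLINE;     /* no new rounds */
            cancel_delayed_work_sync(&ctl->work);
    }
]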
2289 +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
2290 +index 756eca8c4cf8..10e6774ab2a2 100644
2291 +--- a/drivers/firmware/efi/efivars.c
2292 ++++ b/drivers/firmware/efi/efivars.c
2293 +@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
2294 + }
2295 +
2296 + if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
2297 +- efivar_validate(name, data, size) == false) {
2298 ++ efivar_validate(vendor, name, data, size) == false) {
2299 + printk(KERN_ERR "efivars: Malformed variable content\n");
2300 + return -EINVAL;
2301 + }
2302 +@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
2303 + }
2304 +
2305 + if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
2306 +- efivar_validate(name, data, size) == false) {
2307 ++ efivar_validate(new_var->VendorGuid, name, data,
2308 ++ size) == false) {
2309 + printk(KERN_ERR "efivars: Malformed variable content\n");
2310 + return -EINVAL;
2311 + }
2312 +@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
2313 + static int
2314 + efivar_create_sysfs_entry(struct efivar_entry *new_var)
2315 + {
2316 +- int i, short_name_size;
2317 ++ int short_name_size;
2318 + char *short_name;
2319 +- unsigned long variable_name_size;
2320 +- efi_char16_t *variable_name;
2321 ++ unsigned long utf8_name_size;
2322 ++ efi_char16_t *variable_name = new_var->var.VariableName;
2323 + int ret;
2324 +
2325 +- variable_name = new_var->var.VariableName;
2326 +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
2327 +-
2328 + /*
2329 +- * Length of the variable bytes in ASCII, plus the '-' separator,
2330 ++ * Length of the variable bytes in UTF8, plus the '-' separator,
2331 + * plus the GUID, plus trailing NUL
2332 + */
2333 +- short_name_size = variable_name_size / sizeof(efi_char16_t)
2334 +- + 1 + EFI_VARIABLE_GUID_LEN + 1;
2335 +-
2336 +- short_name = kzalloc(short_name_size, GFP_KERNEL);
2337 ++ utf8_name_size = ucs2_utf8size(variable_name);
2338 ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
2339 +
2340 ++ short_name = kmalloc(short_name_size, GFP_KERNEL);
2341 + if (!short_name)
2342 + return -ENOMEM;
2343 +
2344 +- /* Convert Unicode to normal chars (assume top bits are 0),
2345 +- ala UTF-8 */
2346 +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
2347 +- short_name[i] = variable_name[i] & 0xFF;
2348 +- }
2349 ++ ucs2_as_utf8(short_name, variable_name, short_name_size);
2350 ++
2351 + /* This is ugly, but necessary to separate one vendor's
2352 + private variables from another's. */
2353 +-
2354 +- *(short_name + strlen(short_name)) = '-';
2355 ++ short_name[utf8_name_size] = '-';
2356 + efi_guid_to_str(&new_var->var.VendorGuid,
2357 +- short_name + strlen(short_name));
2358 ++ short_name + utf8_name_size + 1);
2359 +
2360 + new_var->kobj.kset = efivars_kset;
2361 +
2362 +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
2363 +index 70a0fb10517f..7f2ea21c730d 100644
2364 +--- a/drivers/firmware/efi/vars.c
2365 ++++ b/drivers/firmware/efi/vars.c
2366 +@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
2367 + }
2368 +
2369 + struct variable_validate {
2370 ++ efi_guid_t vendor;
2371 + char *name;
2372 + bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
2373 + unsigned long len);
2374 + };
2375 +
2376 ++/*
2377 ++ * This is the list of variables we need to validate, as well as the
2378 ++ * whitelist for what we think is safe not to default to immutable.
2379 ++ *
2380 ++ * If it has a validate() method that's not NULL, it'll go into the
2381 ++ * validation routine. If not, it is assumed valid, but still used for
2382 ++ * whitelisting.
2383 ++ *
2384 ++ * Note that it's sorted by {vendor,name}, but globbed names must come after
2385 ++ * any other name with the same prefix.
2386 ++ */
2387 + static const struct variable_validate variable_validate[] = {
2388 +- { "BootNext", validate_uint16 },
2389 +- { "BootOrder", validate_boot_order },
2390 +- { "DriverOrder", validate_boot_order },
2391 +- { "Boot*", validate_load_option },
2392 +- { "Driver*", validate_load_option },
2393 +- { "ConIn", validate_device_path },
2394 +- { "ConInDev", validate_device_path },
2395 +- { "ConOut", validate_device_path },
2396 +- { "ConOutDev", validate_device_path },
2397 +- { "ErrOut", validate_device_path },
2398 +- { "ErrOutDev", validate_device_path },
2399 +- { "Timeout", validate_uint16 },
2400 +- { "Lang", validate_ascii_string },
2401 +- { "PlatformLang", validate_ascii_string },
2402 +- { "", NULL },
2403 ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
2404 ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
2405 ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
2406 ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
2407 ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
2408 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
2409 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
2410 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
2411 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
2412 ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
2413 ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
2414 ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
2415 ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
2416 ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
2417 ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
2418 ++ { LINUX_EFI_CRASH_GUID, "*", NULL },
2419 ++ { NULL_GUID, "", NULL },
2420 + };
2421 +
2422 ++static bool
2423 ++variable_matches(const char *var_name, size_t len, const char *match_name,
2424 ++ int *match)
2425 ++{
2426 ++ for (*match = 0; ; (*match)++) {
2427 ++ char c = match_name[*match];
2428 ++ char u = var_name[*match];
2429 ++
2430 ++ /* Wildcard in the matching name means we've matched */
2431 ++ if (c == '*')
2432 ++ return true;
2433 ++
2434 ++ /* Case sensitive match */
2435 ++ if (!c && *match == len)
2436 ++ return true;
2437 ++
2438 ++ if (c != u)
2439 ++ return false;
2440 ++
2441 ++ if (!c)
2442 ++ return true;
2443 ++ }
2444 ++ return true;
2445 ++}
2446 ++
2447 + bool
2448 +-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
2449 ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
2450 ++ unsigned long data_size)
2451 + {
2452 + int i;
2453 +- u16 *unicode_name = var_name;
2454 ++ unsigned long utf8_size;
2455 ++ u8 *utf8_name;
2456 +
2457 +- for (i = 0; variable_validate[i].validate != NULL; i++) {
2458 +- const char *name = variable_validate[i].name;
2459 +- int match;
2460 ++ utf8_size = ucs2_utf8size(var_name);
2461 ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
2462 ++ if (!utf8_name)
2463 ++ return false;
2464 +
2465 +- for (match = 0; ; match++) {
2466 +- char c = name[match];
2467 +- u16 u = unicode_name[match];
2468 ++ ucs2_as_utf8(utf8_name, var_name, utf8_size);
2469 ++ utf8_name[utf8_size] = '\0';
2470 +
2471 +- /* All special variables are plain ascii */
2472 +- if (u > 127)
2473 +- return true;
2474 ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
2475 ++ const char *name = variable_validate[i].name;
2476 ++ int match = 0;
2477 +
2478 +- /* Wildcard in the matching name means we've matched */
2479 +- if (c == '*')
2480 +- return variable_validate[i].validate(var_name,
2481 +- match, data, len);
2482 ++ if (efi_guidcmp(vendor, variable_validate[i].vendor))
2483 ++ continue;
2484 +
2485 +- /* Case sensitive match */
2486 +- if (c != u)
2487 ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
2488 ++ if (variable_validate[i].validate == NULL)
2489 + break;
2490 +-
2491 +- /* Reached the end of the string while matching */
2492 +- if (!c)
2493 +- return variable_validate[i].validate(var_name,
2494 +- match, data, len);
2495 ++ kfree(utf8_name);
2496 ++ return variable_validate[i].validate(var_name, match,
2497 ++ data, data_size);
2498 + }
2499 + }
2500 +-
2501 ++ kfree(utf8_name);
2502 + return true;
2503 + }
2504 + EXPORT_SYMBOL_GPL(efivar_validate);
2505 +
2506 ++bool
2507 ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
2508 ++ size_t len)
2509 ++{
2510 ++ int i;
2511 ++ bool found = false;
2512 ++ int match = 0;
2513 ++
2514 ++ /*
2515 ++ * Check if our variable is in the validated variables list
2516 ++ */
2517 ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
2518 ++ if (efi_guidcmp(variable_validate[i].vendor, vendor))
2519 ++ continue;
2520 ++
2521 ++ if (variable_matches(var_name, len,
2522 ++ variable_validate[i].name, &match)) {
2523 ++ found = true;
2524 ++ break;
2525 ++ }
2526 ++ }
2527 ++
2528 ++ /*
2529 ++ * If it's in our list, it is removable.
2530 ++ */
2531 ++ return found;
2532 ++}
2533 ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
2534 ++
2535 + static efi_status_t
2536 + check_var_size(u32 attributes, unsigned long size)
2537 + {
2538 +@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
2539 +
2540 + *set = false;
2541 +
2542 +- if (efivar_validate(name, data, *size) == false)
2543 ++ if (efivar_validate(*vendor, name, data, *size) == false)
2544 + return -EINVAL;
2545 +
2546 + /*
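[Note: variable_matches() above implements the table's limited globbing: a '*' in the match name matches any suffix, and *match is left at the offset where matching stopped, so a validator registered for "Boot*" can see where the variable part of the name begins. For example (a sketch only; the function is static to vars.c):

    int match = 0;

    /* "Boot*" matches "Boot0001"; *match ends up at 4, the offset
     * of the wildcard, i.e. the start of "0001". len includes the NUL. */
    variable_matches("Boot0001", sizeof("Boot0001"), "Boot*", &match);
]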
2547 +diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
2548 +index 04c270757030..ca066018ea34 100644
2549 +--- a/drivers/gpu/drm/amd/amdgpu/Makefile
2550 ++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
2551 +@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
2552 + amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
2553 +
2554 + # add asic specific block
2555 +-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
2556 ++amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
2557 + ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
2558 + amdgpu_amdkfd_gfx_v7.o
2559 +
2560 +@@ -31,6 +31,7 @@ amdgpu-y += \
2561 +
2562 + # add GMC block
2563 + amdgpu-y += \
2564 ++ gmc_v7_0.o \
2565 + gmc_v8_0.o
2566 +
2567 + # add IH block
2568 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2569 +index 048cfe073dae..bb1099c549df 100644
2570 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2571 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2572 +@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
2573 + uint32_t align;
2574 + };
2575 +
2576 +-struct amdgpu_sa_bo;
2577 +-
2578 + /* sub-allocation buffer */
2579 + struct amdgpu_sa_bo {
2580 + struct list_head olist;
2581 +@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2582 + int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2583 + uint32_t flags);
2584 + bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2585 ++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2586 ++ unsigned long end);
2587 + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2588 + uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2589 + struct ttm_mem_reg *mem);
2590 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2591 +index d5b421330145..c961fe093e12 100644
2592 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2593 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2594 +@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2595 + }
2596 +
2597 + /* post card */
2598 +- amdgpu_atom_asic_init(adev->mode_info.atom_context);
2599 ++ if (!amdgpu_card_posted(adev))
2600 ++ amdgpu_atom_asic_init(adev->mode_info.atom_context);
2601 +
2602 + r = amdgpu_resume(adev);
2603 ++ if (r)
2604 ++ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2605 +
2606 + amdgpu_fence_driver_resume(adev);
2607 +
2608 +- r = amdgpu_ib_ring_tests(adev);
2609 +- if (r)
2610 +- DRM_ERROR("ib ring test failed (%d).\n", r);
2611 ++ if (resume) {
2612 ++ r = amdgpu_ib_ring_tests(adev);
2613 ++ if (r)
2614 ++ DRM_ERROR("ib ring test failed (%d).\n", r);
2615 ++ }
2616 +
2617 + r = amdgpu_late_init(adev);
2618 + if (r)
2619 +@@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2620 + }
2621 +
2622 + drm_kms_helper_poll_enable(dev);
2623 ++ drm_helper_hpd_irq_event(dev);
2624 +
2625 + if (fbcon) {
2626 + amdgpu_fbdev_set_suspend(adev, 0);
2627 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2628 +index 5580d3420c3a..0c713a908304 100644
2629 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2630 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2631 +@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2632 +
2633 + struct drm_crtc *crtc = &amdgpuCrtc->base;
2634 + unsigned long flags;
2635 +- unsigned i;
2636 +- int vpos, hpos, stat, min_udelay;
2637 ++ unsigned i, repcnt = 4;
2638 ++ int vpos, hpos, stat, min_udelay = 0;
2639 + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
2640 +
2641 + amdgpu_flip_wait_fence(adev, &work->excl);
2642 +@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2643 + * In practice this won't execute very often unless on very fast
2644 + * machines because the time window for this to happen is very small.
2645 + */
2646 +- for (;;) {
2647 ++ while (amdgpuCrtc->enabled && repcnt--) {
2648 + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
2649 + * start in hpos, and to the "fudged earlier" vblank start in
2650 + * vpos.
2651 +@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2652 + /* Sleep at least until estimated real start of hw vblank */
2653 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
2654 + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
2655 ++ if (min_udelay > vblank->framedur_ns / 2000) {
2656 ++ /* Don't wait ridiculously long - something is wrong */
2657 ++ repcnt = 0;
2658 ++ break;
2659 ++ }
2660 + usleep_range(min_udelay, 2 * min_udelay);
2661 + spin_lock_irqsave(&crtc->dev->event_lock, flags);
2662 + };
2663 +
2664 ++ if (!repcnt)
2665 ++ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
2666 ++ "framedur %d, linedur %d, stat %d, vpos %d, "
2667 ++ "hpos %d\n", work->crtc_id, min_udelay,
2668 ++ vblank->framedur_ns / 1000,
2669 ++ vblank->linedur_ns / 1000, stat, vpos, hpos);
2670 ++
2671 + /* do the flip (mmio) */
2672 + adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
2673 + /* set the flip status */
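[Note: the flip-wait loop is also made robustly bounded: at most four trips, only while the crtc stays enabled, and with a bail-out when the computed delay exceeds half a frame, since a delay that long means the timestamps are wrong rather than that more waiting will help. Reduced to its shape (still_needed, delay_us and frame_us are hypothetical):

    /* Sketch: bound a wait loop that used to be for (;;) */
    unsigned repcnt = 4;

    while (still_needed() && repcnt--) {
            if (delay_us > frame_us / 2)
                    break;          /* implausible delay: give up */
            usleep_range(delay_us, 2 * delay_us);
    }
]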
2674 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2675 +index 0508c5cd103a..8d6668cedf6d 100644
2676 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2677 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2678 +@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
2679 + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
2680 + #endif
2681 + /* topaz */
2682 +- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2683 +- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2684 +- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2685 +- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2686 +- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2687 ++ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2688 ++ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2689 ++ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2690 ++ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2691 ++ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2692 + /* tonga */
2693 + {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
2694 + {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
2695 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2696 +index b1969f2b2038..d4e2780c0796 100644
2697 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2698 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2699 +@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
2700 +
2701 + list_for_each_entry(bo, &node->bos, mn_list) {
2702 +
2703 +- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
2704 ++ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
2705 ++ end))
2706 + continue;
2707 +
2708 + r = amdgpu_bo_reserve(bo, true);
2709 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2710 +index c3ce103b6a33..a2a16acee34d 100644
2711 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2712 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2713 +@@ -399,7 +399,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
2714 + }
2715 + if (fpfn > bo->placements[i].fpfn)
2716 + bo->placements[i].fpfn = fpfn;
2717 +- if (lpfn && lpfn < bo->placements[i].lpfn)
2718 ++ if (!bo->placements[i].lpfn ||
2719 ++ (lpfn && lpfn < bo->placements[i].lpfn))
2720 + bo->placements[i].lpfn = lpfn;
2721 + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
2722 + }
2723 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2724 +index 22a8c7d3a3ab..03fe25142b78 100644
2725 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2726 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2727 +@@ -595,8 +595,6 @@ force:
2728 +
2729 + /* update display watermarks based on new power state */
2730 + amdgpu_display_bandwidth_update(adev);
2731 +- /* update displays */
2732 +- amdgpu_dpm_display_configuration_changed(adev);
2733 +
2734 + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
2735 + adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
2736 +@@ -616,6 +614,9 @@ force:
2737 +
2738 + amdgpu_dpm_post_set_power_state(adev);
2739 +
2740 ++ /* update displays */
2741 ++ amdgpu_dpm_display_configuration_changed(adev);
2742 ++
2743 + if (adev->pm.funcs->force_performance_level) {
2744 + if (adev->pm.dpm.thermal_active) {
2745 + enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
2746 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2747 +index 8b88edb0434b..ca72a2e487b9 100644
2748 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2749 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2750 +@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
2751 +
2752 + for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
2753 + if (fences[i])
2754 +- fences[count++] = fences[i];
2755 ++ fences[count++] = fence_get(fences[i]);
2756 +
2757 + if (count) {
2758 + spin_unlock(&sa_manager->wq.lock);
2759 + t = fence_wait_any_timeout(fences, count, false,
2760 + MAX_SCHEDULE_TIMEOUT);
2761 ++ for (i = 0; i < count; ++i)
2762 ++ fence_put(fences[i]);
2763 ++
2764 + r = (t > 0) ? 0 : t;
2765 + spin_lock(&sa_manager->wq.lock);
2766 + } else {
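[Note: waiting outside the lock is only safe if the fences cannot be freed mid-wait, hence the fence_get() taken while still under the lock and the matching fence_put() afterwards. The general shape, with hypothetical names:

    /* Sketch: pin an object before dropping the lock to wait on it */
    spin_lock(&mgr->lock);
    f = fence_get(mgr->fence);      /* take a reference under the lock */
    spin_unlock(&mgr->lock);

    fence_wait(f, false);           /* safe: our reference keeps f alive */
    fence_put(f);
]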
2767 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2768 +index dd005c336c97..181ce39ef5e5 100644
2769 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2770 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2771 +@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
2772 + fence = to_amdgpu_fence(sync->sync_to[i]);
2773 +
2774 + /* check if we really need to sync */
2775 +- if (!amdgpu_fence_need_sync(fence, ring))
2776 ++ if (!amdgpu_enable_scheduler &&
2777 ++ !amdgpu_fence_need_sync(fence, ring))
2778 + continue;
2779 +
2780 + /* prevent GPU deadlocks */
2781 +@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
2782 + }
2783 +
2784 + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
2785 +- r = fence_wait(&fence->base, true);
2786 ++ r = fence_wait(sync->sync_to[i], true);
2787 + if (r)
2788 + return r;
2789 + continue;
2790 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2791 +index 8a1752ff3d8e..1cbb16e15307 100644
2792 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2793 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2794 +@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
2795 + 0, PAGE_SIZE,
2796 + PCI_DMA_BIDIRECTIONAL);
2797 + if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
2798 +- while (--i) {
2799 ++ while (i--) {
2800 + pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
2801 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
2802 + gtt->ttm.dma_address[i] = 0;
2803 +@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
2804 + return !!gtt->userptr;
2805 + }
2806 +
2807 ++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2808 ++ unsigned long end)
2809 ++{
2810 ++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
2811 ++ unsigned long size;
2812 ++
2813 ++ if (gtt == NULL)
2814 ++ return false;
2815 ++
2816 ++ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
2817 ++ return false;
2818 ++
2819 ++ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
2820 ++ if (gtt->userptr > end || gtt->userptr + size <= start)
2821 ++ return false;
2822 ++
2823 ++ return true;
2824 ++}
2825 ++
2826 + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
2827 + {
2828 + struct amdgpu_ttm_tt *gtt = (void *)ttm;
2829 +@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2830 + flags |= AMDGPU_PTE_SNOOPED;
2831 + }
2832 +
2833 +- if (adev->asic_type >= CHIP_TOPAZ)
2834 ++ if (adev->asic_type >= CHIP_TONGA)
2835 + flags |= AMDGPU_PTE_EXECUTABLE;
2836 +
2837 + flags |= AMDGPU_PTE_READABLE;
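[Note: amdgpu_ttm_tt_affect_userptr() is the standard two-range overlap test: ranges [a, a+alen) and [b, b+blen) intersect exactly when each starts before the other ends; the hunk above phrases it as the negation. As a standalone helper:

    /* Sketch: half-open range overlap test */
    static bool ranges_overlap(unsigned long a, unsigned long alen,
                               unsigned long b, unsigned long blen)
    {
            return a < b + blen && b < a + alen;
    }
]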
2838 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2839 +index b53d273eb7a1..39adbb6470d1 100644
2840 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2841 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2842 +@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2843 + return -EINVAL;
2844 +
2845 + /* make sure object fit at this offset */
2846 +- eaddr = saddr + size;
2847 ++ eaddr = saddr + size - 1;
2848 + if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
2849 + return -EINVAL;
2850 +
2851 + last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
2852 +- if (last_pfn > adev->vm_manager.max_pfn) {
2853 +- dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
2854 ++ if (last_pfn >= adev->vm_manager.max_pfn) {
2855 ++ dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
2856 + last_pfn, adev->vm_manager.max_pfn);
2857 + return -EINVAL;
2858 + }
2859 +@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2860 + eaddr /= AMDGPU_GPU_PAGE_SIZE;
2861 +
2862 + spin_lock(&vm->it_lock);
2863 +- it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
2864 ++ it = interval_tree_iter_first(&vm->va, saddr, eaddr);
2865 + spin_unlock(&vm->it_lock);
2866 + if (it) {
2867 + struct amdgpu_bo_va_mapping *tmp;
2868 +@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2869 +
2870 + INIT_LIST_HEAD(&mapping->list);
2871 + mapping->it.start = saddr;
2872 +- mapping->it.last = eaddr - 1;
2873 ++ mapping->it.last = eaddr;
2874 + mapping->offset = offset;
2875 + mapping->flags = flags;
2876 +
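[Note: the interval tree used for VA mappings stores an inclusive last address, so a size-byte mapping at saddr ends at saddr + size - 1. The old saddr + size both overlapped the first page of the next mapping and could wrap to zero for a mapping touching the top of the address space:

    /* Sketch: inclusive bounds for the interval tree */
    u64 start = saddr;
    u64 last  = saddr + size - 1;   /* inclusive; no overflow at the top */

    it = interval_tree_iter_first(&vm->va, start, last);
]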
2877 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2878 +index e1dcab98e249..4cb45f4602aa 100644
2879 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2880 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2881 +@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
2882 + MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
2883 + MODULE_FIRMWARE("amdgpu/topaz_me.bin");
2884 + MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
2885 +-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
2886 + MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
2887 +
2888 + MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
2889 +@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
2890 + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
2891 + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
2892 +
2893 +- if (adev->asic_type != CHIP_STONEY) {
2894 ++ if ((adev->asic_type != CHIP_STONEY) &&
2895 ++ (adev->asic_type != CHIP_TOPAZ)) {
2896 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
2897 + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
2898 + if (!err) {
2899 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2900 +index ed8abb58a785..272110cc18c2 100644
2901 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2902 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2903 +@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
2904 +
2905 + MODULE_FIRMWARE("radeon/bonaire_mc.bin");
2906 + MODULE_FIRMWARE("radeon/hawaii_mc.bin");
2907 ++MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
2908 ++
2909 ++static const u32 golden_settings_iceland_a11[] =
2910 ++{
2911 ++ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2912 ++ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2913 ++ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2914 ++ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
2915 ++};
2916 ++
2917 ++static const u32 iceland_mgcg_cgcg_init[] =
2918 ++{
2919 ++ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2920 ++};
2921 ++
2922 ++static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
2923 ++{
2924 ++ switch (adev->asic_type) {
2925 ++ case CHIP_TOPAZ:
2926 ++ amdgpu_program_register_sequence(adev,
2927 ++ iceland_mgcg_cgcg_init,
2928 ++ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
2929 ++ amdgpu_program_register_sequence(adev,
2930 ++ golden_settings_iceland_a11,
2931 ++ (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
2932 ++ break;
2933 ++ default:
2934 ++ break;
2935 ++ }
2936 ++}
2937 +
2938 + /**
2939 +- * gmc8_mc_wait_for_idle - wait for MC idle callback.
2940 ++ * gmc7_mc_wait_for_idle - wait for MC idle callback.
2941 + *
2942 + * @adev: amdgpu_device pointer
2943 + *
2944 +@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
2945 + case CHIP_HAWAII:
2946 + chip_name = "hawaii";
2947 + break;
2948 ++ case CHIP_TOPAZ:
2949 ++ chip_name = "topaz";
2950 ++ break;
2951 + case CHIP_KAVERI:
2952 + case CHIP_KABINI:
2953 + return 0;
2954 + default: BUG();
2955 + }
2956 +
2957 +- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2958 ++ if (adev->asic_type == CHIP_TOPAZ)
2959 ++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
2960 ++ else
2961 ++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2962 ++
2963 + err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
2964 + if (err)
2965 + goto out;
2966 +@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
2967 + int r;
2968 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2969 +
2970 ++ gmc_v7_0_init_golden_registers(adev);
2971 ++
2972 + gmc_v7_0_mc_program(adev);
2973 +
2974 + if (!(adev->flags & AMD_IS_APU)) {
2975 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2976 +index d39028440814..ba4ad00ba8b4 100644
2977 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2978 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2979 +@@ -42,9 +42,7 @@
2980 + static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
2981 + static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
2982 +
2983 +-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
2984 + MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
2985 +-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
2986 +
2987 + static const u32 golden_settings_tonga_a11[] =
2988 + {
2989 +@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
2990 + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2991 + };
2992 +
2993 +-static const u32 golden_settings_iceland_a11[] =
2994 +-{
2995 +- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2996 +- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2997 +- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2998 +- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
2999 +-};
3000 +-
3001 +-static const u32 iceland_mgcg_cgcg_init[] =
3002 +-{
3003 +- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
3004 +-};
3005 +-
3006 + static const u32 cz_mgcg_cgcg_init[] =
3007 + {
3008 + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
3009 +@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
3010 + static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
3011 + {
3012 + switch (adev->asic_type) {
3013 +- case CHIP_TOPAZ:
3014 +- amdgpu_program_register_sequence(adev,
3015 +- iceland_mgcg_cgcg_init,
3016 +- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
3017 +- amdgpu_program_register_sequence(adev,
3018 +- golden_settings_iceland_a11,
3019 +- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
3020 +- break;
3021 + case CHIP_FIJI:
3022 + amdgpu_program_register_sequence(adev,
3023 + fiji_mgcg_cgcg_init,
3024 +@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
3025 + DRM_DEBUG("\n");
3026 +
3027 + switch (adev->asic_type) {
3028 +- case CHIP_TOPAZ:
3029 +- chip_name = "topaz";
3030 +- break;
3031 + case CHIP_TONGA:
3032 + chip_name = "tonga";
3033 + break;
3034 + case CHIP_FIJI:
3035 +- chip_name = "fiji";
3036 +- break;
3037 + case CHIP_CARRIZO:
3038 + case CHIP_STONEY:
3039 + return 0;
3040 +@@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
3041 +
3042 + gmc_v8_0_mc_program(adev);
3043 +
3044 +- if (!(adev->flags & AMD_IS_APU)) {
3045 ++ if (adev->asic_type == CHIP_TONGA) {
3046 + r = gmc_v8_0_mc_load_microcode(adev);
3047 + if (r) {
3048 + DRM_ERROR("Failed to load MC firmware!\n");
3049 +diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3050 +index 966d4b2ed9da..090486c18249 100644
3051 +--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3052 ++++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3053 +@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
3054 + case AMDGPU_UCODE_ID_CP_ME:
3055 + return UCODE_ID_CP_ME_MASK;
3056 + case AMDGPU_UCODE_ID_CP_MEC1:
3057 +- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
3058 ++ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
3059 + case AMDGPU_UCODE_ID_CP_MEC2:
3060 + return UCODE_ID_CP_MEC_MASK;
3061 + case AMDGPU_UCODE_ID_RLC_G:
3062 +@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
3063 + return -EINVAL;
3064 + }
3065 +
3066 +- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
3067 +- &toc->entry[toc->num_entries++])) {
3068 +- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
3069 +- return -EINVAL;
3070 +- }
3071 +-
3072 + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
3073 + &toc->entry[toc->num_entries++])) {
3074 + DRM_ERROR("Failed to get firmware entry for SDMA0\n");
3075 +@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
3076 + UCODE_ID_CP_ME_MASK |
3077 + UCODE_ID_CP_PFP_MASK |
3078 + UCODE_ID_CP_MEC_MASK |
3079 +- UCODE_ID_CP_MEC_JT1_MASK |
3080 +- UCODE_ID_CP_MEC_JT2_MASK;
3081 ++ UCODE_ID_CP_MEC_JT1_MASK;
3082 ++
3083 +
3084 + if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
3085 + DRM_ERROR("Fail to request SMU load ucode\n");
3086 +diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3087 +index 204903897b4f..63d6cb3c1110 100644
3088 +--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3089 ++++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3090 +@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
3091 +
3092 + static int tonga_dpm_suspend(void *handle)
3093 + {
3094 +- return 0;
3095 ++ return tonga_dpm_hw_fini(handle);
3096 + }
3097 +
3098 + static int tonga_dpm_resume(void *handle)
3099 + {
3100 +- int ret;
3101 +- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3102 +-
3103 +- mutex_lock(&adev->pm.mutex);
3104 +-
3105 +- ret = tonga_smu_start(adev);
3106 +- if (ret) {
3107 +- DRM_ERROR("SMU start failed\n");
3108 +- goto fail;
3109 +- }
3110 +-
3111 +-fail:
3112 +- mutex_unlock(&adev->pm.mutex);
3113 +- return ret;
3114 ++ return tonga_dpm_hw_init(handle);
3115 + }
3116 +
3117 + static int tonga_dpm_set_clockgating_state(void *handle,
3118 +diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
3119 +index 2adc1c855e85..7628eb44cce2 100644
3120 +--- a/drivers/gpu/drm/amd/amdgpu/vi.c
3121 ++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
3122 +@@ -60,6 +60,7 @@
3123 + #include "vi.h"
3124 + #include "vi_dpm.h"
3125 + #include "gmc_v8_0.h"
3126 ++#include "gmc_v7_0.h"
3127 + #include "gfx_v8_0.h"
3128 + #include "sdma_v2_4.h"
3129 + #include "sdma_v3_0.h"
3130 +@@ -1128,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
3131 + },
3132 + {
3133 + .type = AMD_IP_BLOCK_TYPE_GMC,
3134 +- .major = 8,
3135 +- .minor = 0,
3136 ++ .major = 7,
3137 ++ .minor = 4,
3138 + .rev = 0,
3139 +- .funcs = &gmc_v8_0_ip_funcs,
3140 ++ .funcs = &gmc_v7_0_ip_funcs,
3141 + },
3142 + {
3143 + .type = AMD_IP_BLOCK_TYPE_IH,
3144 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
3145 +index 809959d56d78..39d7e2e15c11 100644
3146 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
3147 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
3148 +@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
3149 + return mstb;
3150 + }
3151 +
3152 ++static void drm_dp_free_mst_port(struct kref *kref);
3153 ++
3154 ++static void drm_dp_free_mst_branch_device(struct kref *kref)
3155 ++{
3156 ++ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
3157 ++ if (mstb->port_parent) {
3158 ++ if (list_empty(&mstb->port_parent->next))
3159 ++ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
3160 ++ }
3161 ++ kfree(mstb);
3162 ++}
3163 ++
3164 + static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3165 + {
3166 + struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
3167 +@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3168 + bool wake_tx = false;
3169 +
3170 + /*
3171 ++ * Reinitialize the kref so that ports can drop the mst branch
3172 ++ * device once it is no longer needed.
3173 ++ */
3174 ++ kref_init(kref);
3175 ++
3176 ++ if (mstb->port_parent && list_empty(&mstb->port_parent->next))
3177 ++ kref_get(&mstb->port_parent->kref);
3178 ++
3179 ++ /*
3180 + * destroy all ports - don't need lock
3181 + * as there are no more references to the mst branch
3182 + * device at this point.
3183 +@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3184 +
3185 + if (wake_tx)
3186 + wake_up(&mstb->mgr->tx_waitq);
3187 +- kfree(mstb);
3188 ++
3189 ++ kref_put(kref, drm_dp_free_mst_branch_device);
3190 + }
3191 +
3192 + static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
3193 +@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
3194 + * from an EDID retrieval */
3195 +
3196 + mutex_lock(&mgr->destroy_connector_lock);
3197 ++ kref_get(&port->parent->kref);
3198 + list_add(&port->next, &mgr->destroy_connector_list);
3199 + mutex_unlock(&mgr->destroy_connector_lock);
3200 + schedule_work(&mgr->destroy_connector_work);
3201 +@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
3202 + static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
3203 + u8 *rad)
3204 + {
3205 +- int lct = port->parent->lct;
3206 ++ int parent_lct = port->parent->lct;
3207 + int shift = 4;
3208 +- int idx = lct / 2;
3209 +- if (lct > 1) {
3210 +- memcpy(rad, port->parent->rad, idx);
3211 +- shift = (lct % 2) ? 4 : 0;
3212 ++ int idx = (parent_lct - 1) / 2;
3213 ++ if (parent_lct > 1) {
3214 ++ memcpy(rad, port->parent->rad, idx + 1);
3215 ++ shift = (parent_lct % 2) ? 4 : 0;
3216 + } else
3217 + rad[0] = 0;
3218 +
3219 + rad[idx] |= port->port_num << shift;
3220 +- return lct + 1;
3221 ++ return parent_lct + 1;
3222 + }
3223 +
3224 + /*
3225 +@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
3226 + return send_link;
3227 + }
3228 +
3229 +-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
3230 +- struct drm_dp_mst_port *port)
3231 ++static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
3232 + {
3233 + int ret;
3234 +- if (port->dpcd_rev >= 0x12) {
3235 +- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
3236 +- if (!port->guid_valid) {
3237 +- ret = drm_dp_send_dpcd_write(mstb->mgr,
3238 +- port,
3239 +- DP_GUID,
3240 +- 16, port->guid);
3241 +- port->guid_valid = true;
3242 ++
3243 ++ memcpy(mstb->guid, guid, 16);
3244 ++
3245 ++ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
3246 ++ if (mstb->port_parent) {
3247 ++ ret = drm_dp_send_dpcd_write(
3248 ++ mstb->mgr,
3249 ++ mstb->port_parent,
3250 ++ DP_GUID,
3251 ++ 16,
3252 ++ mstb->guid);
3253 ++ } else {
3254 ++
3255 ++ ret = drm_dp_dpcd_write(
3256 ++ mstb->mgr->aux,
3257 ++ DP_GUID,
3258 ++ mstb->guid,
3259 ++ 16);
3260 + }
3261 + }
3262 + }
3263 +@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
3264 + snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
3265 + for (i = 0; i < (mstb->lct - 1); i++) {
3266 + int shift = (i % 2) ? 0 : 4;
3267 +- int port_num = mstb->rad[i / 2] >> shift;
3268 ++ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
3269 + snprintf(temp, sizeof(temp), "-%d", port_num);
3270 + strlcat(proppath, temp, proppath_size);
3271 + }
3272 +@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
3273 + port->dpcd_rev = port_msg->dpcd_revision;
3274 + port->num_sdp_streams = port_msg->num_sdp_streams;
3275 + port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
3276 +- memcpy(port->guid, port_msg->peer_guid, 16);
3277 +
3278 + /* manage mstb port lists with mgr lock - take a reference
3279 + for this list */
3280 +@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
3281 +
3282 + if (old_ddps != port->ddps) {
3283 + if (port->ddps) {
3284 +- drm_dp_check_port_guid(mstb, port);
3285 + if (!port->input)
3286 + drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
3287 + } else {
3288 +- port->guid_valid = false;
3289 + port->available_pbn = 0;
3290 + }
3291 + }
3292 +@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
3293 +
3294 + if (old_ddps != port->ddps) {
3295 + if (port->ddps) {
3296 +- drm_dp_check_port_guid(mstb, port);
3297 + dowork = true;
3298 + } else {
3299 +- port->guid_valid = false;
3300 + port->available_pbn = 0;
3301 + }
3302 + }
3303 +@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
3304 +
3305 + for (i = 0; i < lct - 1; i++) {
3306 + int shift = (i % 2) ? 0 : 4;
3307 +- int port_num = rad[i / 2] >> shift;
3308 ++ int port_num = (rad[i / 2] >> shift) & 0xf;
3309 +
3310 + list_for_each_entry(port, &mstb->ports, next) {
3311 + if (port->port_num == port_num) {
3312 +@@ -1210,6 +1237,48 @@ out:
3313 + return mstb;
3314 + }
3315 +
3316 ++static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
3317 ++ struct drm_dp_mst_branch *mstb,
3318 ++ uint8_t *guid)
3319 ++{
3320 ++ struct drm_dp_mst_branch *found_mstb;
3321 ++ struct drm_dp_mst_port *port;
3322 ++
3323 ++ if (memcmp(mstb->guid, guid, 16) == 0)
3324 ++ return mstb;
3325 ++
3326 ++
3327 ++ list_for_each_entry(port, &mstb->ports, next) {
3328 ++ if (!port->mstb)
3329 ++ continue;
3330 ++
3331 ++ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
3332 ++
3333 ++ if (found_mstb)
3334 ++ return found_mstb;
3335 ++ }
3336 ++
3337 ++ return NULL;
3338 ++}
3339 ++
3340 ++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
3341 ++ struct drm_dp_mst_topology_mgr *mgr,
3342 ++ uint8_t *guid)
3343 ++{
3344 ++ struct drm_dp_mst_branch *mstb;
3345 ++
3346 ++ /* find the port by iterating down */
3347 ++ mutex_lock(&mgr->lock);
3348 ++
3349 ++ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
3350 ++
3351 ++ if (mstb)
3352 ++ kref_get(&mstb->kref);
3353 ++
3354 ++ mutex_unlock(&mgr->lock);
3355 ++ return mstb;
3356 ++}
3357 ++
3358 + static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
3359 + struct drm_dp_mst_branch *mstb)
3360 + {
3361 +@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
3362 + struct drm_dp_sideband_msg_tx *txmsg)
3363 + {
3364 + struct drm_dp_mst_branch *mstb = txmsg->dst;
3365 ++ u8 req_type;
3366 +
3367 + /* both msg slots are full */
3368 + if (txmsg->seqno == -1) {
3369 +@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
3370 + txmsg->seqno = 1;
3371 + mstb->tx_slots[txmsg->seqno] = txmsg;
3372 + }
3373 +- hdr->broadcast = 0;
3374 ++
3375 ++ req_type = txmsg->msg[0] & 0x7f;
3376 ++ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
3377 ++ req_type == DP_RESOURCE_STATUS_NOTIFY)
3378 ++ hdr->broadcast = 1;
3379 ++ else
3380 ++ hdr->broadcast = 0;
3381 + hdr->path_msg = txmsg->path_msg;
3382 + hdr->lct = mstb->lct;
3383 + hdr->lcr = mstb->lct - 1;
3384 +@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
3385 + }
3386 +
3387 + /* called holding qlock */
3388 +-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
3389 ++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
3390 ++ struct drm_dp_sideband_msg_tx *txmsg)
3391 + {
3392 +- struct drm_dp_sideband_msg_tx *txmsg;
3393 + int ret;
3394 +
3395 + /* construct a chunk from the first msg in the tx_msg queue */
3396 +- if (list_empty(&mgr->tx_msg_upq)) {
3397 +- mgr->tx_up_in_progress = false;
3398 +- return;
3399 +- }
3400 +-
3401 +- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
3402 + ret = process_single_tx_qlock(mgr, txmsg, true);
3403 +- if (ret == 1) {
3404 +- /* up txmsgs aren't put in slots - so free after we send it */
3405 +- list_del(&txmsg->next);
3406 +- kfree(txmsg);
3407 +- } else if (ret)
3408 ++
3409 ++ if (ret != 1)
3410 + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
3411 +- mgr->tx_up_in_progress = true;
3412 ++
3413 ++ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
3414 + }
3415 +
3416 + static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
3417 +@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
3418 + txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
3419 + txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
3420 + }
3421 ++
3422 ++ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
3423 ++
3424 + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
3425 + drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
3426 + }
3427 +@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3428 + return 0;
3429 + }
3430 +
3431 ++static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3432 ++{
3433 ++ if (!mstb->port_parent)
3434 ++ return NULL;
3435 ++
3436 ++ if (mstb->port_parent->mstb != mstb)
3437 ++ return mstb->port_parent;
3438 ++
3439 ++ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3440 ++}
3441 ++
3442 ++static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3443 ++ struct drm_dp_mst_branch *mstb,
3444 ++ int *port_num)
3445 ++{
3446 ++ struct drm_dp_mst_branch *rmstb = NULL;
3447 ++ struct drm_dp_mst_port *found_port;
3448 ++ mutex_lock(&mgr->lock);
3449 ++ if (mgr->mst_primary) {
3450 ++ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3451 ++
3452 ++ if (found_port) {
3453 ++ rmstb = found_port->parent;
3454 ++ kref_get(&rmstb->kref);
3455 ++ *port_num = found_port->port_num;
3456 ++ }
3457 ++ }
3458 ++ mutex_unlock(&mgr->lock);
3459 ++ return rmstb;
3460 ++}
3461 ++
3462 + static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3463 + struct drm_dp_mst_port *port,
3464 + int id,
3465 +@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3466 + {
3467 + struct drm_dp_sideband_msg_tx *txmsg;
3468 + struct drm_dp_mst_branch *mstb;
3469 +- int len, ret;
3470 ++ int len, ret, port_num;
3471 +
3472 ++ port_num = port->port_num;
3473 + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3474 +- if (!mstb)
3475 +- return -EINVAL;
3476 ++ if (!mstb) {
3477 ++ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
3478 ++
3479 ++ if (!mstb)
3480 ++ return -EINVAL;
3481 ++ }
3482 +
3483 + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3484 + if (!txmsg) {
3485 +@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3486 + }
3487 +
3488 + txmsg->dst = mstb;
3489 +- len = build_allocate_payload(txmsg, port->port_num,
3490 ++ len = build_allocate_payload(txmsg, port_num,
3491 + id,
3492 + pbn);
3493 +
3494 +@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3495 + drm_dp_encode_up_ack_reply(txmsg, req_type);
3496 +
3497 + mutex_lock(&mgr->qlock);
3498 +- list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
3499 +- if (!mgr->tx_up_in_progress) {
3500 +- process_single_up_tx_qlock(mgr);
3501 +- }
3502 ++
3503 ++ process_single_up_tx_qlock(mgr, txmsg);
3504 ++
3505 + mutex_unlock(&mgr->qlock);
3506 ++
3507 ++ kfree(txmsg);
3508 + return 0;
3509 + }
3510 +
3511 +@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
3512 + mgr->mst_primary = mstb;
3513 + kref_get(&mgr->mst_primary->kref);
3514 +
3515 +- {
3516 +- struct drm_dp_payload reset_pay;
3517 +- reset_pay.start_slot = 0;
3518 +- reset_pay.num_slots = 0x3f;
3519 +- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3520 +- }
3521 +-
3522 + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3523 +- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3524 ++ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3525 + if (ret < 0) {
3526 + goto out_unlock;
3527 + }
3528 +
3529 +-
3530 +- /* sort out guid */
3531 +- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
3532 +- if (ret != 16) {
3533 +- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
3534 +- goto out_unlock;
3535 +- }
3536 +-
3537 +- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
3538 +- if (!mgr->guid_valid) {
3539 +- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
3540 +- mgr->guid_valid = true;
3541 ++ {
3542 ++ struct drm_dp_payload reset_pay;
3543 ++ reset_pay.start_slot = 0;
3544 ++ reset_pay.num_slots = 0x3f;
3545 ++ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3546 + }
3547 +
3548 + queue_work(system_long_wq, &mgr->work);
3549 +@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3550 +
3551 + if (mgr->up_req_recv.have_eomt) {
3552 + struct drm_dp_sideband_msg_req_body msg;
3553 +- struct drm_dp_mst_branch *mstb;
3554 ++ struct drm_dp_mst_branch *mstb = NULL;
3555 + bool seqno;
3556 +- mstb = drm_dp_get_mst_branch_device(mgr,
3557 +- mgr->up_req_recv.initial_hdr.lct,
3558 +- mgr->up_req_recv.initial_hdr.rad);
3559 +- if (!mstb) {
3560 +- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3561 +- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3562 +- return 0;
3563 ++
3564 ++ if (!mgr->up_req_recv.initial_hdr.broadcast) {
3565 ++ mstb = drm_dp_get_mst_branch_device(mgr,
3566 ++ mgr->up_req_recv.initial_hdr.lct,
3567 ++ mgr->up_req_recv.initial_hdr.rad);
3568 ++ if (!mstb) {
3569 ++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3570 ++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3571 ++ return 0;
3572 ++ }
3573 + }
3574 +
3575 + seqno = mgr->up_req_recv.initial_hdr.seqno;
3576 + drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
3577 +
3578 + if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3579 +- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
3580 ++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3581 ++
3582 ++ if (!mstb)
3583 ++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
3584 ++
3585 ++ if (!mstb) {
3586 ++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3587 ++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3588 ++ return 0;
3589 ++ }
3590 ++
3591 + drm_dp_update_port(mstb, &msg.u.conn_stat);
3592 ++
3593 + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
3594 + (*mgr->cbs->hotplug)(mgr);
3595 +
3596 + } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3597 +- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
3598 ++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3599 ++ if (!mstb)
3600 ++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
3601 ++
3602 ++ if (!mstb) {
3603 ++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3604 ++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3605 ++ return 0;
3606 ++ }
3607 ++
3608 + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
3609 + }
3610 +
3611 +@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
3612 + DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
3613 + if (pbn == port->vcpi.pbn) {
3614 + *slots = port->vcpi.num_slots;
3615 ++ drm_dp_put_port(port);
3616 + return true;
3617 + }
3618 + }
3619 +@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
3620 + */
3621 + int drm_dp_calc_pbn_mode(int clock, int bpp)
3622 + {
3623 +- fixed20_12 pix_bw;
3624 +- fixed20_12 fbpp;
3625 +- fixed20_12 result;
3626 +- fixed20_12 margin, tmp;
3627 +- u32 res;
3628 +-
3629 +- pix_bw.full = dfixed_const(clock);
3630 +- fbpp.full = dfixed_const(bpp);
3631 +- tmp.full = dfixed_const(8);
3632 +- fbpp.full = dfixed_div(fbpp, tmp);
3633 +-
3634 +- result.full = dfixed_mul(pix_bw, fbpp);
3635 +- margin.full = dfixed_const(54);
3636 +- tmp.full = dfixed_const(64);
3637 +- margin.full = dfixed_div(margin, tmp);
3638 +- result.full = dfixed_div(result, margin);
3639 +-
3640 +- margin.full = dfixed_const(1006);
3641 +- tmp.full = dfixed_const(1000);
3642 +- margin.full = dfixed_div(margin, tmp);
3643 +- result.full = dfixed_mul(result, margin);
3644 +-
3645 +- result.full = dfixed_div(result, tmp);
3646 +- result.full = dfixed_ceil(result);
3647 +- res = dfixed_trunc(result);
3648 +- return res;
3649 ++ u64 kbps;
3650 ++ s64 peak_kbps;
3651 ++ u32 numerator;
3652 ++ u32 denominator;
3653 ++
3654 ++ kbps = clock * bpp;
3655 ++
3656 ++ /*
3657 ++ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3658 ++ * The unit of 54/64 MBytes/sec is an arbitrary unit chosen as a
3659 ++ * common multiplier so that the PBN comes out as an integer for
3660 ++ * all link rate/lane count combinations.
3661 ++ * calculate
3662 ++ * peak_kbps *= (1006/1000)
3663 ++ * peak_kbps *= (64/54)
3664 ++ * peak_kbps /= 8 (convert bits to bytes)
3665 ++ */
3666 ++
3667 ++ numerator = 64 * 1006;
3668 ++ denominator = 54 * 8 * 1000 * 1000;
3669 ++
3670 ++ kbps *= numerator;
3671 ++ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3672 ++
3673 ++ return drm_fixp2int_ceil(peak_kbps);
3674 + }
3675 + EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
3676 +
3677 +@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
3678 + {
3679 + int ret;
3680 + ret = drm_dp_calc_pbn_mode(154000, 30);
3681 +- if (ret != 689)
3682 ++ if (ret != 689) {
3683 ++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3684 ++ 154000, 30, 689, ret);
3685 + return -EINVAL;
3686 ++ }
3687 + ret = drm_dp_calc_pbn_mode(234000, 30);
3688 +- if (ret != 1047)
3689 ++ if (ret != 1047) {
3690 ++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3691 ++ 234000, 30, 1047, ret);
3692 ++ return -EINVAL;
3693 ++ }
3694 ++ ret = drm_dp_calc_pbn_mode(297000, 24);
3695 ++ if (ret != 1063) {
3696 ++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3697 ++ 297000, 24, 1063, ret);
3698 + return -EINVAL;
3699 ++ }
3700 + return 0;
3701 + }
3702 +
3703 +@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
3704 + mutex_unlock(&mgr->qlock);
3705 + }
3706 +
3707 ++static void drm_dp_free_mst_port(struct kref *kref)
3708 ++{
3709 ++ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
3710 ++ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
3711 ++ kfree(port);
3712 ++}
3713 ++
3714 + static void drm_dp_destroy_connector_work(struct work_struct *work)
3715 + {
3716 + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3717 +@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3718 + list_del(&port->next);
3719 + mutex_unlock(&mgr->destroy_connector_lock);
3720 +
3721 ++ kref_init(&port->kref);
3722 ++ INIT_LIST_HEAD(&port->next);
3723 ++
3724 + mgr->cbs->destroy_connector(mgr, port->connector);
3725 +
3726 + drm_dp_port_teardown_pdt(port, port->pdt);
3727 +
3728 +- if (!port->input && port->vcpi.vcpi > 0)
3729 +- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3730 +- kfree(port);
3731 ++ if (!port->input && port->vcpi.vcpi > 0) {
3732 ++ if (mgr->mst_state) {
3733 ++ drm_dp_mst_reset_vcpi_slots(mgr, port);
3734 ++ drm_dp_update_payload_part1(mgr);
3735 ++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3736 ++ }
3737 ++ }
3738 ++
3739 ++ kref_put(&port->kref, drm_dp_free_mst_port);
3740 + send_hotplug = true;
3741 + }
3742 + if (send_hotplug)
3743 +@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3744 + mutex_init(&mgr->qlock);
3745 + mutex_init(&mgr->payload_lock);
3746 + mutex_init(&mgr->destroy_connector_lock);
3747 +- INIT_LIST_HEAD(&mgr->tx_msg_upq);
3748 + INIT_LIST_HEAD(&mgr->tx_msg_downq);
3749 + INIT_LIST_HEAD(&mgr->destroy_connector_list);
3750 + INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
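For reference (not part of the patch), the rewritten drm_dp_calc_pbn_mode() above can be checked against the self-test values with a standalone integer-math sketch; calc_pbn is an illustrative name, and the rounding mirrors what drm_fixp2int_ceil() does for these inputs:

	#include <stdint.h>

	static int calc_pbn(int clock_khz, int bpp)
	{
		/* peak bandwidth in kbit/s */
		uint64_t kbps = (uint64_t)clock_khz * bpp;
		/* x 1006/1000 margin, x 64/54 unit, /8 bits -> bytes */
		uint64_t num = kbps * 64 * 1006;
		uint64_t den = 54ULL * 8 * 1000 * 1000;

		return (int)((num + den - 1) / den);	/* round up */
	}

	/* calc_pbn(154000, 30) == 689
	 * calc_pbn(234000, 30) == 1047
	 * calc_pbn(297000, 24) == 1063, matching test_calc_pbn_mode() */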
3751 +diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
3752 +index 607f493ae801..8090989185b2 100644
3753 +--- a/drivers/gpu/drm/drm_irq.c
3754 ++++ b/drivers/gpu/drm/drm_irq.c
3755 +@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
3756 + diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
3757 + }
3758 +
3759 ++ /*
3760 ++ * Are we within a drm_vblank_pre_modeset - drm_vblank_post_modeset
3761 ++ * interval? If so, vblank irqs keep running, but the hardware
3762 ++ * vblank counter is not trustworthy, as it might reset at some
3763 ++ * point in that interval, and neither are the vblank timestamps.
3764 ++ * In other words, this can result in a bogus diff >> 1, which must
3765 ++ * be avoided as it would cause random large forward jumps of the
3766 ++ * software vblank counter.
3767 ++ */
3768 ++ if (diff > 1 && (vblank->inmodeset & 0x2)) {
3769 ++ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
3770 ++ " due to pre-modeset.\n", pipe, diff);
3771 ++ diff = 1;
3772 ++ }
3773 ++
3774 ++ /*
3775 ++ * FIXME: Need to replace this hack with proper seqlocks.
3776 ++ *
3777 ++ * Restrict the bump of the software vblank counter to a safe maximum
3778 ++ * value of +1 whenever there is the possibility that concurrent readers
3779 ++ * of vblank timestamps could be active at the moment, as the current
3780 ++ * implementation of the timestamp caching and updating is not safe
3781 ++ * against concurrent readers for calls to store_vblank() with a bump
3782 ++ * of anything but +1. A bump != 1 would very likely return corrupted
3783 ++ * timestamps to userspace, because the same slot in the cache could
3784 ++ * be concurrently written by store_vblank() and read by one of those
3785 ++ * readers without the read-retry logic detecting the collision.
3786 ++ *
3787 ++ * Concurrent readers can exist when we are called from the
3788 ++ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
3789 ++ * irq callers. However, all those calls to us are happening with the
3790 ++ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
3791 ++ * can't increase while we are executing. Therefore a zero refcount at
3792 ++ * this point is safe for arbitrary counter bumps if we are called
3793 ++ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
3794 ++ * we must also accept a refcount of 1, as whenever we are called from
3795 ++ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
3796 ++ * we must let that one pass through in order to not lose vblank counts
3797 ++ * during vblank irq off - which would completely defeat the whole
3798 ++ * point of this routine.
3799 ++ *
3800 ++ * Whenever we are called from vblank irq, we have to assume concurrent
3801 ++ * readers exist or can show up any time during our execution, even if
3802 ++ * the refcount is currently zero, as vblank irqs are usually only
3803 ++ * enabled due to the presence of readers, and because when we are called
3804 ++ * from vblank irq we can't hold the vbl_lock to protect us from sudden
3805 ++ * bumps in vblank refcount. Therefore also restrict bumps to +1 when
3806 ++ * called from vblank irq.
3807 ++ */
3808 ++ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
3809 ++ (flags & DRM_CALLED_FROM_VBLIRQ))) {
3810 ++ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
3811 ++ "refcount %u, vblirq %u\n", pipe, diff,
3812 ++ atomic_read(&vblank->refcount),
3813 ++ (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
3814 ++ diff = 1;
3815 ++ }
3816 ++
3817 + DRM_DEBUG_VBL("updating vblank count on crtc %u:"
3818 + " current=%u, diff=%u, hw=%u hw_last=%u\n",
3819 + pipe, vblank->count, diff, cur_vblank, vblank->last);
3820 +@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
3821 + spin_lock_irqsave(&dev->event_lock, irqflags);
3822 +
3823 + spin_lock(&dev->vbl_lock);
3824 +- vblank_disable_and_save(dev, pipe);
3825 ++ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
3826 ++ pipe, vblank->enabled, vblank->inmodeset);
3827 ++
3828 ++ /* Avoid redundant vblank disables without previous drm_vblank_on(). */
3829 ++ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
3830 ++ vblank_disable_and_save(dev, pipe);
3831 ++
3832 + wake_up(&vblank->queue);
3833 +
3834 + /*
3835 +@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
3836 + return;
3837 +
3838 + spin_lock_irqsave(&dev->vbl_lock, irqflags);
3839 ++ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
3840 ++ pipe, vblank->enabled, vblank->inmodeset);
3841 ++
3842 + /* Drop our private "prevent drm_vblank_get" refcount */
3843 + if (vblank->inmodeset) {
3844 + atomic_dec(&vblank->refcount);
3845 +@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
3846 + * re-enable interrupts if there are users left, or the
3847 + * user wishes vblank interrupts to be enabled all the time.
3848 + */
3849 +- if (atomic_read(&vblank->refcount) != 0 ||
3850 +- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
3851 ++ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
3852 + WARN_ON(drm_vblank_enable(dev, pipe));
3853 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
3854 + }
3855 +@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
3856 + if (vblank->inmodeset) {
3857 + spin_lock_irqsave(&dev->vbl_lock, irqflags);
3858 + dev->vblank_disable_allowed = true;
3859 ++ drm_reset_vblank_timestamp(dev, pipe);
3860 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
3861 +
3862 + if (vblank->inmodeset & 0x2)
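As a reader's summary (not part of the patch), the two checks added to drm_update_vblank_count() above collapse to a single clamp condition:

	/* Clamp multi-frame bumps to 1 whenever the hw counter or the
	 * timestamp cache cannot be trusted: inside a pre/post-modeset
	 * interval, when concurrent timestamp readers may exist
	 * (refcount > 1), or when called from the vblank irq itself. */
	if (diff > 1 && ((vblank->inmodeset & 0x2) ||
			 atomic_read(&vblank->refcount) > 1 ||
			 (flags & DRM_CALLED_FROM_VBLIRQ)))
		diff = 1;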
3863 +diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
3864 +index c707fa6fca85..e3bdc8b1c32c 100644
3865 +--- a/drivers/gpu/drm/gma500/gem.c
3866 ++++ b/drivers/gpu/drm/gma500/gem.c
3867 +@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
3868 + return ret;
3869 + }
3870 + /* We have the initial and handle reference but need only one now */
3871 +- drm_gem_object_unreference(&r->gem);
3872 ++ drm_gem_object_unreference_unlocked(&r->gem);
3873 + *handlep = handle;
3874 + return 0;
3875 + }
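Background for the one-line gma500 fix above (not part of the patch): in this kernel era, drm_gem_object_unreference() requires dev->struct_mutex to be held, while the _unlocked variant acquires the lock itself; psb_gem_create() runs without the lock. A sketch of the two calling conventions:

	/* Caller already holds struct_mutex: the plain form is correct. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(&r->gem);
	mutex_unlock(&dev->struct_mutex);

	/* Caller does not hold struct_mutex (the psb_gem_create() case):
	 * the _unlocked variant takes and releases it internally. */
	drm_gem_object_unreference_unlocked(&r->gem);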
3876 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
3877 +index b4741d121a74..61fcb3b22297 100644
3878 +--- a/drivers/gpu/drm/i915/i915_dma.c
3879 ++++ b/drivers/gpu/drm/i915/i915_dma.c
3880 +@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
3881 + if (ret)
3882 + goto cleanup_gem_stolen;
3883 +
3884 ++ intel_setup_gmbus(dev);
3885 ++
3886 + /* Important: The output setup functions called by modeset_init need
3887 + * working irqs for e.g. gmbus and dp aux transfers. */
3888 + intel_modeset_init(dev);
3889 +@@ -451,6 +453,7 @@ cleanup_gem:
3890 + cleanup_irq:
3891 + intel_guc_ucode_fini(dev);
3892 + drm_irq_uninstall(dev);
3893 ++ intel_teardown_gmbus(dev);
3894 + cleanup_gem_stolen:
3895 + i915_gem_cleanup_stolen(dev);
3896 + cleanup_vga_switcheroo:
3897 +@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
3898 +
3899 + /* Try to make sure MCHBAR is enabled before poking at it */
3900 + intel_setup_mchbar(dev);
3901 +- intel_setup_gmbus(dev);
3902 + intel_opregion_setup(dev);
3903 +
3904 + i915_gem_load(dev);
3905 +@@ -1099,7 +1101,6 @@ out_gem_unload:
3906 + if (dev->pdev->msi_enabled)
3907 + pci_disable_msi(dev->pdev);
3908 +
3909 +- intel_teardown_gmbus(dev);
3910 + intel_teardown_mchbar(dev);
3911 + pm_qos_remove_request(&dev_priv->pm_qos);
3912 + destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
3913 +@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
3914 +
3915 + intel_csr_ucode_fini(dev);
3916 +
3917 +- intel_teardown_gmbus(dev);
3918 + intel_teardown_mchbar(dev);
3919 +
3920 + destroy_workqueue(dev_priv->hotplug.dp_wq);
3921 +diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
3922 +index 02ceb7a4b481..0433d25f9d23 100644
3923 +--- a/drivers/gpu/drm/i915/i915_gem_context.c
3924 ++++ b/drivers/gpu/drm/i915/i915_gem_context.c
3925 +@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
3926 + i915_gem_context_unreference(lctx);
3927 + ring->last_context = NULL;
3928 + }
3929 ++
3930 ++ /* Force the GPU state to be reinitialised on enabling */
3931 ++ if (ring->default_context)
3932 ++ ring->default_context->legacy_hw_ctx.initialized = false;
3933 + }
3934 + }
3935 +
3936 +@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
3937 + if (ret)
3938 + goto unpin_out;
3939 +
3940 +- if (!to->legacy_hw_ctx.initialized) {
3941 ++ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
3942 + hw_flags |= MI_RESTORE_INHIBIT;
3943 + /* NB: If we inhibit the restore, the context is not allowed to
3944 + * die because future work may end up depending on valid address
3945 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
3946 +index 0d228f909dcb..0f42a2782afc 100644
3947 +--- a/drivers/gpu/drm/i915/i915_irq.c
3948 ++++ b/drivers/gpu/drm/i915/i915_irq.c
3949 +@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
3950 + spt_irq_handler(dev, pch_iir);
3951 + else
3952 + cpt_irq_handler(dev, pch_iir);
3953 +- } else
3954 +- DRM_ERROR("The master control interrupt lied (SDE)!\n");
3955 +-
3956 ++ } else {
3957 ++ /*
3958 ++ * Like on previous PCH there seems to be something
3959 ++ * fishy going on with forwarding PCH interrupts.
3960 ++ */
3961 ++ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
3962 ++ }
3963 + }
3964 +
3965 + I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3966 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
3967 +index a6752a61d99f..7e6158b889da 100644
3968 +--- a/drivers/gpu/drm/i915/intel_ddi.c
3969 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
3970 +@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
3971 + DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
3972 + DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
3973 + wrpll_params.central_freq;
3974 +- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
3975 ++ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3976 ++ intel_encoder->type == INTEL_OUTPUT_DP_MST) {
3977 + switch (crtc_state->port_clock / 2) {
3978 + case 81000:
3979 + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
3980 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3981 +index 32cf97346978..f859a5b87ed4 100644
3982 +--- a/drivers/gpu/drm/i915/intel_display.c
3983 ++++ b/drivers/gpu/drm/i915/intel_display.c
3984 +@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
3985 + pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
3986 + }
3987 +
3988 +- /* Clamp bpp to 8 on screens without EDID 1.4 */
3989 +- if (connector->base.display_info.bpc == 0 && bpp > 24) {
3990 +- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
3991 +- bpp);
3992 +- pipe_config->pipe_bpp = 24;
3993 ++ /* Clamp bpp to default limit on screens without EDID 1.4 */
3994 ++ if (connector->base.display_info.bpc == 0) {
3995 ++ int type = connector->base.connector_type;
3996 ++ int clamp_bpp = 24;
3997 ++
3998 ++ /* Fall back to 18 bpp when DP sink capability is unknown. */
3999 ++ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
4000 ++ type == DRM_MODE_CONNECTOR_eDP)
4001 ++ clamp_bpp = 18;
4002 ++
4003 ++ if (bpp > clamp_bpp) {
4004 ++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
4005 ++ bpp, clamp_bpp);
4006 ++ pipe_config->pipe_bpp = clamp_bpp;
4007 ++ }
4008 + }
4009 + }
4010 +
4011 +@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
4012 + int max_scale = DRM_PLANE_HELPER_NO_SCALING;
4013 + bool can_position = false;
4014 +
4015 +- /* use scaler when colorkey is not required */
4016 +- if (INTEL_INFO(plane->dev)->gen >= 9 &&
4017 +- state->ckey.flags == I915_SET_COLORKEY_NONE) {
4018 +- min_scale = 1;
4019 +- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
4020 ++ if (INTEL_INFO(plane->dev)->gen >= 9) {
4021 ++ /* use scaler when colorkey is not required */
4022 ++ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
4023 ++ min_scale = 1;
4024 ++ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
4025 ++ }
4026 + can_position = true;
4027 + }
4028 +
4029 +@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
4030 + mutex_lock(&dev->struct_mutex);
4031 + intel_cleanup_gt_powersave(dev);
4032 + mutex_unlock(&dev->struct_mutex);
4033 ++
4034 ++ intel_teardown_gmbus(dev);
4035 + }
4036 +
4037 + /*
4038 +diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4039 +index a5e99ac305da..a8912aecc31f 100644
4040 +--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4041 ++++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4042 +@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
4043 + gpio = *data++;
4044 +
4045 + /* pull up/down */
4046 +- action = *data++;
4047 ++ action = *data++ & 1;
4048 ++
4049 ++ if (gpio >= ARRAY_SIZE(gtable)) {
4050 ++ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
4051 ++ goto out;
4052 ++ }
4053 +
4054 + function = gtable[gpio].function_reg;
4055 + pad = gtable[gpio].pad_reg;
4056 +@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
4057 + vlv_gpio_nc_write(dev_priv, pad, val);
4058 + mutex_unlock(&dev_priv->sb_lock);
4059 +
4060 ++out:
4061 + return data;
4062 + }
4063 +
4064 +diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
4065 +index b17785719598..d7a6437d9da2 100644
4066 +--- a/drivers/gpu/drm/i915/intel_hotplug.c
4067 ++++ b/drivers/gpu/drm/i915/intel_hotplug.c
4068 +@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
4069 + list_for_each_entry(connector, &mode_config->connector_list, head) {
4070 + struct intel_connector *intel_connector = to_intel_connector(connector);
4071 + connector->polled = intel_connector->polled;
4072 +- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4073 +- connector->polled = DRM_CONNECTOR_POLL_HPD;
4074 ++
4075 ++ /* MST has a dynamic intel_connector->encoder and its reprobing
4076 ++ * is all handled by the MST helpers. */
4077 + if (intel_connector->mst_port)
4078 ++ continue;
4079 ++
4080 ++ if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
4081 ++ intel_connector->encoder->hpd_pin > HPD_NONE)
4082 + connector->polled = DRM_CONNECTOR_POLL_HPD;
4083 + }
4084 +
4085 +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
4086 +index 8324654037b6..f3bee54c414f 100644
4087 +--- a/drivers/gpu/drm/i915/intel_i2c.c
4088 ++++ b/drivers/gpu/drm/i915/intel_i2c.c
4089 +@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
4090 + return 0;
4091 +
4092 + err:
4093 +- while (--pin) {
4094 ++ while (pin--) {
4095 + if (!intel_gmbus_is_valid_pin(dev_priv, pin))
4096 + continue;
4097 +
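The while (pin--) rewrite above fixes a classic unwind off-by-one. Sketched generically (illustrative names, not from the patch):

	/* After setup fails at index i, tear down indices i-1 .. 0.
	 * Post-decrement visits them all; the old pre-decrement form
	 * `while (--i)` stopped before index 0 and leaked it, and would
	 * wrap around if the failure happened at index 0 itself. */
	while (i--)
		cleanup(&objs[i]);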
4098 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
4099 +index 88e12bdf79e2..d69547a65dbb 100644
4100 +--- a/drivers/gpu/drm/i915/intel_lrc.c
4101 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
4102 +@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
4103 + if (flush_domains) {
4104 + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4105 + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4106 ++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4107 + flags |= PIPE_CONTROL_FLUSH_ENABLE;
4108 + }
4109 +
4110 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
4111 +index 9461a238f5d5..f6b2a814e629 100644
4112 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
4113 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
4114 +@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
4115 + if (flush_domains) {
4116 + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4117 + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4118 ++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4119 + flags |= PIPE_CONTROL_FLUSH_ENABLE;
4120 + }
4121 + if (invalidate_domains) {
4122 +@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
4123 + if (flush_domains) {
4124 + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4125 + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4126 ++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4127 + flags |= PIPE_CONTROL_FLUSH_ENABLE;
4128 + }
4129 + if (invalidate_domains) {
4130 +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
4131 +index 2e7cbe933533..2a5ed7460354 100644
4132 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
4133 ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
4134 +@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
4135 +
4136 + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
4137 +
4138 ++ mutex_lock(&drm->dev->mode_config.mutex);
4139 + if (plugged)
4140 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
4141 + else
4142 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
4143 ++ mutex_unlock(&drm->dev->mode_config.mutex);
4144 ++
4145 + drm_helper_hpd_irq_event(connector->dev);
4146 + }
4147 +
4148 +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
4149 +index 64c8d932d5f1..58a3f7cf2fb3 100644
4150 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c
4151 ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
4152 +@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
4153 + nv_crtc->lut.depth = 0;
4154 + }
4155 +
4156 +- /* Make sure that drm and hw vblank irqs get resumed if needed. */
4157 +- for (head = 0; head < dev->mode_config.num_crtc; head++)
4158 +- drm_vblank_on(dev, head);
4159 +-
4160 + /* This should ensure we don't hit a locking problem when someone
4161 + * wakes us up via a connector. We should never go into suspend
4162 + * while the display is on anyways.
4163 +@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
4164 +
4165 + drm_helper_resume_force_mode(dev);
4166 +
4167 ++ /* Make sure that drm and hw vblank irqs get resumed if needed. */
4168 ++ for (head = 0; head < dev->mode_config.num_crtc; head++)
4169 ++ drm_vblank_on(dev, head);
4170 ++
4171 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4172 + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
4173 +
4174 +diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
4175 +index 60e32c4e4e49..35ecc0d0458f 100644
4176 +--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
4177 ++++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
4178 +@@ -24,7 +24,7 @@
4179 + static int nouveau_platform_probe(struct platform_device *pdev)
4180 + {
4181 + const struct nvkm_device_tegra_func *func;
4182 +- struct nvkm_device *device;
4183 ++ struct nvkm_device *device = NULL;
4184 + struct drm_device *drm;
4185 + int ret;
4186 +
4187 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4188 +index 7f8a42721eb2..e7e581d6a8ff 100644
4189 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4190 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4191 +@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
4192 +
4193 + if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
4194 + return -ENOMEM;
4195 +- *pdevice = &tdev->device;
4196 ++
4197 + tdev->func = func;
4198 + tdev->pdev = pdev;
4199 + tdev->irq = -1;
4200 +
4201 + tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
4202 +- if (IS_ERR(tdev->vdd))
4203 +- return PTR_ERR(tdev->vdd);
4204 ++ if (IS_ERR(tdev->vdd)) {
4205 ++ ret = PTR_ERR(tdev->vdd);
4206 ++ goto free;
4207 ++ }
4208 +
4209 + tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
4210 +- if (IS_ERR(tdev->rst))
4211 +- return PTR_ERR(tdev->rst);
4212 ++ if (IS_ERR(tdev->rst)) {
4213 ++ ret = PTR_ERR(tdev->rst);
4214 ++ goto free;
4215 ++ }
4216 +
4217 + tdev->clk = devm_clk_get(&pdev->dev, "gpu");
4218 +- if (IS_ERR(tdev->clk))
4219 +- return PTR_ERR(tdev->clk);
4220 ++ if (IS_ERR(tdev->clk)) {
4221 ++ ret = PTR_ERR(tdev->clk);
4222 ++ goto free;
4223 ++ }
4224 +
4225 + tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
4226 +- if (IS_ERR(tdev->clk_pwr))
4227 +- return PTR_ERR(tdev->clk_pwr);
4228 ++ if (IS_ERR(tdev->clk_pwr)) {
4229 ++ ret = PTR_ERR(tdev->clk_pwr);
4230 ++ goto free;
4231 ++ }
4232 +
4233 + nvkm_device_tegra_probe_iommu(tdev);
4234 +
4235 + ret = nvkm_device_tegra_power_up(tdev);
4236 + if (ret)
4237 +- return ret;
4238 ++ goto remove;
4239 +
4240 + tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
4241 + ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
4242 +@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
4243 + cfg, dbg, detect, mmio, subdev_mask,
4244 + &tdev->device);
4245 + if (ret)
4246 +- return ret;
4247 ++ goto powerdown;
4248 ++
4249 ++ *pdevice = &tdev->device;
4250 +
4251 + return 0;
4252 ++
4253 ++powerdown:
4254 ++ nvkm_device_tegra_power_down(tdev);
4255 ++remove:
4256 ++ nvkm_device_tegra_remove_iommu(tdev);
4257 ++free:
4258 ++ kfree(tdev);
4259 ++ return ret;
4260 + }
4261 + #else
4262 + int
4263 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4264 +index 74e2f7c6c07e..9688970eca47 100644
4265 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4266 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4267 +@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
4268 + .outp = outp,
4269 + }, *dp = &_dp;
4270 + u32 datarate = 0;
4271 ++ u8 pwr;
4272 + int ret;
4273 +
4274 + if (!outp->base.info.location && disp->func->sor.magic)
4275 +@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
4276 + /* disable link interrupt handling during link training */
4277 + nvkm_notify_put(&outp->irq);
4278 +
4279 ++ /* ensure sink is not in a low-power state */
4280 ++ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
4281 ++ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
4282 ++ pwr &= ~DPCD_SC00_SET_POWER;
4283 ++ pwr |= DPCD_SC00_SET_POWER_D0;
4284 ++ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
4285 ++ }
4286 ++ }
4287 ++
4288 + /* enable down-spreading and execute pre-train script from vbios */
4289 + dp_link_train_init(dp, outp->dpcd[3] & 0x01);
4290 +
4291 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4292 +index 9596290329c7..6e10c5e0ef11 100644
4293 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4294 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4295 +@@ -71,5 +71,11 @@
4296 + #define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
4297 + #define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
4298 +
4299 ++/* DPCD Sink Control */
4300 ++#define DPCD_SC00 0x00600
4301 ++#define DPCD_SC00_SET_POWER 0x03
4302 ++#define DPCD_SC00_SET_POWER_D0 0x01
4303 ++#define DPCD_SC00_SET_POWER_D3 0x03
4304 ++
4305 + void nvkm_dp_train(struct work_struct *);
4306 + #endif
4307 +diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
4308 +index 2ae8577497ca..7c2e78201ead 100644
4309 +--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
4310 ++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
4311 +@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
4312 + cmd->command_size))
4313 + return -EFAULT;
4314 +
4315 +- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
4316 ++ reloc_info = kmalloc_array(cmd->relocs_num,
4317 ++ sizeof(struct qxl_reloc_info), GFP_KERNEL);
4318 + if (!reloc_info)
4319 + return -ENOMEM;
4320 +
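Context for the qxl change above (not part of the patch): relocs_num comes from userspace, so the open-coded multiplication could overflow and under-allocate. A sketch of the two forms:

	/* Unsafe: n * size may wrap, yielding a short buffer that the
	 * subsequent reloc processing then overruns. */
	buf = kmalloc(n * sizeof(struct qxl_reloc_info), GFP_KERNEL);

	/* Safe: kmalloc_array() returns NULL if n * size would overflow. */
	buf = kmalloc_array(n, sizeof(struct qxl_reloc_info), GFP_KERNEL);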
4321 +diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
4322 +index 752072771388..367a916f364e 100644
4323 +--- a/drivers/gpu/drm/radeon/dce6_afmt.c
4324 ++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
4325 +@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
4326 + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
4327 + */
4328 + if (ASIC_IS_DCE8(rdev)) {
4329 ++ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
4330 ++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
4331 ++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
4332 ++ div = radeon_audio_decode_dfs_div(div);
4333 ++
4334 ++ if (div)
4335 ++ clock = clock * 100 / div;
4336 ++
4337 + WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
4338 + WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
4339 + } else {
4340 +diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
4341 +index 9953356fe263..3cf04a2f44bb 100644
4342 +--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
4343 ++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
4344 +@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
4345 + * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
4346 + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
4347 + */
4348 ++ if (ASIC_IS_DCE41(rdev)) {
4349 ++ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
4350 ++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
4351 ++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
4352 ++ div = radeon_audio_decode_dfs_div(div);
4353 ++
4354 ++ if (div)
4355 ++ clock = 100 * clock / div;
4356 ++ }
4357 ++
4358 + WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
4359 + WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
4360 + }
4361 +diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
4362 +index 4aa5f755572b..13b6029d65cc 100644
4363 +--- a/drivers/gpu/drm/radeon/evergreend.h
4364 ++++ b/drivers/gpu/drm/radeon/evergreend.h
4365 +@@ -511,6 +511,11 @@
4366 + #define DCCG_AUDIO_DTO1_CNTL 0x05cc
4367 + # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
4368 +
4369 ++#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
4370 ++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
4371 ++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
4372 ++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
4373 ++
4374 + /* DCE 4.0 AFMT */
4375 + #define HDMI_CONTROL 0x7030
4376 + # define HDMI_KEEPOUT_MODE (1 << 0)
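Both DTO call sites above scale the clock as clock * 100 / div using the decoded DENTIST wallclock divider. The decode helper itself is added elsewhere in this patch; assuming the usual DENTIST encoding, where field values 8..63 encode dividers of 2.00..15.75 in 0.25 steps and the helper returns the divider times 100, a sketch would be:

	/* Assumed encoding, for illustration only. */
	static unsigned int decode_dfs_div(unsigned int div)
	{
		if (div >= 8 && div < 64)
			return (div - 8) * 25 + 200;	/* 8 -> 2.00, 9 -> 2.25, ... */
		return 0;				/* invalid field value */
	}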
4377 +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
4378 +index 87db64983ea8..5580568088bb 100644
4379 +--- a/drivers/gpu/drm/radeon/radeon.h
4380 ++++ b/drivers/gpu/drm/radeon/radeon.h
4381 +@@ -268,6 +268,7 @@ struct radeon_clock {
4382 + uint32_t current_dispclk;
4383 + uint32_t dp_extclk;
4384 + uint32_t max_pixel_clock;
4385 ++ uint32_t vco_freq;
4386 + };
4387 +
4388 + /*
4389 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
4390 +index 8f285244c839..de9a2ffcf5f7 100644
4391 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
4392 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
4393 +@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
4394 + }
4395 +
4396 + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
4397 +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
4398 ++ if (((dev->pdev->device == 0x9802) ||
4399 ++ (dev->pdev->device == 0x9805) ||
4400 ++ (dev->pdev->device == 0x9806)) &&
4401 + (dev->pdev->subsystem_vendor == 0x1734) &&
4402 + (dev->pdev->subsystem_device == 0x11bd)) {
4403 + if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
4404 +@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
4405 + }
4406 + }
4407 +
4408 +- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
4409 +- if ((dev->pdev->device == 0x9805) &&
4410 +- (dev->pdev->subsystem_vendor == 0x1734) &&
4411 +- (dev->pdev->subsystem_device == 0x11bd)) {
4412 +- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
4413 +- return false;
4414 +- }
4415 +-
4416 + return true;
4417 + }
4418 +
4419 +@@ -1112,6 +1106,31 @@ union firmware_info {
4420 + ATOM_FIRMWARE_INFO_V2_2 info_22;
4421 + };
4422 +
4423 ++union igp_info {
4424 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
4425 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
4426 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
4427 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
4428 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
4429 ++};
4430 ++
4431 ++static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
4432 ++{
4433 ++ struct radeon_mode_info *mode_info = &rdev->mode_info;
4434 ++ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
4435 ++ union igp_info *igp_info;
4436 ++ u8 frev, crev;
4437 ++ u16 data_offset;
4438 ++
4439 ++ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4440 ++ &frev, &crev, &data_offset)) {
4441 ++ igp_info = (union igp_info *)(mode_info->atom_context->bios +
4442 ++ data_offset);
4443 ++ rdev->clock.vco_freq =
4444 ++ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
4445 ++ }
4446 ++}
4447 ++
4448 + bool radeon_atom_get_clock_info(struct drm_device *dev)
4449 + {
4450 + struct radeon_device *rdev = dev->dev_private;
4451 +@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
4452 + rdev->mode_info.firmware_flags =
4453 + le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
4454 +
4455 ++ if (ASIC_IS_DCE8(rdev))
4456 ++ rdev->clock.vco_freq =
4457 ++ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
4458 ++ else if (ASIC_IS_DCE5(rdev))
4459 ++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
4460 ++ else if (ASIC_IS_DCE41(rdev))
4461 ++ radeon_atombios_get_dentist_vco_freq(rdev);
4462 ++ else
4463 ++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
4464 ++
4465 ++ if (rdev->clock.vco_freq == 0)
4466 ++ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
4467 ++
4468 + return true;
4469 + }
4470 +
4471 + return false;
4472 + }
4473 +
4474 +-union igp_info {
4475 +- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
4476 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
4477 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
4478 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
4479 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
4480 +-};
4481 +-
4482 + bool radeon_atombios_sideport_present(struct radeon_device *rdev)
4483 + {
4484 + struct radeon_mode_info *mode_info = &rdev->mode_info;
4485 +diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
4486 +index 2c02e99b5f95..b214663b370d 100644
4487 +--- a/drivers/gpu/drm/radeon/radeon_audio.c
4488 ++++ b/drivers/gpu/drm/radeon/radeon_audio.c
4489 +@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
4490 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
4491 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
4492 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
4493 +- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
4494 +- struct radeon_connector_atom_dig *dig_connector =
4495 +- radeon_connector->con_priv;
4496 +
4497 + if (!dig || !dig->afmt)
4498 + return;
4499 +@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
4500 + radeon_audio_write_speaker_allocation(encoder);
4501 + radeon_audio_write_sad_regs(encoder);
4502 + radeon_audio_write_latency_fields(encoder, mode);
4503 +- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
4504 +- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
4505 +- else
4506 +- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
4507 ++ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
4508 + radeon_audio_set_audio_packet(encoder);
4509 + radeon_audio_select_pin(encoder);
4510 +
4511 +@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
4512 + if (radeon_encoder->audio && radeon_encoder->audio->dpms)
4513 + radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
4514 + }
4515 ++
4516 ++unsigned int radeon_audio_decode_dfs_div(unsigned int div)
4517 ++{
4518 ++ if (div >= 8 && div < 64)
4519 ++ return (div - 8) * 25 + 200;
4520 ++ else if (div >= 64 && div < 96)
4521 ++ return (div - 64) * 50 + 1600;
4522 ++ else if (div >= 96 && div < 128)
4523 ++ return (div - 96) * 100 + 3200;
4524 ++ else
4525 ++ return 0;
4526 ++}
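
The decode helper above appears to return the DFS divider scaled by 100
(raw 8..63 maps to 2.00..15.75, and so on), which is why the DCE6/DCE8
callers compute clock = clock * 100 / div. A minimal standalone sketch of
that arithmetic (illustrative values only, not kernel code):

    #include <stdio.h>

    /* same piecewise mapping as radeon_audio_decode_dfs_div():
     * returns the divider multiplied by 100, or 0 if out of range */
    static unsigned int decode_dfs_div(unsigned int div)
    {
        if (div >= 8 && div < 64)
            return (div - 8) * 25 + 200;
        if (div >= 64 && div < 96)
            return (div - 64) * 50 + 1600;
        if (div >= 96 && div < 128)
            return (div - 96) * 100 + 3200;
        return 0;
    }

    int main(void)
    {
        unsigned int raw = 16;                   /* example register field */
        unsigned int div = decode_dfs_div(raw);  /* 400 -> divide by 4.00 */
        unsigned int clock = 54000;              /* example DTO input */

        if (div)
            clock = clock * 100 / div;           /* same step as the patch */
        printf("div=%u.%02u clock=%u\n", div / 100, div % 100, clock);
        return 0;
    }
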
4527 +diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
4528 +index 059cc3012062..5c70cceaa4a6 100644
4529 +--- a/drivers/gpu/drm/radeon/radeon_audio.h
4530 ++++ b/drivers/gpu/drm/radeon/radeon_audio.h
4531 +@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
4532 + void radeon_audio_mode_set(struct drm_encoder *encoder,
4533 + struct drm_display_mode *mode);
4534 + void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
4535 ++unsigned int radeon_audio_decode_dfs_div(unsigned int div);
4536 +
4537 + #endif
4538 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
4539 +index c566993a2ec3..d690df545b4d 100644
4540 +--- a/drivers/gpu/drm/radeon/radeon_device.c
4541 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
4542 +@@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
4543 + }
4544 +
4545 + drm_kms_helper_poll_enable(dev);
4546 ++ drm_helper_hpd_irq_event(dev);
4547 +
4548 + /* set the power state here in case we are a PX system or headless */
4549 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
4550 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
4551 +index 1eca0acac016..13767d21835f 100644
4552 +--- a/drivers/gpu/drm/radeon/radeon_display.c
4553 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
4554 +@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
4555 + struct drm_crtc *crtc = &radeon_crtc->base;
4556 + unsigned long flags;
4557 + int r;
4558 +- int vpos, hpos, stat, min_udelay;
4559 ++ int vpos, hpos, stat, min_udelay = 0;
4560 ++ unsigned repcnt = 4;
4561 + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
4562 +
4563 + down_read(&rdev->exclusive_lock);
4564 +@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
4565 + * In practice this won't execute very often unless on very fast
4566 + * machines because the time window for this to happen is very small.
4567 + */
4568 +- for (;;) {
4569 ++ while (radeon_crtc->enabled && repcnt--) {
4570 + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
4571 + * start in hpos, and to the "fudged earlier" vblank start in
4572 + * vpos.
4573 +@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
4574 + /* Sleep at least until estimated real start of hw vblank */
4575 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4576 + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
4577 ++ if (min_udelay > vblank->framedur_ns / 2000) {
4578 ++ /* Don't wait ridiculously long - something is wrong */
4579 ++ repcnt = 0;
4580 ++ break;
4581 ++ }
4582 + usleep_range(min_udelay, 2 * min_udelay);
4583 + spin_lock_irqsave(&crtc->dev->event_lock, flags);
4584 + };
4585 +
4586 ++ if (!repcnt)
4587 ++ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
4588 ++ "framedur %d, linedur %d, stat %d, vpos %d, "
4589 ++ "hpos %d\n", work->crtc_id, min_udelay,
4590 ++ vblank->framedur_ns / 1000,
4591 ++ vblank->linedur_ns / 1000, stat, vpos, hpos);
4592 ++
4593 + /* do the flip (mmio) */
4594 + radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
4595 +
4596 +diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
4597 +index 84d45633d28c..fb6ad143873f 100644
4598 +--- a/drivers/gpu/drm/radeon/radeon_object.c
4599 ++++ b/drivers/gpu/drm/radeon/radeon_object.c
4600 +@@ -33,6 +33,7 @@
4601 + #include <linux/slab.h>
4602 + #include <drm/drmP.h>
4603 + #include <drm/radeon_drm.h>
4604 ++#include <drm/drm_cache.h>
4605 + #include "radeon.h"
4606 + #include "radeon_trace.h"
4607 +
4608 +@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
4609 + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
4610 + "better performance thanks to write-combining\n");
4611 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
4612 ++#else
4613 ++ /* For architectures that don't support WC memory,
4614 ++ * mask out the WC flag from the BO
4615 ++ */
4616 ++ if (!drm_arch_can_wc_memory())
4617 ++ bo->flags &= ~RADEON_GEM_GTT_WC;
4618 + #endif
4619 +
4620 + radeon_ttm_placement_from_domain(bo, domain);
4621 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
4622 +index 59abebd6b5dc..2081a60d08fb 100644
4623 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
4624 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
4625 +@@ -1075,8 +1075,6 @@ force:
4626 +
4627 + /* update display watermarks based on new power state */
4628 + radeon_bandwidth_update(rdev);
4629 +- /* update displays */
4630 +- radeon_dpm_display_configuration_changed(rdev);
4631 +
4632 + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
4633 + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
4634 +@@ -1097,6 +1095,9 @@ force:
4635 +
4636 + radeon_dpm_post_set_power_state(rdev);
4637 +
4638 ++ /* update displays */
4639 ++ radeon_dpm_display_configuration_changed(rdev);
4640 ++
4641 + if (rdev->asic->dpm.force_performance_level) {
4642 + if (rdev->pm.dpm.thermal_active) {
4643 + enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
4644 +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
4645 +index c507896aca45..197b157b73d0 100644
4646 +--- a/drivers/gpu/drm/radeon/radeon_sa.c
4647 ++++ b/drivers/gpu/drm/radeon/radeon_sa.c
4648 +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
4649 + /* see if we can skip over some allocations */
4650 + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
4651 +
4652 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
4653 ++ radeon_fence_ref(fences[i]);
4654 ++
4655 + spin_unlock(&sa_manager->wq.lock);
4656 + r = radeon_fence_wait_any(rdev, fences, false);
4657 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
4658 ++ radeon_fence_unref(&fences[i]);
4659 + spin_lock(&sa_manager->wq.lock);
4660 + /* if we have nothing to wait for block */
4661 + if (r == -ENOENT) {
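
The radeon_sa hunk above pins each fence with radeon_fence_ref() before the
spinlock is dropped, so a concurrent final put cannot free a fence while
radeon_fence_wait_any() runs unlocked; the temporary references are dropped
once the wait returns. A toy single-threaded sketch of the pattern (the
kernel uses atomic refcounts; names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct fence { int refs; };    /* toy refcounted object */

    static void get(struct fence *f) { if (f) f->refs++; }

    static void put(struct fence *f)
    {
        if (f && --f->refs == 0) {
            printf("fence freed\n");
            free(f);
        }
    }

    int main(void)
    {
        struct fence *f = calloc(1, sizeof(*f));

        f->refs = 1;
        get(f);    /* pin before unlocking, as in the patch */
        put(f);    /* another path drops its reference meanwhile... */
        printf("still alive, refs=%d\n", f->refs);  /* ...object survives */
        put(f);    /* temporary reference released after the wait */
        return 0;
    }
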
4662 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
4663 +index e34307459e50..e06ac546a90f 100644
4664 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
4665 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
4666 +@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
4667 + 0, PAGE_SIZE,
4668 + PCI_DMA_BIDIRECTIONAL);
4669 + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
4670 +- while (--i) {
4671 ++ while (i--) {
4672 + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
4673 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
4674 + gtt->ttm.dma_address[i] = 0;
4675 +diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
4676 +index 48d97c040f49..3979632b9225 100644
4677 +--- a/drivers/gpu/drm/radeon/radeon_vm.c
4678 ++++ b/drivers/gpu/drm/radeon/radeon_vm.c
4679 +@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4680 +
4681 + if (soffset) {
4682 + /* make sure object fit at this offset */
4683 +- eoffset = soffset + size;
4684 ++ eoffset = soffset + size - 1;
4685 + if (soffset >= eoffset) {
4686 + r = -EINVAL;
4687 + goto error_unreserve;
4688 + }
4689 +
4690 + last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
4691 +- if (last_pfn > rdev->vm_manager.max_pfn) {
4692 +- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
4693 ++ if (last_pfn >= rdev->vm_manager.max_pfn) {
4694 ++ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
4695 + last_pfn, rdev->vm_manager.max_pfn);
4696 + r = -EINVAL;
4697 + goto error_unreserve;
4698 +@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4699 + eoffset /= RADEON_GPU_PAGE_SIZE;
4700 + if (soffset || eoffset) {
4701 + struct interval_tree_node *it;
4702 +- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
4703 ++ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
4704 + if (it && it != &bo_va->it) {
4705 + struct radeon_bo_va *tmp;
4706 + tmp = container_of(it, struct radeon_bo_va, it);
4707 +@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4708 + if (soffset || eoffset) {
4709 + spin_lock(&vm->status_lock);
4710 + bo_va->it.start = soffset;
4711 +- bo_va->it.last = eoffset - 1;
4712 ++ bo_va->it.last = eoffset;
4713 + list_add(&bo_va->vm_status, &vm->cleared);
4714 + spin_unlock(&vm->status_lock);
4715 + interval_tree_insert(&bo_va->it, &vm->va);
4716 +@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
4717 + unsigned i;
4718 +
4719 + start >>= radeon_vm_block_size;
4720 +- end >>= radeon_vm_block_size;
4721 ++ end = (end - 1) >> radeon_vm_block_size;
4722 +
4723 + for (i = start; i <= end; ++i)
4724 + radeon_bo_fence(vm->page_tables[i].bo, fence, true);
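
The radeon_vm hunks switch the mapping bounds from half-open to inclusive:
eoffset now names the last valid byte (soffset + size - 1), last_pfn the
last valid page (hence the ">= max_pfn" limit check), and the interval-tree
query and bo_va->it.last use the inclusive end directly instead of
subtracting one at each use. A small sketch of the inclusive-overlap test
this relies on (illustrative only):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* inclusive ranges [a_start, a_last] and [b_start, b_last] overlap iff */
    static bool ranges_overlap(uint64_t a_start, uint64_t a_last,
                               uint64_t b_start, uint64_t b_last)
    {
        return a_start <= b_last && b_start <= a_last;
    }

    int main(void)
    {
        uint64_t soffset = 0x1000, size = 0x1000;
        uint64_t last = soffset + size - 1;    /* 0x1fff, as in the patch */

        /* the range starting right after the mapping must not clash */
        printf("adjacent: %d\n",
               ranges_overlap(soffset, last, 0x2000, 0x2fff));  /* 0 */
        printf("self:     %d\n",
               ranges_overlap(soffset, last, soffset, last));   /* 1 */
        return 0;
    }
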
4725 +diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
4726 +index 4c4a7218a3bd..d1a7b58dd291 100644
4727 +--- a/drivers/gpu/drm/radeon/sid.h
4728 ++++ b/drivers/gpu/drm/radeon/sid.h
4729 +@@ -915,6 +915,11 @@
4730 + #define DCCG_AUDIO_DTO1_PHASE 0x05c0
4731 + #define DCCG_AUDIO_DTO1_MODULE 0x05c4
4732 +
4733 ++#define DENTIST_DISPCLK_CNTL 0x0490
4734 ++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
4735 ++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
4736 ++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
4737 ++
4738 + #define AFMT_AUDIO_SRC_CONTROL 0x713c
4739 + #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
4740 + /* AFMT_AUDIO_SRC_SELECT
4741 +diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
4742 +index 07a0d378e122..a01efe39a820 100644
4743 +--- a/drivers/gpu/drm/radeon/vce_v1_0.c
4744 ++++ b/drivers/gpu/drm/radeon/vce_v1_0.c
4745 +@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
4746 + return -EINVAL;
4747 + }
4748 +
4749 +- for (i = 0; i < sign->num; ++i) {
4750 +- if (sign->val[i].chip_id == chip_id)
4751 ++ for (i = 0; i < le32_to_cpu(sign->num); ++i) {
4752 ++ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
4753 + break;
4754 + }
4755 +
4756 +- if (i == sign->num)
4757 ++ if (i == le32_to_cpu(sign->num))
4758 + return -EINVAL;
4759 +
4760 + data += (256 - 64) / 4;
4761 +@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
4762 + data[1] = sign->val[i].nonce[1];
4763 + data[2] = sign->val[i].nonce[2];
4764 + data[3] = sign->val[i].nonce[3];
4765 +- data[4] = sign->len + 64;
4766 ++ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
4767 +
4768 + memset(&data[5], 0, 44);
4769 + memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
4770 +
4771 +- data += data[4] / 4;
4772 ++ data += le32_to_cpu(data[4]) / 4;
4773 + data[0] = sign->val[i].sigval[0];
4774 + data[1] = sign->val[i].sigval[1];
4775 + data[2] = sign->val[i].sigval[2];
4776 + data[3] = sign->val[i].sigval[3];
4777 +
4778 +- rdev->vce.keyselect = sign->val[i].keyselect;
4779 ++ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
4780 +
4781 + return 0;
4782 + }
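
The vce_v1_0 hunk treats the firmware signature fields as little-endian
on-disk data and converts them with le32_to_cpu() before use, so the loader
also behaves on big-endian hosts. A portable userspace equivalent of that
load (assuming a little-endian byte stream):

    #include <stdio.h>
    #include <stdint.h>

    /* read a little-endian u32 from a byte buffer on any host */
    static uint32_t le32_load(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        /* e.g. a 'num' field of 2 stored little-endian in a blob */
        const uint8_t blob[4] = { 0x02, 0x00, 0x00, 0x00 };

        printf("num = %u\n", le32_load(blob));  /* 2 regardless of host */
        return 0;
    }
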
4783 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4784 +index 6377e8151000..67cebb23c940 100644
4785 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4786 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4787 +@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
4788 + {
4789 + struct vmw_cmdbuf_man *man = header->man;
4790 +
4791 +- BUG_ON(!spin_is_locked(&man->lock));
4792 ++ lockdep_assert_held_once(&man->lock);
4793 +
4794 + if (header->inline_space) {
4795 + vmw_cmdbuf_header_inline_free(header);
4796 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4797 +index c49812b80dd0..24fb348a44e1 100644
4798 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4799 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4800 +@@ -25,6 +25,7 @@
4801 + *
4802 + **************************************************************************/
4803 + #include <linux/module.h>
4804 ++#include <linux/console.h>
4805 +
4806 + #include <drm/drmP.h>
4807 + #include "vmwgfx_drv.h"
4808 +@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4809 + static int __init vmwgfx_init(void)
4810 + {
4811 + int ret;
4812 ++
4813 ++#ifdef CONFIG_VGA_CONSOLE
4814 ++ if (vgacon_text_force())
4815 ++ return -EINVAL;
4816 ++#endif
4817 ++
4818 + ret = drm_pci_init(&driver, &vmw_pci_driver);
4819 + if (ret)
4820 + DRM_ERROR("Failed initializing DRM.\n");
4821 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4822 +index 9b4bb9e74d73..7c2e118a77b0 100644
4823 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4824 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4825 +@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
4826 + uint32_t format;
4827 + struct drm_vmw_size content_base_size;
4828 + struct vmw_resource *res;
4829 ++ unsigned int bytes_pp;
4830 + int ret;
4831 +
4832 + switch (mode_cmd->depth) {
4833 + case 32:
4834 + case 24:
4835 + format = SVGA3D_X8R8G8B8;
4836 ++ bytes_pp = 4;
4837 + break;
4838 +
4839 + case 16:
4840 + case 15:
4841 + format = SVGA3D_R5G6B5;
4842 ++ bytes_pp = 2;
4843 + break;
4844 +
4845 + case 8:
4846 + format = SVGA3D_P8;
4847 ++ bytes_pp = 1;
4848 + break;
4849 +
4850 + default:
4851 +@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
4852 + return -EINVAL;
4853 + }
4854 +
4855 +- content_base_size.width = mode_cmd->width;
4856 ++ content_base_size.width = mode_cmd->pitch / bytes_pp;
4857 + content_base_size.height = mode_cmd->height;
4858 + content_base_size.depth = 1;
4859 +
4860 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
4861 +index c4dcab048cb8..9098f13f2f44 100644
4862 +--- a/drivers/hv/channel.c
4863 ++++ b/drivers/hv/channel.c
4864 +@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
4865 + * on the ring. We will not signal if more data is
4866 + * to be placed.
4867 + *
4868 ++ * Based on the channel signal state, we will decide
4869 ++ * which signaling policy will be applied.
4870 ++ *
4871 + * If we cannot write to the ring-buffer; signal the host
4872 + * even if we may not have written anything. This is a rare
4873 + * enough condition that it should not matter.
4874 + */
4875 ++
4876 ++ if (channel->signal_policy)
4877 ++ signal = true;
4878 ++ else
4879 ++ kick_q = true;
4880 ++
4881 + if (((ret == 0) && kick_q && signal) || (ret))
4882 + vmbus_setevent(channel);
4883 +
4884 +@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
4885 + * on the ring. We will not signal if more data is
4886 + * to be placed.
4887 + *
4888 ++ * Based on the channel signal state, we will decide
4889 ++ * which signaling policy will be applied.
4890 ++ *
4891 + * If we cannot write to the ring-buffer; signal the host
4892 + * even if we may not have written anything. This is a rare
4893 + * enough condition that it should not matter.
4894 + */
4895 ++
4896 ++ if (channel->signal_policy)
4897 ++ signal = true;
4898 ++ else
4899 ++ kick_q = true;
4900 ++
4901 + if (((ret == 0) && kick_q && signal) || (ret))
4902 + vmbus_setevent(channel);
4903 +
4904 +diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
4905 +index f155b8380481..2b3105c8aed3 100644
4906 +--- a/drivers/hwmon/ads1015.c
4907 ++++ b/drivers/hwmon/ads1015.c
4908 +@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
4909 + struct ads1015_data *data = i2c_get_clientdata(client);
4910 + unsigned int pga = data->channel_data[channel].pga;
4911 + int fullscale = fullscale_table[pga];
4912 +- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
4913 ++ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
4914 +
4915 + return DIV_ROUND_CLOSEST(reg * fullscale, mask);
4916 + }
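
The ads1015 type change matters because reg can be negative: with an
unsigned divisor the usual arithmetic conversions turn the signed dividend
unsigned, so the rounding division produced garbage for readings below
zero. A standalone illustration of the promotion:

    #include <stdio.h>

    int main(void)
    {
        int reg = -100, fullscale = 2048;
        unsigned int umask = 0x7fff;   /* old type: promotes the division */
        int smask = 0x7fff;            /* fixed type: stays signed */

        printf("unsigned divisor: %u\n", (reg * fullscale) / umask);
        printf("signed divisor:   %d\n", (reg * fullscale) / smask); /* -6 */
        return 0;
    }
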
4917 +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
4918 +index c8487894b312..c43318d3416e 100644
4919 +--- a/drivers/hwmon/dell-smm-hwmon.c
4920 ++++ b/drivers/hwmon/dell-smm-hwmon.c
4921 +@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
4922 + static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
4923 + {
4924 + /*
4925 ++ * CPU fan speed going up and down on Dell Studio XPS 8000
4926 ++ * for unknown reasons.
4927 ++ */
4928 ++ .ident = "Dell Studio XPS 8000",
4929 ++ .matches = {
4930 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
4931 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
4932 ++ },
4933 ++ },
4934 ++ {
4935 ++ /*
4936 + * CPU fan speed going up and down on Dell Studio XPS 8100
4937 + * for unknown reasons.
4938 + */
4939 +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
4940 +index 82de3deeb18a..685568b1236d 100644
4941 +--- a/drivers/hwmon/gpio-fan.c
4942 ++++ b/drivers/hwmon/gpio-fan.c
4943 +@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
4944 + unsigned long *state)
4945 + {
4946 + struct gpio_fan_data *fan_data = cdev->devdata;
4947 +- int r;
4948 +
4949 + if (!fan_data)
4950 + return -EINVAL;
4951 +
4952 +- r = get_fan_speed_index(fan_data);
4953 +- if (r < 0)
4954 +- return r;
4955 +-
4956 +- *state = r;
4957 ++ *state = fan_data->speed_index;
4958 + return 0;
4959 + }
4960 +
4961 +diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
4962 +index e25492137d8b..93738dfbf631 100644
4963 +--- a/drivers/hwtracing/coresight/coresight.c
4964 ++++ b/drivers/hwtracing/coresight/coresight.c
4965 +@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
4966 + to_match = data;
4967 + i_csdev = to_coresight_device(dev);
4968 +
4969 +- if (!strcmp(to_match, dev_name(&i_csdev->dev)))
4970 ++ if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
4971 + return 1;
4972 +
4973 + return 0;
4974 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
4975 +index f62d69799a9c..27fa0cb09538 100644
4976 +--- a/drivers/i2c/busses/i2c-i801.c
4977 ++++ b/drivers/i2c/busses/i2c-i801.c
4978 +@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
4979 + switch (dev->device) {
4980 + case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
4981 + case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
4982 ++ case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
4983 ++ case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
4984 + case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
4985 + priv->features |= FEATURE_I2C_BLOCK_READ;
4986 + priv->features |= FEATURE_IRQ;
4987 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
4988 +index 0a26dd6d9b19..d6d2b3582910 100644
4989 +--- a/drivers/infiniband/core/cm.c
4990 ++++ b/drivers/infiniband/core/cm.c
4991 +@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
4992 + wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
4993 +
4994 + /* Check if the device started its remove_one */
4995 +- spin_lock_irq(&cm.lock);
4996 ++ spin_lock_irqsave(&cm.lock, flags);
4997 + if (!cm_dev->going_down)
4998 + queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
4999 + msecs_to_jiffies(wait_time));
5000 +- spin_unlock_irq(&cm.lock);
5001 ++ spin_unlock_irqrestore(&cm.lock, flags);
5002 +
5003 + cm_id_priv->timewait_info = NULL;
5004 + }
5005 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
5006 +index 2d762a2ecd81..17a15c56028c 100644
5007 +--- a/drivers/infiniband/core/cma.c
5008 ++++ b/drivers/infiniband/core/cma.c
5009 +@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
5010 + if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
5011 + return ret;
5012 +
5013 +- if (dev_type == ARPHRD_ETHER)
5014 ++ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
5015 + ndev = dev_get_by_index(&init_net, bound_if_index);
5016 +
5017 + ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
5018 +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
5019 +index cb78b1e9bcd9..f504ba73e5dc 100644
5020 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
5021 ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
5022 +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
5023 + error = l2t_send(tdev, skb, l2e);
5024 + if (error < 0)
5025 + kfree_skb(skb);
5026 +- return error;
5027 ++ return error < 0 ? error : 0;
5028 + }
5029 +
5030 + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
5031 +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
5032 + error = cxgb3_ofld_send(tdev, skb);
5033 + if (error < 0)
5034 + kfree_skb(skb);
5035 +- return error;
5036 ++ return error < 0 ? error : 0;
5037 + }
5038 +
5039 + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
5040 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
5041 +index 7e97cb55a6bf..c4e091528390 100644
5042 +--- a/drivers/infiniband/hw/mlx5/main.c
5043 ++++ b/drivers/infiniband/hw/mlx5/main.c
5044 +@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
5045 + props->max_sge = min(max_rq_sg, max_sq_sg);
5046 + props->max_sge_rd = props->max_sge;
5047 + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
5048 +- props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
5049 ++ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
5050 + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
5051 + props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
5052 + props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
5053 +diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
5054 +index 40f85bb3e0d3..3eff35c2d453 100644
5055 +--- a/drivers/infiniband/hw/qib/qib_qp.c
5056 ++++ b/drivers/infiniband/hw/qib/qib_qp.c
5057 +@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
5058 + 32768 /* 1E */
5059 + };
5060 +
5061 +-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
5062 ++static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
5063 ++ gfp_t gfp)
5064 + {
5065 +- unsigned long page = get_zeroed_page(GFP_KERNEL);
5066 ++ unsigned long page = get_zeroed_page(gfp);
5067 +
5068 + /*
5069 + * Free the page if someone raced with us installing it.
5070 +@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
5071 + * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
5072 + */
5073 + static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
5074 +- enum ib_qp_type type, u8 port)
5075 ++ enum ib_qp_type type, u8 port, gfp_t gfp)
5076 + {
5077 + u32 i, offset, max_scan, qpn;
5078 + struct qpn_map *map;
5079 +@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
5080 + max_scan = qpt->nmaps - !offset;
5081 + for (i = 0;;) {
5082 + if (unlikely(!map->page)) {
5083 +- get_map_page(qpt, map);
5084 ++ get_map_page(qpt, map, gfp);
5085 + if (unlikely(!map->page))
5086 + break;
5087 + }
5088 +@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5089 + size_t sz;
5090 + size_t sg_list_sz;
5091 + struct ib_qp *ret;
5092 ++ gfp_t gfp;
5093 ++
5094 +
5095 + if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
5096 + init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
5097 +- init_attr->create_flags) {
5098 +- ret = ERR_PTR(-EINVAL);
5099 +- goto bail;
5100 +- }
5101 ++ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
5102 ++ return ERR_PTR(-EINVAL);
5103 ++
5104 ++ /* GFP_NOIO is applicable in RC QPs only */
5105 ++ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
5106 ++ init_attr->qp_type != IB_QPT_RC)
5107 ++ return ERR_PTR(-EINVAL);
5108 ++
5109 ++ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
5110 ++ GFP_NOIO : GFP_KERNEL;
5111 +
5112 + /* Check receive queue parameters if no SRQ is specified. */
5113 + if (!init_attr->srq) {
5114 +@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5115 + sz = sizeof(struct qib_sge) *
5116 + init_attr->cap.max_send_sge +
5117 + sizeof(struct qib_swqe);
5118 +- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
5119 ++ swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
5120 ++ gfp, PAGE_KERNEL);
5121 + if (swq == NULL) {
5122 + ret = ERR_PTR(-ENOMEM);
5123 + goto bail;
5124 +@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5125 + } else if (init_attr->cap.max_recv_sge > 1)
5126 + sg_list_sz = sizeof(*qp->r_sg_list) *
5127 + (init_attr->cap.max_recv_sge - 1);
5128 +- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
5129 ++ qp = kzalloc(sz + sg_list_sz, gfp);
5130 + if (!qp) {
5131 + ret = ERR_PTR(-ENOMEM);
5132 + goto bail_swq;
5133 + }
5134 + RCU_INIT_POINTER(qp->next, NULL);
5135 +- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
5136 ++ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
5137 + if (!qp->s_hdr) {
5138 + ret = ERR_PTR(-ENOMEM);
5139 + goto bail_qp;
5140 +@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5141 + qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
5142 + sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
5143 + sizeof(struct qib_rwqe);
5144 +- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
5145 +- qp->r_rq.size * sz);
5146 ++ if (gfp != GFP_NOIO)
5147 ++ qp->r_rq.wq = vmalloc_user(
5148 ++ sizeof(struct qib_rwq) +
5149 ++ qp->r_rq.size * sz);
5150 ++ else
5151 ++ qp->r_rq.wq = __vmalloc(
5152 ++ sizeof(struct qib_rwq) +
5153 ++ qp->r_rq.size * sz,
5154 ++ gfp, PAGE_KERNEL);
5155 ++
5156 + if (!qp->r_rq.wq) {
5157 + ret = ERR_PTR(-ENOMEM);
5158 + goto bail_qp;
5159 +@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5160 + dev = to_idev(ibpd->device);
5161 + dd = dd_from_dev(dev);
5162 + err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
5163 +- init_attr->port_num);
5164 ++ init_attr->port_num, gfp);
5165 + if (err < 0) {
5166 + ret = ERR_PTR(err);
5167 + vfree(qp->r_rq.wq);
5168 +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5169 +index f8ea069a3eaf..b2fb5286dbd9 100644
5170 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5171 ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5172 +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5173 + struct qib_ibdev *dev = to_idev(ibqp->device);
5174 + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
5175 + struct qib_mcast *mcast = NULL;
5176 +- struct qib_mcast_qp *p, *tmp;
5177 ++ struct qib_mcast_qp *p, *tmp, *delp = NULL;
5178 + struct rb_node *n;
5179 + int last = 0;
5180 + int ret;
5181 +
5182 +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
5183 +- ret = -EINVAL;
5184 +- goto bail;
5185 +- }
5186 ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
5187 ++ return -EINVAL;
5188 +
5189 + spin_lock_irq(&ibp->lock);
5190 +
5191 +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5192 + while (1) {
5193 + if (n == NULL) {
5194 + spin_unlock_irq(&ibp->lock);
5195 +- ret = -EINVAL;
5196 +- goto bail;
5197 ++ return -EINVAL;
5198 + }
5199 +
5200 + mcast = rb_entry(n, struct qib_mcast, rb_node);
5201 +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5202 + */
5203 + list_del_rcu(&p->list);
5204 + mcast->n_attached--;
5205 ++ delp = p;
5206 +
5207 + /* If this was the last attached QP, remove the GID too. */
5208 + if (list_empty(&mcast->qp_list)) {
5209 +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5210 + }
5211 +
5212 + spin_unlock_irq(&ibp->lock);
5213 ++ /* QP not attached */
5214 ++ if (!delp)
5215 ++ return -EINVAL;
5216 ++ /*
5217 ++ * Wait for any list walkers to finish before freeing the
5218 ++ * list element.
5219 ++ */
5220 ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
5221 ++ qib_mcast_qp_free(delp);
5222 +
5223 +- if (p) {
5224 +- /*
5225 +- * Wait for any list walkers to finish before freeing the
5226 +- * list element.
5227 +- */
5228 +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
5229 +- qib_mcast_qp_free(p);
5230 +- }
5231 + if (last) {
5232 + atomic_dec(&mcast->refcount);
5233 + wait_event(mcast->wait, !atomic_read(&mcast->refcount));
5234 +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5235 + dev->n_mcast_grps_allocated--;
5236 + spin_unlock_irq(&dev->n_mcast_grps_lock);
5237 + }
5238 +-
5239 +- ret = 0;
5240 +-
5241 +-bail:
5242 +- return ret;
5243 ++ return 0;
5244 + }
5245 +
5246 + int qib_mcast_tree_empty(struct qib_ibport *ibp)
5247 +diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
5248 +index b12a5d58546f..37199b9b2cfa 100644
5249 +--- a/drivers/irqchip/irq-atmel-aic-common.c
5250 ++++ b/drivers/irqchip/irq-atmel-aic-common.c
5251 +@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
5252 + priority > AT91_AIC_IRQ_MAX_PRIORITY)
5253 + return -EINVAL;
5254 +
5255 +- *val &= AT91_AIC_PRIOR;
5256 ++ *val &= ~AT91_AIC_PRIOR;
5257 + *val |= priority;
5258 +
5259 + return 0;
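
The aic-common fix is the classic read-modify-write field update: the old
"*val &= AT91_AIC_PRIOR" kept only the stale priority bits and discarded
every other field, so the new priority was OR-ed over the old one; masking
with ~AT91_AIC_PRIOR clears the field first and preserves the rest of the
register. In miniature:

    #include <stdio.h>

    #define PRIOR_MASK 0x7u   /* low bits hold the priority field */

    int main(void)
    {
        unsigned int val = 0xa5;   /* other bits set, old priority 5 */
        unsigned int prio = 2;

        unsigned int wrong = (val & PRIOR_MASK) | prio;   /* 0x07 */
        unsigned int right = (val & ~PRIOR_MASK) | prio;  /* 0xa2 */

        printf("wrong=0x%02x right=0x%02x\n", wrong, right);
        return 0;
    }
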
5260 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
5261 +index e23d1d18f9d6..a159529f9d53 100644
5262 +--- a/drivers/irqchip/irq-gic-v3-its.c
5263 ++++ b/drivers/irqchip/irq-gic-v3-its.c
5264 +@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
5265 + lpi_set_config(d, true);
5266 + }
5267 +
5268 +-static void its_eoi_irq(struct irq_data *d)
5269 +-{
5270 +- gic_write_eoir(d->hwirq);
5271 +-}
5272 +-
5273 + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
5274 + bool force)
5275 + {
5276 +@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
5277 + .name = "ITS",
5278 + .irq_mask = its_mask_irq,
5279 + .irq_unmask = its_unmask_irq,
5280 +- .irq_eoi = its_eoi_irq,
5281 ++ .irq_eoi = irq_chip_eoi_parent,
5282 + .irq_set_affinity = its_set_affinity,
5283 + .irq_compose_msi_msg = its_irq_compose_msi_msg,
5284 + };
5285 +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
5286 +index c22e2d40cb30..efe50845939d 100644
5287 +--- a/drivers/irqchip/irq-mxs.c
5288 ++++ b/drivers/irqchip/irq-mxs.c
5289 +@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
5290 + writel(0, icoll_priv.intr + i);
5291 +
5292 + icoll_add_domain(np, ASM9260_NUM_IRQS);
5293 ++ set_handle_irq(icoll_handle_irq);
5294 +
5295 + return 0;
5296 + }
5297 +diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
5298 +index 8587d0f8d8c0..f6cb1b8bb981 100644
5299 +--- a/drivers/irqchip/irq-omap-intc.c
5300 ++++ b/drivers/irqchip/irq-omap-intc.c
5301 +@@ -47,6 +47,7 @@
5302 + #define INTC_ILR0 0x0100
5303 +
5304 + #define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
5305 ++#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
5306 + #define INTCPS_NR_ILR_REGS 128
5307 + #define INTCPS_NR_MIR_REGS 4
5308 +
5309 +@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
5310 + static asmlinkage void __exception_irq_entry
5311 + omap_intc_handle_irq(struct pt_regs *regs)
5312 + {
5313 ++ extern unsigned long irq_err_count;
5314 + u32 irqnr;
5315 +
5316 + irqnr = intc_readl(INTC_SIR);
5317 ++
5318 ++ /*
5319 ++ * A spurious IRQ can result if interrupt that triggered the
5320 ++ * sorting is no longer active during the sorting (10 INTC
5321 ++ * functional clock cycles after interrupt assertion). Or a
5322 ++ * change in interrupt mask affected the result during sorting
5323 ++ * time. There is no special handling required except ignoring
5324 ++ * the SIR register value just read and retrying.
5325 ++ * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
5326 ++ *
5327 ++ * Many times, a spurious interrupt situation has been fixed
5328 ++ * by adding a flush for the posted write acking the IRQ in
5329 ++ * the device driver. Typically, this is going be the device
5330 ++ * driver whose interrupt was handled just before the spurious
5331 ++ * IRQ occurred. Pay attention to those device drivers if you
5332 ++ * run into hitting the spurious IRQ condition below.
5333 ++ */
5334 ++ if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
5335 ++ pr_err_once("%s: spurious irq!\n", __func__);
5336 ++ irq_err_count++;
5337 ++ omap_ack_irq(NULL);
5338 ++ return;
5339 ++ }
5340 ++
5341 + irqnr &= ACTIVEIRQ_MASK;
5342 +- WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
5343 + handle_domain_irq(domain, irqnr, regs);
5344 + }
5345 +
5346 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
5347 +index 83392f856dfd..22b9e34ceb75 100644
5348 +--- a/drivers/md/bcache/btree.c
5349 ++++ b/drivers/md/bcache/btree.c
5350 +@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
5351 + do {
5352 + ret = btree_root(gc_root, c, &op, &writes, &stats);
5353 + closure_sync(&writes);
5354 ++ cond_resched();
5355 +
5356 + if (ret && ret != -EAGAIN)
5357 + pr_warn("gc failed!");
5358 +@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
5359 + rw_lock(true, b, b->level);
5360 +
5361 + if (b->key.ptr[0] != btree_ptr ||
5362 +- b->seq != seq + 1)
5363 ++ b->seq != seq + 1) {
5364 ++ op->lock = b->level;
5365 + goto out;
5366 ++ }
5367 + }
5368 +
5369 + SET_KEY_PTRS(check_key, 1);
5370 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5371 +index 679a093a3bf6..8d0ead98eb6e 100644
5372 +--- a/drivers/md/bcache/super.c
5373 ++++ b/drivers/md/bcache/super.c
5374 +@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
5375 + WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
5376 + sysfs_create_link(&c->kobj, &d->kobj, d->name),
5377 + "Couldn't create device <-> cache set symlinks");
5378 ++
5379 ++ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
5380 + }
5381 +
5382 + static void bcache_device_detach(struct bcache_device *d)
5383 +@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
5384 + buf[SB_LABEL_SIZE] = '\0';
5385 + env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
5386 +
5387 +- if (atomic_xchg(&dc->running, 1))
5388 ++ if (atomic_xchg(&dc->running, 1)) {
5389 ++ kfree(env[1]);
5390 ++ kfree(env[2]);
5391 + return;
5392 ++ }
5393 +
5394 + if (!d->c &&
5395 + BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
5396 +@@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
5397 + else
5398 + err = "device busy";
5399 + mutex_unlock(&bch_register_lock);
5400 ++ if (attr == &ksysfs_register_quiet)
5401 ++ goto out;
5402 + }
5403 + goto err;
5404 + }
5405 +@@ -1971,8 +1978,7 @@ out:
5406 + err_close:
5407 + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
5408 + err:
5409 +- if (attr != &ksysfs_register_quiet)
5410 +- pr_info("error opening %s: %s", path, err);
5411 ++ pr_info("error opening %s: %s", path, err);
5412 + ret = -EINVAL;
5413 + goto out;
5414 + }
5415 +@@ -2066,8 +2072,10 @@ static int __init bcache_init(void)
5416 + closure_debug_init();
5417 +
5418 + bcache_major = register_blkdev(0, "bcache");
5419 +- if (bcache_major < 0)
5420 ++ if (bcache_major < 0) {
5421 ++ unregister_reboot_notifier(&reboot);
5422 + return bcache_major;
5423 ++ }
5424 +
5425 + if (!(bcache_wq = create_workqueue("bcache")) ||
5426 + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
5427 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
5428 +index b23f88d9f18c..b9346cd9cda1 100644
5429 +--- a/drivers/md/bcache/writeback.c
5430 ++++ b/drivers/md/bcache/writeback.c
5431 +@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
5432 +
5433 + static bool dirty_pred(struct keybuf *buf, struct bkey *k)
5434 + {
5435 ++ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
5436 ++
5437 ++ BUG_ON(KEY_INODE(k) != dc->disk.id);
5438 ++
5439 + return KEY_DIRTY(k);
5440 + }
5441 +
5442 +@@ -372,11 +376,24 @@ next:
5443 + }
5444 + }
5445 +
5446 ++/*
5447 ++ * Returns true if we scanned the entire disk
5448 ++ */
5449 + static bool refill_dirty(struct cached_dev *dc)
5450 + {
5451 + struct keybuf *buf = &dc->writeback_keys;
5452 ++ struct bkey start = KEY(dc->disk.id, 0, 0);
5453 + struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
5454 +- bool searched_from_start = false;
5455 ++ struct bkey start_pos;
5456 ++
5457 ++ /*
5458 ++ * make sure keybuf pos is inside the range for this disk - at bringup
5459 ++ * we might not be attached yet so this disk's inode nr isn't
5460 ++ * initialized then
5461 ++ */
5462 ++ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
5463 ++ bkey_cmp(&buf->last_scanned, &end) > 0)
5464 ++ buf->last_scanned = start;
5465 +
5466 + if (dc->partial_stripes_expensive) {
5467 + refill_full_stripes(dc);
5468 +@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
5469 + return false;
5470 + }
5471 +
5472 +- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
5473 +- buf->last_scanned = KEY(dc->disk.id, 0, 0);
5474 +- searched_from_start = true;
5475 +- }
5476 +-
5477 ++ start_pos = buf->last_scanned;
5478 + bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
5479 +
5480 +- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
5481 ++ if (bkey_cmp(&buf->last_scanned, &end) < 0)
5482 ++ return false;
5483 ++
5484 ++ /*
5485 ++ * If we get to the end start scanning again from the beginning, and
5486 ++ * only scan up to where we initially started scanning from:
5487 ++ */
5488 ++ buf->last_scanned = start;
5489 ++ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
5490 ++
5491 ++ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
5492 + }
5493 +
5494 + static int bch_writeback_thread(void *arg)
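
refill_dirty() now walks this disk's keyspace as a ring: scan from
last_scanned to the end, and only if that pass completes, wrap to the start
and scan up to the original position; a full traversal is reported only
when the wrapped pass gets back to where it began. The loop shape,
generically (toy sketch, not bcache code):

    #include <stdio.h>
    #include <stdbool.h>

    /* two-pass ring scan over [0, n) starting at pos, with at most cap
     * visits; returns true only if the whole ring was examined */
    static bool ring_scan(int n, int pos, int cap)
    {
        int start_pos = pos, i;

        for (i = pos; i < n && cap; i++, cap--)
            printf(" %d", i);          /* first pass: pos .. end */
        if (i < n) {
            printf(" (buffer full)\n");
            return false;
        }
        for (i = 0; i < start_pos && cap; i++, cap--)
            printf(" %d", i);          /* wrapped pass: 0 .. pos */
        printf("\n");
        return i >= start_pos;
    }

    int main(void)
    {
        ring_scan(8, 5, 16);   /* 5 6 7 0 1 2 3 4 -> true */
        ring_scan(8, 5, 2);    /* 5 6 (buffer full) -> false */
        return 0;
    }
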
5495 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
5496 +index 0a9dab187b79..073a042aed24 100644
5497 +--- a/drivers/md/bcache/writeback.h
5498 ++++ b/drivers/md/bcache/writeback.h
5499 +@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
5500 +
5501 + static inline void bch_writeback_queue(struct cached_dev *dc)
5502 + {
5503 +- wake_up_process(dc->writeback_thread);
5504 ++ if (!IS_ERR_OR_NULL(dc->writeback_thread))
5505 ++ wake_up_process(dc->writeback_thread);
5506 + }
5507 +
5508 + static inline void bch_writeback_add(struct cached_dev *dc)
5509 +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
5510 +index fae34e7a0b1e..12b5216c2cfe 100644
5511 +--- a/drivers/md/dm-exception-store.h
5512 ++++ b/drivers/md/dm-exception-store.h
5513 +@@ -69,7 +69,7 @@ struct dm_exception_store_type {
5514 + * Update the metadata with this exception.
5515 + */
5516 + void (*commit_exception) (struct dm_exception_store *store,
5517 +- struct dm_exception *e,
5518 ++ struct dm_exception *e, int valid,
5519 + void (*callback) (void *, int success),
5520 + void *callback_context);
5521 +
5522 +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
5523 +index 3164b8bce294..4d3909393f2c 100644
5524 +--- a/drivers/md/dm-snap-persistent.c
5525 ++++ b/drivers/md/dm-snap-persistent.c
5526 +@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
5527 + }
5528 +
5529 + static void persistent_commit_exception(struct dm_exception_store *store,
5530 +- struct dm_exception *e,
5531 ++ struct dm_exception *e, int valid,
5532 + void (*callback) (void *, int success),
5533 + void *callback_context)
5534 + {
5535 +@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
5536 + struct core_exception ce;
5537 + struct commit_callback *cb;
5538 +
5539 ++ if (!valid)
5540 ++ ps->valid = 0;
5541 ++
5542 + ce.old_chunk = e->old_chunk;
5543 + ce.new_chunk = e->new_chunk;
5544 + write_exception(ps, ps->current_committed++, &ce);
5545 +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
5546 +index 9b7c8c8049d6..4d50a12cf00c 100644
5547 +--- a/drivers/md/dm-snap-transient.c
5548 ++++ b/drivers/md/dm-snap-transient.c
5549 +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
5550 + }
5551 +
5552 + static void transient_commit_exception(struct dm_exception_store *store,
5553 +- struct dm_exception *e,
5554 ++ struct dm_exception *e, int valid,
5555 + void (*callback) (void *, int success),
5556 + void *callback_context)
5557 + {
5558 + /* Just succeed */
5559 +- callback(callback_context, 1);
5560 ++ callback(callback_context, valid);
5561 + }
5562 +
5563 + static void transient_usage(struct dm_exception_store *store,
5564 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
5565 +index c06b74e91cd6..61f184ad081c 100644
5566 +--- a/drivers/md/dm-snap.c
5567 ++++ b/drivers/md/dm-snap.c
5568 +@@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
5569 + dm_table_event(s->ti->table);
5570 + }
5571 +
5572 +-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
5573 ++static void pending_complete(void *context, int success)
5574 + {
5575 ++ struct dm_snap_pending_exception *pe = context;
5576 + struct dm_exception *e;
5577 + struct dm_snapshot *s = pe->snap;
5578 + struct bio *origin_bios = NULL;
5579 +@@ -1509,24 +1510,13 @@ out:
5580 + free_pending_exception(pe);
5581 + }
5582 +
5583 +-static void commit_callback(void *context, int success)
5584 +-{
5585 +- struct dm_snap_pending_exception *pe = context;
5586 +-
5587 +- pending_complete(pe, success);
5588 +-}
5589 +-
5590 + static void complete_exception(struct dm_snap_pending_exception *pe)
5591 + {
5592 + struct dm_snapshot *s = pe->snap;
5593 +
5594 +- if (unlikely(pe->copy_error))
5595 +- pending_complete(pe, 0);
5596 +-
5597 +- else
5598 +- /* Update the metadata if we are persistent */
5599 +- s->store->type->commit_exception(s->store, &pe->e,
5600 +- commit_callback, pe);
5601 ++ /* Update the metadata if we are persistent */
5602 ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
5603 ++ pending_complete, pe);
5604 + }
5605 +
5606 + /*
5607 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
5608 +index 63903a5a5d9e..a1cc797fe88f 100644
5609 +--- a/drivers/md/dm-thin.c
5610 ++++ b/drivers/md/dm-thin.c
5611 +@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
5612 + struct pool_c *pt = ti->private;
5613 + struct pool *pool = pt->pool;
5614 +
5615 +- cancel_delayed_work(&pool->waker);
5616 +- cancel_delayed_work(&pool->no_space_timeout);
5617 ++ cancel_delayed_work_sync(&pool->waker);
5618 ++ cancel_delayed_work_sync(&pool->no_space_timeout);
5619 + flush_workqueue(pool->wq);
5620 + (void) commit(pool);
5621 + }
5622 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
5623 +index 5df40480228b..dd834927bc66 100644
5624 +--- a/drivers/md/dm.c
5625 ++++ b/drivers/md/dm.c
5626 +@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
5627 +
5628 + if (clone)
5629 + free_rq_clone(clone);
5630 ++ else if (!tio->md->queue->mq_ops)
5631 ++ free_rq_tio(tio);
5632 + }
5633 +
5634 + /*
5635 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
5636 +index fca6dbcf9a47..7e44005595c1 100644
5637 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
5638 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
5639 +@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
5640 +
5641 + static int brb_pop(struct bop_ring_buffer *brb)
5642 + {
5643 +- struct block_op *bop;
5644 +-
5645 + if (brb_empty(brb))
5646 + return -ENODATA;
5647 +
5648 +- bop = brb->bops + brb->begin;
5649 + brb->begin = brb_next(brb, brb->begin);
5650 +
5651 + return 0;
5652 +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
5653 +index c38ef1a72b4a..e2a3833170e3 100644
5654 +--- a/drivers/media/dvb-core/dvb_frontend.c
5655 ++++ b/drivers/media/dvb-core/dvb_frontend.c
5656 +@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
5657 + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
5658 + __func__, c->delivery_system, fe->ops.info.type);
5659 +
5660 +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
5661 +- * do it, it is done for it. */
5662 +- info->caps |= FE_CAN_INVERSION_AUTO;
5663 ++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
5664 ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
5665 ++ info->caps |= FE_CAN_INVERSION_AUTO;
5666 + err = 0;
5667 + break;
5668 + }
5669 +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
5670 +index 0e209b56c76c..c6abeb4fba9d 100644
5671 +--- a/drivers/media/dvb-frontends/tda1004x.c
5672 ++++ b/drivers/media/dvb-frontends/tda1004x.c
5673 +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
5674 + {
5675 + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
5676 + struct tda1004x_state* state = fe->demodulator_priv;
5677 ++ int status;
5678 +
5679 + dprintk("%s\n", __func__);
5680 +
5681 ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
5682 ++ if (status == -1)
5683 ++ return -EIO;
5684 ++
5685 ++ /* Only update the properties cache if device is locked */
5686 ++ if (!(status & 8))
5687 ++ return 0;
5688 ++
5689 + // inversion status
5690 + fe_params->inversion = INVERSION_OFF;
5691 + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
5692 +diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
5693 +index 7830aef3db45..40f77685cc4a 100644
5694 +--- a/drivers/media/rc/sunxi-cir.c
5695 ++++ b/drivers/media/rc/sunxi-cir.c
5696 +@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
5697 + if (!ir)
5698 + return -ENOMEM;
5699 +
5700 ++ spin_lock_init(&ir->ir_lock);
5701 ++
5702 + if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
5703 + ir->fifo_size = 64;
5704 + else
5705 +diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
5706 +index ce157edd45fa..0e1ca2b00e61 100644
5707 +--- a/drivers/media/tuners/si2157.c
5708 ++++ b/drivers/media/tuners/si2157.c
5709 +@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
5710 + len = fw->data[fw->size - remaining];
5711 + if (len > SI2157_ARGLEN) {
5712 + dev_err(&client->dev, "Bad firmware length\n");
5713 ++ ret = -EINVAL;
5714 + goto err_release_firmware;
5715 + }
5716 + memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
5717 +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
5718 +index 146071b8e116..bfff1d1c70ab 100644
5719 +--- a/drivers/media/usb/gspca/ov534.c
5720 ++++ b/drivers/media/usb/gspca/ov534.c
5721 +@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
5722 + struct v4l2_fract *tpf = &cp->timeperframe;
5723 + struct sd *sd = (struct sd *) gspca_dev;
5724 +
5725 +- /* Set requested framerate */
5726 +- sd->frame_rate = tpf->denominator / tpf->numerator;
5727 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
5728 ++ /* Set default framerate */
5729 ++ sd->frame_rate = 30;
5730 ++ else
5731 ++ /* Set requested framerate */
5732 ++ sd->frame_rate = tpf->denominator / tpf->numerator;
5733 ++
5734 + if (gspca_dev->streaming)
5735 + set_frame_rate(gspca_dev);
5736 +
5737 +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
5738 +index c70ff406b07a..c028a5c2438e 100644
5739 +--- a/drivers/media/usb/gspca/topro.c
5740 ++++ b/drivers/media/usb/gspca/topro.c
5741 +@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
5742 + struct v4l2_fract *tpf = &cp->timeperframe;
5743 + int fr, i;
5744 +
5745 +- sd->framerate = tpf->denominator / tpf->numerator;
5746 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
5747 ++ sd->framerate = 30;
5748 ++ else
5749 ++ sd->framerate = tpf->denominator / tpf->numerator;
5750 ++
5751 + if (gspca_dev->streaming)
5752 + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
5753 +
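
Both gspca hunks guard the same user-controlled division: VIDIOC_S_PARM can
hand in a timeperframe with a zero numerator or denominator, and dividing
by it would crash the driver; a zero field now falls back to the 30 fps
default. The guard in isolation:

    #include <stdio.h>

    static int frame_rate_from_tpf(unsigned int num, unsigned int den)
    {
        /* timeperframe is num/den seconds per frame; a zero field from
         * userspace must never reach the division below */
        if (num == 0 || den == 0)
            return 30;         /* default framerate, as in the patch */
        return den / num;      /* e.g. 1/30 s per frame -> 30 fps */
    }

    int main(void)
    {
        printf("%d\n", frame_rate_from_tpf(1, 30));  /* 30 */
        printf("%d\n", frame_rate_from_tpf(0, 30));  /* 30, guarded */
        return 0;
    }
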
5754 +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
5755 +index 27b4b9e7c0c2..502984c724ff 100644
5756 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
5757 ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
5758 +@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
5759 + return res | POLLERR;
5760 +
5761 + /*
5762 +- * For output streams you can write as long as there are fewer buffers
5763 +- * queued than there are buffers available.
5764 ++ * For output streams you can call write() as long as there are fewer
5765 ++ * buffers queued than there are buffers available.
5766 + */
5767 +- if (q->is_output && q->queued_count < q->num_buffers)
5768 ++ if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
5769 + return res | POLLOUT | POLLWRNORM;
5770 +
5771 + if (list_empty(&q->done_list)) {
5772 +diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
5773 +index c241e15cacb1..cbd4331fb45c 100644
5774 +--- a/drivers/misc/cxl/vphb.c
5775 ++++ b/drivers/misc/cxl/vphb.c
5776 +@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
5777 + mask <<= shift;
5778 + val <<= shift;
5779 +
5780 +- v = (in_le32(ioaddr) & ~mask) || (val & mask);
5781 ++ v = (in_le32(ioaddr) & ~mask) | (val & mask);
5782 +
5783 + out_le32(ioaddr, v);
5784 + return PCIBIOS_SUCCESSFUL;
5785 +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
5786 +index b2f2486b3d75..80f9afcb1382 100644
5787 +--- a/drivers/misc/mei/main.c
5788 ++++ b/drivers/misc/mei/main.c
5789 +@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
5790 + {
5791 + struct mei_cl *cl = file->private_data;
5792 +
5793 +- return mei_cl_notify_request(cl, file, request);
5794 ++ if (request != MEI_HBM_NOTIFICATION_START &&
5795 ++ request != MEI_HBM_NOTIFICATION_STOP)
5796 ++ return -EINVAL;
5797 ++
5798 ++ return mei_cl_notify_request(cl, file, (u8)request);
5799 + }
5800 +
5801 + /**
5802 +@@ -657,7 +661,9 @@ out:
5803 + * @file: pointer to file structure
5804 + * @band: band bitmap
5805 + *
5806 +- * Return: poll mask
5807 ++ * Return: negative on error,
5808 ++ * 0 if it made no changes,
5809 ++ * and positive if a process was added or deleted
5810 + */
5811 + static int mei_fasync(int fd, struct file *file, int band)
5812 + {
5813 +@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
5814 + struct mei_cl *cl = file->private_data;
5815 +
5816 + if (!mei_cl_is_connected(cl))
5817 +- return POLLERR;
5818 ++ return -ENODEV;
5819 +
5820 + return fasync_helper(fd, file, band, &cl->ev_async);
5821 + }
5822 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
5823 +index 3a9a79ec4343..3d5087b03999 100644
5824 +--- a/drivers/mmc/core/mmc.c
5825 ++++ b/drivers/mmc/core/mmc.c
5826 +@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
5827 + mmc_set_clock(host, max_dtr);
5828 +
5829 + /* Switch card to HS mode */
5830 +- val = EXT_CSD_TIMING_HS |
5831 +- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
5832 ++ val = EXT_CSD_TIMING_HS;
5833 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
5834 + EXT_CSD_HS_TIMING, val,
5835 + card->ext_csd.generic_cmd6_time,
5836 +@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
5837 + mmc_set_clock(host, max_dtr);
5838 +
5839 + /* Switch HS400 to HS DDR */
5840 +- val = EXT_CSD_TIMING_HS |
5841 +- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
5842 ++ val = EXT_CSD_TIMING_HS;
5843 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
5844 + val, card->ext_csd.generic_cmd6_time,
5845 + true, send_status, true);
5846 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
5847 +index 141eaa923e18..967535d76e34 100644
5848 +--- a/drivers/mmc/core/sd.c
5849 ++++ b/drivers/mmc/core/sd.c
5850 +@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
5851 + * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
5852 + */
5853 + if (!mmc_host_is_spi(card->host) &&
5854 +- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
5855 +- card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
5856 +- card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
5857 ++ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
5858 ++ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
5859 ++ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
5860 + err = mmc_execute_tuning(card);
5861 +
5862 + /*
5863 +@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
5864 + * difference between v3.00 and 3.01 spec means that CMD19
5865 + * tuning is also available for DDR50 mode.
5866 + */
5867 +- if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
5868 ++ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
5869 + pr_warn("%s: ddr50 tuning failed\n",
5870 + mmc_hostname(card->host));
5871 + err = 0;
5872 +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
5873 +index 16d838e6d623..467b3cf80c44 100644
5874 +--- a/drivers/mmc/core/sdio.c
5875 ++++ b/drivers/mmc/core/sdio.c
5876 +@@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
5877 + * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
5878 + */
5879 + if (!mmc_host_is_spi(card->host) &&
5880 +- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
5881 +- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
5882 ++ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
5883 ++ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
5884 + err = mmc_execute_tuning(card);
5885 + out:
5886 + return err;
5887 +@@ -630,7 +630,7 @@ try_again:
5888 + */
5889 + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
5890 + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
5891 +- ocr);
5892 ++ ocr_card);
5893 + if (err == -EAGAIN) {
5894 + sdio_reset(host);
5895 + mmc_go_idle(host);
5896 +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
5897 +index fb266745f824..acece3299756 100644
5898 +--- a/drivers/mmc/host/mmci.c
5899 ++++ b/drivers/mmc/host/mmci.c
5900 +@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
5901 + {
5902 + .id = 0x00280180,
5903 + .mask = 0x00ffffff,
5904 +- .data = &variant_u300,
5905 ++ .data = &variant_nomadik,
5906 + },
5907 + {
5908 + .id = 0x00480180,
5909 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
5910 +index ce08896b9d69..28a057fae0a1 100644
5911 +--- a/drivers/mmc/host/pxamci.c
5912 ++++ b/drivers/mmc/host/pxamci.c
5913 +@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
5914 + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
5915 + goto out;
5916 + } else {
5917 +- mmc->caps |= host->pdata->gpio_card_ro_invert ?
5918 ++ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
5919 + 0 : MMC_CAP2_RO_ACTIVE_HIGH;
5920 + }
5921 +
5922 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
5923 +index f6047fc94062..a5cda926d38e 100644
5924 +--- a/drivers/mmc/host/sdhci-acpi.c
5925 ++++ b/drivers/mmc/host/sdhci-acpi.c
5926 +@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
5927 + .ops = &sdhci_acpi_ops_int,
5928 + };
5929 +
5930 ++static int bxt_get_cd(struct mmc_host *mmc)
5931 ++{
5932 ++ int gpio_cd = mmc_gpio_get_cd(mmc);
5933 ++ struct sdhci_host *host = mmc_priv(mmc);
5934 ++ unsigned long flags;
5935 ++ int ret = 0;
5936 ++
5937 ++ if (!gpio_cd)
5938 ++ return 0;
5939 ++
5940 ++ pm_runtime_get_sync(mmc->parent);
5941 ++
5942 ++ spin_lock_irqsave(&host->lock, flags);
5943 ++
5944 ++ if (host->flags & SDHCI_DEVICE_DEAD)
5945 ++ goto out;
5946 ++
5947 ++ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
5948 ++out:
5949 ++ spin_unlock_irqrestore(&host->lock, flags);
5950 ++
5951 ++ pm_runtime_mark_last_busy(mmc->parent);
5952 ++ pm_runtime_put_autosuspend(mmc->parent);
5953 ++
5954 ++ return ret;
5955 ++}
5956 ++
5957 + static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
5958 + const char *hid, const char *uid)
5959 + {
5960 +@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
5961 +
5962 + /* Platform specific code during sd probe slot goes here */
5963 +
5964 ++ if (hid && !strcmp(hid, "80865ACA"))
5965 ++ host->mmc_host_ops.get_cd = bxt_get_cd;
5966 ++
5967 + return 0;
5968 + }
5969 +
5970 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
5971 +index cf7ad458b4f4..45ee07d3a761 100644
5972 +--- a/drivers/mmc/host/sdhci-pci-core.c
5973 ++++ b/drivers/mmc/host/sdhci-pci-core.c
5974 +@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
5975 + if (sdhci_pci_spt_drive_strength > 0)
5976 + drive_strength = sdhci_pci_spt_drive_strength & 0xf;
5977 + else
5978 +- drive_strength = 1; /* 33-ohm */
5979 ++ drive_strength = 0; /* Default 50-ohm */
5980 +
5981 + if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
5982 + drive_strength = 0; /* Default 50-ohm */
5983 +@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
5984 + sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
5985 + }
5986 +
5987 ++static int bxt_get_cd(struct mmc_host *mmc)
5988 ++{
5989 ++ int gpio_cd = mmc_gpio_get_cd(mmc);
5990 ++ struct sdhci_host *host = mmc_priv(mmc);
5991 ++ unsigned long flags;
5992 ++ int ret = 0;
5993 ++
5994 ++ if (!gpio_cd)
5995 ++ return 0;
5996 ++
5997 ++ pm_runtime_get_sync(mmc->parent);
5998 ++
5999 ++ spin_lock_irqsave(&host->lock, flags);
6000 ++
6001 ++ if (host->flags & SDHCI_DEVICE_DEAD)
6002 ++ goto out;
6003 ++
6004 ++ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
6005 ++out:
6006 ++ spin_unlock_irqrestore(&host->lock, flags);
6007 ++
6008 ++ pm_runtime_mark_last_busy(mmc->parent);
6009 ++ pm_runtime_put_autosuspend(mmc->parent);
6010 ++
6011 ++ return ret;
6012 ++}
6013 ++
6014 + static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
6015 + {
6016 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
6017 +@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
6018 + slot->cd_con_id = NULL;
6019 + slot->cd_idx = 0;
6020 + slot->cd_override_level = true;
6021 ++ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
6022 ++ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
6023 ++ slot->host->mmc_host_ops.get_cd = bxt_get_cd;
6024 ++
6025 + return 0;
6026 + }
6027 +
6028 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
6029 +index b48565ed5616..8814eb6b83bf 100644
6030 +--- a/drivers/mmc/host/sdhci.c
6031 ++++ b/drivers/mmc/host/sdhci.c
6032 +@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
6033 +
6034 + BUG_ON(len > 65536);
6035 +
6036 +- /* tran, valid */
6037 +- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
6038 +- desc += host->desc_sz;
6039 ++ if (len) {
6040 ++ /* tran, valid */
6041 ++ sdhci_adma_write_desc(host, desc, addr, len,
6042 ++ ADMA2_TRAN_VALID);
6043 ++ desc += host->desc_sz;
6044 ++ }
6045 +
6046 + /*
6047 + * If this triggers then we have a calculation bug
6048 +@@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
6049 + sdhci_runtime_pm_get(host);
6050 +
6051 + /* Firstly check card presence */
6052 +- present = sdhci_do_get_cd(host);
6053 ++ present = mmc->ops->get_cd(mmc);
6054 +
6055 + spin_lock_irqsave(&host->lock, flags);
6056 +
6057 +@@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
6058 +
6059 + static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
6060 + {
6061 +- if (host->runtime_suspended || host->bus_on)
6062 ++ if (host->bus_on)
6063 + return;
6064 + host->bus_on = true;
6065 + pm_runtime_get_noresume(host->mmc->parent);
6066 +@@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
6067 +
6068 + static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
6069 + {
6070 +- if (host->runtime_suspended || !host->bus_on)
6071 ++ if (!host->bus_on)
6072 + return;
6073 + host->bus_on = false;
6074 + pm_runtime_put_noidle(host->mmc->parent);
6075 +@@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
6076 +
6077 + host = mmc_priv(mmc);
6078 + host->mmc = mmc;
6079 ++ host->mmc_host_ops = sdhci_ops;
6080 ++ mmc->ops = &host->mmc_host_ops;
6081 +
6082 + return host;
6083 + }
6084 +@@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host)
6085 + /*
6086 + * Set host parameters.
6087 + */
6088 +- mmc->ops = &sdhci_ops;
6089 + max_clk = host->max_clk;
6090 +
6091 + if (host->ops->get_min_clock)
6092 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
6093 +index 9d4aa31b683a..9c331ac5ad6b 100644
6094 +--- a/drivers/mmc/host/sdhci.h
6095 ++++ b/drivers/mmc/host/sdhci.h
6096 +@@ -425,6 +425,7 @@ struct sdhci_host {
6097 +
6098 + /* Internal data */
6099 + struct mmc_host *mmc; /* MMC structure */
6100 ++ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
6101 + u64 dma_mask; /* custom DMA mask */
6102 +
6103 + #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
6104 +diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
6105 +index 4498e92116b8..b47122d3e8d8 100644
6106 +--- a/drivers/mmc/host/usdhi6rol0.c
6107 ++++ b/drivers/mmc/host/usdhi6rol0.c
6108 +@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
6109 + struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
6110 + struct mmc_request *mrq = host->mrq;
6111 + struct mmc_data *data = mrq ? mrq->data : NULL;
6112 +- struct scatterlist *sg = host->sg ?: data->sg;
6113 ++ struct scatterlist *sg;
6114 +
6115 + dev_warn(mmc_dev(host->mmc),
6116 + "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
6117 +@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
6118 + case USDHI6_WAIT_FOR_MWRITE:
6119 + case USDHI6_WAIT_FOR_READ:
6120 + case USDHI6_WAIT_FOR_WRITE:
6121 ++ sg = host->sg ?: data->sg;
6122 + dev_dbg(mmc_dev(host->mmc),
6123 + "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
6124 + data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
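
The usdhi6rol0 hunk moves the `host->sg ?: data->sg` evaluation out of the
declaration and into the only timeout cases that imply a data transfer is in
flight; in the other cases `data` (from `mrq ? mrq->data : NULL`) may be NULL.
The same hoisted-initializer hazard in a standalone sketch (hypothetical
types, not the driver's structures):

    #include <stddef.h>

    struct mmc_data { int sg; };

    struct host {
        int *cursor;            /* like host->sg */
        struct mmc_data *data;  /* may be NULL outside data transfers */
    };

    static int timeout_work(struct host *h, int in_data_phase)
    {
        int *sg;

        /* The buggy form evaluated this in the declaration, before
         * knowing whether a data transfer is in flight:
         *     int *sg = h->cursor ? h->cursor : &h->data->sg;
         * which dereferences h->data even when it is NULL. */

        if (!in_data_phase)
            return 0;                       /* data never touched */

        sg = h->cursor ? h->cursor : &h->data->sg;  /* data valid here */
        return *sg;
    }

    int main(void)
    {
        struct host h = { NULL, NULL };     /* no cursor, no data */
        return timeout_work(&h, 0);         /* safe */
    }
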
6125 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
6126 +index f1692e418fe4..28bbca0af238 100644
6127 +--- a/drivers/net/bonding/bond_main.c
6128 ++++ b/drivers/net/bonding/bond_main.c
6129 +@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
6130 + static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
6131 + struct rtnl_link_stats64 *stats);
6132 + static void bond_slave_arr_handler(struct work_struct *work);
6133 ++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
6134 ++ int mod);
6135 +
6136 + /*---------------------------- General routines -----------------------------*/
6137 +
6138 +@@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
6139 + struct slave *slave)
6140 + {
6141 + struct arphdr *arp = (struct arphdr *)skb->data;
6142 +- struct slave *curr_active_slave;
6143 ++ struct slave *curr_active_slave, *curr_arp_slave;
6144 + unsigned char *arp_ptr;
6145 + __be32 sip, tip;
6146 + int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
6147 +@@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
6148 + &sip, &tip);
6149 +
6150 + curr_active_slave = rcu_dereference(bond->curr_active_slave);
6151 ++ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
6152 +
6153 +- /* Backup slaves won't see the ARP reply, but do come through
6154 +- * here for each ARP probe (so we swap the sip/tip to validate
6155 +- * the probe). In a "redundant switch, common router" type of
6156 +- * configuration, the ARP probe will (hopefully) travel from
6157 +- * the active, through one switch, the router, then the other
6158 +- * switch before reaching the backup.
6159 ++ /* We 'trust' the received ARP enough to validate it if:
6160 ++ *
6161 ++ * (a) the slave receiving the ARP is active (which includes the
6162 ++ * current ARP slave, if any), or
6163 ++ *
6164 ++ * (b) the receiving slave isn't active, but there is a currently
6165 ++ * active slave and it received valid arp reply(s) after it became
6166 ++ * the currently active slave, or
6167 ++ *
6168 ++ * (c) there is an ARP slave that sent an ARP during the prior ARP
6169 ++ * interval, and we receive an ARP reply on any slave. We accept
6170 ++ * these because switch FDB update delays may deliver the ARP
6171 ++ * reply to a slave other than the sender of the ARP request.
6172 + *
6173 +- * We 'trust' the arp requests if there is an active slave and
6174 +- * it received valid arp reply(s) after it became active. This
6175 +- * is done to avoid endless looping when we can't reach the
6176 ++ * Note: for (b), backup slaves are receiving the broadcast ARP
6177 ++ * request, not a reply. This request passes from the sending
6178 ++ * slave through the L2 switch(es) to the receiving slave. Since
6179 ++ * this is checking the request, sip/tip are swapped for
6180 ++ * validation.
6181 ++ *
6182 ++ * This is done to avoid endless looping when we can't reach the
6183 + * arp_ip_target and fool ourselves with our own arp requests.
6184 + */
6185 +-
6186 + if (bond_is_active_slave(slave))
6187 + bond_validate_arp(bond, slave, sip, tip);
6188 + else if (curr_active_slave &&
6189 + time_after(slave_last_rx(bond, curr_active_slave),
6190 + curr_active_slave->last_link_up))
6191 + bond_validate_arp(bond, slave, tip, sip);
6192 ++ else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
6193 ++ bond_time_in_interval(bond,
6194 ++ dev_trans_start(curr_arp_slave->dev), 1))
6195 ++ bond_validate_arp(bond, slave, sip, tip);
6196 +
6197 + out_unlock:
6198 + if (arp != (struct arphdr *)skb->data)
6199 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
6200 +index fc5b75675cd8..eb7192fab593 100644
6201 +--- a/drivers/net/can/usb/ems_usb.c
6202 ++++ b/drivers/net/can/usb/ems_usb.c
6203 +@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
6204 + */
6205 + #define EMS_USB_ARM7_CLOCK 8000000
6206 +
6207 ++#define CPC_TX_QUEUE_TRIGGER_LOW 25
6208 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35
6209 ++
6210 + /*
6211 + * CAN-Message representation in a CPC_MSG. Message object type is
6212 + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
6213 +@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
6214 + switch (urb->status) {
6215 + case 0:
6216 + dev->free_slots = dev->intr_in_buffer[1];
6217 ++ if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
6218 ++ if (netif_queue_stopped(netdev)) {
6219 ++ netif_wake_queue(netdev);
6220 ++ }
6221 ++ }
6222 + break;
6223 +
6224 + case -ECONNRESET: /* unlink */
6225 +@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
6226 + /* Release context */
6227 + context->echo_index = MAX_TX_URBS;
6228 +
6229 +- if (netif_queue_stopped(netdev))
6230 +- netif_wake_queue(netdev);
6231 + }
6232 +
6233 + /*
6234 +@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
6235 + int err, i;
6236 +
6237 + dev->intr_in_buffer[0] = 0;
6238 +- dev->free_slots = 15; /* initial size */
6239 ++ dev->free_slots = 50; /* initial size */
6240 +
6241 + for (i = 0; i < MAX_RX_URBS; i++) {
6242 + struct urb *urb = NULL;
6243 +@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
6244 +
6245 + /* Slow down tx path */
6246 + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
6247 +- dev->free_slots < 5) {
6248 ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
6249 + netif_stop_queue(netdev);
6250 + }
6251 + }
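
The ems_usb hunks replace a single low-water check with a low/high pair,
i.e. classic hysteresis: stop the queue when free slots fall below the low
mark, and wake it only once the device reports slots above the higher mark,
so the queue does not flap on every completed URB. The scheme in a standalone
sketch (stub queue functions, not the netdev API):

    #include <stdio.h>

    #define TRIGGER_LOW  25
    #define TRIGGER_HIGH 35

    static int queue_stopped;

    static void stop_queue(void) { queue_stopped = 1; }
    static void wake_queue(void) { queue_stopped = 0; }

    /* Transmit path: stop when we are nearly out of slots. */
    static void on_xmit(int free_slots)
    {
        if (free_slots < TRIGGER_LOW)
            stop_queue();
    }

    /* Interrupt path: wake only after a comfortable margin returns. */
    static void on_slots_reported(int free_slots)
    {
        if (free_slots > TRIGGER_HIGH && queue_stopped)
            wake_queue();
    }

    int main(void)
    {
        on_xmit(20);              /* 20 < 25: queue stops */
        on_slots_reported(30);    /* 30 <= 35: stays stopped */
        on_slots_reported(40);    /* 40 > 35: queue wakes */
        printf("stopped=%d\n", queue_stopped);  /* 0 */
        return 0;
    }
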
6252 +diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
6253 +index b06dba05594a..2dea39b5cb0b 100644
6254 +--- a/drivers/net/dsa/mv88e6xxx.c
6255 ++++ b/drivers/net/dsa/mv88e6xxx.c
6256 +@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
6257 +
6258 + /* no PVID with ranges, otherwise it's a bug */
6259 + if (pvid)
6260 +- err = _mv88e6xxx_port_pvid_set(ds, port, vid);
6261 ++ err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
6262 + unlock:
6263 + mutex_unlock(&ps->smi_mutex);
6264 +
6265 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
6266 +index 79789d8e52da..ca5ac5d6f4e6 100644
6267 +--- a/drivers/net/ethernet/broadcom/tg3.c
6268 ++++ b/drivers/net/ethernet/broadcom/tg3.c
6269 +@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6270 + return ret;
6271 + }
6272 +
6273 ++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
6274 ++{
6275 ++ /* Check if we will never have enough descriptors,
6276 ++ * as gso_segs can be more than current ring size
6277 ++ */
6278 ++ return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
6279 ++}
6280 ++
6281 + static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6282 +
6283 + /* Use GSO to workaround all TSO packets that meet HW bug conditions
6284 +@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6285 + * vlan encapsulated.
6286 + */
6287 + if (skb->protocol == htons(ETH_P_8021Q) ||
6288 +- skb->protocol == htons(ETH_P_8021AD))
6289 +- return tg3_tso_bug(tp, tnapi, txq, skb);
6290 ++ skb->protocol == htons(ETH_P_8021AD)) {
6291 ++ if (tg3_tso_bug_gso_check(tnapi, skb))
6292 ++ return tg3_tso_bug(tp, tnapi, txq, skb);
6293 ++ goto drop;
6294 ++ }
6295 +
6296 + if (!skb_is_gso_v6(skb)) {
6297 + if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6298 +- tg3_flag(tp, TSO_BUG))
6299 +- return tg3_tso_bug(tp, tnapi, txq, skb);
6300 +-
6301 ++ tg3_flag(tp, TSO_BUG)) {
6302 ++ if (tg3_tso_bug_gso_check(tnapi, skb))
6303 ++ return tg3_tso_bug(tp, tnapi, txq, skb);
6304 ++ goto drop;
6305 ++ }
6306 + ip_csum = iph->check;
6307 + ip_tot_len = iph->tot_len;
6308 + iph->check = 0;
6309 +@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6310 + if (would_hit_hwbug) {
6311 + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6312 +
6313 +- if (mss) {
6314 ++ if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
6315 + /* If it's a TSO packet, do GSO instead of
6316 + * allocating and copying to a large linear SKB
6317 + */
6318 +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
6319 +index 1671fa3332c2..7ba6d530b0c0 100644
6320 +--- a/drivers/net/ethernet/cisco/enic/enic.h
6321 ++++ b/drivers/net/ethernet/cisco/enic/enic.h
6322 +@@ -33,7 +33,7 @@
6323 +
6324 + #define DRV_NAME "enic"
6325 + #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
6326 +-#define DRV_VERSION "2.3.0.12"
6327 ++#define DRV_VERSION "2.3.0.20"
6328 + #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
6329 +
6330 + #define ENIC_BARS_MAX 6
6331 +diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
6332 +index 1ffd1050860b..1fdf5fe12a95 100644
6333 +--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
6334 ++++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
6335 +@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
6336 + int wait)
6337 + {
6338 + struct devcmd2_controller *dc2c = vdev->devcmd2;
6339 +- struct devcmd2_result *result = dc2c->result + dc2c->next_result;
6340 ++ struct devcmd2_result *result;
6341 ++ u8 color;
6342 + unsigned int i;
6343 + int delay, err;
6344 + u32 fetch_index, new_posted;
6345 +@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
6346 + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
6347 + return 0;
6348 +
6349 ++ result = dc2c->result + dc2c->next_result;
6350 ++ color = dc2c->color;
6351 ++
6352 ++ dc2c->next_result++;
6353 ++ if (dc2c->next_result == dc2c->result_size) {
6354 ++ dc2c->next_result = 0;
6355 ++ dc2c->color = dc2c->color ? 0 : 1;
6356 ++ }
6357 ++
6358 + for (delay = 0; delay < wait; delay++) {
6359 +- if (result->color == dc2c->color) {
6360 +- dc2c->next_result++;
6361 +- if (dc2c->next_result == dc2c->result_size) {
6362 +- dc2c->next_result = 0;
6363 +- dc2c->color = dc2c->color ? 0 : 1;
6364 +- }
6365 ++ if (result->color == color) {
6366 + if (result->error) {
6367 + err = result->error;
6368 + if (err != ERR_ECMDUNKNOWN ||
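
The enic hunk snapshots the result slot and its expected color bit before the
polling loop and advances next_result/color up front, instead of advancing
them inside the loop only when a completion is seen; the wait then stays
pinned to the slot it started on while the ring bookkeeping moves ahead, so a
timed-out command cannot leave the consumer index out of step. A standalone
sketch of color-bit completion polling (hypothetical structures):

    #include <stdint.h>

    struct result { volatile uint8_t color; int error; };

    struct ring {
        struct result *results;
        unsigned int size;
        unsigned int next;   /* next slot to consume */
        uint8_t color;       /* expected color for that slot */
    };

    /* Returns the command's error code on completion, -1 on timeout. */
    static int wait_result(struct ring *r, int max_polls)
    {
        struct result *res = &r->results[r->next];
        uint8_t color = r->color;

        /* Consume the slot up front, whether or not the wait below
         * succeeds, so the ring position never desynchronizes. */
        if (++r->next == r->size) {
            r->next = 0;
            r->color ^= 1;   /* the color flips on every wrap */
        }

        for (int i = 0; i < max_polls; i++)
            if (res->color == color)
                return res->error;
        return -1;
    }

    int main(void)
    {
        struct result slots[2] = { { .color = 1, .error = 0 }, { 0, 0 } };
        struct ring r = { slots, 2, 0, 1 };   /* expect color 1 first */
        return wait_result(&r, 10);           /* slot 0 complete: 0 */
    }
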
6369 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6370 +index 038f9ce391e6..1494997c4f7e 100644
6371 +--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6372 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6373 +@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
6374 + .enable = mlx4_en_phc_enable,
6375 + };
6376 +
6377 ++#define MLX4_EN_WRAP_AROUND_SEC 10ULL
6378 ++
6379 ++/* This function calculates the max shift that still allows a range of
6380 ++ * MLX4_EN_WRAP_AROUND_SEC seconds to be covered by the cycles register.
6381 ++ */
6382 ++static u32 freq_to_shift(u16 freq)
6383 ++{
6384 ++ u32 freq_khz = freq * 1000;
6385 ++ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
6386 ++ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
6387 ++ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
6388 ++ /* calculate max possible multiplier in order to fit in 64bit */
6389 ++ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
6390 ++
6391 ++ /* This comes from the reverse of clocksource_khz2mult */
6392 ++ return ilog2(div_u64(max_mul * freq_khz, 1000000));
6393 ++}
6394 ++
6395 + void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
6396 + {
6397 + struct mlx4_dev *dev = mdev->dev;
6398 +@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
6399 + memset(&mdev->cycles, 0, sizeof(mdev->cycles));
6400 + mdev->cycles.read = mlx4_en_read_clock;
6401 + mdev->cycles.mask = CLOCKSOURCE_MASK(48);
6402 +- /* Using shift to make calculation more accurate. Since current HW
6403 +- * clock frequency is 427 MHz, and cycles are given using a 48 bits
6404 +- * register, the biggest shift when calculating using u64, is 14
6405 +- * (max_cycles * multiplier < 2^64)
6406 +- */
6407 +- mdev->cycles.shift = 14;
6408 ++ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
6409 + mdev->cycles.mult =
6410 + clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
6411 + mdev->nominal_c_mult = mdev->cycles.mult;
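
The mlx4 clocksource hunk derives the shift from the actual hca_core_clock
instead of hardcoding 14. Roughly: the largest cycle count to cover is ten
seconds' worth (freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC), the largest
multiplier that keeps max_cycles * mult inside 64 bits is 2^64 / max_cycles,
and the shift is the log2 of the mult clocksource_khz2mult would then need.
A standalone re-derivation of the arithmetic (userspace C mirroring the
hunk's constants; the 427 MHz input is the clock the old comment cited):

    #include <stdio.h>
    #include <stdint.h>

    static int ilog2_u64(uint64_t v)
    {
        int l = -1;
        while (v) { v >>= 1; l++; }
        return l;
    }

    static uint64_t roundup_pow_of_two_u64(uint64_t v)
    {
        uint64_t p = 1;
        while (p < v) p <<= 1;
        return p;
    }

    /* Mirrors the driver's freq_to_shift(); freq is the clock in MHz. */
    static uint32_t freq_to_shift(uint16_t freq)
    {
        uint64_t freq_khz = (uint64_t)freq * 1000;
        uint64_t max_val_cycles = freq_khz * 1000 * 10; /* 10 s window */
        uint64_t rounded = (max_val_cycles & (max_val_cycles + 1)) == 0 ?
                           max_val_cycles :
                           roundup_pow_of_two_u64(max_val_cycles) - 1;
        uint64_t max_mul = UINT64_MAX / rounded;

        return (uint32_t)ilog2_u64(max_mul * freq_khz / 1000000);
    }

    int main(void)
    {
        /* For 427 MHz this prints 20: a larger shift is now safe
         * because only 10 s of range must fit, not the full 48 bits. */
        printf("shift(427 MHz) = %u\n", freq_to_shift(427));
        return 0;
    }
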
6412 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6413 +index 7869f97de5da..67e9633ea9c7 100644
6414 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6415 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6416 +@@ -2381,8 +2381,6 @@ out:
6417 + /* set offloads */
6418 + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
6419 + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
6420 +- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6421 +- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
6422 + }
6423 +
6424 + static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
6425 +@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
6426 + /* unset offloads */
6427 + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
6428 + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
6429 +- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
6430 +- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
6431 +
6432 + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
6433 + VXLAN_STEER_BY_OUTER_MAC, 0);
6434 +@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
6435 + priv->rss_hash_fn = ETH_RSS_HASH_TOP;
6436 + }
6437 +
6438 ++ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
6439 ++ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6440 ++ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
6441 ++ }
6442 ++
6443 + mdev->pndev[port] = dev;
6444 + mdev->upper[port] = NULL;
6445 +
6446 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
6447 +index ee99e67187f5..3904b5fc0b7c 100644
6448 +--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
6449 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
6450 +@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
6451 + stats->collisions = 0;
6452 + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
6453 + stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
6454 +- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6455 ++ stats->rx_over_errors = 0;
6456 + stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
6457 + stats->rx_frame_errors = 0;
6458 + stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6459 +- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6460 ++ stats->rx_missed_errors = 0;
6461 + stats->tx_aborted_errors = 0;
6462 + stats->tx_carrier_errors = 0;
6463 + stats->tx_fifo_errors = 0;
6464 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6465 +index 617fb22b5d81..7dbeafa65934 100644
6466 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6467 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6468 +@@ -45,6 +45,7 @@
6469 + #include <linux/if_bridge.h>
6470 + #include <linux/workqueue.h>
6471 + #include <linux/jiffies.h>
6472 ++#include <linux/rtnetlink.h>
6473 + #include <net/switchdev.h>
6474 +
6475 + #include "spectrum.h"
6476 +@@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
6477 +
6478 + mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
6479 +
6480 ++ rtnl_lock();
6481 + do {
6482 + mlxsw_reg_sfn_pack(sfn_pl);
6483 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
6484 +@@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
6485 + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
6486 +
6487 + } while (num_rec);
6488 ++ rtnl_unlock();
6489 +
6490 + kfree(sfn_pl);
6491 + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
6492 +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
6493 +index e9f2349e98bc..52ec3d6e056a 100644
6494 +--- a/drivers/net/ethernet/rocker/rocker.c
6495 ++++ b/drivers/net/ethernet/rocker/rocker.c
6496 +@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
6497 + info.addr = lw->addr;
6498 + info.vid = lw->vid;
6499 +
6500 ++ rtnl_lock();
6501 + if (learned && removing)
6502 + call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
6503 + lw->rocker_port->dev, &info.info);
6504 + else if (learned && !removing)
6505 + call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
6506 + lw->rocker_port->dev, &info.info);
6507 ++ rtnl_unlock();
6508 +
6509 + rocker_port_kfree(lw->trans, work);
6510 + }
6511 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
6512 +index 47b711739ba9..e6cefd0e3262 100644
6513 +--- a/drivers/net/phy/dp83640.c
6514 ++++ b/drivers/net/phy/dp83640.c
6515 +@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
6516 + struct skb_shared_hwtstamps *shhwtstamps = NULL;
6517 + struct sk_buff *skb;
6518 + unsigned long flags;
6519 ++ u8 overflow;
6520 ++
6521 ++ overflow = (phy_rxts->ns_hi >> 14) & 0x3;
6522 ++ if (overflow)
6523 ++ pr_debug("rx timestamp queue overflow, count %d\n", overflow);
6524 +
6525 + spin_lock_irqsave(&dp83640->rx_lock, flags);
6526 +
6527 +@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
6528 + struct skb_shared_hwtstamps shhwtstamps;
6529 + struct sk_buff *skb;
6530 + u64 ns;
6531 ++ u8 overflow;
6532 +
6533 + /* We must already have the skb that triggered this. */
6534 +
6535 +@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
6536 + pr_debug("have timestamp but tx_queue empty\n");
6537 + return;
6538 + }
6539 ++
6540 ++ overflow = (phy_txts->ns_hi >> 14) & 0x3;
6541 ++ if (overflow) {
6542 ++ pr_debug("tx timestamp queue overflow, count %d\n", overflow);
6543 ++ while (skb) {
6544 ++ skb_complete_tx_timestamp(skb, NULL);
6545 ++ skb = skb_dequeue(&dp83640->tx_queue);
6546 ++ }
6547 ++ return;
6548 ++ }
6549 ++
6550 + ns = phy2txts(phy_txts);
6551 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
6552 + shhwtstamps.hwtstamp = ns_to_ktime(ns);
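
Both dp83640 hunks read the same two status bits: the PHY packs a 2-bit
overflow counter into bits 15:14 of ns_hi next to the timestamp, and the tx
path additionally drains its skb queue on overflow since the pending frames
can no longer be matched to timestamps. The field extraction in a standalone
sketch (the sample word is made up; the 13:0 timestamp mask is assumed from
the surrounding driver):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t ns_hi = 0xC123;                 /* made-up status word */
        uint8_t overflow = (ns_hi >> 14) & 0x3;  /* bits 15:14: drops */
        uint16_t ns_upper = ns_hi & 0x3FFF;      /* bits 13:0: timestamp */

        if (overflow)
            printf("timestamp queue overflow, count %u\n",
                   (unsigned)overflow);          /* count 3 */
        printf("upper ns bits = 0x%04x\n", ns_upper);  /* 0x0123 */
        return 0;
    }
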
6553 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
6554 +index 0a37f840fcc5..4e0068e775f9 100644
6555 +--- a/drivers/net/ppp/pppoe.c
6556 ++++ b/drivers/net/ppp/pppoe.c
6557 +@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
6558 +
6559 + if (!__pppoe_xmit(sk_pppox(relay_po), skb))
6560 + goto abort_put;
6561 ++
6562 ++ sock_put(sk_pppox(relay_po));
6563 + } else {
6564 + if (sock_queue_rcv_skb(sk, skb))
6565 + goto abort_kfree;
6566 +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
6567 +index 597c53e0a2ec..f7e8c79349ad 100644
6568 +--- a/drivers/net/ppp/pptp.c
6569 ++++ b/drivers/net/ppp/pptp.c
6570 +@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
6571 + return i < MAX_CALLID;
6572 + }
6573 +
6574 +-static int add_chan(struct pppox_sock *sock)
6575 ++static int add_chan(struct pppox_sock *sock,
6576 ++ struct pptp_addr *sa)
6577 + {
6578 + static int call_id;
6579 +
6580 + spin_lock(&chan_lock);
6581 +- if (!sock->proto.pptp.src_addr.call_id) {
6582 ++ if (!sa->call_id) {
6583 + call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
6584 + if (call_id == MAX_CALLID) {
6585 + call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
6586 + if (call_id == MAX_CALLID)
6587 + goto out_err;
6588 + }
6589 +- sock->proto.pptp.src_addr.call_id = call_id;
6590 +- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
6591 ++ sa->call_id = call_id;
6592 ++ } else if (test_bit(sa->call_id, callid_bitmap)) {
6593 + goto out_err;
6594 ++ }
6595 +
6596 +- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
6597 +- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
6598 ++ sock->proto.pptp.src_addr = *sa;
6599 ++ set_bit(sa->call_id, callid_bitmap);
6600 ++ rcu_assign_pointer(callid_sock[sa->call_id], sock);
6601 + spin_unlock(&chan_lock);
6602 +
6603 + return 0;
6604 +@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
6605 + struct sock *sk = sock->sk;
6606 + struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
6607 + struct pppox_sock *po = pppox_sk(sk);
6608 +- struct pptp_opt *opt = &po->proto.pptp;
6609 + int error = 0;
6610 +
6611 + if (sockaddr_len < sizeof(struct sockaddr_pppox))
6612 +@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
6613 +
6614 + lock_sock(sk);
6615 +
6616 +- opt->src_addr = sp->sa_addr.pptp;
6617 +- if (add_chan(po))
6618 ++ if (sk->sk_state & PPPOX_DEAD) {
6619 ++ error = -EALREADY;
6620 ++ goto out;
6621 ++ }
6622 ++
6623 ++ if (sk->sk_state & PPPOX_BOUND) {
6624 + error = -EBUSY;
6625 ++ goto out;
6626 ++ }
6627 ++
6628 ++ if (add_chan(po, &sp->sa_addr.pptp))
6629 ++ error = -EBUSY;
6630 ++ else
6631 ++ sk->sk_state |= PPPOX_BOUND;
6632 +
6633 ++out:
6634 + release_sock(sk);
6635 + return error;
6636 + }
6637 +@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
6638 + }
6639 +
6640 + opt->dst_addr = sp->sa_addr.pptp;
6641 +- sk->sk_state = PPPOX_CONNECTED;
6642 ++ sk->sk_state |= PPPOX_CONNECTED;
6643 +
6644 + end:
6645 + release_sock(sk);
6646 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
6647 +index 5fccc5a8153f..982e0acd1a36 100644
6648 +--- a/drivers/net/usb/qmi_wwan.c
6649 ++++ b/drivers/net/usb/qmi_wwan.c
6650 +@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
6651 +
6652 + /* 3. Combined interface devices matching on interface number */
6653 + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
6654 ++ {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
6655 + {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
6656 + {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
6657 + {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
6658 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
6659 +index 405a7b6cca25..e0fcda4ddd55 100644
6660 +--- a/drivers/net/vxlan.c
6661 ++++ b/drivers/net/vxlan.c
6662 +@@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6663 + vxlan->cfg.port_max, true);
6664 +
6665 + if (info) {
6666 +- if (info->key.tun_flags & TUNNEL_CSUM)
6667 +- flags |= VXLAN_F_UDP_CSUM;
6668 +- else
6669 +- flags &= ~VXLAN_F_UDP_CSUM;
6670 +-
6671 + ttl = info->key.ttl;
6672 + tos = info->key.tos;
6673 +
6674 +@@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6675 + goto drop;
6676 + sk = vxlan->vn4_sock->sock->sk;
6677 +
6678 +- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
6679 +- df = htons(IP_DF);
6680 ++ if (info) {
6681 ++ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
6682 ++ df = htons(IP_DF);
6683 ++
6684 ++ if (info->key.tun_flags & TUNNEL_CSUM)
6685 ++ flags |= VXLAN_F_UDP_CSUM;
6686 ++ else
6687 ++ flags &= ~VXLAN_F_UDP_CSUM;
6688 ++ }
6689 +
6690 + memset(&fl4, 0, sizeof(fl4));
6691 + fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
6692 +@@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6693 + return;
6694 + }
6695 +
6696 ++ if (info) {
6697 ++ if (info->key.tun_flags & TUNNEL_CSUM)
6698 ++ flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
6699 ++ else
6700 ++ flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
6701 ++ }
6702 ++
6703 + ttl = ttl ? : ip6_dst_hoplimit(ndst);
6704 + err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
6705 + 0, ttl, src_port, dst_port, htonl(vni << 8), md,
6706 +diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
6707 +index e18629a16fb0..0961f33de05e 100644
6708 +--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
6709 ++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
6710 +@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
6711 +
6712 + priv->ucode_loaded = false;
6713 + iwl_trans_stop_device(priv->trans);
6714 ++ ret = iwl_trans_start_hw(priv->trans);
6715 ++ if (ret)
6716 ++ goto out;
6717 +
6718 + priv->wowlan = true;
6719 +
6720 +diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
6721 +index d6e0c1b5c20c..8215d7405f64 100644
6722 +--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
6723 ++++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
6724 +@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
6725 + return -EBUSY;
6726 + }
6727 +
6728 ++ /* we don't support "match all" in the firmware */
6729 ++ if (!req->n_match_sets)
6730 ++ return -EOPNOTSUPP;
6731 ++
6732 + ret = iwl_mvm_check_running_scans(mvm, type);
6733 + if (ret)
6734 + return ret;
6735 +diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
6736 +index 639761fb2bfb..d58c094f2f04 100644
6737 +--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
6738 ++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
6739 +@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6740 + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
6741 + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
6742 + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
6743 ++ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
6744 + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
6745 + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
6746 + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
6747 +@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6748 + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
6749 + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
6750 + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
6751 +- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
6752 ++ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
6753 + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
6754 + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
6755 +- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
6756 ++ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
6757 + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
6758 + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
6759 + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
6760 +diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
6761 +index 90283453073c..8c7204738aa3 100644
6762 +--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
6763 ++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
6764 +@@ -7,6 +7,7 @@
6765 + *
6766 + * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
6767 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6768 ++ * Copyright(c) 2016 Intel Deutschland GmbH
6769 + *
6770 + * This program is free software; you can redistribute it and/or modify
6771 + * it under the terms of version 2 of the GNU General Public License as
6772 +@@ -33,6 +34,7 @@
6773 + *
6774 + * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
6775 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6776 ++ * Copyright(c) 2016 Intel Deutschland GmbH
6777 + * All rights reserved.
6778 + *
6779 + * Redistribution and use in source and binary forms, with or without
6780 +@@ -924,9 +926,16 @@ monitor:
6781 + if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
6782 + iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
6783 + trans_pcie->fw_mon_phys >> dest->base_shift);
6784 +- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6785 +- (trans_pcie->fw_mon_phys +
6786 +- trans_pcie->fw_mon_size) >> dest->end_shift);
6787 ++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
6788 ++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6789 ++ (trans_pcie->fw_mon_phys +
6790 ++ trans_pcie->fw_mon_size - 256) >>
6791 ++ dest->end_shift);
6792 ++ else
6793 ++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6794 ++ (trans_pcie->fw_mon_phys +
6795 ++ trans_pcie->fw_mon_size) >>
6796 ++ dest->end_shift);
6797 + }
6798 + }
6799 +
6800 +diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
6801 +index f46c9d7f6528..7f471bff435c 100644
6802 +--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
6803 ++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
6804 +@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6805 + hw_queue);
6806 + if (rx_remained_cnt == 0)
6807 + return;
6808 +-
6809 ++ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
6810 ++ rtlpci->rx_ring[rxring_idx].idx];
6811 ++ pdesc = (struct rtl_rx_desc *)skb->data;
6812 + } else { /* rx descriptor */
6813 + pdesc = &rtlpci->rx_ring[rxring_idx].desc[
6814 + rtlpci->rx_ring[rxring_idx].idx];
6815 +@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6816 + new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
6817 + if (unlikely(!new_skb))
6818 + goto no_new;
6819 +- if (rtlpriv->use_new_trx_flow) {
6820 +- buffer_desc =
6821 +- &rtlpci->rx_ring[rxring_idx].buffer_desc
6822 +- [rtlpci->rx_ring[rxring_idx].idx];
6823 +- /*means rx wifi info*/
6824 +- pdesc = (struct rtl_rx_desc *)skb->data;
6825 +- }
6826 + memset(&rx_status , 0 , sizeof(rx_status));
6827 + rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
6828 + &rx_status, (u8 *)pdesc, skb);
6829 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6830 +index 11344121c55e..47e32cb0ec1a 100644
6831 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6832 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6833 +@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6834 + u8 tid;
6835 +
6836 + rtl8188ee_bt_reg_init(hw);
6837 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6838 +-
6839 + rtlpriv->dm.dm_initialgain_enable = 1;
6840 + rtlpriv->dm.dm_flag = 0;
6841 + rtlpriv->dm.disable_framebursting = 0;
6842 +@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6843 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6844 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6845 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6846 ++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6847 ++ rtlpriv->cfg->mod_params->sw_crypto =
6848 ++ rtlpriv->cfg->mod_params->sw_crypto;
6849 ++ rtlpriv->cfg->mod_params->disable_watchdog =
6850 ++ rtlpriv->cfg->mod_params->disable_watchdog;
6851 + if (rtlpriv->cfg->mod_params->disable_watchdog)
6852 + pr_info("watchdog disabled\n");
6853 + if (!rtlpriv->psc.inactiveps)
6854 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6855 +index de6cb6c3a48c..4780bdc63b2b 100644
6856 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6857 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6858 +@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
6859 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6860 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6861 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6862 ++ rtlpriv->cfg->mod_params->sw_crypto =
6863 ++ rtlpriv->cfg->mod_params->sw_crypto;
6864 + if (!rtlpriv->psc.inactiveps)
6865 + pr_info("rtl8192ce: Power Save off (module option)\n");
6866 + if (!rtlpriv->psc.fwctrl_lps)
6867 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6868 +index fd4a5353d216..7c6f7f0d18c6 100644
6869 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6870 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6871 +@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
6872 + rtlpriv->dm.disable_framebursting = false;
6873 + rtlpriv->dm.thermalvalue = 0;
6874 + rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
6875 ++ rtlpriv->cfg->mod_params->sw_crypto =
6876 ++ rtlpriv->cfg->mod_params->sw_crypto;
6877 +
6878 + /* for firmware buf */
6879 + rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
6880 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6881 +index b19d0398215f..c6e09a19de1a 100644
6882 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6883 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6884 +@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
6885 + module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
6886 + MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6887 + MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6888 +-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6889 +-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6890 ++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6891 ++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6892 + MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6893 +
6894 + static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6895 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6896 +index e1fd27c888bf..31baca41ac2f 100644
6897 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6898 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6899 +@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
6900 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6901 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6902 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6903 ++ rtlpriv->cfg->mod_params->sw_crypto =
6904 ++ rtlpriv->cfg->mod_params->sw_crypto;
6905 + if (!rtlpriv->psc.inactiveps)
6906 + pr_info("Power Save off (module option)\n");
6907 + if (!rtlpriv->psc.fwctrl_lps)
6908 +@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
6909 + module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
6910 + MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6911 + MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6912 +-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6913 +-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6914 ++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6915 ++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6916 + MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6917 +
6918 + static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6919 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6920 +index 3859b3e3d158..ff49a8c0ff61 100644
6921 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6922 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6923 +@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
6924 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6925 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6926 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6927 ++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6928 ++ rtlpriv->cfg->mod_params->sw_crypto =
6929 ++ rtlpriv->cfg->mod_params->sw_crypto;
6930 ++ rtlpriv->cfg->mod_params->disable_watchdog =
6931 ++ rtlpriv->cfg->mod_params->disable_watchdog;
6932 + if (rtlpriv->cfg->mod_params->disable_watchdog)
6933 + pr_info("watchdog disabled\n");
6934 + rtlpriv->psc.reg_fwctrl_lps = 3;
6935 +@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
6936 + .swctrl_lps = false,
6937 + .fwctrl_lps = true,
6938 + .debug = DBG_EMERG,
6939 ++ .msi_support = false,
6940 ++ .disable_watchdog = false,
6941 + };
6942 +
6943 + static struct rtl_hal_cfg rtl8723e_hal_cfg = {
6944 +@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
6945 + module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
6946 + module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
6947 + module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
6948 ++module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
6949 + module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
6950 + bool, 0444);
6951 + MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6952 + MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6953 + MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6954 + MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6955 ++MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
6956 + MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6957 + MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
6958 +
6959 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6960 +index d091f1d5f91e..a78eaeda0008 100644
6961 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6962 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6963 +@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
6964 + struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
6965 +
6966 + rtl8723be_bt_reg_init(hw);
6967 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6968 + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
6969 +
6970 + rtlpriv->dm.dm_initialgain_enable = 1;
6971 +@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
6972 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6973 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6974 + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6975 ++ rtlpriv->cfg->mod_params->sw_crypto =
6976 ++ rtlpriv->cfg->mod_params->sw_crypto;
6977 ++ rtlpriv->cfg->mod_params->disable_watchdog =
6978 ++ rtlpriv->cfg->mod_params->disable_watchdog;
6979 + if (rtlpriv->cfg->mod_params->disable_watchdog)
6980 + pr_info("watchdog disabled\n");
6981 + rtlpriv->psc.reg_fwctrl_lps = 3;
6982 +@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
6983 + .inactiveps = true,
6984 + .swctrl_lps = false,
6985 + .fwctrl_lps = true,
6986 ++ .msi_support = false,
6987 ++ .disable_watchdog = false,
6988 ++ .debug = DBG_EMERG,
6989 + };
6990 +
6991 + static struct rtl_hal_cfg rtl8723be_hal_cfg = {
6992 +diff --git a/drivers/of/irq.c b/drivers/of/irq.c
6993 +index 4fa916dffc91..72a2c1969646 100644
6994 +--- a/drivers/of/irq.c
6995 ++++ b/drivers/of/irq.c
6996 +@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
6997 + msi_base = be32_to_cpup(msi_map + 2);
6998 + rid_len = be32_to_cpup(msi_map + 3);
6999 +
7000 ++ if (rid_base & ~map_mask) {
7001 ++ dev_err(parent_dev,
7002 ++ "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
7003 ++ map_mask, rid_base);
7004 ++ return rid_out;
7005 ++ }
7006 ++
7007 + msi_controller_node = of_find_node_by_phandle(phandle);
7008 +
7009 + matched = (masked_rid >= rid_base &&
7010 +@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
7011 + if (!matched)
7012 + return rid_out;
7013 +
7014 +- rid_out = masked_rid + msi_base;
7015 ++ rid_out = masked_rid - rid_base + msi_base;
7016 + dev_dbg(dev,
7017 + "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
7018 + dev_name(parent_dev), map_mask, rid_base, msi_base,
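
The of/irq hunk makes two changes: a rid-base with bits outside msi-map-mask
is now rejected, and the output RID becomes masked_rid - rid_base + msi_base,
i.e. the input is translated by its offset within the matched range rather
than having msi-base added to the raw RID. A worked standalone check with
made-up map values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical msi-map entry: rid-base 0x100, msi-base 0x2000,
         * length 0x10, msi-map-mask 0xffff. */
        uint32_t rid_base = 0x100, msi_base = 0x2000, len = 0x10;
        uint32_t map_mask = 0xffff;
        uint32_t rid = 0x0105;
        uint32_t masked_rid = rid & map_mask;

        if (masked_rid < rid_base || masked_rid >= rid_base + len)
            return 1;   /* no match */

        uint32_t rid_old = masked_rid + msi_base;            /* 0x2105 */
        uint32_t rid_new = masked_rid - rid_base + msi_base; /* 0x2005 */

        printf("old=0x%x new=0x%x\n", rid_old, rid_new);
        return 0;
    }
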
7019 +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
7020 +index ff538568a617..0b3e0bfa7be5 100644
7021 +--- a/drivers/pci/hotplug/acpiphp_glue.c
7022 ++++ b/drivers/pci/hotplug/acpiphp_glue.c
7023 +@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
7024 + {
7025 + pci_lock_rescan_remove();
7026 +
7027 +- if (slot->flags & SLOT_IS_GOING_AWAY)
7028 ++ if (slot->flags & SLOT_IS_GOING_AWAY) {
7029 ++ pci_unlock_rescan_remove();
7030 + return -ENODEV;
7031 ++ }
7032 +
7033 + /* configure all functions */
7034 + if (!(slot->flags & SLOT_ENABLED))
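
The acpiphp hunk is the classic lock leak on an early return: the error path
bailed out of acpiphp_enable_slot() while still holding the rescan/remove
lock, blocking every later caller. The invariant in a standalone sketch (a
pthread mutex stands in for pci_lock_rescan_remove):

    #include <pthread.h>
    #include <errno.h>

    static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;

    static int enable_slot(int going_away)
    {
        pthread_mutex_lock(&rescan_lock);

        if (going_away) {
            /* Every return after the lock is taken must release it;
             * the buggy version returned -ENODEV still holding it. */
            pthread_mutex_unlock(&rescan_lock);
            return -ENODEV;
        }

        /* ... configure the slot ... */

        pthread_mutex_unlock(&rescan_lock);
        return 0;
    }

    int main(void) { return enable_slot(1) == -ENODEV ? 0 : 1; }
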
7035 +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
7036 +index 0bf82a20a0fb..48d21e0edd56 100644
7037 +--- a/drivers/pci/pcie/aer/aerdrv.c
7038 ++++ b/drivers/pci/pcie/aer/aerdrv.c
7039 +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
7040 + rpc->rpd = dev;
7041 + INIT_WORK(&rpc->dpc_handler, aer_isr);
7042 + mutex_init(&rpc->rpc_mutex);
7043 +- init_waitqueue_head(&rpc->wait_release);
7044 +
7045 + /* Use PCIe bus function to store rpc into PCIe device */
7046 + set_service_data(dev, rpc);
7047 +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
7048 + if (rpc->isr)
7049 + free_irq(dev->irq, dev);
7050 +
7051 +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
7052 +-
7053 ++ flush_work(&rpc->dpc_handler);
7054 + aer_disable_rootport(rpc);
7055 + kfree(rpc);
7056 + set_service_data(dev, NULL);
7057 +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
7058 +index 84420b7c9456..945c939a86c5 100644
7059 +--- a/drivers/pci/pcie/aer/aerdrv.h
7060 ++++ b/drivers/pci/pcie/aer/aerdrv.h
7061 +@@ -72,7 +72,6 @@ struct aer_rpc {
7062 + * recovery on the same
7063 + * root port hierarchy
7064 + */
7065 +- wait_queue_head_t wait_release;
7066 + };
7067 +
7068 + struct aer_broadcast_data {
7069 +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
7070 +index fba785e9df75..4e14de0f0f98 100644
7071 +--- a/drivers/pci/pcie/aer/aerdrv_core.c
7072 ++++ b/drivers/pci/pcie/aer/aerdrv_core.c
7073 +@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
7074 + while (get_e_source(rpc, &e_src))
7075 + aer_isr_one_error(p_device, &e_src);
7076 + mutex_unlock(&rpc->rpc_mutex);
7077 +-
7078 +- wake_up(&rpc->wait_release);
7079 + }
7080 +
7081 + /**
7082 +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
7083 +index c777b97207d5..5f70fee59a94 100644
7084 +--- a/drivers/pci/xen-pcifront.c
7085 ++++ b/drivers/pci/xen-pcifront.c
7086 +@@ -53,7 +53,7 @@ struct pcifront_device {
7087 + };
7088 +
7089 + struct pcifront_sd {
7090 +- int domain;
7091 ++ struct pci_sysdata sd;
7092 + struct pcifront_device *pdev;
7093 + };
7094 +
7095 +@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
7096 + unsigned int domain, unsigned int bus,
7097 + struct pcifront_device *pdev)
7098 + {
7099 +- sd->domain = domain;
7100 ++ /* Because we do not expose that information via XenBus. */
7101 ++ sd->sd.node = first_online_node;
7102 ++ sd->sd.domain = domain;
7103 + sd->pdev = pdev;
7104 + }
7105 +
7106 +@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
7107 + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
7108 + domain, bus);
7109 +
7110 +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
7111 +- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
7112 ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
7113 ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
7114 + if (!bus_entry || !sd) {
7115 + err = -ENOMEM;
7116 + goto err_out;
7117 +diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
7118 +index 8c7f27db6ad3..e7e574dc667a 100644
7119 +--- a/drivers/phy/phy-core.c
7120 ++++ b/drivers/phy/phy-core.c
7121 +@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
7122 +
7123 + int phy_power_on(struct phy *phy)
7124 + {
7125 +- int ret;
7126 ++ int ret = 0;
7127 +
7128 + if (!phy)
7129 +- return 0;
7130 ++ goto out;
7131 +
7132 + if (phy->pwr) {
7133 + ret = regulator_enable(phy->pwr);
7134 + if (ret)
7135 +- return ret;
7136 ++ goto out;
7137 + }
7138 +
7139 + ret = phy_pm_runtime_get_sync(phy);
7140 + if (ret < 0 && ret != -ENOTSUPP)
7141 +- return ret;
7142 ++ goto err_pm_sync;
7143 ++
7144 + ret = 0; /* Override possible ret == -ENOTSUPP */
7145 +
7146 + mutex_lock(&phy->mutex);
7147 +@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
7148 + ret = phy->ops->power_on(phy);
7149 + if (ret < 0) {
7150 + dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
7151 +- goto out;
7152 ++ goto err_pwr_on;
7153 + }
7154 + }
7155 + ++phy->power_count;
7156 + mutex_unlock(&phy->mutex);
7157 + return 0;
7158 +
7159 +-out:
7160 ++err_pwr_on:
7161 + mutex_unlock(&phy->mutex);
7162 + phy_pm_runtime_put_sync(phy);
7163 ++err_pm_sync:
7164 + if (phy->pwr)
7165 + regulator_disable(phy->pwr);
7166 +-
7167 ++out:
7168 + return ret;
7169 + }
7170 + EXPORT_SYMBOL_GPL(phy_power_on);
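
The phy_power_on() rework converts scattered early returns into a single goto
ladder so each failure point unwinds exactly what succeeded before it, in
reverse order (runtime-PM reference, then regulator). The shape of the
pattern in a standalone sketch (the step and undo functions are stubs, not
the PHY framework API):

    #include <stdio.h>

    static int step_a(void) { return 0; }   /* e.g. regulator_enable */
    static int step_b(void) { return 0; }   /* e.g. pm_runtime_get_sync */
    static int step_c(void) { return -1; }  /* e.g. ops->power_on: fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int power_on(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            goto out;
        ret = step_b();
        if (ret)
            goto err_a;       /* b failed: only a needs undoing */
        ret = step_c();
        if (ret)
            goto err_b;       /* c failed: undo b, then a */
        return 0;

    err_b:
        undo_b();             /* labels fall through in reverse order */
    err_a:
        undo_a();
    out:
        return ret;
    }

    int main(void) { return power_on() ? 1 : 0; }
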
7171 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
7172 +index a313dfc0245f..d78ee151c9e4 100644
7173 +--- a/drivers/platform/x86/ideapad-laptop.c
7174 ++++ b/drivers/platform/x86/ideapad-laptop.c
7175 +@@ -865,6 +865,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7176 + },
7177 + },
7178 + {
7179 ++ .ident = "Lenovo ideapad Y700-17ISK",
7180 ++ .matches = {
7181 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7182 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
7183 ++ },
7184 ++ },
7185 ++ {
7186 + .ident = "Lenovo Yoga 2 11 / 13 / Pro",
7187 + .matches = {
7188 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7189 +@@ -893,6 +900,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7190 + },
7191 + },
7192 + {
7193 ++ .ident = "Lenovo Yoga 700",
7194 ++ .matches = {
7195 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7196 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
7197 ++ },
7198 ++ },
7199 ++ {
7200 + .ident = "Lenovo Yoga 900",
7201 + .matches = {
7202 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7203 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
7204 +index c01302989ee4..b0f62141ea4d 100644
7205 +--- a/drivers/platform/x86/toshiba_acpi.c
7206 ++++ b/drivers/platform/x86/toshiba_acpi.c
7207 +@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
7208 + brightness = __get_lcd_brightness(dev);
7209 + if (brightness < 0)
7210 + return 0;
7211 ++ /*
7212 ++ * If transflective backlight is supported and the brightness is zero
7213 ++ * (lowest brightness level), the set_lcd_brightness function will
7214 ++ * activate the transflective backlight, making the LCD appear to be
7215 ++ * turned off; simply increment the brightness level to avoid that.
7216 ++ */
7217 ++ if (dev->tr_backlight_supported && brightness == 0)
7218 ++ brightness++;
7219 + ret = set_lcd_brightness(dev, brightness);
7220 + if (ret) {
7221 + pr_debug("Backlight method is read-only, disabling backlight support\n");
7222 +diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
7223 +index 8df0b0e62976..00676208080e 100644
7224 +--- a/drivers/regulator/Kconfig
7225 ++++ b/drivers/regulator/Kconfig
7226 +@@ -446,6 +446,7 @@ config REGULATOR_MC13892
7227 + config REGULATOR_MT6311
7228 + tristate "MediaTek MT6311 PMIC"
7229 + depends on I2C
7230 ++ select REGMAP_I2C
7231 + help
7232 + Say y here to select this option to enable the power regulator of
7233 + MediaTek MT6311 PMIC.
7234 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
7235 +index 35de22fdb7a0..f2e1a39ce0f3 100644
7236 +--- a/drivers/regulator/axp20x-regulator.c
7237 ++++ b/drivers/regulator/axp20x-regulator.c
7238 +@@ -27,8 +27,8 @@
7239 + #define AXP20X_IO_ENABLED 0x03
7240 + #define AXP20X_IO_DISABLED 0x07
7241 +
7242 +-#define AXP22X_IO_ENABLED 0x04
7243 +-#define AXP22X_IO_DISABLED 0x03
7244 ++#define AXP22X_IO_ENABLED 0x03
7245 ++#define AXP22X_IO_DISABLED 0x04
7246 +
7247 + #define AXP20X_WORKMODE_DCDC2_MASK BIT(2)
7248 + #define AXP20X_WORKMODE_DCDC3_MASK BIT(1)
7249 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
7250 +index a263c10359e1..4abfbdb285ec 100644
7251 +--- a/drivers/s390/block/dasd.c
7252 ++++ b/drivers/s390/block/dasd.c
7253 +@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
7254 + max = block->base->discipline->max_blocks << block->s2b_shift;
7255 + }
7256 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
7257 ++ block->request_queue->limits.max_dev_sectors = max;
7258 + blk_queue_logical_block_size(block->request_queue,
7259 + block->bp_block);
7260 + blk_queue_max_hw_sectors(block->request_queue, max);
7261 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
7262 +index 184b1dbeb554..286782c60da4 100644
7263 +--- a/drivers/s390/block/dasd_alias.c
7264 ++++ b/drivers/s390/block/dasd_alias.c
7265 +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7266 + spin_unlock_irqrestore(&lcu->lock, flags);
7267 + cancel_work_sync(&lcu->suc_data.worker);
7268 + spin_lock_irqsave(&lcu->lock, flags);
7269 +- if (device == lcu->suc_data.device)
7270 ++ if (device == lcu->suc_data.device) {
7271 ++ dasd_put_device(device);
7272 + lcu->suc_data.device = NULL;
7273 ++ }
7274 + }
7275 + was_pending = 0;
7276 + if (device == lcu->ruac_data.device) {
7277 +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7278 + was_pending = 1;
7279 + cancel_delayed_work_sync(&lcu->ruac_data.dwork);
7280 + spin_lock_irqsave(&lcu->lock, flags);
7281 +- if (device == lcu->ruac_data.device)
7282 ++ if (device == lcu->ruac_data.device) {
7283 ++ dasd_put_device(device);
7284 + lcu->ruac_data.device = NULL;
7285 ++ }
7286 + }
7287 + private->lcu = NULL;
7288 + spin_unlock_irqrestore(&lcu->lock, flags);
7289 +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
7290 + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
7291 + DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
7292 + " alias data in lcu (rc = %d), retry later", rc);
7293 +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
7294 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
7295 ++ dasd_put_device(device);
7296 + } else {
7297 ++ dasd_put_device(device);
7298 + lcu->ruac_data.device = NULL;
7299 + lcu->flags &= ~UPDATE_PENDING;
7300 + }
7301 +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
7302 + */
7303 + if (!usedev)
7304 + return -EINVAL;
7305 ++ dasd_get_device(usedev);
7306 + lcu->ruac_data.device = usedev;
7307 +- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
7308 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
7309 ++ dasd_put_device(usedev);
7310 + return 0;
7311 + }
7312 +
7313 +@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
7314 + ASCEBC((char *) &cqr->magic, 4);
7315 + ccw = cqr->cpaddr;
7316 + ccw->cmd_code = DASD_ECKD_CCW_RSCK;
7317 +- ccw->flags = 0 ;
7318 ++ ccw->flags = CCW_FLAG_SLI;
7319 + ccw->count = 16;
7320 + ccw->cda = (__u32)(addr_t) cqr->data;
7321 + ((char *)cqr->data)[0] = reason;
7322 +@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
7323 + /* 3. read new alias configuration */
7324 + _schedule_lcu_update(lcu, device);
7325 + lcu->suc_data.device = NULL;
7326 ++ dasd_put_device(device);
7327 + spin_unlock_irqrestore(&lcu->lock, flags);
7328 + }
7329 +
7330 +@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
7331 + }
7332 + lcu->suc_data.reason = reason;
7333 + lcu->suc_data.device = device;
7334 ++ dasd_get_device(device);
7335 + spin_unlock(&lcu->lock);
7336 +- schedule_work(&lcu->suc_data.worker);
7337 ++ if (!schedule_work(&lcu->suc_data.worker))
7338 ++ dasd_put_device(device);
7339 + };
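
The dasd_alias hunks above all enforce one rule: a reference must accompany
any device pointer handed to deferred work, and since schedule_work() and
schedule_delayed_work() return false when the work was already queued (that
queued instance already carries its reference), the freshly taken reference
must be dropped on that path. A minimal sketch, with dev_get()/dev_put()
standing in for dasd_get_device()/dasd_put_device():

#include <linux/workqueue.h>

struct dev_sketch {
        struct work_struct work;
        /* ... refcount and device state ... */
};

static void dev_get(struct dev_sketch *dev) { /* e.g. kref_get() */ }
static void dev_put(struct dev_sketch *dev) { /* e.g. kref_put() */ }

static void kick_worker(struct dev_sketch *dev)
{
        dev_get(dev);                   /* reference travels with the work */
        if (!schedule_work(&dev->work))
                dev_put(dev);           /* already queued: that instance
                                         * owns a reference, drop ours */
}

static void worker_fn(struct work_struct *work)
{
        struct dev_sketch *dev = container_of(work, struct dev_sketch, work);

        /* ... handle the event ... */
        dev_put(dev);                   /* balances the dev_get() above */
}
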
7340 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
7341 +index 16a1935cc9c1..e197c6f39de2 100644
7342 +--- a/drivers/scsi/qla2xxx/qla_init.c
7343 ++++ b/drivers/scsi/qla2xxx/qla_init.c
7344 +@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
7345 + /* Clear outstanding commands array. */
7346 + for (que = 0; que < ha->max_req_queues; que++) {
7347 + req = ha->req_q_map[que];
7348 +- if (!req)
7349 ++ if (!req || !test_bit(que, ha->req_qid_map))
7350 + continue;
7351 + req->out_ptr = (void *)(req->ring + req->length);
7352 + *req->out_ptr = 0;
7353 +@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
7354 +
7355 + for (que = 0; que < ha->max_rsp_queues; que++) {
7356 + rsp = ha->rsp_q_map[que];
7357 +- if (!rsp)
7358 ++ if (!rsp || !test_bit(que, ha->rsp_qid_map))
7359 + continue;
7360 + rsp->in_ptr = (void *)(rsp->ring + rsp->length);
7361 + *rsp->in_ptr = 0;
7362 +@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
7363 +
7364 + for (i = 1; i < ha->max_rsp_queues; i++) {
7365 + rsp = ha->rsp_q_map[i];
7366 +- if (rsp) {
7367 ++ if (rsp && test_bit(i, ha->rsp_qid_map)) {
7368 + rsp->options &= ~BIT_0;
7369 + ret = qla25xx_init_rsp_que(base_vha, rsp);
7370 + if (ret != QLA_SUCCESS)
7371 +@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
7372 + }
7373 + for (i = 1; i < ha->max_req_queues; i++) {
7374 + req = ha->req_q_map[i];
7375 +- if (req) {
7376 +- /* Clear outstanding commands array. */
7377 ++ if (req && test_bit(i, ha->req_qid_map)) {
7378 ++ /* Clear outstanding commands array. */
7379 + req->options &= ~BIT_0;
7380 + ret = qla25xx_init_req_que(base_vha, req);
7381 + if (ret != QLA_SUCCESS)
7382 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
7383 +index ccf6a7f99024..0e59731f95ad 100644
7384 +--- a/drivers/scsi/qla2xxx/qla_isr.c
7385 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
7386 +@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
7387 + "MSI-X: Failed to enable support "
7388 + "-- %d/%d\n Retry with %d vectors.\n",
7389 + ha->msix_count, ret, ret);
7390 ++ ha->msix_count = ret;
7391 ++ ha->max_rsp_queues = ha->msix_count - 1;
7392 + }
7393 +- ha->msix_count = ret;
7394 +- ha->max_rsp_queues = ha->msix_count - 1;
7395 + ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
7396 + ha->msix_count, GFP_KERNEL);
7397 + if (!ha->msix_entries) {
7398 +diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
7399 +index c5dd594f6c31..cf7ba52bae66 100644
7400 +--- a/drivers/scsi/qla2xxx/qla_mid.c
7401 ++++ b/drivers/scsi/qla2xxx/qla_mid.c
7402 +@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
7403 + /* Delete request queues */
7404 + for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
7405 + req = ha->req_q_map[cnt];
7406 +- if (req) {
7407 ++ if (req && test_bit(cnt, ha->req_qid_map)) {
7408 + ret = qla25xx_delete_req_que(vha, req);
7409 + if (ret != QLA_SUCCESS) {
7410 + ql_log(ql_log_warn, vha, 0x00ea,
7411 +@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
7412 + /* Delete response queues */
7413 + for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
7414 + rsp = ha->rsp_q_map[cnt];
7415 +- if (rsp) {
7416 ++ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
7417 + ret = qla25xx_delete_rsp_que(vha, rsp);
7418 + if (ret != QLA_SUCCESS) {
7419 + ql_log(ql_log_warn, vha, 0x00eb,
7420 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
7421 +index bfa9a64c316b..fc6674db4f2d 100644
7422 +--- a/drivers/scsi/qla2xxx/qla_os.c
7423 ++++ b/drivers/scsi/qla2xxx/qla_os.c
7424 +@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
7425 + int cnt;
7426 +
7427 + for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
7428 ++ if (!test_bit(cnt, ha->req_qid_map))
7429 ++ continue;
7430 ++
7431 + req = ha->req_q_map[cnt];
7432 + qla2x00_free_req_que(ha, req);
7433 + }
7434 +@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
7435 + ha->req_q_map = NULL;
7436 +
7437 + for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
7438 ++ if (!test_bit(cnt, ha->rsp_qid_map))
7439 ++ continue;
7440 ++
7441 + rsp = ha->rsp_q_map[cnt];
7442 + qla2x00_free_rsp_que(ha, rsp);
7443 + }
7444 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
7445 +index ddbe2e7ac14d..c3e622524604 100644
7446 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
7447 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
7448 +@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
7449 + if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
7450 + for (i = 0; i < vha->hw->max_req_queues; i++) {
7451 + struct req_que *req = vha->hw->req_q_map[i];
7452 ++
7453 ++ if (!test_bit(i, vha->hw->req_qid_map))
7454 ++ continue;
7455 ++
7456 + if (req || !buf) {
7457 + length = req ?
7458 + req->length : REQUEST_ENTRY_CNT_24XX;
7459 +@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
7460 + } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
7461 + for (i = 0; i < vha->hw->max_rsp_queues; i++) {
7462 + struct rsp_que *rsp = vha->hw->rsp_q_map[i];
7463 ++
7464 ++ if (!test_bit(i, vha->hw->rsp_qid_map))
7465 ++ continue;
7466 ++
7467 + if (rsp || !buf) {
7468 + length = rsp ?
7469 + rsp->length : RESPONSE_ENTRY_CNT_MQ;
7470 +@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
7471 + if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
7472 + for (i = 0; i < vha->hw->max_req_queues; i++) {
7473 + struct req_que *req = vha->hw->req_q_map[i];
7474 ++
7475 ++ if (!test_bit(i, vha->hw->req_qid_map))
7476 ++ continue;
7477 ++
7478 + if (req || !buf) {
7479 + qla27xx_insert16(i, buf, len);
7480 + qla27xx_insert16(1, buf, len);
7481 +@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
7482 + } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
7483 + for (i = 0; i < vha->hw->max_rsp_queues; i++) {
7484 + struct rsp_que *rsp = vha->hw->rsp_q_map[i];
7485 ++
7486 ++ if (!test_bit(i, vha->hw->rsp_qid_map))
7487 ++ continue;
7488 ++
7489 + if (rsp || !buf) {
7490 + qla27xx_insert16(i, buf, len);
7491 + qla27xx_insert16(1, buf, len);
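
The five qla2xxx hunks above add one and the same guard: req_q_map[] and
rsp_q_map[] are sparse arrays whose slots can hold stale pointers after a
partial setup or teardown, so a queue-id bitmap records which slots are live
and every loop tests it before dereferencing. The idea in a self-contained
userspace sketch:

#include <stdbool.h>
#include <stddef.h>

#define MAX_QUEUES 8UL

static void *queue_map[MAX_QUEUES];     /* sparse: slots may be stale */
static unsigned long qid_map;           /* bit n set => slot n is live */

static bool qid_live(unsigned long n)
{
        return qid_map & (1UL << n);    /* mirrors the test_bit() checks */
}

static void free_all_queues(void (*free_queue)(void *))
{
        for (unsigned long n = 0; n < MAX_QUEUES; n++) {
                if (!qid_live(n))       /* skip slots that were never */
                        continue;       /* initialized or already freed */
                free_queue(queue_map[n]);
                queue_map[n] = NULL;
                qid_map &= ~(1UL << n);
        }
}
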
7492 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7493 +index 84fa4c46eaa6..bb669d32ccd0 100644
7494 +--- a/drivers/scsi/sd.c
7495 ++++ b/drivers/scsi/sd.c
7496 +@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
7497 + sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
7498 + sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
7499 + rw_max = q->limits.io_opt =
7500 +- logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
7501 ++ sdkp->opt_xfer_blocks * sdp->sector_size;
7502 + else
7503 + rw_max = BLK_DEF_MAX_SECTORS;
7504 +
7505 +diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
7506 +index aebad36391c9..8feac599e9ab 100644
7507 +--- a/drivers/spi/spi-atmel.c
7508 ++++ b/drivers/spi/spi-atmel.c
7509 +@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
7510 +
7511 + as->use_cs_gpios = true;
7512 + if (atmel_spi_is_v2(as) &&
7513 ++ pdev->dev.of_node &&
7514 + !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
7515 + as->use_cs_gpios = false;
7516 + master->num_chipselect = 4;
7517 +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
7518 +index 1f8903d356e5..ed8283e7397a 100644
7519 +--- a/drivers/spi/spi-omap2-mcspi.c
7520 ++++ b/drivers/spi/spi-omap2-mcspi.c
7521 +@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
7522 + spi->controller_state = cs;
7523 + /* Link this to context save list */
7524 + list_add_tail(&cs->node, &ctx->cs);
7525 ++
7526 ++ if (gpio_is_valid(spi->cs_gpio)) {
7527 ++ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
7528 ++ if (ret) {
7529 ++ dev_err(&spi->dev, "failed to request gpio\n");
7530 ++ return ret;
7531 ++ }
7532 ++ gpio_direction_output(spi->cs_gpio,
7533 ++ !(spi->mode & SPI_CS_HIGH));
7534 ++ }
7535 + }
7536 +
7537 + if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
7538 +@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
7539 + return ret;
7540 + }
7541 +
7542 +- if (gpio_is_valid(spi->cs_gpio)) {
7543 +- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
7544 +- if (ret) {
7545 +- dev_err(&spi->dev, "failed to request gpio\n");
7546 +- return ret;
7547 +- }
7548 +- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
7549 +- }
7550 +-
7551 + ret = pm_runtime_get_sync(mcspi->dev);
7552 + if (ret < 0)
7553 + return ret;
7554 +diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
7555 +index 79ac19246548..70b8f4fabfad 100644
7556 +--- a/drivers/staging/panel/panel.c
7557 ++++ b/drivers/staging/panel/panel.c
7558 +@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
7559 + lcd_send_serial(0x1F); /* R/W=W, RS=0 */
7560 + lcd_send_serial(cmd & 0x0F);
7561 + lcd_send_serial((cmd >> 4) & 0x0F);
7562 +- /* the shortest command takes at least 40 us */
7563 +- usleep_range(40, 100);
7564 ++ udelay(40); /* the shortest command takes at least 40 us */
7565 + spin_unlock_irq(&pprt_lock);
7566 + }
7567 +
7568 +@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
7569 + lcd_send_serial(0x5F); /* R/W=W, RS=1 */
7570 + lcd_send_serial(data & 0x0F);
7571 + lcd_send_serial((data >> 4) & 0x0F);
7572 +- /* the shortest data takes at least 40 us */
7573 +- usleep_range(40, 100);
7574 ++ udelay(40); /* the shortest data takes at least 40 us */
7575 + spin_unlock_irq(&pprt_lock);
7576 + }
7577 +
7578 +@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
7579 + spin_lock_irq(&pprt_lock);
7580 + /* present the data to the data port */
7581 + w_dtr(pprt, cmd);
7582 +- /* maintain the data during 20 us before the strobe */
7583 +- usleep_range(20, 100);
7584 ++ udelay(20); /* maintain the data during 20 us before the strobe */
7585 +
7586 + bits.e = BIT_SET;
7587 + bits.rs = BIT_CLR;
7588 + bits.rw = BIT_CLR;
7589 + set_ctrl_bits();
7590 +
7591 +- usleep_range(40, 100); /* maintain the strobe during 40 us */
7592 ++ udelay(40); /* maintain the strobe during 40 us */
7593 +
7594 + bits.e = BIT_CLR;
7595 + set_ctrl_bits();
7596 +
7597 +- usleep_range(120, 500); /* the shortest command takes at least 120 us */
7598 ++ udelay(120); /* the shortest command takes at least 120 us */
7599 + spin_unlock_irq(&pprt_lock);
7600 + }
7601 +
7602 +@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
7603 + spin_lock_irq(&pprt_lock);
7604 + /* present the data to the data port */
7605 + w_dtr(pprt, data);
7606 +- /* maintain the data during 20 us before the strobe */
7607 +- usleep_range(20, 100);
7608 ++ udelay(20); /* maintain the data during 20 us before the strobe */
7609 +
7610 + bits.e = BIT_SET;
7611 + bits.rs = BIT_SET;
7612 + bits.rw = BIT_CLR;
7613 + set_ctrl_bits();
7614 +
7615 +- usleep_range(40, 100); /* maintain the strobe during 40 us */
7616 ++ udelay(40); /* maintain the strobe during 40 us */
7617 +
7618 + bits.e = BIT_CLR;
7619 + set_ctrl_bits();
7620 +
7621 +- usleep_range(45, 100); /* the shortest data takes at least 45 us */
7622 ++ udelay(45); /* the shortest data takes at least 45 us */
7623 + spin_unlock_irq(&pprt_lock);
7624 + }
7625 +
7626 +@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
7627 + spin_lock_irq(&pprt_lock);
7628 + /* present the data to the control port */
7629 + w_ctr(pprt, cmd);
7630 +- usleep_range(60, 120);
7631 ++ udelay(60);
7632 + spin_unlock_irq(&pprt_lock);
7633 + }
7634 +
7635 +@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
7636 + spin_lock_irq(&pprt_lock);
7637 + /* present the data to the data port */
7638 + w_dtr(pprt, data);
7639 +- usleep_range(60, 120);
7640 ++ udelay(60);
7641 + spin_unlock_irq(&pprt_lock);
7642 + }
7643 +
7644 +@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
7645 + lcd_send_serial(0x5F); /* R/W=W, RS=1 */
7646 + lcd_send_serial(' ' & 0x0F);
7647 + lcd_send_serial((' ' >> 4) & 0x0F);
7648 +- usleep_range(40, 100); /* the shortest data takes at least 40 us */
7649 ++ udelay(40); /* the shortest data takes at least 40 us */
7650 + }
7651 + spin_unlock_irq(&pprt_lock);
7652 +
7653 +@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
7654 + w_dtr(pprt, ' ');
7655 +
7656 + /* maintain the data during 20 us before the strobe */
7657 +- usleep_range(20, 100);
7658 ++ udelay(20);
7659 +
7660 + bits.e = BIT_SET;
7661 + bits.rs = BIT_SET;
7662 +@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
7663 + set_ctrl_bits();
7664 +
7665 + /* maintain the strobe during 40 us */
7666 +- usleep_range(40, 100);
7667 ++ udelay(40);
7668 +
7669 + bits.e = BIT_CLR;
7670 + set_ctrl_bits();
7671 +
7672 + /* the shortest data takes at least 45 us */
7673 +- usleep_range(45, 100);
7674 ++ udelay(45);
7675 + }
7676 + spin_unlock_irq(&pprt_lock);
7677 +
7678 +@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
7679 + for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
7680 + /* present the data to the data port */
7681 + w_dtr(pprt, ' ');
7682 +- usleep_range(60, 120);
7683 ++ udelay(60);
7684 + }
7685 +
7686 + spin_unlock_irq(&pprt_lock);
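
Every wait converted above sits between spin_lock_irq() and spin_unlock_irq(),
i.e. in atomic context, where usleep_range() is invalid because it sleeps
("scheduling while atomic"); udelay() busy-waits on the CPU and is the only
legal primitive there. A minimal sketch of the constraint:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lcd_lock_sketch);        /* stands in for pprt_lock */

static void lcd_write_sketch(void)
{
        spin_lock_irq(&lcd_lock_sketch);
        /* ... drive the parallel-port pins ... */

        /*
         * With IRQs off and a spinlock held, sleeping primitives such
         * as usleep_range() or msleep() must not be called. udelay()
         * spins instead, which is acceptable for waits this short
         * (tens of microseconds).
         */
        udelay(40);

        spin_unlock_irq(&lcd_lock_sketch);
}
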
7687 +diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
7688 +index 3b5835b28128..a5bbb338f275 100644
7689 +--- a/drivers/staging/speakup/serialio.c
7690 ++++ b/drivers/staging/speakup/serialio.c
7691 +@@ -6,6 +6,11 @@
7692 + #include "spk_priv.h"
7693 + #include "serialio.h"
7694 +
7695 ++#include <linux/serial_core.h>
7696 ++/* WARNING: Do not change this to <linux/serial.h> without testing that
7697 ++ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
7698 ++#include <asm/serial.h>
7699 ++
7700 + #ifndef SERIAL_PORT_DFNS
7701 + #define SERIAL_PORT_DFNS
7702 + #endif
7703 +@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
7704 + int baud = 9600, quot = 0;
7705 + unsigned int cval = 0;
7706 + int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
7707 +- const struct old_serial_port *ser = rs_table + index;
7708 ++ const struct old_serial_port *ser;
7709 + int err;
7710 +
7711 ++ if (index >= ARRAY_SIZE(rs_table)) {
7712 ++ pr_info("no port info for ttyS%d\n", index);
7713 ++ return NULL;
7714 ++ }
7715 ++ ser = rs_table + index;
7716 ++
7717 + /* Divisor, bytesize and parity */
7718 + quot = ser->baud_base / baud;
7719 + cval = cflag & (CSIZE | CSTOPB);
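
The added ARRAY_SIZE() check matters because the index comes from the caller
while rs_table has a fixed size; even forming the out-of-range pointer
rs_table + index is undefined behaviour, before any dereference happens. The
same guard in a self-contained sketch:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct serial_port_sketch {
        unsigned int baud_base;
};

static const struct serial_port_sketch rs_table_sketch[] = {
        { 115200 }, { 115200 }, { 115200 }, { 115200 },
};

static const struct serial_port_sketch *port_lookup(int index)
{
        /* Validate before indexing: a negative or too-large index
         * must never reach the pointer arithmetic below. */
        if (index < 0 || (size_t)index >= ARRAY_SIZE(rs_table_sketch)) {
                fprintf(stderr, "no port info for ttyS%d\n", index);
                return NULL;
        }
        return &rs_table_sketch[index];
}
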
7720 +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
7721 +index 28fb3016370f..88029cc6de5e 100644
7722 +--- a/drivers/target/target_core_tmr.c
7723 ++++ b/drivers/target/target_core_tmr.c
7724 +@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
7725 +
7726 + if (dev) {
7727 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
7728 +- list_del(&tmr->tmr_list);
7729 ++ list_del_init(&tmr->tmr_list);
7730 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
7731 + }
7732 +
7733 + kfree(tmr);
7734 + }
7735 +
7736 +-static void core_tmr_handle_tas_abort(
7737 +- struct se_node_acl *tmr_nacl,
7738 +- struct se_cmd *cmd,
7739 +- int tas)
7740 ++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
7741 + {
7742 +- bool remove = true;
7743 ++ unsigned long flags;
7744 ++ bool remove = true, send_tas;
7745 + /*
7746 + * TASK ABORTED status (TAS) bit support
7747 + */
7748 +- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
7749 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
7750 ++ send_tas = (cmd->transport_state & CMD_T_TAS);
7751 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
7752 ++
7753 ++ if (send_tas) {
7754 + remove = false;
7755 + transport_send_task_abort(cmd);
7756 + }
7757 +@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
7758 + return 1;
7759 + }
7760 +
7761 ++static bool __target_check_io_state(struct se_cmd *se_cmd,
7762 ++ struct se_session *tmr_sess, int tas)
7763 ++{
7764 ++ struct se_session *sess = se_cmd->se_sess;
7765 ++
7766 ++ assert_spin_locked(&sess->sess_cmd_lock);
7767 ++ WARN_ON_ONCE(!irqs_disabled());
7768 ++ /*
7769 ++ * If command already reached CMD_T_COMPLETE state within
7770 ++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
7771 ++ * this se_cmd has been passed to fabric driver and will
7772 ++ * not be aborted.
7773 ++ *
7774 ++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
7775 ++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
7776 ++ * long as se_cmd->cmd_kref is still active unless zero.
7777 ++ */
7778 ++ spin_lock(&se_cmd->t_state_lock);
7779 ++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
7780 ++ pr_debug("Attempted to abort io tag: %llu already complete or"
7781 ++ " fabric stop, skipping\n", se_cmd->tag);
7782 ++ spin_unlock(&se_cmd->t_state_lock);
7783 ++ return false;
7784 ++ }
7785 ++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
7786 ++ pr_debug("Attempted to abort io tag: %llu already shutdown,"
7787 ++ " skipping\n", se_cmd->tag);
7788 ++ spin_unlock(&se_cmd->t_state_lock);
7789 ++ return false;
7790 ++ }
7791 ++ se_cmd->transport_state |= CMD_T_ABORTED;
7792 ++
7793 ++ if ((tmr_sess != se_cmd->se_sess) && tas)
7794 ++ se_cmd->transport_state |= CMD_T_TAS;
7795 ++
7796 ++ spin_unlock(&se_cmd->t_state_lock);
7797 ++
7798 ++ return kref_get_unless_zero(&se_cmd->cmd_kref);
7799 ++}
7800 ++
7801 + void core_tmr_abort_task(
7802 + struct se_device *dev,
7803 + struct se_tmr_req *tmr,
7804 +@@ -130,34 +172,22 @@ void core_tmr_abort_task(
7805 + if (tmr->ref_task_tag != ref_tag)
7806 + continue;
7807 +
7808 +- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
7809 +- continue;
7810 +-
7811 + printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
7812 + se_cmd->se_tfo->get_fabric_name(), ref_tag);
7813 +
7814 +- spin_lock(&se_cmd->t_state_lock);
7815 +- if (se_cmd->transport_state & CMD_T_COMPLETE) {
7816 +- printk("ABORT_TASK: ref_tag: %llu already complete,"
7817 +- " skipping\n", ref_tag);
7818 +- spin_unlock(&se_cmd->t_state_lock);
7819 ++ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
7820 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7821 +-
7822 + target_put_sess_cmd(se_cmd);
7823 +-
7824 + goto out;
7825 + }
7826 +- se_cmd->transport_state |= CMD_T_ABORTED;
7827 +- spin_unlock(&se_cmd->t_state_lock);
7828 +-
7829 + list_del_init(&se_cmd->se_cmd_list);
7830 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7831 +
7832 + cancel_work_sync(&se_cmd->work);
7833 + transport_wait_for_tasks(se_cmd);
7834 +
7835 +- target_put_sess_cmd(se_cmd);
7836 + transport_cmd_finish_abort(se_cmd, true);
7837 ++ target_put_sess_cmd(se_cmd);
7838 +
7839 + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
7840 + " ref_tag: %llu\n", ref_tag);
7841 +@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
7842 + struct list_head *preempt_and_abort_list)
7843 + {
7844 + LIST_HEAD(drain_tmr_list);
7845 ++ struct se_session *sess;
7846 + struct se_tmr_req *tmr_p, *tmr_pp;
7847 + struct se_cmd *cmd;
7848 + unsigned long flags;
7849 ++ bool rc;
7850 + /*
7851 + * Release all pending and outgoing TMRs aside from the received
7852 + * LUN_RESET tmr..
7853 +@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
7854 + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
7855 + continue;
7856 +
7857 ++ sess = cmd->se_sess;
7858 ++ if (WARN_ON_ONCE(!sess))
7859 ++ continue;
7860 ++
7861 ++ spin_lock(&sess->sess_cmd_lock);
7862 + spin_lock(&cmd->t_state_lock);
7863 +- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
7864 ++ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
7865 ++ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
7866 + spin_unlock(&cmd->t_state_lock);
7867 ++ spin_unlock(&sess->sess_cmd_lock);
7868 + continue;
7869 + }
7870 + if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
7871 + spin_unlock(&cmd->t_state_lock);
7872 ++ spin_unlock(&sess->sess_cmd_lock);
7873 + continue;
7874 + }
7875 ++ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
7876 ++ spin_unlock(&cmd->t_state_lock);
7877 ++ spin_unlock(&sess->sess_cmd_lock);
7878 ++ continue;
7879 ++ }
7880 ++ cmd->transport_state |= CMD_T_ABORTED;
7881 + spin_unlock(&cmd->t_state_lock);
7882 +
7883 ++ rc = kref_get_unless_zero(&cmd->cmd_kref);
7884 ++ if (!rc) {
7885 ++ printk("LUN_RESET TMR: kref_get_unless_zero failed\n");
7886 ++ spin_unlock(&sess->sess_cmd_lock);
7887 ++ continue;
7888 ++ }
7889 ++ spin_unlock(&sess->sess_cmd_lock);
7890 ++
7891 + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
7892 + }
7893 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
7894 +@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
7895 + (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
7896 + tmr_p->function, tmr_p->response, cmd->t_state);
7897 +
7898 ++ cancel_work_sync(&cmd->work);
7899 ++ transport_wait_for_tasks(cmd);
7900 ++
7901 + transport_cmd_finish_abort(cmd, 1);
7902 ++ target_put_sess_cmd(cmd);
7903 + }
7904 + }
7905 +
7906 + static void core_tmr_drain_state_list(
7907 + struct se_device *dev,
7908 + struct se_cmd *prout_cmd,
7909 +- struct se_node_acl *tmr_nacl,
7910 ++ struct se_session *tmr_sess,
7911 + int tas,
7912 + struct list_head *preempt_and_abort_list)
7913 + {
7914 + LIST_HEAD(drain_task_list);
7915 ++ struct se_session *sess;
7916 + struct se_cmd *cmd, *next;
7917 + unsigned long flags;
7918 ++ int rc;
7919 +
7920 + /*
7921 + * Complete outstanding commands with TASK_ABORTED SAM status.
7922 +@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
7923 + if (prout_cmd == cmd)
7924 + continue;
7925 +
7926 ++ sess = cmd->se_sess;
7927 ++ if (WARN_ON_ONCE(!sess))
7928 ++ continue;
7929 ++
7930 ++ spin_lock(&sess->sess_cmd_lock);
7931 ++ rc = __target_check_io_state(cmd, tmr_sess, tas);
7932 ++ spin_unlock(&sess->sess_cmd_lock);
7933 ++ if (!rc)
7934 ++ continue;
7935 ++
7936 + list_move_tail(&cmd->state_list, &drain_task_list);
7937 + cmd->state_active = false;
7938 + }
7939 +@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
7940 +
7941 + while (!list_empty(&drain_task_list)) {
7942 + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
7943 +- list_del(&cmd->state_list);
7944 ++ list_del_init(&cmd->state_list);
7945 +
7946 + pr_debug("LUN_RESET: %s cmd: %p"
7947 + " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
7948 +@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
7949 + * loop above, but we do it down here given that
7950 + * cancel_work_sync may block.
7951 + */
7952 +- if (cmd->t_state == TRANSPORT_COMPLETE)
7953 +- cancel_work_sync(&cmd->work);
7954 +-
7955 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
7956 +- target_stop_cmd(cmd, &flags);
7957 +-
7958 +- cmd->transport_state |= CMD_T_ABORTED;
7959 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
7960 ++ cancel_work_sync(&cmd->work);
7961 ++ transport_wait_for_tasks(cmd);
7962 +
7963 +- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
7964 ++ core_tmr_handle_tas_abort(cmd, tas);
7965 ++ target_put_sess_cmd(cmd);
7966 + }
7967 + }
7968 +
7969 +@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
7970 + {
7971 + struct se_node_acl *tmr_nacl = NULL;
7972 + struct se_portal_group *tmr_tpg = NULL;
7973 ++ struct se_session *tmr_sess = NULL;
7974 + int tas;
7975 + /*
7976 + * TASK_ABORTED status bit, this is configurable via ConfigFS
7977 +@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
7978 + * or struct se_device passthrough..
7979 + */
7980 + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
7981 +- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
7982 +- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
7983 ++ tmr_sess = tmr->task_cmd->se_sess;
7984 ++ tmr_nacl = tmr_sess->se_node_acl;
7985 ++ tmr_tpg = tmr_sess->se_tpg;
7986 + if (tmr_nacl && tmr_tpg) {
7987 + pr_debug("LUN_RESET: TMR caller fabric: %s"
7988 + " initiator port %s\n",
7989 +@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
7990 + dev->transport->name, tas);
7991 +
7992 + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
7993 +- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
7994 ++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
7995 + preempt_and_abort_list);
7996 +
7997 + /*
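
__target_check_io_state() above encodes the core rule of this fix: a TMR path
may touch a command only if it can both prove under the lock that the command
is still abortable and win a reference via kref_get_unless_zero(), which fails
once the final put has begun releasing the object. A reduced sketch of the
pattern (struct and flag names hypothetical):

#include <linux/kref.h>
#include <linux/spinlock.h>

#define ST_COMPLETE     0x1             /* hypothetical state flags */
#define ST_ABORTED      0x2

struct cmd_sketch {
        struct kref kref;
        spinlock_t lock;
        unsigned int state;
};

/*
 * Returns true only if the command was still abortable and a
 * reference was taken; the caller must balance it with kref_put().
 */
static bool claim_for_abort(struct cmd_sketch *cmd)
{
        bool claimed;

        spin_lock(&cmd->lock);
        if (cmd->state & ST_COMPLETE) {
                spin_unlock(&cmd->lock);
                return false;           /* completion won the race */
        }
        cmd->state |= ST_ABORTED;       /* mark while still locked */
        /*
         * kref_get_unless_zero() refuses to resurrect an object whose
         * count already hit zero, so success guarantees the command
         * stays alive until our matching put.
         */
        claimed = kref_get_unless_zero(&cmd->kref);
        spin_unlock(&cmd->lock);

        return claimed;
}
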
7998 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
7999 +index 4fdcee2006d1..94f4ffac723f 100644
8000 +--- a/drivers/target/target_core_transport.c
8001 ++++ b/drivers/target/target_core_transport.c
8002 +@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
8003 + }
8004 + EXPORT_SYMBOL(transport_deregister_session);
8005 +
8006 +-/*
8007 +- * Called with cmd->t_state_lock held.
8008 +- */
8009 + static void target_remove_from_state_list(struct se_cmd *cmd)
8010 + {
8011 + struct se_device *dev = cmd->se_dev;
8012 +@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
8013 + {
8014 + unsigned long flags;
8015 +
8016 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8017 +- if (write_pending)
8018 +- cmd->t_state = TRANSPORT_WRITE_PENDING;
8019 +-
8020 + if (remove_from_lists) {
8021 + target_remove_from_state_list(cmd);
8022 +
8023 +@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
8024 + cmd->se_lun = NULL;
8025 + }
8026 +
8027 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8028 ++ if (write_pending)
8029 ++ cmd->t_state = TRANSPORT_WRITE_PENDING;
8030 ++
8031 + /*
8032 + * Determine if frontend context caller is requesting the stopping of
8033 + * this command for frontend exceptions.
8034 +@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
8035 +
8036 + void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
8037 + {
8038 ++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
8039 ++
8040 + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
8041 + transport_lun_remove_cmd(cmd);
8042 + /*
8043 +@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
8044 +
8045 + if (transport_cmd_check_stop_to_fabric(cmd))
8046 + return;
8047 +- if (remove)
8048 ++ if (remove && ack_kref)
8049 + transport_put_cmd(cmd);
8050 + }
8051 +
8052 +@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
8053 + * Check for case where an explicit ABORT_TASK has been received
8054 + * and transport_wait_for_tasks() will be waiting for completion..
8055 + */
8056 +- if (cmd->transport_state & CMD_T_ABORTED &&
8057 ++ if (cmd->transport_state & CMD_T_ABORTED ||
8058 + cmd->transport_state & CMD_T_STOP) {
8059 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8060 + complete_all(&cmd->t_transport_stop_comp);
8061 +@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
8062 + return true;
8063 + }
8064 +
8065 ++static int __transport_check_aborted_status(struct se_cmd *, int);
8066 ++
8067 + void target_execute_cmd(struct se_cmd *cmd)
8068 + {
8069 + /*
8070 +- * If the received CDB has already been aborted, stop processing it here.
8071 +- */
8072 +- if (transport_check_aborted_status(cmd, 1))
8073 +- return;
8074 +-
8075 +- /*
8076 + * Determine if frontend context caller is requesting the stopping of
8077 + * this command for frontend exceptions.
8078 ++ *
8079 ++ * If the received CDB has already been aborted, stop processing it here.
8080 + */
8081 + spin_lock_irq(&cmd->t_state_lock);
8082 ++ if (__transport_check_aborted_status(cmd, 1)) {
8083 ++ spin_unlock_irq(&cmd->t_state_lock);
8084 ++ return;
8085 ++ }
8086 + if (cmd->transport_state & CMD_T_STOP) {
8087 + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
8088 + __func__, __LINE__, cmd->tag);
8089 +@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
8090 + }
8091 +
8092 + /**
8093 +- * transport_release_cmd - free a command
8094 +- * @cmd: command to free
8095 ++ * transport_put_cmd - release a reference to a command
8096 ++ * @cmd: command to release
8097 + *
8098 +- * This routine unconditionally frees a command, and reference counting
8099 +- * or list removal must be done in the caller.
8100 ++ * This routine releases our reference to the command and frees it if possible.
8101 + */
8102 +-static int transport_release_cmd(struct se_cmd *cmd)
8103 ++static int transport_put_cmd(struct se_cmd *cmd)
8104 + {
8105 + BUG_ON(!cmd->se_tfo);
8106 +-
8107 +- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
8108 +- core_tmr_release_req(cmd->se_tmr_req);
8109 +- if (cmd->t_task_cdb != cmd->__t_task_cdb)
8110 +- kfree(cmd->t_task_cdb);
8111 + /*
8112 + * If this cmd has been setup with target_get_sess_cmd(), drop
8113 + * the kref and call ->release_cmd() in kref callback.
8114 +@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
8115 + return target_put_sess_cmd(cmd);
8116 + }
8117 +
8118 +-/**
8119 +- * transport_put_cmd - release a reference to a command
8120 +- * @cmd: command to release
8121 +- *
8122 +- * This routine releases our reference to the command and frees it if possible.
8123 +- */
8124 +-static int transport_put_cmd(struct se_cmd *cmd)
8125 +-{
8126 +- transport_free_pages(cmd);
8127 +- return transport_release_cmd(cmd);
8128 +-}
8129 +-
8130 + void *transport_kmap_data_sg(struct se_cmd *cmd)
8131 + {
8132 + struct scatterlist *sg = cmd->t_data_sg;
8133 +@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
8134 + }
8135 + }
8136 +
8137 +-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
8138 ++static bool
8139 ++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
8140 ++ unsigned long *flags);
8141 ++
8142 ++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
8143 + {
8144 + unsigned long flags;
8145 ++
8146 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8147 ++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
8148 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8149 ++}
8150 ++
8151 ++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
8152 ++{
8153 + int ret = 0;
8154 ++ bool aborted = false, tas = false;
8155 +
8156 + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
8157 + if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8158 +- transport_wait_for_tasks(cmd);
8159 ++ target_wait_free_cmd(cmd, &aborted, &tas);
8160 +
8161 +- ret = transport_release_cmd(cmd);
8162 ++ if (!aborted || tas)
8163 ++ ret = transport_put_cmd(cmd);
8164 + } else {
8165 + if (wait_for_tasks)
8166 +- transport_wait_for_tasks(cmd);
8167 ++ target_wait_free_cmd(cmd, &aborted, &tas);
8168 + /*
8169 + * Handle WRITE failure case where transport_generic_new_cmd()
8170 + * has already added se_cmd to state_list, but fabric has
8171 + * failed command before I/O submission.
8172 + */
8173 +- if (cmd->state_active) {
8174 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8175 ++ if (cmd->state_active)
8176 + target_remove_from_state_list(cmd);
8177 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8178 +- }
8179 +
8180 + if (cmd->se_lun)
8181 + transport_lun_remove_cmd(cmd);
8182 +
8183 +- ret = transport_put_cmd(cmd);
8184 ++ if (!aborted || tas)
8185 ++ ret = transport_put_cmd(cmd);
8186 ++ }
8187 ++ /*
8188 ++ * If the task has been internally aborted due to TMR ABORT_TASK
8189 ++ * or LUN_RESET, target_core_tmr.c is responsible for performing
8190 ++ * the remaining calls to target_put_sess_cmd(), and not the
8191 ++ * callers of this function.
8192 ++ */
8193 ++ if (aborted) {
8194 ++ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
8195 ++ wait_for_completion(&cmd->cmd_wait_comp);
8196 ++ cmd->se_tfo->release_cmd(cmd);
8197 ++ ret = 1;
8198 + }
8199 + return ret;
8200 + }
8201 +@@ -2508,26 +2515,46 @@ out:
8202 + }
8203 + EXPORT_SYMBOL(target_get_sess_cmd);
8204 +
8205 ++static void target_free_cmd_mem(struct se_cmd *cmd)
8206 ++{
8207 ++ transport_free_pages(cmd);
8208 ++
8209 ++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
8210 ++ core_tmr_release_req(cmd->se_tmr_req);
8211 ++ if (cmd->t_task_cdb != cmd->__t_task_cdb)
8212 ++ kfree(cmd->t_task_cdb);
8213 ++}
8214 ++
8215 + static void target_release_cmd_kref(struct kref *kref)
8216 + {
8217 + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
8218 + struct se_session *se_sess = se_cmd->se_sess;
8219 + unsigned long flags;
8220 ++ bool fabric_stop;
8221 +
8222 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
8223 + if (list_empty(&se_cmd->se_cmd_list)) {
8224 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8225 ++ target_free_cmd_mem(se_cmd);
8226 + se_cmd->se_tfo->release_cmd(se_cmd);
8227 + return;
8228 + }
8229 +- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
8230 ++
8231 ++ spin_lock(&se_cmd->t_state_lock);
8232 ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
8233 ++ spin_unlock(&se_cmd->t_state_lock);
8234 ++
8235 ++ if (se_cmd->cmd_wait_set || fabric_stop) {
8236 ++ list_del_init(&se_cmd->se_cmd_list);
8237 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8238 ++ target_free_cmd_mem(se_cmd);
8239 + complete(&se_cmd->cmd_wait_comp);
8240 + return;
8241 + }
8242 +- list_del(&se_cmd->se_cmd_list);
8243 ++ list_del_init(&se_cmd->se_cmd_list);
8244 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8245 +
8246 ++ target_free_cmd_mem(se_cmd);
8247 + se_cmd->se_tfo->release_cmd(se_cmd);
8248 + }
8249 +
8250 +@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
8251 + struct se_session *se_sess = se_cmd->se_sess;
8252 +
8253 + if (!se_sess) {
8254 ++ target_free_cmd_mem(se_cmd);
8255 + se_cmd->se_tfo->release_cmd(se_cmd);
8256 + return 1;
8257 + }
8258 +@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
8259 + {
8260 + struct se_cmd *se_cmd;
8261 + unsigned long flags;
8262 ++ int rc;
8263 +
8264 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
8265 + if (se_sess->sess_tearing_down) {
8266 +@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
8267 + se_sess->sess_tearing_down = 1;
8268 + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
8269 +
8270 +- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
8271 +- se_cmd->cmd_wait_set = 1;
8272 ++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
8273 ++ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
8274 ++ if (rc) {
8275 ++ se_cmd->cmd_wait_set = 1;
8276 ++ spin_lock(&se_cmd->t_state_lock);
8277 ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
8278 ++ spin_unlock(&se_cmd->t_state_lock);
8279 ++ }
8280 ++ }
8281 +
8282 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8283 + }
8284 +@@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
8285 + {
8286 + struct se_cmd *se_cmd, *tmp_cmd;
8287 + unsigned long flags;
8288 ++ bool tas;
8289 +
8290 + list_for_each_entry_safe(se_cmd, tmp_cmd,
8291 + &se_sess->sess_wait_list, se_cmd_list) {
8292 +- list_del(&se_cmd->se_cmd_list);
8293 ++ list_del_init(&se_cmd->se_cmd_list);
8294 +
8295 + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
8296 + " %d\n", se_cmd, se_cmd->t_state,
8297 + se_cmd->se_tfo->get_cmd_state(se_cmd));
8298 +
8299 ++ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
8300 ++ tas = (se_cmd->transport_state & CMD_T_TAS);
8301 ++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
8302 ++
8303 ++ if (!target_put_sess_cmd(se_cmd)) {
8304 ++ if (tas)
8305 ++ target_put_sess_cmd(se_cmd);
8306 ++ }
8307 ++
8308 + wait_for_completion(&se_cmd->cmd_wait_comp);
8309 + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
8310 + " fabric state: %d\n", se_cmd, se_cmd->t_state,
8311 +@@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
8312 + wait_for_completion(&lun->lun_ref_comp);
8313 + }
8314 +
8315 +-/**
8316 +- * transport_wait_for_tasks - wait for completion to occur
8317 +- * @cmd: command to wait
8318 +- *
8319 +- * Called from frontend fabric context to wait for storage engine
8320 +- * to pause and/or release frontend generated struct se_cmd.
8321 +- */
8322 +-bool transport_wait_for_tasks(struct se_cmd *cmd)
8323 ++static bool
8324 ++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
8325 ++ bool *aborted, bool *tas, unsigned long *flags)
8326 ++ __releases(&cmd->t_state_lock)
8327 ++ __acquires(&cmd->t_state_lock)
8328 + {
8329 +- unsigned long flags;
8330 +
8331 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8332 ++ assert_spin_locked(&cmd->t_state_lock);
8333 ++ WARN_ON_ONCE(!irqs_disabled());
8334 ++
8335 ++ if (fabric_stop)
8336 ++ cmd->transport_state |= CMD_T_FABRIC_STOP;
8337 ++
8338 ++ if (cmd->transport_state & CMD_T_ABORTED)
8339 ++ *aborted = true;
8340 ++
8341 ++ if (cmd->transport_state & CMD_T_TAS)
8342 ++ *tas = true;
8343 ++
8344 + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
8345 +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
8346 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8347 ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8348 + return false;
8349 +- }
8350 +
8351 + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
8352 +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
8353 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8354 ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8355 + return false;
8356 +- }
8357 +
8358 +- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
8359 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8360 ++ if (!(cmd->transport_state & CMD_T_ACTIVE))
8361 ++ return false;
8362 ++
8363 ++ if (fabric_stop && *aborted)
8364 + return false;
8365 +- }
8366 +
8367 + cmd->transport_state |= CMD_T_STOP;
8368 +
8369 +- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
8370 +- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
8371 ++ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
8372 ++ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
8373 ++ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
8374 +
8375 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8376 ++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
8377 +
8378 + wait_for_completion(&cmd->t_transport_stop_comp);
8379 +
8380 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8381 ++ spin_lock_irqsave(&cmd->t_state_lock, *flags);
8382 + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
8383 +
8384 +- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
8385 +- cmd->tag);
8386 ++ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
8387 ++ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
8388 ++
8389 ++ return true;
8390 ++}
8391 +
8392 ++/**
8393 ++ * transport_wait_for_tasks - wait for completion to occur
8394 ++ * @cmd: command to wait
8395 ++ *
8396 ++ * Called from frontend fabric context to wait for storage engine
8397 ++ * to pause and/or release frontend generated struct se_cmd.
8398 ++ */
8399 ++bool transport_wait_for_tasks(struct se_cmd *cmd)
8400 ++{
8401 ++ unsigned long flags;
8402 ++ bool ret, aborted = false, tas = false;
8403 ++
8404 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8405 ++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
8406 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8407 +
8408 +- return true;
8409 ++ return ret;
8410 + }
8411 + EXPORT_SYMBOL(transport_wait_for_tasks);
8412 +
8413 +@@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
8414 + }
8415 + EXPORT_SYMBOL(transport_send_check_condition_and_sense);
8416 +
8417 +-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8418 ++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8419 ++ __releases(&cmd->t_state_lock)
8420 ++ __acquires(&cmd->t_state_lock)
8421 + {
8422 ++ assert_spin_locked(&cmd->t_state_lock);
8423 ++ WARN_ON_ONCE(!irqs_disabled());
8424 ++
8425 + if (!(cmd->transport_state & CMD_T_ABORTED))
8426 + return 0;
8427 +-
8428 + /*
8429 + * If cmd has been aborted but either no status is to be sent or it has
8430 + * already been sent, just return
8431 + */
8432 +- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
8433 ++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
8434 ++ if (send_status)
8435 ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
8436 + return 1;
8437 ++ }
8438 +
8439 +- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
8440 +- cmd->t_task_cdb[0], cmd->tag);
8441 ++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
8442 ++ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
8443 +
8444 + cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
8445 + cmd->scsi_status = SAM_STAT_TASK_ABORTED;
8446 + trace_target_cmd_complete(cmd);
8447 ++
8448 ++ spin_unlock_irq(&cmd->t_state_lock);
8449 + cmd->se_tfo->queue_status(cmd);
8450 ++ spin_lock_irq(&cmd->t_state_lock);
8451 +
8452 + return 1;
8453 + }
8454 ++
8455 ++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8456 ++{
8457 ++ int ret;
8458 ++
8459 ++ spin_lock_irq(&cmd->t_state_lock);
8460 ++ ret = __transport_check_aborted_status(cmd, send_status);
8461 ++ spin_unlock_irq(&cmd->t_state_lock);
8462 ++
8463 ++ return ret;
8464 ++}
8465 + EXPORT_SYMBOL(transport_check_aborted_status);
8466 +
8467 + void transport_send_task_abort(struct se_cmd *cmd)
8468 +@@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
8469 + */
8470 + if (cmd->data_direction == DMA_TO_DEVICE) {
8471 + if (cmd->se_tfo->write_pending_status(cmd) != 0) {
8472 +- cmd->transport_state |= CMD_T_ABORTED;
8473 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8474 ++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
8475 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8476 ++ goto send_abort;
8477 ++ }
8478 + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
8479 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8480 + return;
8481 + }
8482 + }
8483 ++send_abort:
8484 + cmd->scsi_status = SAM_STAT_TASK_ABORTED;
8485 +
8486 + transport_lun_remove_cmd(cmd);
8487 +@@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work)
8488 + struct se_cmd *cmd = container_of(work, struct se_cmd, work);
8489 + struct se_device *dev = cmd->se_dev;
8490 + struct se_tmr_req *tmr = cmd->se_tmr_req;
8491 ++ unsigned long flags;
8492 + int ret;
8493 +
8494 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8495 ++ if (cmd->transport_state & CMD_T_ABORTED) {
8496 ++ tmr->response = TMR_FUNCTION_REJECTED;
8497 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8498 ++ goto check_stop;
8499 ++ }
8500 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8501 ++
8502 + switch (tmr->function) {
8503 + case TMR_ABORT_TASK:
8504 + core_tmr_abort_task(dev, tmr, cmd->se_sess);
8505 +@@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work)
8506 + break;
8507 + }
8508 +
8509 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8510 ++ if (cmd->transport_state & CMD_T_ABORTED) {
8511 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8512 ++ goto check_stop;
8513 ++ }
8514 + cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
8515 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8516 ++
8517 + cmd->se_tfo->queue_tm_rsp(cmd);
8518 +
8519 ++check_stop:
8520 + transport_cmd_check_stop_to_fabric(cmd);
8521 + }
8522 +
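
__transport_wait_for_tasks() and __transport_check_aborted_status() above both
drop and retake a lock their caller holds, which is why they gain
__releases()/__acquires() annotations plus assert_spin_locked() guards: sparse
("make C=1") can then check the unusual locking contract statically, and the
assertion catches violations at runtime. The shape in isolation:

#include <linux/spinlock.h>

/*
 * Entered and left with *lock held, but the lock is dropped around a
 * step that may sleep. The annotations document the contract for
 * sparse; assert_spin_locked() enforces it at runtime.
 */
static void locked_helper_sketch(spinlock_t *lock)
        __releases(lock)
        __acquires(lock)
{
        assert_spin_locked(lock);

        spin_unlock_irq(lock);
        /* ... blocking work, e.g. queueing status to the fabric ... */
        spin_lock_irq(lock);
}
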
8523 +diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
8524 +index 2f9f7086ac3d..ea9366ad3e6b 100644
8525 +--- a/drivers/thermal/step_wise.c
8526 ++++ b/drivers/thermal/step_wise.c
8527 +@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
8528 + next_target = instance->target;
8529 + dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
8530 +
8531 ++ if (!instance->initialized) {
8532 ++ if (throttle) {
8533 ++ next_target = (cur_state + 1) >= instance->upper ?
8534 ++ instance->upper :
8535 ++ ((cur_state + 1) < instance->lower ?
8536 ++ instance->lower : (cur_state + 1));
8537 ++ } else {
8538 ++ next_target = THERMAL_NO_TARGET;
8539 ++ }
8540 ++
8541 ++ return next_target;
8542 ++ }
8543 ++
8544 + switch (trend) {
8545 + case THERMAL_TREND_RAISING:
8546 + if (throttle) {
8547 +@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
8548 + dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
8549 + old_target, (int)instance->target);
8550 +
8551 +- if (old_target == instance->target)
8552 ++ if (instance->initialized && old_target == instance->target)
8553 + continue;
8554 +
8555 + /* Activate a passive thermal instance */
8556 +@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
8557 + instance->target == THERMAL_NO_TARGET)
8558 + update_passive_instance(tz, trip_type, -1);
8559 +
8560 +-
8561 ++ instance->initialized = true;
8562 + instance->cdev->updated = false; /* cdev needs update */
8563 + }
8564 +
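
The nested conditional added above is simply a clamp of cur_state + 1 into
[lower, upper]; with the kernel's clamp() helper the same first-throttle
target fits on one line. A hedged equivalent:

#include <linux/kernel.h>

static unsigned long first_throttle_target(unsigned long cur_state,
                                           unsigned long lower,
                                           unsigned long upper)
{
        /* equivalent to the nested ?: expression in the patch */
        return clamp(cur_state + 1, lower, upper);
}
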
8565 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
8566 +index d9e525cc9c1c..ba08b5521382 100644
8567 +--- a/drivers/thermal/thermal_core.c
8568 ++++ b/drivers/thermal/thermal_core.c
8569 +@@ -37,6 +37,7 @@
8570 + #include <linux/of.h>
8571 + #include <net/netlink.h>
8572 + #include <net/genetlink.h>
8573 ++#include <linux/suspend.h>
8574 +
8575 + #define CREATE_TRACE_POINTS
8576 + #include <trace/events/thermal.h>
8577 +@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
8578 + static DEFINE_MUTEX(thermal_list_lock);
8579 + static DEFINE_MUTEX(thermal_governor_lock);
8580 +
8581 ++static atomic_t in_suspend;
8582 ++
8583 + static struct thermal_governor *def_governor;
8584 +
8585 + static struct thermal_governor *__find_governor(const char *name)
8586 +@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
8587 + mutex_unlock(&tz->lock);
8588 +
8589 + trace_thermal_temperature(tz);
8590 +- dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
8591 +- tz->last_temperature, tz->temperature);
8592 ++ if (tz->last_temperature == THERMAL_TEMP_INVALID)
8593 ++ dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
8594 ++ tz->temperature);
8595 ++ else
8596 ++ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
8597 ++ tz->last_temperature, tz->temperature);
8598 ++}
8599 ++
8600 ++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
8601 ++{
8602 ++ struct thermal_instance *pos;
8603 ++
8604 ++ tz->temperature = THERMAL_TEMP_INVALID;
8605 ++ tz->passive = 0;
8606 ++ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
8607 ++ pos->initialized = false;
8608 + }
8609 +
8610 + void thermal_zone_device_update(struct thermal_zone_device *tz)
8611 + {
8612 + int count;
8613 +
8614 ++ if (atomic_read(&in_suspend))
8615 ++ return;
8616 ++
8617 + if (!tz->ops->get_temp)
8618 + return;
8619 +
8620 +@@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
8621 + if (!result) {
8622 + list_add_tail(&dev->tz_node, &tz->thermal_instances);
8623 + list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
8624 ++ atomic_set(&tz->need_update, 1);
8625 + }
8626 + mutex_unlock(&cdev->lock);
8627 + mutex_unlock(&tz->lock);
8628 +@@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np,
8629 + const struct thermal_cooling_device_ops *ops)
8630 + {
8631 + struct thermal_cooling_device *cdev;
8632 ++ struct thermal_zone_device *pos = NULL;
8633 + int result;
8634 +
8635 + if (type && strlen(type) >= THERMAL_NAME_LENGTH)
8636 +@@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np,
8637 + /* Update binding information for 'this' new cdev */
8638 + bind_cdev(cdev);
8639 +
8640 ++ mutex_lock(&thermal_list_lock);
8641 ++ list_for_each_entry(pos, &thermal_tz_list, node)
8642 ++ if (atomic_cmpxchg(&pos->need_update, 1, 0))
8643 ++ thermal_zone_device_update(pos);
8644 ++ mutex_unlock(&thermal_list_lock);
8645 ++
8646 + return cdev;
8647 + }
8648 +
8649 +@@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
8650 + tz->trips = trips;
8651 + tz->passive_delay = passive_delay;
8652 + tz->polling_delay = polling_delay;
8653 ++ /* A new thermal zone needs to be updated anyway. */
8654 ++ atomic_set(&tz->need_update, 1);
8655 +
8656 + dev_set_name(&tz->device, "thermal_zone%d", tz->id);
8657 + result = device_register(&tz->device);
8658 +@@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
8659 +
8660 + INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
8661 +
8662 +- thermal_zone_device_update(tz);
8663 ++ thermal_zone_device_reset(tz);
8664 ++ /* Update the new thermal zone and mark it as already updated. */
8665 ++ if (atomic_cmpxchg(&tz->need_update, 1, 0))
8666 ++ thermal_zone_device_update(tz);
8667 +
8668 + return tz;
8669 +
8670 +@@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void)
8671 + thermal_gov_power_allocator_unregister();
8672 + }
8673 +
8674 ++static int thermal_pm_notify(struct notifier_block *nb,
8675 ++ unsigned long mode, void *_unused)
8676 ++{
8677 ++ struct thermal_zone_device *tz;
8678 ++
8679 ++ switch (mode) {
8680 ++ case PM_HIBERNATION_PREPARE:
8681 ++ case PM_RESTORE_PREPARE:
8682 ++ case PM_SUSPEND_PREPARE:
8683 ++ atomic_set(&in_suspend, 1);
8684 ++ break;
8685 ++ case PM_POST_HIBERNATION:
8686 ++ case PM_POST_RESTORE:
8687 ++ case PM_POST_SUSPEND:
8688 ++ atomic_set(&in_suspend, 0);
8689 ++ list_for_each_entry(tz, &thermal_tz_list, node) {
8690 ++ thermal_zone_device_reset(tz);
8691 ++ thermal_zone_device_update(tz);
8692 ++ }
8693 ++ break;
8694 ++ default:
8695 ++ break;
8696 ++ }
8697 ++ return 0;
8698 ++}
8699 ++
8700 ++static struct notifier_block thermal_pm_nb = {
8701 ++ .notifier_call = thermal_pm_notify,
8702 ++};
8703 ++
8704 + static int __init thermal_init(void)
8705 + {
8706 + int result;
8707 +@@ -2160,6 +2223,11 @@ static int __init thermal_init(void)
8708 + if (result)
8709 + goto exit_netlink;
8710 +
8711 ++ result = register_pm_notifier(&thermal_pm_nb);
8712 ++ if (result)
8713 ++ pr_warn("Thermal: Can not register suspend notifier, return %d\n",
8714 ++ result);
8715 ++
8716 + return 0;
8717 +
8718 + exit_netlink:
8719 +@@ -2179,6 +2247,7 @@ error:
8720 +
8721 + static void __exit thermal_exit(void)
8722 + {
8723 ++ unregister_pm_notifier(&thermal_pm_nb);
8724 + of_thermal_destroy_zones();
8725 + genetlink_exit();
8726 + class_unregister(&thermal_class);
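
The suspend plumbing above follows the stock PM-notifier recipe: set a flag on
the *_PREPARE events so thermal_zone_device_update() becomes a no-op while
devices quiesce, then clear it and rescan on the PM_POST_* events. The
skeleton, reduced to its moving parts:

#include <linux/suspend.h>
#include <linux/notifier.h>
#include <linux/atomic.h>

static atomic_t in_suspend_sketch;

static int pm_notify_sketch(struct notifier_block *nb,
                            unsigned long mode, void *unused)
{
        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_RESTORE_PREPARE:
        case PM_SUSPEND_PREPARE:
                atomic_set(&in_suspend_sketch, 1);      /* go quiet */
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                atomic_set(&in_suspend_sketch, 0);      /* resume work */
                /* ... reset cached state and re-read sensors here ... */
                break;
        }
        return 0;
}

static struct notifier_block pm_nb_sketch = {
        .notifier_call = pm_notify_sketch,
};

/* init: register_pm_notifier(&pm_nb_sketch);
 * exit: unregister_pm_notifier(&pm_nb_sketch); */
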
8727 +diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
8728 +index d7ac1fccd659..749d41abfbab 100644
8729 +--- a/drivers/thermal/thermal_core.h
8730 ++++ b/drivers/thermal/thermal_core.h
8731 +@@ -41,6 +41,7 @@ struct thermal_instance {
8732 + struct thermal_zone_device *tz;
8733 + struct thermal_cooling_device *cdev;
8734 + int trip;
8735 ++ bool initialized;
8736 + unsigned long upper; /* Highest cooling state for this trip point */
8737 + unsigned long lower; /* Lowest cooling state for this trip point */
8738 + unsigned long target; /* expected cooling state */
8739 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
8740 +index e4c70dce3e7c..fa4e23930614 100644
8741 +--- a/drivers/usb/class/cdc-acm.c
8742 ++++ b/drivers/usb/class/cdc-acm.c
8743 +@@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
8744 + },
8745 + #endif
8746 +
8747 ++	/* Samsung phone in firmware update mode */
8748 ++ { USB_DEVICE(0x04e8, 0x685d),
8749 ++ .driver_info = IGNORE_DEVICE,
8750 ++ },
8751 ++
8752 + /* Exclude Infineon Flash Loader utility */
8753 + { USB_DEVICE(0x058b, 0x0041),
8754 + .driver_info = IGNORE_DEVICE,
8755 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
8756 +index 36f1cb74588c..78be201d81f4 100644
8757 +--- a/drivers/usb/dwc3/core.h
8758 ++++ b/drivers/usb/dwc3/core.h
8759 +@@ -853,7 +853,6 @@ struct dwc3 {
8760 + unsigned pullups_connected:1;
8761 + unsigned resize_fifos:1;
8762 + unsigned setup_packet_pending:1;
8763 +- unsigned start_config_issued:1;
8764 + unsigned three_stage_setup:1;
8765 + unsigned usb3_lpm_capable:1;
8766 +
8767 +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
8768 +index 5320e939e090..b13912d5fa99 100644
8769 +--- a/drivers/usb/dwc3/ep0.c
8770 ++++ b/drivers/usb/dwc3/ep0.c
8771 +@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8772 + int ret;
8773 + u32 reg;
8774 +
8775 +- dwc->start_config_issued = false;
8776 + cfg = le16_to_cpu(ctrl->wValue);
8777 +
8778 + switch (state) {
8779 +@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8780 + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
8781 + ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
8782 + break;
8783 +- case USB_REQ_SET_INTERFACE:
8784 +- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
8785 +- dwc->start_config_issued = false;
8786 +- /* Fall through */
8787 + default:
8788 + dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
8789 + ret = dwc3_ep0_delegate_req(dwc, ctrl);
8790 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
8791 +index a58376fd65fe..69ffe6e8d77f 100644
8792 +--- a/drivers/usb/dwc3/gadget.c
8793 ++++ b/drivers/usb/dwc3/gadget.c
8794 +@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
8795 + dep->trb_pool_dma = 0;
8796 + }
8797 +
8798 ++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
8799 ++
8800 ++/**
8801 ++ * dwc3_gadget_start_config - Configure EP resources
8802 ++ * @dwc: pointer to our controller context structure
8803 ++ * @dep: endpoint that is being enabled
8804 ++ *
8805 ++ * The assignment of transfer resources cannot perfectly follow the
8806 ++ * data book due to the fact that the controller driver does not have
8807 ++ * all knowledge of the configuration in advance. It is given this
8808 ++ * information piecemeal by the composite gadget framework after every
8809 ++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
8810 ++ * programming model in this scenario can cause errors, for two
8811 ++ * reasons:
8812 ++ *
8813 ++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
8814 ++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
8815 ++ * multiple interfaces.
8816 ++ *
8817 ++ * 2) The databook does not mention doing more DEPXFERCFG for a new
8818 ++ * endpoint on an alt setting (8.1.6).
8819 ++ *
8820 ++ * The following simplified method is used instead:
8821 ++ *
8822 ++ * All hardware endpoints can be assigned a transfer resource and this
8823 ++ * setting will stay persistent until either a core reset or
8824 ++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
8825 ++ * do DEPXFERCFG for every hardware endpoint as well. We are
8826 ++ * guaranteed that there are as many transfer resources as endpoints.
8827 ++ *
8828 ++ * This function is called for each endpoint when it is being enabled
8829 ++ * but is triggered only when called for EP0-out, which always happens
8830 ++ * first, and which should only happen in one of the above conditions.
8831 ++ */
8832 + static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
8833 + {
8834 + struct dwc3_gadget_ep_cmd_params params;
8835 + u32 cmd;
8836 ++ int i;
8837 ++ int ret;
8838 ++
8839 ++ if (dep->number)
8840 ++ return 0;
8841 +
8842 + memset(&params, 0x00, sizeof(params));
8843 ++ cmd = DWC3_DEPCMD_DEPSTARTCFG;
8844 +
8845 +- if (dep->number != 1) {
8846 +- cmd = DWC3_DEPCMD_DEPSTARTCFG;
8847 +- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
8848 +- if (dep->number > 1) {
8849 +- if (dwc->start_config_issued)
8850 +- return 0;
8851 +- dwc->start_config_issued = true;
8852 +- cmd |= DWC3_DEPCMD_PARAM(2);
8853 +- }
8854 ++ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
8855 ++ if (ret)
8856 ++ return ret;
8857 +
8858 +- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
8859 ++ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
8860 ++ struct dwc3_ep *dep = dwc->eps[i];
8861 ++
8862 ++ if (!dep)
8863 ++ continue;
8864 ++
8865 ++ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
8866 ++ if (ret)
8867 ++ return ret;
8868 + }
8869 +
8870 + return 0;
8871 +@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
8872 + struct dwc3_trb *trb_st_hw;
8873 + struct dwc3_trb *trb_link;
8874 +
8875 +- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
8876 +- if (ret)
8877 +- return ret;
8878 +-
8879 + dep->endpoint.desc = desc;
8880 + dep->comp_desc = comp_desc;
8881 + dep->type = usb_endpoint_type(desc);
8882 +@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
8883 + }
8884 + dwc3_writel(dwc->regs, DWC3_DCFG, reg);
8885 +
8886 +- dwc->start_config_issued = false;
8887 +-
8888 + /* Start with SuperSpeed Default */
8889 + dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
8890 +
8891 +@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
8892 + dwc3_writel(dwc->regs, DWC3_DCTL, reg);
8893 +
8894 + dwc3_disconnect_gadget(dwc);
8895 +- dwc->start_config_issued = false;
8896 +
8897 + dwc->gadget.speed = USB_SPEED_UNKNOWN;
8898 + dwc->setup_packet_pending = false;
8899 +@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
8900 +
8901 + dwc3_stop_active_transfers(dwc);
8902 + dwc3_clear_stall_all_ep(dwc);
8903 +- dwc->start_config_issued = false;
8904 +
8905 + /* Reset device address to zero */
8906 + reg = dwc3_readl(dwc->regs, DWC3_DCFG);
8907 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
8908 +index 1dd9919081f8..a7caf53d8b5e 100644
8909 +--- a/drivers/usb/serial/cp210x.c
8910 ++++ b/drivers/usb/serial/cp210x.c
8911 +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
8912 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
8913 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
8914 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
8915 ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
8916 ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
8917 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
8918 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
8919 + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
8920 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
8921 +index db86e512e0fc..8849439a8f18 100644
8922 +--- a/drivers/usb/serial/option.c
8923 ++++ b/drivers/usb/serial/option.c
8924 +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
8925 + #define TOSHIBA_PRODUCT_G450 0x0d45
8926 +
8927 + #define ALINK_VENDOR_ID 0x1e0e
8928 ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
8929 + #define ALINK_PRODUCT_PH300 0x9100
8930 + #define ALINK_PRODUCT_3GU 0x9200
8931 +
8932 +@@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
8933 + .reserved = BIT(3) | BIT(4),
8934 + };
8935 +
8936 ++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
8937 ++ .reserved = BIT(5) | BIT(6),
8938 ++};
8939 ++
8940 + static const struct option_blacklist_info telit_le910_blacklist = {
8941 + .sendsetup = BIT(0),
8942 + .reserved = BIT(1) | BIT(2),
8943 +@@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = {
8944 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
8945 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
8946 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
8947 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
8948 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
8949 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
8950 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
8951 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
8952 +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
8953 + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
8954 + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
8955 + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
8956 ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
8957 ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
8958 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
8959 + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
8960 + },
8961 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
8962 +index 7efc32945810..7d3e5d0e9aa4 100644
8963 +--- a/drivers/virtio/virtio_balloon.c
8964 ++++ b/drivers/virtio/virtio_balloon.c
8965 +@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
8966 + */
8967 + if (vb->num_pfns != 0)
8968 + tell_host(vb, vb->deflate_vq);
8969 +- mutex_unlock(&vb->balloon_lock);
8970 + release_pages_balloon(vb);
8971 ++ mutex_unlock(&vb->balloon_lock);
8972 + return num_freed_pages;
8973 + }
8974 +
8975 +diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
8976 +index 78f804af6c20..2046a68ad0ba 100644
8977 +--- a/drivers/virtio/virtio_pci_common.c
8978 ++++ b/drivers/virtio/virtio_pci_common.c
8979 +@@ -545,6 +545,7 @@ err_enable_device:
8980 + static void virtio_pci_remove(struct pci_dev *pci_dev)
8981 + {
8982 + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
8983 ++ struct device *dev = get_device(&vp_dev->vdev.dev);
8984 +
8985 + unregister_virtio_device(&vp_dev->vdev);
8986 +
8987 +@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
8988 + virtio_pci_modern_remove(vp_dev);
8989 +
8990 + pci_disable_device(pci_dev);
8991 ++ put_device(dev);
8992 + }
8993 +
8994 + static struct pci_driver virtio_pci_driver = {
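
The get_device()/put_device() pair added above pins the embedded struct device so that the teardown code after unregister_virtio_device() is not touching freed memory: unregistering may drop the last reference. The same idea in a self-contained userspace sketch, with a hand-rolled refcount standing in for struct device (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct object {
        int refcount;
        int data;
    };

    static struct object *object_get(struct object *obj)
    {
        obj->refcount++;
        return obj;
    }

    static void object_put(struct object *obj)
    {
        if (--obj->refcount == 0) {
            printf("freeing object\n");
            free(obj);
        }
    }

    /* Stand-in for unregister_virtio_device(): may drop the last reference. */
    static void unregister_object(struct object *obj)
    {
        object_put(obj);
    }

    int main(void)
    {
        struct object *obj = calloc(1, sizeof(*obj));

        obj->refcount = 1;
        obj->data = 42;

        object_get(obj);        /* pin: survive the unregister below */
        unregister_object(obj); /* would otherwise have been the last ref */
        printf("still safe to read: %d\n", obj->data);  /* no use-after-free */
        object_put(obj);        /* drop our pin; object is freed here */
        return 0;
    }
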
8995 +diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
8996 +index 73dafdc494aa..fb0221434f81 100644
8997 +--- a/drivers/xen/xen-pciback/pciback_ops.c
8998 ++++ b/drivers/xen/xen-pciback/pciback_ops.c
8999 +@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
9000 + /*
9001 + * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
9002 + * to access the BARs where the MSI-X entries reside.
9003 ++ * But VF devices are unique in that the PF, not the VF, needs to be checked.
9004 + */
9005 +- pci_read_config_word(dev, PCI_COMMAND, &cmd);
9006 ++ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
9007 + if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
9008 + return -ENXIO;
9009 +
9010 +@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
9011 + struct xen_pcibk_dev_data *dev_data = NULL;
9012 + struct xen_pci_op *op = &pdev->op;
9013 + int test_intx = 0;
9014 ++#ifdef CONFIG_PCI_MSI
9015 ++ unsigned int nr = 0;
9016 ++#endif
9017 +
9018 + *op = pdev->sh_info->op;
9019 + barrier();
9020 +@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
9021 + op->err = xen_pcibk_disable_msi(pdev, dev, op);
9022 + break;
9023 + case XEN_PCI_OP_enable_msix:
9024 ++ nr = op->value;
9025 + op->err = xen_pcibk_enable_msix(pdev, dev, op);
9026 + break;
9027 + case XEN_PCI_OP_disable_msix:
9028 +@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
9029 + if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
9030 + unsigned int i;
9031 +
9032 +- for (i = 0; i < op->value; i++)
9033 ++ for (i = 0; i < nr; i++)
9034 + pdev->sh_info->op.msix_entries[i].vector =
9035 + op->msix_entries[i].vector;
9036 + }
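
The new nr variable caches op->value before the MSI-X call runs, so the loop bound can no longer change underneath the earlier validation: op points into memory shared with the guest. Reading an externally writable value exactly once into a local is the standard defense against this kind of double-fetch (TOCTOU) bug. A minimal sketch of the pattern (the shared struct is illustrative, not the Xen interface):

    #include <stdio.h>

    #define MAX_ENTRIES 8

    /* Pretend this lives in memory shared with an untrusted peer:
     * the peer may rewrite 'count' at any time. */
    struct shared_op {
        volatile unsigned int count;
        int entries[MAX_ENTRIES];
    };

    static void process(struct shared_op *op)
    {
        /* Fetch once; every later use sees the same validated value,
         * no matter what the peer writes afterwards. */
        unsigned int nr = op->count;

        if (nr > MAX_ENTRIES) {
            printf("rejected: %u entries\n", nr);
            return;
        }

        for (unsigned int i = 0; i < nr; i++)   /* bound is the local copy */
            op->entries[i] = 0;
        printf("processed %u entries\n", nr);
    }

    int main(void)
    {
        struct shared_op op = { .count = 4 };

        process(&op);
        return 0;
    }
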
9037 +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
9038 +index ad4eb1024d1f..51387d75c7bf 100644
9039 +--- a/drivers/xen/xen-scsiback.c
9040 ++++ b/drivers/xen/xen-scsiback.c
9041 +@@ -939,12 +939,12 @@ out:
9042 + spin_unlock_irqrestore(&info->v2p_lock, flags);
9043 +
9044 + out_free:
9045 +- mutex_lock(&tpg->tv_tpg_mutex);
9046 +- tpg->tv_tpg_fe_count--;
9047 +- mutex_unlock(&tpg->tv_tpg_mutex);
9048 +-
9049 +- if (err)
9050 ++ if (err) {
9051 ++ mutex_lock(&tpg->tv_tpg_mutex);
9052 ++ tpg->tv_tpg_fe_count--;
9053 ++ mutex_unlock(&tpg->tv_tpg_mutex);
9054 + kfree(new);
9055 ++ }
9056 +
9057 + return err;
9058 + }
9059 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
9060 +index 0ddca6734494..4958360a44f7 100644
9061 +--- a/fs/btrfs/disk-io.c
9062 ++++ b/fs/btrfs/disk-io.c
9063 +@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
9064 + ret = get_anon_bdev(&root->anon_dev);
9065 + if (ret)
9066 + goto free_writers;
9067 ++
9068 ++ mutex_lock(&root->objectid_mutex);
9069 ++ ret = btrfs_find_highest_objectid(root,
9070 ++ &root->highest_objectid);
9071 ++ if (ret) {
9072 ++ mutex_unlock(&root->objectid_mutex);
9073 ++ goto free_root_dev;
9074 ++ }
9075 ++
9076 ++ ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
9077 ++
9078 ++ mutex_unlock(&root->objectid_mutex);
9079 ++
9080 + return 0;
9081 +
9082 ++free_root_dev:
9083 ++ free_anon_bdev(root->anon_dev);
9084 + free_writers:
9085 + btrfs_free_subvolume_writers(root->subv_writers);
9086 + fail:
9087 +@@ -2667,6 +2682,7 @@ int open_ctree(struct super_block *sb,
9088 + if (btrfs_check_super_csum(bh->b_data)) {
9089 + printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
9090 + err = -EINVAL;
9091 ++ brelse(bh);
9092 + goto fail_alloc;
9093 + }
9094 +
9095 +@@ -2899,6 +2915,18 @@ retry_root_backup:
9096 + tree_root->commit_root = btrfs_root_node(tree_root);
9097 + btrfs_set_root_refs(&tree_root->root_item, 1);
9098 +
9099 ++ mutex_lock(&tree_root->objectid_mutex);
9100 ++ ret = btrfs_find_highest_objectid(tree_root,
9101 ++ &tree_root->highest_objectid);
9102 ++ if (ret) {
9103 ++ mutex_unlock(&tree_root->objectid_mutex);
9104 ++ goto recovery_tree_root;
9105 ++ }
9106 ++
9107 ++ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
9108 ++
9109 ++ mutex_unlock(&tree_root->objectid_mutex);
9110 ++
9111 + ret = btrfs_read_roots(fs_info, tree_root);
9112 + if (ret)
9113 + goto recovery_tree_root;
9114 +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
9115 +index 767a6056ac45..07573dc1614a 100644
9116 +--- a/fs/btrfs/inode-map.c
9117 ++++ b/fs/btrfs/inode-map.c
9118 +@@ -515,7 +515,7 @@ out:
9119 + return ret;
9120 + }
9121 +
9122 +-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
9123 ++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
9124 + {
9125 + struct btrfs_path *path;
9126 + int ret;
9127 +@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
9128 + int ret;
9129 + mutex_lock(&root->objectid_mutex);
9130 +
9131 +- if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
9132 +- ret = btrfs_find_highest_objectid(root,
9133 +- &root->highest_objectid);
9134 +- if (ret)
9135 +- goto out;
9136 +- }
9137 +-
9138 + if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
9139 + ret = -ENOSPC;
9140 + goto out;
9141 +diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
9142 +index ddb347bfee23..c8e864b2d530 100644
9143 +--- a/fs/btrfs/inode-map.h
9144 ++++ b/fs/btrfs/inode-map.h
9145 +@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
9146 + struct btrfs_trans_handle *trans);
9147 +
9148 + int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
9149 ++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
9150 +
9151 + #endif
9152 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
9153 +index 54b5f0de623b..52fc1b5e9f03 100644
9154 +--- a/fs/btrfs/inode.c
9155 ++++ b/fs/btrfs/inode.c
9156 +@@ -6493,7 +6493,7 @@ out_unlock_inode:
9157 + static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9158 + struct dentry *dentry)
9159 + {
9160 +- struct btrfs_trans_handle *trans;
9161 ++ struct btrfs_trans_handle *trans = NULL;
9162 + struct btrfs_root *root = BTRFS_I(dir)->root;
9163 + struct inode *inode = d_inode(old_dentry);
9164 + u64 index;
9165 +@@ -6519,6 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9166 + trans = btrfs_start_transaction(root, 5);
9167 + if (IS_ERR(trans)) {
9168 + err = PTR_ERR(trans);
9169 ++ trans = NULL;
9170 + goto fail;
9171 + }
9172 +
9173 +@@ -6552,9 +6553,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9174 + btrfs_log_new_name(trans, inode, NULL, parent);
9175 + }
9176 +
9177 +- btrfs_end_transaction(trans, root);
9178 + btrfs_balance_delayed_items(root);
9179 + fail:
9180 ++ if (trans)
9181 ++ btrfs_end_transaction(trans, root);
9182 + if (drop_inode) {
9183 + inode_dec_link_count(inode);
9184 + iput(inode);
9185 +@@ -8548,15 +8550,28 @@ int btrfs_readpage(struct file *file, struct page *page)
9186 + static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
9187 + {
9188 + struct extent_io_tree *tree;
9189 +-
9190 ++ struct inode *inode = page->mapping->host;
9191 ++ int ret;
9192 +
9193 + if (current->flags & PF_MEMALLOC) {
9194 + redirty_page_for_writepage(wbc, page);
9195 + unlock_page(page);
9196 + return 0;
9197 + }
9198 ++
9199 ++ /*
9200 ++ * If we are under memory pressure, the VM may call this directly, so
9201 ++ * we need to make sure we have the inode referenced for the ordered
9202 ++ * extent. If we cannot get a reference, just redirty the page and return.
9203 ++ */
9204 ++ if (!igrab(inode)) {
9205 ++ redirty_page_for_writepage(wbc, page);
9206 ++ return AOP_WRITEPAGE_ACTIVATE;
9207 ++ }
9208 + tree = &BTRFS_I(page->mapping->host)->io_tree;
9209 +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9210 ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9211 ++ btrfs_add_delayed_iput(inode);
9212 ++ return ret;
9213 + }
9214 +
9215 + static int btrfs_writepages(struct address_space *mapping,
9216 +@@ -9650,9 +9665,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9217 + /*
9218 + * 2 items for inode item and ref
9219 + * 2 items for dir items
9220 ++ * 1 item for updating parent inode item
9221 ++ * 1 item for the inline extent item
9222 + * 1 item for xattr if selinux is on
9223 + */
9224 +- trans = btrfs_start_transaction(root, 5);
9225 ++ trans = btrfs_start_transaction(root, 7);
9226 + if (IS_ERR(trans))
9227 + return PTR_ERR(trans);
9228 +
9229 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
9230 +index 08fd3f0f34fd..f07d01bc4875 100644
9231 +--- a/fs/btrfs/ioctl.c
9232 ++++ b/fs/btrfs/ioctl.c
9233 +@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
9234 + goto fail;
9235 + }
9236 +
9237 ++ mutex_lock(&new_root->objectid_mutex);
9238 ++ new_root->highest_objectid = new_dirid;
9239 ++ mutex_unlock(&new_root->objectid_mutex);
9240 ++
9241 + /*
9242 + * insert the directory item
9243 + */
9244 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
9245 +index 355a458cba1a..63a6152be04b 100644
9246 +--- a/fs/btrfs/send.c
9247 ++++ b/fs/btrfs/send.c
9248 +@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
9249 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9250 + if (ret < 0)
9251 + goto out;
9252 +- BUG_ON(ret);
9253 ++ if (ret) {
9254 ++ /*
9255 ++ * An empty symlink inode. Can happen in rare error paths when
9256 ++ * creating a symlink (transaction committed before the inode
9257 ++ * eviction handler removed the symlink inode items and a crash
9258 ++ * happened in between, or the subvol was snapshotted in between).
9259 ++ * Print an informative message to dmesg/syslog so that the user
9260 ++ * can delete the symlink.
9261 ++ */
9262 ++ btrfs_err(root->fs_info,
9263 ++ "Found empty symlink inode %llu at root %llu",
9264 ++ ino, root->root_key.objectid);
9265 ++ ret = -EIO;
9266 ++ goto out;
9267 ++ }
9268 +
9269 + ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
9270 + struct btrfs_file_extent_item);
9271 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
9272 +index 24154e422945..fe609b81dd1b 100644
9273 +--- a/fs/btrfs/super.c
9274 ++++ b/fs/btrfs/super.c
9275 +@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
9276 + * there are other factors that may change the result (like a new metadata
9277 + * chunk).
9278 + *
9279 ++ * If metadata is exhausted, f_bavail will be 0.
9280 ++ *
9281 + * FIXME: not accurate for mixed block groups, total and free/used are ok,
9282 + * available appears slightly larger.
9283 + */
9284 +@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9285 + struct btrfs_space_info *found;
9286 + u64 total_used = 0;
9287 + u64 total_free_data = 0;
9288 ++ u64 total_free_meta = 0;
9289 + int bits = dentry->d_sb->s_blocksize_bits;
9290 + __be32 *fsid = (__be32 *)fs_info->fsid;
9291 + unsigned factor = 1;
9292 + struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
9293 + int ret;
9294 ++ u64 thresh = 0;
9295 +
9296 + /*
9297 + * holding chunk_muext to avoid allocating new chunks, holding
9298 +@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9299 + }
9300 + }
9301 + }
9302 ++ if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
9303 ++ total_free_meta += found->disk_total - found->disk_used;
9304 +
9305 + total_used += found->disk_used;
9306 + }
9307 +@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9308 + buf->f_bavail += div_u64(total_free_data, factor);
9309 + buf->f_bavail = buf->f_bavail >> bits;
9310 +
9311 ++ /*
9312 ++ * We calculate the remaining metadata space minus global reserve. If
9313 ++ * this is (supposedly) smaller than zero, there's no space. But this
9314 ++ * does not hold in practice; the exhausted state is reached while there
9315 ++ * is still some positive delta. So we apply some guesswork and compare the
9316 ++ * delta to a 4M threshold. (Practically observed delta was ~2M.)
9317 ++ *
9318 ++ * We probably cannot calculate the exact threshold value because this
9319 ++ * depends on the internal reservations requested by various
9320 ++ * operations, so operations that consume only a small amount of metadata
9321 ++ * will succeed even if Avail is zero. But this is better than the other
9322 ++ * way around.
9323 ++ */
9324 ++ thresh = 4 * 1024 * 1024;
9325 ++
9326 ++ if (total_free_meta - thresh < block_rsv->size)
9327 ++ buf->f_bavail = 0;
9328 ++
9329 + buf->f_type = BTRFS_SUPER_MAGIC;
9330 + buf->f_bsize = dentry->d_sb->s_blocksize;
9331 + buf->f_namelen = BTRFS_NAME_LEN;
9332 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
9333 +index 9e084477d320..9c62a6f9757a 100644
9334 +--- a/fs/btrfs/volumes.c
9335 ++++ b/fs/btrfs/volumes.c
9336 +@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
9337 + spin_lock_init(&dev->reada_lock);
9338 + atomic_set(&dev->reada_in_flight, 0);
9339 + atomic_set(&dev->dev_stats_ccnt, 0);
9340 ++ btrfs_device_data_ordered_init(dev);
9341 + INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
9342 + INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
9343 +
9344 +diff --git a/fs/direct-io.c b/fs/direct-io.c
9345 +index 602e8441bc0f..01171d8a6ee9 100644
9346 +--- a/fs/direct-io.c
9347 ++++ b/fs/direct-io.c
9348 +@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
9349 + dio->io_error = -EIO;
9350 +
9351 + if (dio->is_async && dio->rw == READ && dio->should_dirty) {
9352 +- bio_check_pages_dirty(bio); /* transfers ownership */
9353 + err = bio->bi_error;
9354 ++ bio_check_pages_dirty(bio); /* transfers ownership */
9355 + } else {
9356 + bio_for_each_segment_all(bvec, bio, i) {
9357 + struct page *page = bvec->bv_page;
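
The reorder above matters because bio_check_pages_dirty() transfers ownership of the bio, after which it may be freed at any time; bi_error therefore has to be read first. As a self-contained illustration of the rule, copy out what you need before handing an object to a call that may free it (struct and names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        int error;
    };

    /* Takes ownership of req; the caller must not touch it afterwards. */
    static void complete_and_free(struct request *req)
    {
        free(req);
    }

    int main(void)
    {
        struct request *req = malloc(sizeof(*req));
        int err;

        req->error = -5;        /* -EIO */

        err = req->error;       /* save first ... */
        complete_and_free(req); /* ... then give up ownership */
        /* Reading req->error here would be a use-after-free. */

        printf("completion status: %d\n", err);
        return 0;
    }
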
9358 +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
9359 +index 90001da9abfd..66842e55c48c 100644
9360 +--- a/fs/efivarfs/file.c
9361 ++++ b/fs/efivarfs/file.c
9362 +@@ -10,6 +10,7 @@
9363 + #include <linux/efi.h>
9364 + #include <linux/fs.h>
9365 + #include <linux/slab.h>
9366 ++#include <linux/mount.h>
9367 +
9368 + #include "internal.h"
9369 +
9370 +@@ -103,9 +104,78 @@ out_free:
9371 + return size;
9372 + }
9373 +
9374 ++static int
9375 ++efivarfs_ioc_getxflags(struct file *file, void __user *arg)
9376 ++{
9377 ++ struct inode *inode = file->f_mapping->host;
9378 ++ unsigned int i_flags;
9379 ++ unsigned int flags = 0;
9380 ++
9381 ++ i_flags = inode->i_flags;
9382 ++ if (i_flags & S_IMMUTABLE)
9383 ++ flags |= FS_IMMUTABLE_FL;
9384 ++
9385 ++ if (copy_to_user(arg, &flags, sizeof(flags)))
9386 ++ return -EFAULT;
9387 ++ return 0;
9388 ++}
9389 ++
9390 ++static int
9391 ++efivarfs_ioc_setxflags(struct file *file, void __user *arg)
9392 ++{
9393 ++ struct inode *inode = file->f_mapping->host;
9394 ++ unsigned int flags;
9395 ++ unsigned int i_flags = 0;
9396 ++ int error;
9397 ++
9398 ++ if (!inode_owner_or_capable(inode))
9399 ++ return -EACCES;
9400 ++
9401 ++ if (copy_from_user(&flags, arg, sizeof(flags)))
9402 ++ return -EFAULT;
9403 ++
9404 ++ if (flags & ~FS_IMMUTABLE_FL)
9405 ++ return -EOPNOTSUPP;
9406 ++
9407 ++ if (!capable(CAP_LINUX_IMMUTABLE))
9408 ++ return -EPERM;
9409 ++
9410 ++ if (flags & FS_IMMUTABLE_FL)
9411 ++ i_flags |= S_IMMUTABLE;
9412 ++
9413 ++
9414 ++ error = mnt_want_write_file(file);
9415 ++ if (error)
9416 ++ return error;
9417 ++
9418 ++ mutex_lock(&inode->i_mutex);
9419 ++ inode_set_flags(inode, i_flags, S_IMMUTABLE);
9420 ++ mutex_unlock(&inode->i_mutex);
9421 ++
9422 ++ mnt_drop_write_file(file);
9423 ++
9424 ++ return 0;
9425 ++}
9426 ++
9427 ++long
9428 ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
9429 ++{
9430 ++ void __user *arg = (void __user *)p;
9431 ++
9432 ++ switch (cmd) {
9433 ++ case FS_IOC_GETFLAGS:
9434 ++ return efivarfs_ioc_getxflags(file, arg);
9435 ++ case FS_IOC_SETFLAGS:
9436 ++ return efivarfs_ioc_setxflags(file, arg);
9437 ++ }
9438 ++
9439 ++ return -ENOTTY;
9440 ++}
9441 ++
9442 + const struct file_operations efivarfs_file_operations = {
9443 + .open = simple_open,
9444 + .read = efivarfs_file_read,
9445 + .write = efivarfs_file_write,
9446 + .llseek = no_llseek,
9447 ++ .unlocked_ioctl = efivarfs_file_ioctl,
9448 + };
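
With this handler in place, the usual inode-flags ioctls work on efivarfs files, which is exactly the interface "chattr -i" uses to make a variable deletable again. A small userspace program doing the same thing directly; the variable path below is only an example:

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        /* Example path; substitute the variable you actually mean. */
        const char *path =
            "/sys/firmware/efi/efivars/Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
        int fd = open(path, O_RDONLY);
        int flags;

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
            perror("FS_IOC_GETFLAGS");
            return 1;
        }
        flags &= ~FS_IMMUTABLE_FL;      /* equivalent of "chattr -i" */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
            perror("FS_IOC_SETFLAGS");  /* needs CAP_LINUX_IMMUTABLE */
        close(fd);
        return 0;
    }
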
9449 +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
9450 +index 3381b9da9ee6..e2ab6d0497f2 100644
9451 +--- a/fs/efivarfs/inode.c
9452 ++++ b/fs/efivarfs/inode.c
9453 +@@ -15,7 +15,8 @@
9454 + #include "internal.h"
9455 +
9456 + struct inode *efivarfs_get_inode(struct super_block *sb,
9457 +- const struct inode *dir, int mode, dev_t dev)
9458 ++ const struct inode *dir, int mode,
9459 ++ dev_t dev, bool is_removable)
9460 + {
9461 + struct inode *inode = new_inode(sb);
9462 +
9463 +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
9464 + inode->i_ino = get_next_ino();
9465 + inode->i_mode = mode;
9466 + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
9467 ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
9468 + switch (mode & S_IFMT) {
9469 + case S_IFREG:
9470 + inode->i_fop = &efivarfs_file_operations;
9471 +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
9472 + static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9473 + umode_t mode, bool excl)
9474 + {
9475 +- struct inode *inode;
9476 ++ struct inode *inode = NULL;
9477 + struct efivar_entry *var;
9478 + int namelen, i = 0, err = 0;
9479 ++ bool is_removable = false;
9480 +
9481 + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
9482 + return -EINVAL;
9483 +
9484 +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
9485 +- if (!inode)
9486 +- return -ENOMEM;
9487 +-
9488 + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
9489 +- if (!var) {
9490 +- err = -ENOMEM;
9491 +- goto out;
9492 +- }
9493 ++ if (!var)
9494 ++ return -ENOMEM;
9495 +
9496 + /* length of the variable name itself: remove GUID and separator */
9497 + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
9498 +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9499 + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
9500 + &var->var.VendorGuid);
9501 +
9502 ++ if (efivar_variable_is_removable(var->var.VendorGuid,
9503 ++ dentry->d_name.name, namelen))
9504 ++ is_removable = true;
9505 ++
9506 ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
9507 ++ if (!inode) {
9508 ++ err = -ENOMEM;
9509 ++ goto out;
9510 ++ }
9511 ++
9512 + for (i = 0; i < namelen; i++)
9513 + var->var.VariableName[i] = dentry->d_name.name[i];
9514 +
9515 +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9516 + out:
9517 + if (err) {
9518 + kfree(var);
9519 +- iput(inode);
9520 ++ if (inode)
9521 ++ iput(inode);
9522 + }
9523 + return err;
9524 + }
9525 +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
9526 +index b5ff16addb7c..b4505188e799 100644
9527 +--- a/fs/efivarfs/internal.h
9528 ++++ b/fs/efivarfs/internal.h
9529 +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
9530 + extern const struct inode_operations efivarfs_dir_inode_operations;
9531 + extern bool efivarfs_valid_name(const char *str, int len);
9532 + extern struct inode *efivarfs_get_inode(struct super_block *sb,
9533 +- const struct inode *dir, int mode, dev_t dev);
9534 ++ const struct inode *dir, int mode, dev_t dev,
9535 ++ bool is_removable);
9536 +
9537 + extern struct list_head efivarfs_list;
9538 +
9539 +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
9540 +index 86a2121828c3..abb244b06024 100644
9541 +--- a/fs/efivarfs/super.c
9542 ++++ b/fs/efivarfs/super.c
9543 +@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9544 + struct dentry *dentry, *root = sb->s_root;
9545 + unsigned long size = 0;
9546 + char *name;
9547 +- int len, i;
9548 ++ int len;
9549 + int err = -ENOMEM;
9550 ++ bool is_removable = false;
9551 +
9552 + entry = kzalloc(sizeof(*entry), GFP_KERNEL);
9553 + if (!entry)
9554 +@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9555 + memcpy(entry->var.VariableName, name16, name_size);
9556 + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
9557 +
9558 +- len = ucs2_strlen(entry->var.VariableName);
9559 ++ len = ucs2_utf8size(entry->var.VariableName);
9560 +
9561 + /* name, plus '-', plus GUID, plus NUL*/
9562 + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
9563 + if (!name)
9564 + goto fail;
9565 +
9566 +- for (i = 0; i < len; i++)
9567 +- name[i] = entry->var.VariableName[i] & 0xFF;
9568 ++ ucs2_as_utf8(name, entry->var.VariableName, len);
9569 ++
9570 ++ if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
9571 ++ is_removable = true;
9572 +
9573 + name[len] = '-';
9574 +
9575 +@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9576 +
9577 + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
9578 +
9579 +- inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
9580 ++ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
9581 ++ is_removable);
9582 + if (!inode)
9583 + goto fail_name;
9584 +
9585 +@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
9586 + sb->s_d_op = &efivarfs_d_ops;
9587 + sb->s_time_gran = 1;
9588 +
9589 +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
9590 ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
9591 + if (!inode)
9592 + return -ENOMEM;
9593 + inode->i_op = &efivarfs_dir_inode_operations;
9594 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
9595 +index ea433a7f4bca..06bda0361e7c 100644
9596 +--- a/fs/ext4/inode.c
9597 ++++ b/fs/ext4/inode.c
9598 +@@ -657,6 +657,34 @@ has_zeroout:
9599 + return retval;
9600 + }
9601 +
9602 ++/*
9603 ++ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
9604 ++ * we have to be careful as someone else may be manipulating b_state as well.
9605 ++ */
9606 ++static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
9607 ++{
9608 ++ unsigned long old_state;
9609 ++ unsigned long new_state;
9610 ++
9611 ++ flags &= EXT4_MAP_FLAGS;
9612 ++
9613 ++ /* Dummy buffer_head? Set non-atomically. */
9614 ++ if (!bh->b_page) {
9615 ++ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
9616 ++ return;
9617 ++ }
9618 ++ /*
9619 ++ * Someone else may be modifying b_state. Be careful! This is ugly but
9620 ++ * once we get rid of using bh as a container for mapping information
9621 ++ * to pass to / from get_block functions, this can go away.
9622 ++ */
9623 ++ do {
9624 ++ old_state = READ_ONCE(bh->b_state);
9625 ++ new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
9626 ++ } while (unlikely(
9627 ++ cmpxchg(&bh->b_state, old_state, new_state) != old_state));
9628 ++}
9629 ++
9630 + /* Maximum number of blocks we map for direct IO at once. */
9631 + #define DIO_MAX_BLOCKS 4096
9632 +
9633 +@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
9634 + ext4_io_end_t *io_end = ext4_inode_aio(inode);
9635 +
9636 + map_bh(bh, inode->i_sb, map.m_pblk);
9637 +- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
9638 ++ ext4_update_bh_state(bh, map.m_flags);
9639 + if (IS_DAX(inode) && buffer_unwritten(bh)) {
9640 + /*
9641 + * dgc: I suspect unwritten conversion on ext4+DAX is
9642 +@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
9643 + return ret;
9644 +
9645 + map_bh(bh, inode->i_sb, map.m_pblk);
9646 +- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
9647 ++ ext4_update_bh_state(bh, map.m_flags);
9648 +
9649 + if (buffer_unwritten(bh)) {
9650 + /* A delayed write to unwritten bh should be marked
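
ext4_update_bh_state() above is a classic lock-free read-modify-write: read the word, compute the new value with only our bits replaced, and retry the compare-exchange until no concurrent writer raced with us. A userspace equivalent of the same loop with C11 atomics (the flag mask is illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAP_FLAGS 0x0fUL    /* the bits we own, like EXT4_MAP_FLAGS */

    static atomic_ulong state;

    /* Replace only the MAP_FLAGS bits, leaving concurrent updates to the
     * other bits of 'state' intact. */
    static void update_flags(unsigned long flags)
    {
        unsigned long old_state, new_state;

        flags &= MAP_FLAGS;
        old_state = atomic_load(&state);
        do {
            new_state = (old_state & ~MAP_FLAGS) | flags;
            /* On failure, old_state is reloaded with the current value. */
        } while (!atomic_compare_exchange_weak(&state, &old_state, new_state));
    }

    int main(void)
    {
        atomic_store(&state, 0xf0UL);   /* someone else's bits set */
        update_flags(0x05UL);
        printf("state = 0x%lx\n", atomic_load(&state));   /* 0xf5 */
        return 0;
    }
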
9651 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
9652 +index 023f6a1f23cd..e5232bbcbe3d 100644
9653 +--- a/fs/fs-writeback.c
9654 ++++ b/fs/fs-writeback.c
9655 +@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
9656 + struct inode_switch_wbs_context *isw =
9657 + container_of(work, struct inode_switch_wbs_context, work);
9658 + struct inode *inode = isw->inode;
9659 ++ struct super_block *sb = inode->i_sb;
9660 + struct address_space *mapping = inode->i_mapping;
9661 + struct bdi_writeback *old_wb = inode->i_wb;
9662 + struct bdi_writeback *new_wb = isw->new_wb;
9663 +@@ -423,6 +424,7 @@ skip_switch:
9664 + wb_put(new_wb);
9665 +
9666 + iput(inode);
9667 ++ deactivate_super(sb);
9668 + kfree(isw);
9669 + }
9670 +
9671 +@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
9672 +
9673 + /* while holding I_WB_SWITCH, no one else can update the association */
9674 + spin_lock(&inode->i_lock);
9675 ++
9676 + if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
9677 +- inode_to_wb(inode) == isw->new_wb) {
9678 +- spin_unlock(&inode->i_lock);
9679 +- goto out_free;
9680 +- }
9681 ++ inode_to_wb(inode) == isw->new_wb)
9682 ++ goto out_unlock;
9683 ++
9684 ++ if (!atomic_inc_not_zero(&inode->i_sb->s_active))
9685 ++ goto out_unlock;
9686 ++
9687 + inode->i_state |= I_WB_SWITCH;
9688 + spin_unlock(&inode->i_lock);
9689 +
9690 +@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
9691 + call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
9692 + return;
9693 +
9694 ++out_unlock:
9695 ++ spin_unlock(&inode->i_lock);
9696 + out_free:
9697 + if (isw->new_wb)
9698 + wb_put(isw->new_wb);
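
atomic_inc_not_zero(&inode->i_sb->s_active) above is the "try-get" idiom: take a reference only if the object is still live, and fail cleanly if the count has already dropped to zero, rather than resurrecting a dying object. A userspace sketch of the primitive itself (an illustration, not the kernel's implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Increment *v unless it is zero; returns true if the reference was taken. */
    static bool inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        do {
            if (old == 0)
                return false;   /* object already dying; don't resurrect */
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        return true;
    }

    int main(void)
    {
        atomic_int live = 2, dead = 0;

        printf("live: %s\n", inc_not_zero(&live) ? "got ref" : "too late");
        printf("dead: %s\n", inc_not_zero(&dead) ? "got ref" : "too late");
        return 0;
    }
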
9699 +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
9700 +index 2ac99db3750e..5a7b3229b956 100644
9701 +--- a/fs/hostfs/hostfs_kern.c
9702 ++++ b/fs/hostfs/hostfs_kern.c
9703 +@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
9704 +
9705 + init_special_inode(inode, mode, dev);
9706 + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
9707 +- if (!err)
9708 ++ if (err)
9709 + goto out_free;
9710 +
9711 + err = read_name(inode, name);
9712 + __putname(name);
9713 + if (err)
9714 + goto out_put;
9715 +- if (err)
9716 +- goto out_put;
9717 +
9718 + d_instantiate(dentry, inode);
9719 + return 0;
9720 +diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
9721 +index ae4d5a1fa4c9..bffb908acbd4 100644
9722 +--- a/fs/hpfs/namei.c
9723 ++++ b/fs/hpfs/namei.c
9724 +@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
9725 + struct inode *inode = d_inode(dentry);
9726 + dnode_secno dno;
9727 + int r;
9728 +- int rep = 0;
9729 + int err;
9730 +
9731 + hpfs_lock(dir->i_sb);
9732 + hpfs_adjust_length(name, &len);
9733 +-again:
9734 ++
9735 + err = -ENOENT;
9736 + de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
9737 + if (!de)
9738 +@@ -400,33 +399,9 @@ again:
9739 + hpfs_error(dir->i_sb, "there was error when removing dirent");
9740 + err = -EFSERROR;
9741 + break;
9742 +- case 2: /* no space for deleting, try to truncate file */
9743 +-
9744 ++ case 2: /* no space for deleting */
9745 + err = -ENOSPC;
9746 +- if (rep++)
9747 +- break;
9748 +-
9749 +- dentry_unhash(dentry);
9750 +- if (!d_unhashed(dentry)) {
9751 +- hpfs_unlock(dir->i_sb);
9752 +- return -ENOSPC;
9753 +- }
9754 +- if (generic_permission(inode, MAY_WRITE) ||
9755 +- !S_ISREG(inode->i_mode) ||
9756 +- get_write_access(inode)) {
9757 +- d_rehash(dentry);
9758 +- } else {
9759 +- struct iattr newattrs;
9760 +- /*pr_info("truncating file before delete.\n");*/
9761 +- newattrs.ia_size = 0;
9762 +- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
9763 +- err = notify_change(dentry, &newattrs, NULL);
9764 +- put_write_access(inode);
9765 +- if (!err)
9766 +- goto again;
9767 +- }
9768 +- hpfs_unlock(dir->i_sb);
9769 +- return -ENOSPC;
9770 ++ break;
9771 + default:
9772 + drop_nlink(inode);
9773 + err = 0;
9774 +diff --git a/fs/locks.c b/fs/locks.c
9775 +index 0d2b3267e2a3..6333263b7bc8 100644
9776 +--- a/fs/locks.c
9777 ++++ b/fs/locks.c
9778 +@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
9779 + goto out;
9780 + }
9781 +
9782 +-again:
9783 + error = flock_to_posix_lock(filp, file_lock, &flock);
9784 + if (error)
9785 + goto out;
9786 +@@ -2224,19 +2223,22 @@ again:
9787 + * Attempt to detect a close/fcntl race and recover by
9788 + * releasing the lock that was just acquired.
9789 + */
9790 +- /*
9791 +- * we need that spin_lock here - it prevents reordering between
9792 +- * update of i_flctx->flc_posix and check for it done in close().
9793 +- * rcu_read_lock() wouldn't do.
9794 +- */
9795 +- spin_lock(&current->files->file_lock);
9796 +- f = fcheck(fd);
9797 +- spin_unlock(&current->files->file_lock);
9798 +- if (!error && f != filp && flock.l_type != F_UNLCK) {
9799 +- flock.l_type = F_UNLCK;
9800 +- goto again;
9801 ++ if (!error && file_lock->fl_type != F_UNLCK) {
9802 ++ /*
9803 ++ * We need that spin_lock here - it prevents reordering between
9804 ++ * update of i_flctx->flc_posix and check for it done in
9805 ++ * close(). rcu_read_lock() wouldn't do.
9806 ++ */
9807 ++ spin_lock(&current->files->file_lock);
9808 ++ f = fcheck(fd);
9809 ++ spin_unlock(&current->files->file_lock);
9810 ++ if (f != filp) {
9811 ++ file_lock->fl_type = F_UNLCK;
9812 ++ error = do_lock_file_wait(filp, cmd, file_lock);
9813 ++ WARN_ON_ONCE(error);
9814 ++ error = -EBADF;
9815 ++ }
9816 + }
9817 +-
9818 + out:
9819 + locks_free_lock(file_lock);
9820 + return error;
9821 +@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
9822 + goto out;
9823 + }
9824 +
9825 +-again:
9826 + error = flock64_to_posix_lock(filp, file_lock, &flock);
9827 + if (error)
9828 + goto out;
9829 +@@ -2364,14 +2365,22 @@ again:
9830 + * Attempt to detect a close/fcntl race and recover by
9831 + * releasing the lock that was just acquired.
9832 + */
9833 +- spin_lock(&current->files->file_lock);
9834 +- f = fcheck(fd);
9835 +- spin_unlock(&current->files->file_lock);
9836 +- if (!error && f != filp && flock.l_type != F_UNLCK) {
9837 +- flock.l_type = F_UNLCK;
9838 +- goto again;
9839 ++ if (!error && file_lock->fl_type != F_UNLCK) {
9840 ++ /*
9841 ++ * We need that spin_lock here - it prevents reordering between
9842 ++ * update of i_flctx->flc_posix and check for it done in
9843 ++ * close(). rcu_read_lock() wouldn't do.
9844 ++ */
9845 ++ spin_lock(&current->files->file_lock);
9846 ++ f = fcheck(fd);
9847 ++ spin_unlock(&current->files->file_lock);
9848 ++ if (f != filp) {
9849 ++ file_lock->fl_type = F_UNLCK;
9850 ++ error = do_lock_file_wait(filp, cmd, file_lock);
9851 ++ WARN_ON_ONCE(error);
9852 ++ error = -EBADF;
9853 ++ }
9854 + }
9855 +-
9856 + out:
9857 + locks_free_lock(file_lock);
9858 + return error;
9859 +diff --git a/fs/namei.c b/fs/namei.c
9860 +index 0c3974cd3ecd..d8ee4da93650 100644
9861 +--- a/fs/namei.c
9862 ++++ b/fs/namei.c
9863 +@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
9864 + return 0;
9865 + if (!follow)
9866 + return 0;
9867 ++ /* make sure that d_is_symlink above matches inode */
9868 ++ if (nd->flags & LOOKUP_RCU) {
9869 ++ if (read_seqcount_retry(&link->dentry->d_seq, seq))
9870 ++ return -ECHILD;
9871 ++ }
9872 + return pick_link(nd, link, inode, seq);
9873 + }
9874 +
9875 +@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
9876 + if (err < 0)
9877 + return err;
9878 +
9879 +- inode = d_backing_inode(path.dentry);
9880 + seq = 0; /* we are already out of RCU mode */
9881 + err = -ENOENT;
9882 + if (d_is_negative(path.dentry))
9883 + goto out_path_put;
9884 ++ inode = d_backing_inode(path.dentry);
9885 + }
9886 +
9887 + if (flags & WALK_PUT)
9888 +@@ -3130,12 +3135,12 @@ retry_lookup:
9889 + return error;
9890 +
9891 + BUG_ON(nd->flags & LOOKUP_RCU);
9892 +- inode = d_backing_inode(path.dentry);
9893 + seq = 0; /* out of RCU mode, so the value doesn't matter */
9894 + if (unlikely(d_is_negative(path.dentry))) {
9895 + path_to_nameidata(&path, nd);
9896 + return -ENOENT;
9897 + }
9898 ++ inode = d_backing_inode(path.dentry);
9899 + finish_lookup:
9900 + if (nd->depth)
9901 + put_link(nd);
9902 +@@ -3144,11 +3149,6 @@ finish_lookup:
9903 + if (unlikely(error))
9904 + return error;
9905 +
9906 +- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
9907 +- path_to_nameidata(&path, nd);
9908 +- return -ELOOP;
9909 +- }
9910 +-
9911 + if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
9912 + path_to_nameidata(&path, nd);
9913 + } else {
9914 +@@ -3167,6 +3167,10 @@ finish_open:
9915 + return error;
9916 + }
9917 + audit_inode(nd->name, nd->path.dentry, 0);
9918 ++ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
9919 ++ error = -ELOOP;
9920 ++ goto out;
9921 ++ }
9922 + error = -EISDIR;
9923 + if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
9924 + goto out;
9925 +@@ -3210,6 +3214,10 @@ opened:
9926 + goto exit_fput;
9927 + }
9928 + out:
9929 ++ if (unlikely(error > 0)) {
9930 ++ WARN_ON(1);
9931 ++ error = -EINVAL;
9932 ++ }
9933 + if (got_write)
9934 + mnt_drop_write(nd->path.mnt);
9935 + path_put(&save_parent);
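
read_seqcount_retry() in the should_follow_link() hunk above is the validation half of the seqcount pattern: an RCU-mode lookup samples a sequence number, does its lockless reads, and bails out (here with -ECHILD, forcing a locked retry) if a writer bumped the sequence in the meantime. A compact userspace rendition of the protocol's mechanics; real seqlocks also issue memory barriers around the data accesses, omitted here for brevity, and this driver is single-threaded just to show the shape:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq;     /* odd while a write is in progress */
    static int data_a, data_b;  /* protected pair; must be read consistently */

    static unsigned read_begin(void) { return atomic_load(&seq); }

    /* Returns nonzero if a write raced with the read section. */
    static int read_retry(unsigned start)
    {
        return (start & 1) || atomic_load(&seq) != start;
    }

    static void write_pair(int a, int b)
    {
        atomic_fetch_add(&seq, 1);  /* -> odd: readers will retry */
        data_a = a;
        data_b = b;
        atomic_fetch_add(&seq, 1);  /* -> even: consistent again */
    }

    int main(void)
    {
        unsigned start;
        int a, b;

        write_pair(1, 2);
        do {
            start = read_begin();
            a = data_a;
            b = data_b;
        } while (read_retry(start));
        printf("consistent snapshot: %d %d\n", a, b);
        return 0;
    }
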
9936 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
9937 +index f496ed721d27..98a44157353a 100644
9938 +--- a/fs/nfs/nfs4proc.c
9939 ++++ b/fs/nfs/nfs4proc.c
9940 +@@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
9941 + dentry = d_add_unique(dentry, igrab(state->inode));
9942 + if (dentry == NULL) {
9943 + dentry = opendata->dentry;
9944 +- } else if (dentry != ctx->dentry) {
9945 ++ } else {
9946 + dput(ctx->dentry);
9947 +- ctx->dentry = dget(dentry);
9948 ++ ctx->dentry = dentry;
9949 + }
9950 + nfs_set_verifier(dentry,
9951 + nfs_save_change_attribute(d_inode(opendata->dir)));
9952 +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
9953 +index 7f604727f487..e6795c7c76a8 100644
9954 +--- a/fs/ocfs2/aops.c
9955 ++++ b/fs/ocfs2/aops.c
9956 +@@ -956,6 +956,7 @@ clean_orphan:
9957 + tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
9958 + update_isize, end);
9959 + if (tmp_ret < 0) {
9960 ++ ocfs2_inode_unlock(inode, 1);
9961 + ret = tmp_ret;
9962 + mlog_errno(ret);
9963 + brelse(di_bh);
9964 +diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
9965 +index 0419485891f2..0f1c6f315cdc 100644
9966 +--- a/include/asm-generic/cputime_nsecs.h
9967 ++++ b/include/asm-generic/cputime_nsecs.h
9968 +@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
9969 + */
9970 + static inline cputime_t timespec_to_cputime(const struct timespec *val)
9971 + {
9972 +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
9973 ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
9974 + return (__force cputime_t) ret;
9975 + }
9976 + static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
9977 +@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
9978 + */
9979 + static inline cputime_t timeval_to_cputime(const struct timeval *val)
9980 + {
9981 +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
9982 ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
9983 ++ val->tv_usec * NSEC_PER_USEC;
9984 + return (__force cputime_t) ret;
9985 + }
9986 + static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
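
The (u64) casts above matter on 32-bit targets, where tv_sec is a 32-bit long: without the cast, tv_sec * NSEC_PER_SEC is evaluated in 32 bits and wraps before it is ever widened. A runnable demonstration of the difference using explicit 32-bit types (unsigned, so the wrap is well defined):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000U

    int main(void)
    {
        uint32_t tv_sec = 5;    /* stands in for a 32-bit time_t */

        /* Both operands 32-bit: the product wraps modulo 2^32 before
         * it is widened to 64 bits. */
        uint64_t wrong = tv_sec * NSEC_PER_SEC;

        /* The fix: widen first, multiply in 64 bits. */
        uint64_t right = (uint64_t)tv_sec * NSEC_PER_SEC;

        printf("wrong: %" PRIu64 "\n", wrong);  /* 705032704  */
        printf("right: %" PRIu64 "\n", right);  /* 5000000000 */
        return 0;
    }
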
9987 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
9988 +index 7bfb063029d8..461a0558bca4 100644
9989 +--- a/include/drm/drm_cache.h
9990 ++++ b/include/drm/drm_cache.h
9991 +@@ -35,4 +35,13 @@
9992 +
9993 + void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
9994 +
9995 ++static inline bool drm_arch_can_wc_memory(void)
9996 ++{
9997 ++#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
9998 ++ return false;
9999 ++#else
10000 ++ return true;
10001 ++#endif
10002 ++}
10003 ++
10004 + #endif
10005 +diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
10006 +index 5340099741ae..f356f9716474 100644
10007 +--- a/include/drm/drm_dp_mst_helper.h
10008 ++++ b/include/drm/drm_dp_mst_helper.h
10009 +@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
10010 + /**
10011 + * struct drm_dp_mst_port - MST port
10012 + * @kref: reference count for this port.
10013 +- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
10014 +- * @guid: guid for DP 1.2 device on this port.
10015 + * @port_num: port number
10016 + * @input: if this port is an input port.
10017 + * @mcs: message capability status - DP 1.2 spec.
10018 +@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
10019 + struct drm_dp_mst_port {
10020 + struct kref kref;
10021 +
10022 +- /* if dpcd 1.2 device is on this port - its GUID info */
10023 +- bool guid_valid;
10024 +- u8 guid[16];
10025 +-
10026 + u8 port_num;
10027 + bool input;
10028 + bool mcs;
10029 +@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
10030 + * @tx_slots: transmission slots for this device.
10031 + * @last_seqno: last sequence number used to talk to this.
10032 + * @link_address_sent: if a link address message has been sent to this device yet.
10033 ++ * @guid: GUID for the DP 1.2 branch device. A port under this branch
10034 ++ * can be identified by its port number.
10035 + *
10036 + * This structure represents an MST branch device, there is one
10037 +- * primary branch device at the root, along with any others connected
10038 +- * to downstream ports
10039 ++ * primary branch device at the root, along with any other branches connected
10040 ++ * to downstream ports of parent branches.
10041 + */
10042 + struct drm_dp_mst_branch {
10043 + struct kref kref;
10044 +@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
10045 + struct drm_dp_sideband_msg_tx *tx_slots[2];
10046 + int last_seqno;
10047 + bool link_address_sent;
10048 ++
10049 ++ /* global unique identifier to identify branch devices */
10050 ++ u8 guid[16];
10051 + };
10052 +
10053 +
10054 +@@ -405,11 +404,9 @@ struct drm_dp_payload {
10055 + * @conn_base_id: DRM connector ID this mgr is connected to.
10056 + * @down_rep_recv: msg receiver state for down replies.
10057 + * @up_req_recv: msg receiver state for up requests.
10058 +- * @lock: protects mst state, primary, guid, dpcd.
10059 ++ * @lock: protects mst state, primary, dpcd.
10060 + * @mst_state: if this manager is enabled for an MST capable port.
10061 + * @mst_primary: pointer to the primary branch device.
10062 +- * @guid_valid: GUID valid for the primary branch device.
10063 +- * @guid: GUID for primary port.
10064 + * @dpcd: cache of DPCD for primary port.
10065 + * @pbn_div: PBN to slots divisor.
10066 + *
10067 +@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
10068 + struct drm_dp_sideband_msg_rx up_req_recv;
10069 +
10070 + /* pointer to info about the initial MST device */
10071 +- struct mutex lock; /* protects mst_state + primary + guid + dpcd */
10072 ++ struct mutex lock; /* protects mst_state + primary + dpcd */
10073 +
10074 + bool mst_state;
10075 + struct drm_dp_mst_branch *mst_primary;
10076 +- /* primary MST device GUID */
10077 +- bool guid_valid;
10078 +- u8 guid[16];
10079 ++
10080 + u8 dpcd[DP_RECEIVER_CAP_SIZE];
10081 + u8 sink_count;
10082 + int pbn_div;
10083 +@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
10084 + the mstb tx_slots and txmsg->state once they are queued */
10085 + struct mutex qlock;
10086 + struct list_head tx_msg_downq;
10087 +- struct list_head tx_msg_upq;
10088 + bool tx_down_in_progress;
10089 +- bool tx_up_in_progress;
10090 +
10091 + /* payload info + lock for it */
10092 + struct mutex payload_lock;
10093 +diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
10094 +index d639049a613d..553210c02ee0 100644
10095 +--- a/include/drm/drm_fixed.h
10096 ++++ b/include/drm/drm_fixed.h
10097 +@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
10098 + #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
10099 + #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
10100 + #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
10101 ++#define DRM_FIXED_EPSILON 1LL
10102 ++#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
10103 +
10104 + static inline s64 drm_int2fixp(int a)
10105 + {
10106 + return ((s64)a) << DRM_FIXED_POINT;
10107 + }
10108 +
10109 +-static inline int drm_fixp2int(int64_t a)
10110 ++static inline int drm_fixp2int(s64 a)
10111 + {
10112 + return ((s64)a) >> DRM_FIXED_POINT;
10113 + }
10114 +
10115 +-static inline unsigned drm_fixp_msbset(int64_t a)
10116 ++static inline int drm_fixp2int_ceil(s64 a)
10117 ++{
10118 ++ if (a > 0)
10119 ++ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
10120 ++ else
10121 ++ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
10122 ++}
10123 ++
10124 ++static inline unsigned drm_fixp_msbset(s64 a)
10125 + {
10126 + unsigned shift, sign = (a >> 63) & 1;
10127 +
10128 +@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
10129 + return result;
10130 + }
10131 +
10132 ++static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
10133 ++{
10134 ++ s64 res;
10135 ++ bool a_neg = a < 0;
10136 ++ bool b_neg = b < 0;
10137 ++ u64 a_abs = a_neg ? -a : a;
10138 ++ u64 b_abs = b_neg ? -b : b;
10139 ++ u64 rem;
10140 ++
10141 ++ /* determine integer part */
10142 ++ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
10143 ++
10144 ++ /* determine fractional part */
10145 ++ {
10146 ++ u32 i = DRM_FIXED_POINT;
10147 ++
10148 ++ do {
10149 ++ rem <<= 1;
10150 ++ res_abs <<= 1;
10151 ++ if (rem >= b_abs) {
10152 ++ res_abs |= 1;
10153 ++ rem -= b_abs;
10154 ++ }
10155 ++ } while (--i != 0);
10156 ++ }
10157 ++
10158 ++ /* round up LSB */
10159 ++ {
10160 ++ u64 summand = (rem << 1) >= b_abs;
10161 ++
10162 ++ res_abs += summand;
10163 ++ }
10164 ++
10165 ++ res = (s64) res_abs;
10166 ++ if (a_neg ^ b_neg)
10167 ++ res = -res;
10168 ++ return res;
10169 ++}
10170 ++
10171 + static inline s64 drm_fixp_exp(s64 x)
10172 + {
10173 + s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
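
drm_fixp_from_fraction() above computes a/b directly in 32.32 fixed point by long division: the integer part first, then 32 bits of fraction pulled out of the remainder one bit at a time, then a final rounding of the LSB. A standalone sketch of the same algorithm, positive inputs only for brevity (the kernel version also handles signs and uses div64_u64_rem), assuming the quotient fits in the 32 integer bits:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FIXED_POINT 32  /* 32.32 format, like DRM_FIXED_POINT */

    static uint64_t fixp_from_fraction(uint64_t a, uint64_t b)
    {
        uint64_t res = a / b;   /* integer part */
        uint64_t rem = a % b;

        /* Pull one fraction bit per step out of the remainder. */
        for (int i = 0; i < FIXED_POINT; i++) {
            rem <<= 1;
            res <<= 1;
            if (rem >= b) {
                res |= 1;
                rem -= b;
            }
        }

        /* Round the last bit instead of truncating. */
        res += (rem << 1) >= b;
        return res;
    }

    int main(void)
    {
        uint64_t third = fixp_from_fraction(1, 3);

        /* 1/3 in 32.32: integer part 0, fraction 0x55555555 */
        printf("1/3 = 0x%016" PRIx64 "\n", third);
        printf("as double: %.10f\n", third / 4294967296.0);
        return 0;
    }
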
10174 +diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
10175 +index 71b1d6cdcb5d..8dbd7879fdc6 100644
10176 +--- a/include/linux/ceph/messenger.h
10177 ++++ b/include/linux/ceph/messenger.h
10178 +@@ -220,6 +220,7 @@ struct ceph_connection {
10179 + struct ceph_entity_addr actual_peer_addr;
10180 +
10181 + /* message out temps */
10182 ++ struct ceph_msg_header out_hdr;
10183 + struct ceph_msg *out_msg; /* sending message (== tail of
10184 + out_sent) */
10185 + bool out_msg_done;
10186 +@@ -229,7 +230,6 @@ struct ceph_connection {
10187 + int out_kvec_left; /* kvec's left in out_kvec */
10188 + int out_skip; /* skip this many bytes */
10189 + int out_kvec_bytes; /* total bytes left */
10190 +- bool out_kvec_is_msg; /* kvec refers to out_msg */
10191 + int out_more; /* there is more data after the kvecs */
10192 + __le64 out_temp_ack; /* for writing an ack */
10193 + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
10194 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
10195 +index 06b77f9dd3f2..8e30faeab183 100644
10196 +--- a/include/linux/cgroup-defs.h
10197 ++++ b/include/linux/cgroup-defs.h
10198 +@@ -133,6 +133,12 @@ struct cgroup_subsys_state {
10199 + */
10200 + u64 serial_nr;
10201 +
10202 ++ /*
10203 ++ * Incremented by the css itself and each of its online children. Used
10204 ++ * to guarantee that parents are not offlined before their children.
10205 ++ */
10206 ++ atomic_t online_cnt;
10207 ++
10208 + /* percpu_ref killing and RCU release */
10209 + struct rcu_head rcu_head;
10210 + struct work_struct destroy_work;
10211 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
10212 +index 85a868ccb493..fea160ee5803 100644
10213 +--- a/include/linux/cpuset.h
10214 ++++ b/include/linux/cpuset.h
10215 +@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
10216 + task_unlock(current);
10217 + }
10218 +
10219 ++extern void cpuset_post_attach_flush(void);
10220 ++
10221 + #else /* !CONFIG_CPUSETS */
10222 +
10223 + static inline bool cpusets_enabled(void) { return false; }
10224 +@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
10225 + return false;
10226 + }
10227 +
10228 ++static inline void cpuset_post_attach_flush(void)
10229 ++{
10230 ++}
10231 ++
10232 + #endif /* !CONFIG_CPUSETS */
10233 +
10234 + #endif /* _LINUX_CPUSET_H */
10235 +diff --git a/include/linux/efi.h b/include/linux/efi.h
10236 +index 569b5a866bb1..47be3ad7d3e5 100644
10237 +--- a/include/linux/efi.h
10238 ++++ b/include/linux/efi.h
10239 +@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
10240 + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
10241 + struct list_head *head, bool remove);
10242 +
10243 +-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
10244 ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
10245 ++ unsigned long data_size);
10246 ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
10247 ++ size_t len);
10248 +
10249 + extern struct work_struct efivar_work;
10250 + void efivar_run_worker(void);
10251 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
10252 +index 8fdc17b84739..ae6a711dcd1d 100644
10253 +--- a/include/linux/hyperv.h
10254 ++++ b/include/linux/hyperv.h
10255 +@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
10256 + struct hv_input_signal_event event;
10257 + };
10258 +
10259 ++enum hv_signal_policy {
10260 ++ HV_SIGNAL_POLICY_DEFAULT = 0,
10261 ++ HV_SIGNAL_POLICY_EXPLICIT,
10262 ++};
10263 ++
10264 + struct vmbus_channel {
10265 + /* Unique channel id */
10266 + int id;
10267 +@@ -757,8 +762,21 @@ struct vmbus_channel {
10268 + * link up channels based on their CPU affinity.
10269 + */
10270 + struct list_head percpu_list;
10271 ++ /*
10272 ++ * Host signaling policy: The default policy will be
10273 ++ * based on the ring buffer state. We will also support
10274 ++ * a policy where the client driver can have explicit
10275 ++ * signaling control.
10276 ++ */
10277 ++ enum hv_signal_policy signal_policy;
10278 + };
10279 +
10280 ++static inline void set_channel_signal_state(struct vmbus_channel *c,
10281 ++ enum hv_signal_policy policy)
10282 ++{
10283 ++ c->signal_policy = policy;
10284 ++}
10285 ++
10286 + static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
10287 + {
10288 + c->batched_reading = state;
10289 +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
10290 +index c0e961474a52..5455b660bd88 100644
10291 +--- a/include/linux/nfs_fs.h
10292 ++++ b/include/linux/nfs_fs.h
10293 +@@ -544,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
10294 +
10295 + static inline loff_t nfs_size_to_loff_t(__u64 size)
10296 + {
10297 +- if (size > (__u64) OFFSET_MAX - 1)
10298 +- return OFFSET_MAX - 1;
10299 +- return (loff_t) size;
10300 ++ return min_t(u64, size, OFFSET_MAX);
10301 + }
10302 +
10303 + static inline ino_t
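
The nfs_size_to_loff_t() hunk above swaps a hand-rolled clamp (which capped at OFFSET_MAX - 1, shaving one byte off the maximum) for a min_t() saturation at OFFSET_MAX, so an unsigned 64-bit server-reported size can never wrap into a negative loff_t. A minimal userspace sketch of the same saturation, with OFFSET_MAX and the helper name as stand-ins rather than kernel definitions:

```c
#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX INT64_MAX            /* stand-in for the kernel's limit */

static int64_t size_to_loff(uint64_t size)
{
        /* saturate instead of wrapping into negative loff_t values */
        return size < (uint64_t)OFFSET_MAX ? (int64_t)size : OFFSET_MAX;
}

int main(void)
{
        printf("%lld\n", (long long)size_to_loff(UINT64_MAX)); /* clamped */
        printf("%lld\n", (long long)size_to_loff(4096));       /* unchanged */
        return 0;
}
```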
10304 +diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
10305 +index 50777b5b1e4c..92d112aeec68 100644
10306 +--- a/include/linux/shmem_fs.h
10307 ++++ b/include/linux/shmem_fs.h
10308 +@@ -15,10 +15,7 @@ struct shmem_inode_info {
10309 + unsigned int seals; /* shmem seals */
10310 + unsigned long flags;
10311 + unsigned long alloced; /* data pages alloced to file */
10312 +- union {
10313 +- unsigned long swapped; /* subtotal assigned to swap */
10314 +- char *symlink; /* unswappable short symlink */
10315 +- };
10316 ++ unsigned long swapped; /* subtotal assigned to swap */
10317 + struct shared_policy policy; /* NUMA memory alloc policy */
10318 + struct list_head swaplist; /* chain of maybes on swap */
10319 + struct simple_xattrs xattrs; /* list of xattrs */
10320 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
10321 +index 9147f9f34cbe..75f136a22a5e 100644
10322 +--- a/include/linux/skbuff.h
10323 ++++ b/include/linux/skbuff.h
10324 +@@ -219,6 +219,7 @@ struct sk_buff;
10325 + #else
10326 + #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
10327 + #endif
10328 ++extern int sysctl_max_skb_frags;
10329 +
10330 + typedef struct skb_frag_struct skb_frag_t;
10331 +
10332 +diff --git a/include/linux/thermal.h b/include/linux/thermal.h
10333 +index 613c29bd6baf..e13a1ace50e9 100644
10334 +--- a/include/linux/thermal.h
10335 ++++ b/include/linux/thermal.h
10336 +@@ -43,6 +43,9 @@
10337 + /* Default weight of a bound cooling device */
10338 + #define THERMAL_WEIGHT_DEFAULT 0
10339 +
10340 ++/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
10341 ++#define THERMAL_TEMP_INVALID -274000
10342 ++
10343 + /* Unit conversion macros */
10344 + #define DECI_KELVIN_TO_CELSIUS(t) ({ \
10345 + long _t = (t); \
10346 +@@ -167,6 +170,7 @@ struct thermal_attr {
10347 + * @forced_passive: If > 0, temperature at which to switch on all ACPI
10348 + * processor cooling devices. Currently only used by the
10349 + * step-wise governor.
10350 ++ * @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
10351 + * @ops: operations this &thermal_zone_device supports
10352 + * @tzp: thermal zone parameters
10353 + * @governor: pointer to the governor for this thermal zone
10354 +@@ -194,6 +198,7 @@ struct thermal_zone_device {
10355 + int emul_temperature;
10356 + int passive;
10357 + unsigned int forced_passive;
10358 ++ atomic_t need_update;
10359 + struct thermal_zone_device_ops *ops;
10360 + struct thermal_zone_params *tzp;
10361 + struct thermal_governor *governor;
10362 +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
10363 +index cbb20afdbc01..bb679b48f408 100644
10364 +--- a/include/linux/ucs2_string.h
10365 ++++ b/include/linux/ucs2_string.h
10366 +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
10367 + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
10368 + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
10369 +
10370 ++unsigned long ucs2_utf8size(const ucs2_char_t *src);
10371 ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
10372 ++ unsigned long maxlength);
10373 ++
10374 + #endif /* _LINUX_UCS2_STRING_H_ */
10375 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
10376 +index 2a91a0561a47..9b4c418bebd8 100644
10377 +--- a/include/net/af_unix.h
10378 ++++ b/include/net/af_unix.h
10379 +@@ -6,8 +6,8 @@
10380 + #include <linux/mutex.h>
10381 + #include <net/sock.h>
10382 +
10383 +-void unix_inflight(struct file *fp);
10384 +-void unix_notinflight(struct file *fp);
10385 ++void unix_inflight(struct user_struct *user, struct file *fp);
10386 ++void unix_notinflight(struct user_struct *user, struct file *fp);
10387 + void unix_gc(void);
10388 + void wait_for_unix_gc(void);
10389 + struct sock *unix_get_socket(struct file *filp);
10390 +diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
10391 +index 6816f0fa5693..30a56ab2ccfb 100644
10392 +--- a/include/net/dst_metadata.h
10393 ++++ b/include/net/dst_metadata.h
10394 +@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
10395 + return dst && !(dst->flags & DST_METADATA);
10396 + }
10397 +
10398 ++static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
10399 ++ const struct sk_buff *skb_b)
10400 ++{
10401 ++ const struct metadata_dst *a, *b;
10402 ++
10403 ++ if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
10404 ++ return 0;
10405 ++
10406 ++ a = (const struct metadata_dst *) skb_dst(skb_a);
10407 ++ b = (const struct metadata_dst *) skb_dst(skb_b);
10408 ++
10409 ++ if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
10410 ++ return 1;
10411 ++
10412 ++ return memcmp(&a->u.tun_info, &b->u.tun_info,
10413 ++ sizeof(a->u.tun_info) + a->u.tun_info.options_len);
10414 ++}
10415 ++
10416 + struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
10417 + struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
10418 +
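
The skb_metadata_dst_cmp() helper above feeds the gro_list_prepare() change later in this patch: two skbs may only be GRO-merged when their tunnel metadata matches. A userspace sketch of the comparison idiom it uses, where absent/absent is equal, absent/present differs, and present/present falls through to a memcmp() over the header plus the variable-length options (the tun_info layout below is illustrative, not the kernel's):

```c
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct tun_info {
        unsigned int key;
        int options_len;
        char options[8];        /* the kernel uses a trailing variable array */
};

static int tun_info_cmp(const struct tun_info *a, const struct tun_info *b)
{
        if (!a && !b)
                return 0;       /* both absent: equal */
        if (!a != !b || a->options_len != b->options_len)
                return 1;       /* presence or option length differs */
        return memcmp(a, b,
                      offsetof(struct tun_info, options) + a->options_len) != 0;
}

int main(void)
{
        struct tun_info x = { .key = 1, .options_len = 4,
                              .options = "abcd" }, y;

        memcpy(&y, &x, sizeof(x));
        printf("%d %d %d\n", tun_info_cmp(NULL, NULL),
               tun_info_cmp(&x, NULL), tun_info_cmp(&x, &y)); /* 0 1 0 */
        return 0;
}
```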
10419 +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
10420 +index 481fe1c9044c..49dcad4fe99e 100644
10421 +--- a/include/net/inet_connection_sock.h
10422 ++++ b/include/net/inet_connection_sock.h
10423 +@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
10424 + struct sock *newsk,
10425 + const struct request_sock *req);
10426 +
10427 +-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
10428 +- struct sock *child);
10429 ++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
10430 ++ struct request_sock *req,
10431 ++ struct sock *child);
10432 + void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
10433 + unsigned long timeout);
10434 + struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
10435 +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
10436 +index 877f682989b8..295d291269e2 100644
10437 +--- a/include/net/ip6_route.h
10438 ++++ b/include/net/ip6_route.h
10439 +@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
10440 +
10441 + void ip6_route_input(struct sk_buff *skb);
10442 +
10443 +-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
10444 +- struct flowi6 *fl6);
10445 ++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
10446 ++ struct flowi6 *fl6, int flags);
10447 ++
10448 ++static inline struct dst_entry *ip6_route_output(struct net *net,
10449 ++ const struct sock *sk,
10450 ++ struct flowi6 *fl6)
10451 ++{
10452 ++ return ip6_route_output_flags(net, sk, fl6, 0);
10453 ++}
10454 ++
10455 + struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
10456 + int flags);
10457 +
10458 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
10459 +index 9f4df68105ab..3f98233388fb 100644
10460 +--- a/include/net/ip_fib.h
10461 ++++ b/include/net/ip_fib.h
10462 +@@ -61,6 +61,7 @@ struct fib_nh_exception {
10463 + struct rtable __rcu *fnhe_rth_input;
10464 + struct rtable __rcu *fnhe_rth_output;
10465 + unsigned long fnhe_stamp;
10466 ++ struct rcu_head rcu;
10467 + };
10468 +
10469 + struct fnhe_hash_bucket {
10470 +diff --git a/include/net/scm.h b/include/net/scm.h
10471 +index 262532d111f5..59fa93c01d2a 100644
10472 +--- a/include/net/scm.h
10473 ++++ b/include/net/scm.h
10474 +@@ -21,6 +21,7 @@ struct scm_creds {
10475 + struct scm_fp_list {
10476 + short count;
10477 + short max;
10478 ++ struct user_struct *user;
10479 + struct file *fp[SCM_MAX_FD];
10480 + };
10481 +
10482 +diff --git a/include/net/tcp.h b/include/net/tcp.h
10483 +index f80e74c5ad18..414d822bc1db 100644
10484 +--- a/include/net/tcp.h
10485 ++++ b/include/net/tcp.h
10486 +@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
10487 +
10488 + void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
10489 + void tcp_v4_mtu_reduced(struct sock *sk);
10490 +-void tcp_req_err(struct sock *sk, u32 seq);
10491 ++void tcp_req_err(struct sock *sk, u32 seq, bool abort);
10492 + int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
10493 + struct sock *tcp_create_openreq_child(const struct sock *sk,
10494 + struct request_sock *req,
10495 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
10496 +index aabf0aca0171..689f4d207122 100644
10497 +--- a/include/target/target_core_base.h
10498 ++++ b/include/target/target_core_base.h
10499 +@@ -138,6 +138,7 @@ enum se_cmd_flags_table {
10500 + SCF_COMPARE_AND_WRITE = 0x00080000,
10501 + SCF_COMPARE_AND_WRITE_POST = 0x00100000,
10502 + SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
10503 ++ SCF_ACK_KREF = 0x00400000,
10504 + };
10505 +
10506 + /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
10507 +@@ -490,6 +491,8 @@ struct se_cmd {
10508 + #define CMD_T_DEV_ACTIVE (1 << 7)
10509 + #define CMD_T_REQUEST_STOP (1 << 8)
10510 + #define CMD_T_BUSY (1 << 9)
10511 ++#define CMD_T_TAS (1 << 10)
10512 ++#define CMD_T_FABRIC_STOP (1 << 11)
10513 + spinlock_t t_state_lock;
10514 + struct kref cmd_kref;
10515 + struct completion t_transport_stop_comp;
10516 +diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
10517 +index c2e5d6cb34e3..ebd10e624598 100644
10518 +--- a/include/uapi/linux/Kbuild
10519 ++++ b/include/uapi/linux/Kbuild
10520 +@@ -307,7 +307,7 @@ header-y += nfs_mount.h
10521 + header-y += nl80211.h
10522 + header-y += n_r3964.h
10523 + header-y += nubus.h
10524 +-header-y += nvme.h
10525 ++header-y += nvme_ioctl.h
10526 + header-y += nvram.h
10527 + header-y += omap3isp.h
10528 + header-y += omapfb.h
10529 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
10530 +index d1d3e8f57de9..2e7f7ab739e4 100644
10531 +--- a/kernel/bpf/verifier.c
10532 ++++ b/kernel/bpf/verifier.c
10533 +@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
10534 + /* adjust offset of jmps if necessary */
10535 + if (i < pos && i + insn->off + 1 > pos)
10536 + insn->off += delta;
10537 +- else if (i > pos && i + insn->off + 1 < pos)
10538 ++ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
10539 + insn->off -= delta;
10540 + }
10541 + }
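
The verifier fix above corrects how jump offsets are rewritten after instructions are patched into a program: instructions behind the patch site have already shifted by `delta`, so backward jumps must be tested against `pos + delta` (inclusive), not the pre-patch `pos`. A standalone toy harness, with the struct and driver invented for illustration but the conditional taken verbatim from the fixed helper:

```c
#include <stdio.h>

struct insn { int off; };

/* instruction i jumps to i + off + 1 (eBPF convention);
 * `delta` instructions were inserted at `pos` */
static void adjust_branches(struct insn *prog, int len, int pos, int delta)
{
        for (int i = 0; i < len; i++) {
                struct insn *insn = &prog[i];

                if (i < pos && i + insn->off + 1 > pos)
                        insn->off += delta;
                else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                        insn->off -= delta;
        }
}

int main(void)
{
        /* 7-slot program after 2 insns were inserted at index 2:
         * slot 0 held a forward jump to old index 4 (off = 3),
         * slot 6 holds a back jump to index 1 (off still -4) */
        struct insn prog[7] = { [0] = { .off = 3 }, [6] = { .off = -4 } };

        adjust_branches(prog, 7, 2, 2);
        printf("insn0 -> %d, insn6 -> %d\n",
               0 + prog[0].off + 1, 6 + prog[6].off + 1); /* 6 and 1 */
        return 0;
}
```

Here insn 0 originally jumped over the patch site to old index 4, which has shifted to 6, while insn 6 jumps back to index 1, which has not moved; with the old `< pos` test the backward offset was left unadjusted or mis-adjusted.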
10542 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
10543 +index 470f6536b9e8..fb1ecfd2decd 100644
10544 +--- a/kernel/cgroup.c
10545 ++++ b/kernel/cgroup.c
10546 +@@ -57,7 +57,7 @@
10547 + #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
10548 + #include <linux/kthread.h>
10549 + #include <linux/delay.h>
10550 +-
10551 ++#include <linux/cpuset.h>
10552 + #include <linux/atomic.h>
10553 +
10554 + /*
10555 +@@ -2764,6 +2764,7 @@ out_unlock_rcu:
10556 + out_unlock_threadgroup:
10557 + percpu_up_write(&cgroup_threadgroup_rwsem);
10558 + cgroup_kn_unlock(of->kn);
10559 ++ cpuset_post_attach_flush();
10560 + return ret ?: nbytes;
10561 + }
10562 +
10563 +@@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
10564 + INIT_LIST_HEAD(&css->sibling);
10565 + INIT_LIST_HEAD(&css->children);
10566 + css->serial_nr = css_serial_nr_next++;
10567 ++ atomic_set(&css->online_cnt, 0);
10568 +
10569 + if (cgroup_parent(cgrp)) {
10570 + css->parent = cgroup_css(cgroup_parent(cgrp), ss);
10571 +@@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css)
10572 + if (!ret) {
10573 + css->flags |= CSS_ONLINE;
10574 + rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
10575 ++
10576 ++ atomic_inc(&css->online_cnt);
10577 ++ if (css->parent)
10578 ++ atomic_inc(&css->parent->online_cnt);
10579 + }
10580 + return ret;
10581 + }
10582 +@@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work)
10583 + container_of(work, struct cgroup_subsys_state, destroy_work);
10584 +
10585 + mutex_lock(&cgroup_mutex);
10586 +- offline_css(css);
10587 +- mutex_unlock(&cgroup_mutex);
10588 +
10589 +- css_put(css);
10590 ++ do {
10591 ++ offline_css(css);
10592 ++ css_put(css);
10593 ++ /* @css can't go away while we're holding cgroup_mutex */
10594 ++ css = css->parent;
10595 ++ } while (css && atomic_dec_and_test(&css->online_cnt));
10596 ++
10597 ++ mutex_unlock(&cgroup_mutex);
10598 + }
10599 +
10600 + /* css kill confirmation processing requires process context, bounce */
10601 +@@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
10602 + struct cgroup_subsys_state *css =
10603 + container_of(ref, struct cgroup_subsys_state, refcnt);
10604 +
10605 +- INIT_WORK(&css->destroy_work, css_killed_work_fn);
10606 +- queue_work(cgroup_destroy_wq, &css->destroy_work);
10607 ++ if (atomic_dec_and_test(&css->online_cnt)) {
10608 ++ INIT_WORK(&css->destroy_work, css_killed_work_fn);
10609 ++ queue_work(cgroup_destroy_wq, &css->destroy_work);
10610 ++ }
10611 + }
10612 +
10613 + /**
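
The cgroup changes above add an online_cnt so a parent css can never be offlined while a child is still online: each css counts itself plus its online children, the kill path queues the offline work only when its own count reaches zero, and the work function walks upward, offlining each ancestor whose count it drops to zero. A compressed userspace model of that ordering (names assumed, single-threaded where the kernel bounces through a workqueue):

```c
#include <stdatomic.h>
#include <stdio.h>

struct node {
        const char *name;
        struct node *parent;
        atomic_int online_cnt;  /* self + online children */
};

static void go_online(struct node *n)
{
        atomic_fetch_add(&n->online_cnt, 1);
        if (n->parent)
                atomic_fetch_add(&n->parent->online_cnt, 1);
}

/* mirrors css_killed_ref_fn() + css_killed_work_fn(): the offline runs,
 * and propagates upward, only when a count drops to zero */
static void kill_node(struct node *n)
{
        if (atomic_fetch_sub(&n->online_cnt, 1) != 1)
                return;                 /* children still online */
        do {
                printf("offlining %s\n", n->name);
                n = n->parent;
        } while (n && atomic_fetch_sub(&n->online_cnt, 1) == 1);
}

int main(void)
{
        struct node root  = { "root",  NULL,  0 };
        struct node child = { "child", &root, 0 };

        go_online(&root);
        go_online(&child);

        kill_node(&root);       /* deferred: child still holds a count */
        kill_node(&child);      /* offlines child, then root */
        return 0;
}
```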
10614 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
10615 +index 02a8ea5c9963..2ade632197d5 100644
10616 +--- a/kernel/cpuset.c
10617 ++++ b/kernel/cpuset.c
10618 +@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
10619 + static DEFINE_MUTEX(cpuset_mutex);
10620 + static DEFINE_SPINLOCK(callback_lock);
10621 +
10622 ++static struct workqueue_struct *cpuset_migrate_mm_wq;
10623 ++
10624 + /*
10625 + * CPU / memory hotplug is handled asynchronously.
10626 + */
10627 +@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
10628 + }
10629 +
10630 + /*
10631 +- * cpuset_migrate_mm
10632 +- *
10633 +- * Migrate memory region from one set of nodes to another.
10634 +- *
10635 +- * Temporarilly set tasks mems_allowed to target nodes of migration,
10636 +- * so that the migration code can allocate pages on these nodes.
10637 +- *
10638 +- * While the mm_struct we are migrating is typically from some
10639 +- * other task, the task_struct mems_allowed that we are hacking
10640 +- * is for our current task, which must allocate new pages for that
10641 +- * migrating memory region.
10642 ++ * Migrate memory region from one set of nodes to another. This is
10643 ++ * performed asynchronously as it can be called from process migration path
10644 ++ * holding locks involved in process management. All mm migrations are
10645 ++ * performed in the queued order and can be waited for by flushing
10646 ++ * cpuset_migrate_mm_wq.
10647 + */
10648 +
10649 ++struct cpuset_migrate_mm_work {
10650 ++ struct work_struct work;
10651 ++ struct mm_struct *mm;
10652 ++ nodemask_t from;
10653 ++ nodemask_t to;
10654 ++};
10655 ++
10656 ++static void cpuset_migrate_mm_workfn(struct work_struct *work)
10657 ++{
10658 ++ struct cpuset_migrate_mm_work *mwork =
10659 ++ container_of(work, struct cpuset_migrate_mm_work, work);
10660 ++
10661 ++ /* on a wq worker, no need to worry about %current's mems_allowed */
10662 ++ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
10663 ++ mmput(mwork->mm);
10664 ++ kfree(mwork);
10665 ++}
10666 ++
10667 + static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
10668 + const nodemask_t *to)
10669 + {
10670 +- struct task_struct *tsk = current;
10671 +-
10672 +- tsk->mems_allowed = *to;
10673 ++ struct cpuset_migrate_mm_work *mwork;
10674 +
10675 +- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
10676 ++ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
10677 ++ if (mwork) {
10678 ++ mwork->mm = mm;
10679 ++ mwork->from = *from;
10680 ++ mwork->to = *to;
10681 ++ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
10682 ++ queue_work(cpuset_migrate_mm_wq, &mwork->work);
10683 ++ } else {
10684 ++ mmput(mm);
10685 ++ }
10686 ++}
10687 +
10688 +- rcu_read_lock();
10689 +- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
10690 +- rcu_read_unlock();
10691 ++void cpuset_post_attach_flush(void)
10692 ++{
10693 ++ flush_workqueue(cpuset_migrate_mm_wq);
10694 + }
10695 +
10696 + /*
10697 +@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
10698 + mpol_rebind_mm(mm, &cs->mems_allowed);
10699 + if (migrate)
10700 + cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
10701 +- mmput(mm);
10702 ++ else
10703 ++ mmput(mm);
10704 + }
10705 + css_task_iter_end(&it);
10706 +
10707 +@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
10708 + * @old_mems_allowed is the right nodesets that we
10709 + * migrate mm from.
10710 + */
10711 +- if (is_memory_migrate(cs)) {
10712 ++ if (is_memory_migrate(cs))
10713 + cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
10714 + &cpuset_attach_nodemask_to);
10715 +- }
10716 +- mmput(mm);
10717 ++ else
10718 ++ mmput(mm);
10719 + }
10720 + }
10721 +
10722 +@@ -1710,6 +1733,7 @@ out_unlock:
10723 + mutex_unlock(&cpuset_mutex);
10724 + kernfs_unbreak_active_protection(of->kn);
10725 + css_put(&cs->css);
10726 ++ flush_workqueue(cpuset_migrate_mm_wq);
10727 + return retval ?: nbytes;
10728 + }
10729 +
10730 +@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
10731 + top_cpuset.effective_mems = node_states[N_MEMORY];
10732 +
10733 + register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
10734 ++
10735 ++ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
10736 ++ BUG_ON(!cpuset_migrate_mm_wq);
10737 + }
10738 +
10739 + /**
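
The cpuset rework above moves do_migrate_pages() onto an ordered workqueue so it no longer runs under the attach-path locks; note the matching mmput() changes, where the caller now drops the mm reference only when it did not hand it off to cpuset_migrate_mm(). A condensed userspace model of that reference handoff (types and names assumed, and the queue/flush pair collapsed into a direct call):

```c
#include <stdio.h>
#include <stdlib.h>

struct mm { int refs; };
struct mm_work { struct mm *mm; unsigned long from, to; };

static void mmput(struct mm *mm)
{
        if (--mm->refs == 0)
                printf("mm released\n");
}

static void migrate_workfn(struct mm_work *w)
{
        printf("migrating pages from %#lx to %#lx\n", w->from, w->to);
        mmput(w->mm);                   /* the worker consumes the reference */
        free(w);
}

static void queue_migrate(struct mm *mm, unsigned long from, unsigned long to)
{
        struct mm_work *w = calloc(1, sizeof(*w));

        if (w) {
                w->mm = mm;             /* reference travels with the work item */
                w->from = from;         /* nodemasks copied by value in the kernel */
                w->to = to;
                migrate_workfn(w);      /* stand-in for queue_work() + flush */
        } else {
                mmput(mm);              /* could not queue: drop it here */
        }
}

int main(void)
{
        struct mm mm = { .refs = 1 };

        queue_migrate(&mm, 0x1, 0x2);   /* caller must NOT mmput() again */
        return 0;
}
```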
10740 +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
10741 +index a302cf9a2126..57bff7857e87 100644
10742 +--- a/kernel/irq/handle.c
10743 ++++ b/kernel/irq/handle.c
10744 +@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
10745 + unsigned int flags = 0, irq = desc->irq_data.irq;
10746 + struct irqaction *action = desc->action;
10747 +
10748 +- do {
10749 ++ /* action might have become NULL since we dropped the lock */
10750 ++ while (action) {
10751 + irqreturn_t res;
10752 +
10753 + trace_irq_handler_entry(irq, action);
10754 +@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
10755 +
10756 + retval |= res;
10757 + action = action->next;
10758 +- } while (action);
10759 ++ }
10760 +
10761 + add_interrupt_randomness(irq, flags);
10762 +
10763 +diff --git a/kernel/memremap.c b/kernel/memremap.c
10764 +index 7a4e473cea4d..25ced161ebeb 100644
10765 +--- a/kernel/memremap.c
10766 ++++ b/kernel/memremap.c
10767 +@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
10768 + if (addr) {
10769 + *ptr = addr;
10770 + devres_add(dev, ptr);
10771 +- } else
10772 ++ } else {
10773 + devres_free(ptr);
10774 ++ return ERR_PTR(-ENXIO);
10775 ++ }
10776 +
10777 + return addr;
10778 + }
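
The devm_memremap() fix above stops returning a bare NULL on failure and returns ERR_PTR(-ENXIO) instead, matching what callers using IS_ERR() expect. A userspace sketch of the ERR_PTR convention involved, with a fake mapping function standing in for the real one:

```c
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        /* errors live in the top 4095 values of the address space */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *fake_memremap(int fail)
{
        static int backing;

        return fail ? ERR_PTR(-ENXIO) : (void *)&backing;
}

int main(void)
{
        void *p = fake_memremap(1);

        if (IS_ERR(p))
                printf("memremap failed: %ld\n", PTR_ERR(p));
        return 0;
}
```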
10779 +diff --git a/kernel/resource.c b/kernel/resource.c
10780 +index f150dbbe6f62..249b1eb1e6e1 100644
10781 +--- a/kernel/resource.c
10782 ++++ b/kernel/resource.c
10783 +@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
10784 + if (!conflict)
10785 + break;
10786 + if (conflict != parent) {
10787 +- parent = conflict;
10788 +- if (!(conflict->flags & IORESOURCE_BUSY))
10789 ++ if (!(conflict->flags & IORESOURCE_BUSY)) {
10790 ++ parent = conflict;
10791 + continue;
10792 ++ }
10793 + }
10794 + if (conflict->flags & flags & IORESOURCE_MUXED) {
10795 + add_wait_queue(&muxed_resource_wait, &wait);
10796 +diff --git a/kernel/seccomp.c b/kernel/seccomp.c
10797 +index 580ac2d4024f..15a1795bbba1 100644
10798 +--- a/kernel/seccomp.c
10799 ++++ b/kernel/seccomp.c
10800 +@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
10801 + put_seccomp_filter(thread);
10802 + smp_store_release(&thread->seccomp.filter,
10803 + caller->seccomp.filter);
10804 ++
10805 ++ /*
10806 ++ * Don't let an unprivileged task work around
10807 ++ * the no_new_privs restriction by creating
10808 ++ * a thread that sets it up, enters seccomp,
10809 ++ * then dies.
10810 ++ */
10811 ++ if (task_no_new_privs(caller))
10812 ++ task_set_no_new_privs(thread);
10813 ++
10814 + /*
10815 + * Opt the other thread into seccomp if needed.
10816 + * As threads are considered to be trust-realm
10817 + * equivalent (see ptrace_may_access), it is safe to
10818 + * allow one thread to transition the other.
10819 + */
10820 +- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
10821 +- /*
10822 +- * Don't let an unprivileged task work around
10823 +- * the no_new_privs restriction by creating
10824 +- * a thread that sets it up, enters seccomp,
10825 +- * then dies.
10826 +- */
10827 +- if (task_no_new_privs(caller))
10828 +- task_set_no_new_privs(thread);
10829 +-
10830 ++ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
10831 + seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
10832 +- }
10833 + }
10834 + }
10835 +
10836 +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
10837 +index ce033c7aa2e8..9cff0ab82b63 100644
10838 +--- a/kernel/time/posix-clock.c
10839 ++++ b/kernel/time/posix-clock.c
10840 +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
10841 + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
10842 + {
10843 + struct posix_clock *clk = get_posix_clock(fp);
10844 +- int result = 0;
10845 ++ unsigned int result = 0;
10846 +
10847 + if (!clk)
10848 +- return -ENODEV;
10849 ++ return POLLERR;
10850 +
10851 + if (clk->ops.poll)
10852 + result = clk->ops.poll(clk, fp, wait);
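
The posix_clock_poll() fix above restores the .poll contract: the handler returns an unsigned POLL* event mask, so an unreachable device must be reported as POLLERR rather than as a negative errno that a caller would misread as a mask. A small userspace illustration of that convention (fake_poll is invented for the example):

```c
#include <poll.h>
#include <stdio.h>

static unsigned int fake_poll(int device_alive, int data_ready)
{
        unsigned int mask = 0;

        if (!device_alive)
                return POLLERR;         /* not -ENODEV */
        if (data_ready)
                mask |= POLLIN;
        return mask;
}

int main(void)
{
        printf("%#x %#x\n", fake_poll(0, 0), fake_poll(1, 1));
        return 0;
}
```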
10853 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
10854 +index 7c7ec4515983..22c57e191a23 100644
10855 +--- a/kernel/time/tick-sched.c
10856 ++++ b/kernel/time/tick-sched.c
10857 +@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
10858 + /* Get the next period */
10859 + next = tick_init_jiffy_update();
10860 +
10861 +- hrtimer_forward_now(&ts->sched_timer, tick_period);
10862 + hrtimer_set_expires(&ts->sched_timer, next);
10863 +- tick_program_event(next, 1);
10864 ++ hrtimer_forward_now(&ts->sched_timer, tick_period);
10865 ++ tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
10866 + tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
10867 + }
10868 +
10869 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
10870 +index d563c1960302..99188ee5d9d0 100644
10871 +--- a/kernel/time/timekeeping.c
10872 ++++ b/kernel/time/timekeeping.c
10873 +@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
10874 +
10875 + delta = timekeeping_get_delta(tkr);
10876 +
10877 +- nsec = delta * tkr->mult + tkr->xtime_nsec;
10878 +- nsec >>= tkr->shift;
10879 ++ nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
10880 +
10881 + /* If arch requires, add in get_arch_timeoffset() */
10882 + return nsec + arch_gettimeoffset();
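
The timekeeping hunk above folds the nanosecond conversion into a single expression, keeping the intermediate sum unsigned so the right shift never operates on a possibly negative signed value. The underlying fixed-point math is unchanged: ns = (cycles * mult + frac) >> shift. A toy rendering with illustrative, not real, clocksource parameters:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t cycles_to_ns(uint64_t delta, uint32_t mult,
                             uint64_t frac_ns, uint32_t shift)
{
        /* shift applies to the whole sum, matching the fixed expression */
        return (delta * mult + frac_ns) >> shift;
}

int main(void)
{
        /* mult = 2^22 with shift = 22 models a 1 GHz clock: 1 cycle = 1 ns */
        printf("%llu ns\n",
               (unsigned long long)cycles_to_ns(1000, 4194304, 0, 22));
        return 0;
}
```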
10883 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
10884 +index 4f6ef6912e00..debf6e878076 100644
10885 +--- a/kernel/trace/trace_events.c
10886 ++++ b/kernel/trace/trace_events.c
10887 +@@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
10888 + * The ftrace subsystem is for showing formats only.
10889 + * They can not be enabled or disabled via the event files.
10890 + */
10891 +- if (call->class && call->class->reg)
10892 ++ if (call->class && call->class->reg &&
10893 ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
10894 + return file;
10895 + }
10896 +
10897 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
10898 +index c579dbab2e36..450c21fd0e6e 100644
10899 +--- a/kernel/workqueue.c
10900 ++++ b/kernel/workqueue.c
10901 +@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
10902 + int node)
10903 + {
10904 + assert_rcu_or_wq_mutex_or_pool_mutex(wq);
10905 ++
10906 ++ /*
10907 ++ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
10908 ++ * delayed item is pending. The plan is to keep CPU -> NODE
10909 ++ * mapping valid and stable across CPU on/offlines. Once that
10910 ++ * happens, this workaround can be removed.
10911 ++ */
10912 ++ if (unlikely(node == NUMA_NO_NODE))
10913 ++ return wq->dfl_pwq;
10914 ++
10915 + return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
10916 + }
10917 +
10918 +@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
10919 + timer_stats_timer_set_start_info(&dwork->timer);
10920 +
10921 + dwork->wq = wq;
10922 +- /* timer isn't guaranteed to run in this cpu, record earlier */
10923 +- if (cpu == WORK_CPU_UNBOUND)
10924 +- cpu = raw_smp_processor_id();
10925 + dwork->cpu = cpu;
10926 + timer->expires = jiffies + delay;
10927 +
10928 +- add_timer_on(timer, cpu);
10929 ++ if (unlikely(cpu != WORK_CPU_UNBOUND))
10930 ++ add_timer_on(timer, cpu);
10931 ++ else
10932 ++ add_timer(timer);
10933 + }
10934 +
10935 + /**
10936 +diff --git a/lib/Kconfig b/lib/Kconfig
10937 +index f0df318104e7..1a48744253d7 100644
10938 +--- a/lib/Kconfig
10939 ++++ b/lib/Kconfig
10940 +@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
10941 + # compression support is select'ed if needed
10942 + #
10943 + config 842_COMPRESS
10944 ++ select CRC32
10945 + tristate
10946 +
10947 + config 842_DECOMPRESS
10948 ++ select CRC32
10949 + tristate
10950 +
10951 + config ZLIB_INFLATE
10952 +diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
10953 +index 6f500ef2301d..f0b323abb4c6 100644
10954 +--- a/lib/ucs2_string.c
10955 ++++ b/lib/ucs2_string.c
10956 +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
10957 + }
10958 + }
10959 + EXPORT_SYMBOL(ucs2_strncmp);
10960 ++
10961 ++unsigned long
10962 ++ucs2_utf8size(const ucs2_char_t *src)
10963 ++{
10964 ++ unsigned long i;
10965 ++ unsigned long j = 0;
10966 ++
10967 ++ for (i = 0; i < ucs2_strlen(src); i++) {
10968 ++ u16 c = src[i];
10969 ++
10970 ++ if (c >= 0x800)
10971 ++ j += 3;
10972 ++ else if (c >= 0x80)
10973 ++ j += 2;
10974 ++ else
10975 ++ j += 1;
10976 ++ }
10977 ++
10978 ++ return j;
10979 ++}
10980 ++EXPORT_SYMBOL(ucs2_utf8size);
10981 ++
10982 ++/*
10983 ++ * copy at most maxlength bytes of whole utf8 characters to dest from the
10984 ++ * ucs2 string src.
10985 ++ *
10986 ++ * The return value is the number of characters copied, not including the
10987 ++ * final NUL character.
10988 ++ */
10989 ++unsigned long
10990 ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
10991 ++{
10992 ++ unsigned int i;
10993 ++ unsigned long j = 0;
10994 ++ unsigned long limit = ucs2_strnlen(src, maxlength);
10995 ++
10996 ++ for (i = 0; maxlength && i < limit; i++) {
10997 ++ u16 c = src[i];
10998 ++
10999 ++ if (c >= 0x800) {
11000 ++ if (maxlength < 3)
11001 ++ break;
11002 ++ maxlength -= 3;
11003 ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12;
11004 ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
11005 ++ dest[j++] = 0x80 | (c & 0x003f);
11006 ++ } else if (c >= 0x80) {
11007 ++ if (maxlength < 2)
11008 ++ break;
11009 ++ maxlength -= 2;
11010 ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
11011 ++ dest[j++] = 0x80 | (c & 0x03f);
11012 ++ } else {
11013 ++ maxlength -= 1;
11014 ++ dest[j++] = c & 0x7f;
11015 ++ }
11016 ++ }
11017 ++ if (maxlength)
11018 ++ dest[j] = '\0';
11019 ++ return j;
11020 ++}
11021 ++EXPORT_SYMBOL(ucs2_as_utf8);
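
The two exports above are groundwork for the efivarfs changes earlier in this patch, which need UCS-2 variable names re-encoded as UTF-8. A userspace rendering of the same 1- to 3-byte BMP encoding (u16 in place of ucs2_char_t, and without the kernel version's maxlength truncation handling):

```c
#include <stdio.h>
#include <stdint.h>

static unsigned long utf8size(const uint16_t *src)
{
        unsigned long n = 0;

        for (; *src; src++)
                n += *src >= 0x800 ? 3 : *src >= 0x80 ? 2 : 1;
        return n;
}

static unsigned long as_utf8(char *dest, const uint16_t *src)
{
        unsigned long j = 0;

        for (; *src; src++) {
                uint16_t c = *src;

                if (c >= 0x800) {                       /* 1110xxxx 10xxxxxx 10xxxxxx */
                        dest[j++] = (char)(0xe0 | c >> 12);
                        dest[j++] = (char)(0x80 | ((c >> 6) & 0x3f));
                        dest[j++] = (char)(0x80 | (c & 0x3f));
                } else if (c >= 0x80) {                 /* 110xxxxx 10xxxxxx */
                        dest[j++] = (char)(0xc0 | c >> 6);
                        dest[j++] = (char)(0x80 | (c & 0x3f));
                } else {
                        dest[j++] = (char)c;            /* plain ASCII */
                }
        }
        dest[j] = '\0';
        return j;
}

int main(void)
{
        const uint16_t name[] = { 'B', 0x00f6, 0x20ac, 0 }; /* "B", o-umlaut, euro */
        char buf[16];
        unsigned long n = as_utf8(buf, name);

        printf("%lu == %lu bytes: %s\n", utf8size(name), n, buf);
        return 0;
}
```

Sizing first with utf8size() lets a caller allocate exactly; the kernel pair is used the same way by the efivarfs conversion code.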
11022 +diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
11023 +index d3116be5a00f..300117f1a08f 100644
11024 +--- a/mm/balloon_compaction.c
11025 ++++ b/mm/balloon_compaction.c
11026 +@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
11027 + bool dequeued_page;
11028 +
11029 + dequeued_page = false;
11030 ++ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
11031 + list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
11032 + /*
11033 + * Block others from accessing the 'page' while we get around
11034 +@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
11035 + continue;
11036 + }
11037 + #endif
11038 +- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
11039 + balloon_page_delete(page);
11040 + __count_vm_event(BALLOON_DEFLATE);
11041 +- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
11042 + unlock_page(page);
11043 + dequeued_page = true;
11044 + break;
11045 + }
11046 + }
11047 ++ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
11048 +
11049 + if (!dequeued_page) {
11050 + /*
11051 +diff --git a/mm/memory.c b/mm/memory.c
11052 +index c387430f06c3..b80bf4746b67 100644
11053 +--- a/mm/memory.c
11054 ++++ b/mm/memory.c
11055 +@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
11056 + if (unlikely(pmd_none(*pmd)) &&
11057 + unlikely(__pte_alloc(mm, vma, pmd, address)))
11058 + return VM_FAULT_OOM;
11059 +- /* if an huge pmd materialized from under us just retry later */
11060 +- if (unlikely(pmd_trans_huge(*pmd)))
11061 ++ /*
11062 ++ * If a huge pmd materialized under us just retry later. Use
11063 ++ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
11064 ++ * didn't become pmd_trans_huge under us and then back to pmd_none, as
11065 ++ * a result of MADV_DONTNEED running immediately after a huge pmd fault
11066 ++ * in a different thread of this mm, in turn leading to a misleading
11067 ++ * pmd_trans_huge() retval. All we have to ensure is that it is a
11068 ++ * regular pmd that we can walk with pte_offset_map() and we can do that
11069 ++ * through an atomic read in C, which is what pmd_trans_unstable()
11070 ++ * provides.
11071 ++ */
11072 ++ if (unlikely(pmd_trans_unstable(pmd)))
11073 + return 0;
11074 + /*
11075 + * A regular pmd is established and it can't morph into a huge pmd
11076 +diff --git a/mm/migrate.c b/mm/migrate.c
11077 +index 7890d0bb5e23..6d17e0ab42d4 100644
11078 +--- a/mm/migrate.c
11079 ++++ b/mm/migrate.c
11080 +@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
11081 + (GFP_HIGHUSER_MOVABLE |
11082 + __GFP_THISNODE | __GFP_NOMEMALLOC |
11083 + __GFP_NORETRY | __GFP_NOWARN) &
11084 +- ~(__GFP_IO | __GFP_FS), 0);
11085 ++ ~__GFP_RECLAIM, 0);
11086 +
11087 + return newpage;
11088 + }
11089 +diff --git a/mm/shmem.c b/mm/shmem.c
11090 +index 2afcdbbdb685..ea5a70cfc1d8 100644
11091 +--- a/mm/shmem.c
11092 ++++ b/mm/shmem.c
11093 +@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
11094 + list_del_init(&info->swaplist);
11095 + mutex_unlock(&shmem_swaplist_mutex);
11096 + }
11097 +- } else
11098 +- kfree(info->symlink);
11099 ++ }
11100 +
11101 + simple_xattrs_free(&info->xattrs);
11102 + WARN_ON(inode->i_blocks);
11103 +@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
11104 + info = SHMEM_I(inode);
11105 + inode->i_size = len-1;
11106 + if (len <= SHORT_SYMLINK_LEN) {
11107 +- info->symlink = kmemdup(symname, len, GFP_KERNEL);
11108 +- if (!info->symlink) {
11109 ++ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
11110 ++ if (!inode->i_link) {
11111 + iput(inode);
11112 + return -ENOMEM;
11113 + }
11114 + inode->i_op = &shmem_short_symlink_operations;
11115 +- inode->i_link = info->symlink;
11116 + } else {
11117 + error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
11118 + if (error) {
11119 +@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
11120 + static void shmem_destroy_callback(struct rcu_head *head)
11121 + {
11122 + struct inode *inode = container_of(head, struct inode, i_rcu);
11123 ++ kfree(inode->i_link);
11124 + kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
11125 + }
11126 +
11127 +diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
11128 +index 9e9cca3689a0..795ddd8b2f77 100644
11129 +--- a/net/bluetooth/6lowpan.c
11130 ++++ b/net/bluetooth/6lowpan.c
11131 +@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11132 +
11133 + /* check that it's our buffer */
11134 + if (lowpan_is_ipv6(*skb_network_header(skb))) {
11135 ++ /* Pull off the 1-byte of 6lowpan header. */
11136 ++ skb_pull(skb, 1);
11137 ++
11138 + /* Copy the packet so that the IPv6 header is
11139 + * properly aligned.
11140 + */
11141 +@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11142 +
11143 + local_skb->protocol = htons(ETH_P_IPV6);
11144 + local_skb->pkt_type = PACKET_HOST;
11145 ++ local_skb->dev = dev;
11146 +
11147 + skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
11148 +
11149 +@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11150 + if (!local_skb)
11151 + goto drop;
11152 +
11153 ++ local_skb->dev = dev;
11154 ++
11155 + ret = iphc_decompress(local_skb, dev, chan);
11156 + if (ret < 0) {
11157 + kfree_skb(local_skb);
11158 +@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11159 +
11160 + local_skb->protocol = htons(ETH_P_IPV6);
11161 + local_skb->pkt_type = PACKET_HOST;
11162 +- local_skb->dev = dev;
11163 +
11164 + if (give_skb_to_upper(local_skb, dev)
11165 + != NET_RX_SUCCESS) {
11166 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
11167 +index 85b82f7adbd2..24e9410923d0 100644
11168 +--- a/net/bluetooth/hci_conn.c
11169 ++++ b/net/bluetooth/hci_conn.c
11170 +@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
11171 + if (hci_update_random_address(req, false, &own_addr_type))
11172 + return;
11173 +
11174 ++ /* Set window to be the same value as the interval to enable
11175 ++ * continuous scanning.
11176 ++ */
11177 + cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
11178 +- cp.scan_window = cpu_to_le16(hdev->le_scan_window);
11179 ++ cp.scan_window = cp.scan_interval;
11180 ++
11181 + bacpy(&cp.peer_addr, &conn->dst);
11182 + cp.peer_addr_type = conn->dst_type;
11183 + cp.own_address_type = own_addr_type;
11184 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
11185 +index 981f8a202c27..02778c5bc149 100644
11186 +--- a/net/bluetooth/hci_request.c
11187 ++++ b/net/bluetooth/hci_request.c
11188 +@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
11189 + * command to remove it from the controller.
11190 + */
11191 + list_for_each_entry(b, &hdev->le_white_list, list) {
11192 +- struct hci_cp_le_del_from_white_list cp;
11193 ++ /* If the device is neither in pend_le_conns nor
11194 ++ * pend_le_reports then remove it from the whitelist.
11195 ++ */
11196 ++ if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
11197 ++ &b->bdaddr, b->bdaddr_type) &&
11198 ++ !hci_pend_le_action_lookup(&hdev->pend_le_reports,
11199 ++ &b->bdaddr, b->bdaddr_type)) {
11200 ++ struct hci_cp_le_del_from_white_list cp;
11201 ++
11202 ++ cp.bdaddr_type = b->bdaddr_type;
11203 ++ bacpy(&cp.bdaddr, &b->bdaddr);
11204 +
11205 +- if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
11206 +- &b->bdaddr, b->bdaddr_type) ||
11207 +- hci_pend_le_action_lookup(&hdev->pend_le_reports,
11208 +- &b->bdaddr, b->bdaddr_type)) {
11209 +- white_list_entries++;
11210 ++ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
11211 ++ sizeof(cp), &cp);
11212 + continue;
11213 + }
11214 +
11215 +- cp.bdaddr_type = b->bdaddr_type;
11216 +- bacpy(&cp.bdaddr, &b->bdaddr);
11217 ++ if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
11218 ++ /* White list can not be used with RPAs */
11219 ++ return 0x00;
11220 ++ }
11221 +
11222 +- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
11223 +- sizeof(cp), &cp);
11224 ++ white_list_entries++;
11225 + }
11226 +
11227 + /* Since all no longer valid white list entries have been
11228 +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
11229 +index ffed8a1d4f27..4b175df35184 100644
11230 +--- a/net/bluetooth/smp.c
11231 ++++ b/net/bluetooth/smp.c
11232 +@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
11233 + hcon->dst_type = smp->remote_irk->addr_type;
11234 + queue_work(hdev->workqueue, &conn->id_addr_update_work);
11235 + }
11236 +-
11237 +- /* When receiving an indentity resolving key for
11238 +- * a remote device that does not use a resolvable
11239 +- * private address, just remove the key so that
11240 +- * it is possible to use the controller white
11241 +- * list for scanning.
11242 +- *
11243 +- * Userspace will have been told to not store
11244 +- * this key at this point. So it is safe to
11245 +- * just remove it.
11246 +- */
11247 +- if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
11248 +- list_del_rcu(&smp->remote_irk->list);
11249 +- kfree_rcu(smp->remote_irk, rcu);
11250 +- smp->remote_irk = NULL;
11251 +- }
11252 + }
11253 +
11254 + if (smp->csrk) {
11255 +diff --git a/net/bridge/br.c b/net/bridge/br.c
11256 +index a1abe4936fe1..3addc05b9a16 100644
11257 +--- a/net/bridge/br.c
11258 ++++ b/net/bridge/br.c
11259 +@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
11260 + .notifier_call = br_device_event
11261 + };
11262 +
11263 ++/* called with RTNL */
11264 + static int br_switchdev_event(struct notifier_block *unused,
11265 + unsigned long event, void *ptr)
11266 + {
11267 +@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
11268 + struct switchdev_notifier_fdb_info *fdb_info;
11269 + int err = NOTIFY_DONE;
11270 +
11271 +- rtnl_lock();
11272 + p = br_port_get_rtnl(dev);
11273 + if (!p)
11274 + goto out;
11275 +@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
11276 + }
11277 +
11278 + out:
11279 +- rtnl_unlock();
11280 + return err;
11281 + }
11282 +
11283 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
11284 +index 9981039ef4ff..63ae5dd24fc5 100644
11285 +--- a/net/ceph/messenger.c
11286 ++++ b/net/ceph/messenger.c
11287 +@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
11288 + }
11289 + con->in_seq = 0;
11290 + con->in_seq_acked = 0;
11291 ++
11292 ++ con->out_skip = 0;
11293 + }
11294 +
11295 + /*
11296 +@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
11297 +
11298 + static void con_out_kvec_reset(struct ceph_connection *con)
11299 + {
11300 ++ BUG_ON(con->out_skip);
11301 ++
11302 + con->out_kvec_left = 0;
11303 + con->out_kvec_bytes = 0;
11304 + con->out_kvec_cur = &con->out_kvec[0];
11305 +@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
11306 + static void con_out_kvec_add(struct ceph_connection *con,
11307 + size_t size, void *data)
11308 + {
11309 +- int index;
11310 ++ int index = con->out_kvec_left;
11311 +
11312 +- index = con->out_kvec_left;
11313 ++ BUG_ON(con->out_skip);
11314 + BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
11315 +
11316 + con->out_kvec[index].iov_len = size;
11317 +@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
11318 + con->out_kvec_bytes += size;
11319 + }
11320 +
11321 ++/*
11322 ++ * Chop off a kvec from the end. Return residual number of bytes for
11323 ++ * that kvec, i.e. how many bytes would have been written if the kvec
11324 ++ * hadn't been nuked.
11325 ++ */
11326 ++static int con_out_kvec_skip(struct ceph_connection *con)
11327 ++{
11328 ++ int off = con->out_kvec_cur - con->out_kvec;
11329 ++ int skip = 0;
11330 ++
11331 ++ if (con->out_kvec_bytes > 0) {
11332 ++ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
11333 ++ BUG_ON(con->out_kvec_bytes < skip);
11334 ++ BUG_ON(!con->out_kvec_left);
11335 ++ con->out_kvec_bytes -= skip;
11336 ++ con->out_kvec_left--;
11337 ++ }
11338 ++
11339 ++ return skip;
11340 ++}
11341 ++
11342 + #ifdef CONFIG_BLOCK
11343 +
11344 + /*
11345 +@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
11346 + return new_piece;
11347 + }
11348 +
11349 ++static size_t sizeof_footer(struct ceph_connection *con)
11350 ++{
11351 ++ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
11352 ++ sizeof(struct ceph_msg_footer) :
11353 ++ sizeof(struct ceph_msg_footer_old);
11354 ++}
11355 ++
11356 + static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
11357 + {
11358 + BUG_ON(!msg);
11359 +@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
11360 + m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
11361 +
11362 + dout("prepare_write_message_footer %p\n", con);
11363 +- con->out_kvec_is_msg = true;
11364 + con->out_kvec[v].iov_base = &m->footer;
11365 + if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
11366 + if (con->ops->sign_message)
11367 +@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
11368 + u32 crc;
11369 +
11370 + con_out_kvec_reset(con);
11371 +- con->out_kvec_is_msg = true;
11372 + con->out_msg_done = false;
11373 +
11374 + /* Sneak an ack in there first? If we can get it into the same
11375 +@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
11376 +
11377 + /* tag + hdr + front + middle */
11378 + con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
11379 +- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
11380 ++ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
11381 + con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
11382 +
11383 + if (m->middle)
11384 + con_out_kvec_add(con, m->middle->vec.iov_len,
11385 + m->middle->vec.iov_base);
11386 +
11387 +- /* fill in crc (except data pages), footer */
11388 ++ /* fill in hdr crc and finalize hdr */
11389 + crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
11390 + con->out_msg->hdr.crc = cpu_to_le32(crc);
11391 +- con->out_msg->footer.flags = 0;
11392 ++ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
11393 +
11394 ++ /* fill in front and middle crc, footer */
11395 + crc = crc32c(0, m->front.iov_base, m->front.iov_len);
11396 + con->out_msg->footer.front_crc = cpu_to_le32(crc);
11397 + if (m->middle) {
11398 +@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
11399 + dout("%s front_crc %u middle_crc %u\n", __func__,
11400 + le32_to_cpu(con->out_msg->footer.front_crc),
11401 + le32_to_cpu(con->out_msg->footer.middle_crc));
11402 ++ con->out_msg->footer.flags = 0;
11403 +
11404 + /* is there a data payload? */
11405 + con->out_msg->footer.data_crc = 0;
11406 +@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
11407 + }
11408 + }
11409 + con->out_kvec_left = 0;
11410 +- con->out_kvec_is_msg = false;
11411 + ret = 1;
11412 + out:
11413 + dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
11414 +@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
11415 + {
11416 + int ret;
11417 +
11418 ++ dout("%s %p %d left\n", __func__, con, con->out_skip);
11419 + while (con->out_skip > 0) {
11420 + size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
11421 +
11422 +@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
11423 + ceph_pr_addr(&con->peer_addr.in_addr),
11424 + seq, con->in_seq + 1);
11425 + con->in_base_pos = -front_len - middle_len - data_len -
11426 +- sizeof(m->footer);
11427 ++ sizeof_footer(con);
11428 + con->in_tag = CEPH_MSGR_TAG_READY;
11429 +- return 0;
11430 ++ return 1;
11431 + } else if ((s64)seq - (s64)con->in_seq > 1) {
11432 + pr_err("read_partial_message bad seq %lld expected %lld\n",
11433 + seq, con->in_seq + 1);
11434 +@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
11435 + /* skip this message */
11436 + dout("alloc_msg said skip message\n");
11437 + con->in_base_pos = -front_len - middle_len - data_len -
11438 +- sizeof(m->footer);
11439 ++ sizeof_footer(con);
11440 + con->in_tag = CEPH_MSGR_TAG_READY;
11441 + con->in_seq++;
11442 +- return 0;
11443 ++ return 1;
11444 + }
11445 +
11446 + BUG_ON(!con->in_msg);
11447 +@@ -2506,13 +2538,13 @@ more:
11448 +
11449 + more_kvec:
11450 + /* kvec data queued? */
11451 +- if (con->out_skip) {
11452 +- ret = write_partial_skip(con);
11453 ++ if (con->out_kvec_left) {
11454 ++ ret = write_partial_kvec(con);
11455 + if (ret <= 0)
11456 + goto out;
11457 + }
11458 +- if (con->out_kvec_left) {
11459 +- ret = write_partial_kvec(con);
11460 ++ if (con->out_skip) {
11461 ++ ret = write_partial_skip(con);
11462 + if (ret <= 0)
11463 + goto out;
11464 + }
11465 +@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
11466 + ceph_msg_put(msg);
11467 + }
11468 + if (con->out_msg == msg) {
11469 +- dout("%s %p msg %p - was sending\n", __func__, con, msg);
11470 +- con->out_msg = NULL;
11471 +- if (con->out_kvec_is_msg) {
11472 +- con->out_skip = con->out_kvec_bytes;
11473 +- con->out_kvec_is_msg = false;
11474 ++ BUG_ON(con->out_skip);
11475 ++ /* footer */
11476 ++ if (con->out_msg_done) {
11477 ++ con->out_skip += con_out_kvec_skip(con);
11478 ++ } else {
11479 ++ BUG_ON(!msg->data_length);
11480 ++ if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
11481 ++ con->out_skip += sizeof(msg->footer);
11482 ++ else
11483 ++ con->out_skip += sizeof(msg->old_footer);
11484 + }
11485 ++ /* data, middle, front */
11486 ++ if (msg->data_length)
11487 ++ con->out_skip += msg->cursor.total_resid;
11488 ++ if (msg->middle)
11489 ++ con->out_skip += con_out_kvec_skip(con);
11490 ++ con->out_skip += con_out_kvec_skip(con);
11491 ++
11492 ++ dout("%s %p msg %p - was sending, will write %d skip %d\n",
11493 ++ __func__, con, msg, con->out_kvec_bytes, con->out_skip);
11494 + msg->hdr.seq = 0;
11495 +-
11496 ++ con->out_msg = NULL;
11497 + ceph_msg_put(msg);
11498 + }
11499 ++
11500 + mutex_unlock(&con->mutex);
11501 + }
11502 +
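
The messenger rework above replaces the out_kvec_is_msg flag with explicit skip accounting: when a queued message is revoked, con_out_kvec_skip() chops the most recently queued kvec and reports how many bytes the write path should now swallow via out_skip (and the write loop is reordered so pending kvecs drain before the skipped bytes). A userspace sketch of that bookkeeping, with the queue struct and names assumed:

```c
#include <stdio.h>
#include <sys/uio.h>

struct out_queue {
        struct iovec vec[4];
        int left;               /* elements queued */
        int bytes;              /* total bytes queued */
};

/* chop the last queued element; return how many bytes it would
 * have written, for the caller to add to its skip counter */
static int queue_skip(struct out_queue *q)
{
        int skip = 0;

        if (q->bytes > 0 && q->left > 0) {
                skip = (int)q->vec[q->left - 1].iov_len;
                q->bytes -= skip;
                q->left--;
        }
        return skip;
}

int main(void)
{
        struct out_queue q = {
                .vec = { { "hdr", 3 }, { "payload", 7 } },
                .left = 2, .bytes = 10,
        };
        int out_skip = queue_skip(&q);  /* drops "payload" */

        printf("skip %d, %d bytes left\n", out_skip, q.bytes); /* 7, 3 */
        return 0;
}
```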
11503 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
11504 +index f8f235930d88..a28e47ff1b1b 100644
11505 +--- a/net/ceph/osd_client.c
11506 ++++ b/net/ceph/osd_client.c
11507 +@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
11508 + mutex_lock(&osdc->request_mutex);
11509 + req = __lookup_request(osdc, tid);
11510 + if (!req) {
11511 +- pr_warn("%s osd%d tid %llu unknown, skipping\n",
11512 +- __func__, osd->o_osd, tid);
11513 ++ dout("%s osd%d tid %llu unknown, skipping\n", __func__,
11514 ++ osd->o_osd, tid);
11515 + m = NULL;
11516 + *skip = 1;
11517 + goto out;
11518 +diff --git a/net/core/dev.c b/net/core/dev.c
11519 +index 7f00f2439770..9efbdb3ff78a 100644
11520 +--- a/net/core/dev.c
11521 ++++ b/net/core/dev.c
11522 +@@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
11523 +
11524 + diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
11525 + diffs |= p->vlan_tci ^ skb->vlan_tci;
11526 ++ diffs |= skb_metadata_dst_cmp(p, skb);
11527 + if (maclen == ETH_HLEN)
11528 + diffs |= compare_ether_header(skb_mac_header(p),
11529 + skb_mac_header(skb));
11530 +@@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
11531 + break;
11532 +
11533 + case GRO_MERGED_FREE:
11534 +- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
11535 ++ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
11536 ++ skb_dst_drop(skb);
11537 + kmem_cache_free(skbuff_head_cache, skb);
11538 +- else
11539 ++ } else {
11540 + __kfree_skb(skb);
11541 ++ }
11542 + break;
11543 +
11544 + case GRO_HELD:
11545 +@@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
11546 + dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
11547 + setup(dev);
11548 +
11549 +- if (!dev->tx_queue_len)
11550 ++ if (!dev->tx_queue_len) {
11551 + dev->priv_flags |= IFF_NO_QUEUE;
11552 ++ dev->tx_queue_len = 1;
11553 ++ }
11554 +
11555 + dev->num_tx_queues = txqs;
11556 + dev->real_num_tx_queues = txqs;
11557 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
11558 +index d79699c9d1b9..12e700332010 100644
11559 +--- a/net/core/flow_dissector.c
11560 ++++ b/net/core/flow_dissector.c
11561 +@@ -208,7 +208,6 @@ ip:
11562 + case htons(ETH_P_IPV6): {
11563 + const struct ipv6hdr *iph;
11564 + struct ipv6hdr _iph;
11565 +- __be32 flow_label;
11566 +
11567 + ipv6:
11568 + iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
11569 +@@ -230,8 +229,12 @@ ipv6:
11570 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
11571 + }
11572 +
11573 +- flow_label = ip6_flowlabel(iph);
11574 +- if (flow_label) {
11575 ++ if ((dissector_uses_key(flow_dissector,
11576 ++ FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
11577 ++ (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
11578 ++ ip6_flowlabel(iph)) {
11579 ++ __be32 flow_label = ip6_flowlabel(iph);
11580 ++
11581 + if (dissector_uses_key(flow_dissector,
11582 + FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
11583 + key_tags = skb_flow_dissector_target(flow_dissector,
11584 +@@ -396,6 +399,13 @@ ip_proto_again:
11585 + goto out_bad;
11586 + proto = eth->h_proto;
11587 + nhoff += sizeof(*eth);
11588 ++
11589 ++ /* Cap headers that we access via pointers at the
11590 ++ * end of the Ethernet header as our maximum alignment
11591 ++ * at that point is only 2 bytes.
11592 ++ */
11593 ++ if (NET_IP_ALIGN)
11594 ++ hlen = nhoff;
11595 + }
11596 +
11597 + key_control->flags |= FLOW_DIS_ENCAPSULATION;
11598 +diff --git a/net/core/scm.c b/net/core/scm.c
11599 +index 8a1741b14302..dce0acb929f1 100644
11600 +--- a/net/core/scm.c
11601 ++++ b/net/core/scm.c
11602 +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
11603 + *fplp = fpl;
11604 + fpl->count = 0;
11605 + fpl->max = SCM_MAX_FD;
11606 ++ fpl->user = NULL;
11607 + }
11608 + fpp = &fpl->fp[fpl->count];
11609 +
11610 +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
11611 + *fpp++ = file;
11612 + fpl->count++;
11613 + }
11614 ++
11615 ++ if (!fpl->user)
11616 ++ fpl->user = get_uid(current_user());
11617 ++
11618 + return num;
11619 + }
11620 +
11621 +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
11622 + scm->fp = NULL;
11623 + for (i=fpl->count-1; i>=0; i--)
11624 + fput(fpl->fp[i]);
11625 ++ free_uid(fpl->user);
11626 + kfree(fpl);
11627 + }
11628 + }
11629 +@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
11630 + for (i = 0; i < fpl->count; i++)
11631 + get_file(fpl->fp[i]);
11632 + new_fpl->max = new_fpl->count;
11633 ++ new_fpl->user = get_uid(fpl->user);
11634 + }
11635 + return new_fpl;
11636 + }
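
The scm changes above make every scm_fp_list pin a reference to the sending user's user_struct, and the af_unix declarations earlier in this patch pass that user into unix_inflight()/unix_notinflight(), giving the garbage collector a stable place to account in-flight descriptors per user. A speculative userspace sketch of what such per-user accounting can look like; the limit check itself is an assumption of the example, not code from this patch:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct user { atomic_long inflight; long limit; };

static bool charge_inflight(struct user *u, long n)
{
        if (atomic_fetch_add(&u->inflight, n) + n > u->limit) {
                atomic_fetch_sub(&u->inflight, n);      /* over limit: undo */
                return false;
        }
        return true;
}

static void uncharge_inflight(struct user *u, long n)
{
        atomic_fetch_sub(&u->inflight, n);
}

int main(void)
{
        struct user u = { 0, 2 };

        printf("%d %d\n", charge_inflight(&u, 2),       /* 1: fits */
               charge_inflight(&u, 1));                 /* 0: rejected */
        uncharge_inflight(&u, 2);
        return 0;
}
```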
11637 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
11638 +index b2df375ec9c2..5bf88f58bee7 100644
11639 +--- a/net/core/skbuff.c
11640 ++++ b/net/core/skbuff.c
11641 +@@ -79,6 +79,8 @@
11642 +
11643 + struct kmem_cache *skbuff_head_cache __read_mostly;
11644 + static struct kmem_cache *skbuff_fclone_cache __read_mostly;
11645 ++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
11646 ++EXPORT_SYMBOL(sysctl_max_skb_frags);
11647 +
11648 + /**
11649 + * skb_panic - private function for out-of-line support
11650 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
11651 +index 95b6139d710c..a6beb7b6ae55 100644
11652 +--- a/net/core/sysctl_net_core.c
11653 ++++ b/net/core/sysctl_net_core.c
11654 +@@ -26,6 +26,7 @@ static int zero = 0;
11655 + static int one = 1;
11656 + static int min_sndbuf = SOCK_MIN_SNDBUF;
11657 + static int min_rcvbuf = SOCK_MIN_RCVBUF;
11658 ++static int max_skb_frags = MAX_SKB_FRAGS;
11659 +
11660 + static int net_msg_warn; /* Unused, but still a sysctl */
11661 +
11662 +@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
11663 + .mode = 0644,
11664 + .proc_handler = proc_dointvec
11665 + },
11666 ++ {
11667 ++ .procname = "max_skb_frags",
11668 ++ .data = &sysctl_max_skb_frags,
11669 ++ .maxlen = sizeof(int),
11670 ++ .mode = 0644,
11671 ++ .proc_handler = proc_dointvec_minmax,
11672 ++ .extra1 = &one,
11673 ++ .extra2 = &max_skb_frags,
11674 ++ },
11675 + { }
11676 + };
11677 +
11678 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
11679 +index 5684e14932bd..902d606324a0 100644
11680 +--- a/net/dccp/ipv4.c
11681 ++++ b/net/dccp/ipv4.c
11682 +@@ -824,26 +824,26 @@ lookup:
11683 +
11684 + if (sk->sk_state == DCCP_NEW_SYN_RECV) {
11685 + struct request_sock *req = inet_reqsk(sk);
11686 +- struct sock *nsk = NULL;
11687 ++ struct sock *nsk;
11688 +
11689 + sk = req->rsk_listener;
11690 +- if (likely(sk->sk_state == DCCP_LISTEN)) {
11691 +- nsk = dccp_check_req(sk, skb, req);
11692 +- } else {
11693 ++ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
11694 + inet_csk_reqsk_queue_drop_and_put(sk, req);
11695 + goto lookup;
11696 + }
11697 ++ sock_hold(sk);
11698 ++ nsk = dccp_check_req(sk, skb, req);
11699 + if (!nsk) {
11700 + reqsk_put(req);
11701 +- goto discard_it;
11702 ++ goto discard_and_relse;
11703 + }
11704 + if (nsk == sk) {
11705 +- sock_hold(sk);
11706 + reqsk_put(req);
11707 + } else if (dccp_child_process(sk, nsk, skb)) {
11708 + dccp_v4_ctl_send_reset(sk, skb);
11709 +- goto discard_it;
11710 ++ goto discard_and_relse;
11711 + } else {
11712 ++ sock_put(sk);
11713 + return 0;
11714 + }
11715 + }
11716 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
11717 +index 9c6d0508e63a..b8608b71a66d 100644
11718 +--- a/net/dccp/ipv6.c
11719 ++++ b/net/dccp/ipv6.c
11720 +@@ -691,26 +691,26 @@ lookup:
11721 +
11722 + if (sk->sk_state == DCCP_NEW_SYN_RECV) {
11723 + struct request_sock *req = inet_reqsk(sk);
11724 +- struct sock *nsk = NULL;
11725 ++ struct sock *nsk;
11726 +
11727 + sk = req->rsk_listener;
11728 +- if (likely(sk->sk_state == DCCP_LISTEN)) {
11729 +- nsk = dccp_check_req(sk, skb, req);
11730 +- } else {
11731 ++ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
11732 + inet_csk_reqsk_queue_drop_and_put(sk, req);
11733 + goto lookup;
11734 + }
11735 ++ sock_hold(sk);
11736 ++ nsk = dccp_check_req(sk, skb, req);
11737 + if (!nsk) {
11738 + reqsk_put(req);
11739 +- goto discard_it;
11740 ++ goto discard_and_relse;
11741 + }
11742 + if (nsk == sk) {
11743 +- sock_hold(sk);
11744 + reqsk_put(req);
11745 + } else if (dccp_child_process(sk, nsk, skb)) {
11746 + dccp_v6_ctl_send_reset(sk, skb);
11747 +- goto discard_it;
11748 ++ goto discard_and_relse;
11749 + } else {
11750 ++ sock_put(sk);
11751 + return 0;
11752 + }
11753 + }
11754 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
11755 +index cebd9d31e65a..f6303b17546b 100644
11756 +--- a/net/ipv4/devinet.c
11757 ++++ b/net/ipv4/devinet.c
11758 +@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
11759 + if (err < 0)
11760 + goto errout;
11761 +
11762 +- err = EINVAL;
11763 ++ err = -EINVAL;
11764 + if (!tb[NETCONFA_IFINDEX])
11765 + goto errout;
11766 +
11767 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
11768 +index 46b9c887bede..64148914803a 100644
11769 +--- a/net/ipv4/inet_connection_sock.c
11770 ++++ b/net/ipv4/inet_connection_sock.c
11771 +@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
11772 + reqsk_put(req);
11773 + }
11774 +
11775 +-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
11776 +- struct sock *child)
11777 ++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
11778 ++ struct request_sock *req,
11779 ++ struct sock *child)
11780 + {
11781 + struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
11782 +
11783 + spin_lock(&queue->rskq_lock);
11784 + if (unlikely(sk->sk_state != TCP_LISTEN)) {
11785 + inet_child_forget(sk, req, child);
11786 ++ child = NULL;
11787 + } else {
11788 + req->sk = child;
11789 + req->dl_next = NULL;
11790 +@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
11791 + sk_acceptq_added(sk);
11792 + }
11793 + spin_unlock(&queue->rskq_lock);
11794 ++ return child;
11795 + }
11796 + EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
11797 +
11798 +@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
11799 + if (own_req) {
11800 + inet_csk_reqsk_queue_drop(sk, req);
11801 + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
11802 +- inet_csk_reqsk_queue_add(sk, req, child);
11803 +- /* Warning: caller must not call reqsk_put(req);
11804 +- * child stole last reference on it.
11805 +- */
11806 +- return child;
11807 ++ if (inet_csk_reqsk_queue_add(sk, req, child))
11808 ++ return child;
11809 + }
11810 + /* Too bad, another child took ownership of the request, undo. */
11811 + bh_unlock_sock(child);
11812 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
11813 +index 1fe55ae81781..b8a0607dab96 100644
11814 +--- a/net/ipv4/ip_fragment.c
11815 ++++ b/net/ipv4/ip_fragment.c
11816 +@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
11817 + struct ipq *qp;
11818 +
11819 + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
11820 ++ skb_orphan(skb);
11821 +
11822 + /* Lookup (or create) queue header */
11823 + qp = ip_find(net, ip_hdr(skb), user, vif);
11824 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
11825 +index 5f73a7c03e27..a50124260f5a 100644
11826 +--- a/net/ipv4/ip_sockglue.c
11827 ++++ b/net/ipv4/ip_sockglue.c
11828 +@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
11829 + switch (cmsg->cmsg_type) {
11830 + case IP_RETOPTS:
11831 + err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
11832 ++
11833 ++ /* Our caller is responsible for freeing ipc->opt */
11834 + err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
11835 + err < 40 ? err : 40);
11836 + if (err)
11837 +diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
11838 +index 6fb869f646bf..a04dee536b8e 100644
11839 +--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
11840 ++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
11841 +@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
11842 + {
11843 + int err;
11844 +
11845 +- skb_orphan(skb);
11846 +-
11847 + local_bh_disable();
11848 + err = ip_defrag(net, skb, user);
11849 + local_bh_enable();
11850 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
11851 +index e89094ab5ddb..aa67e0e64b69 100644
11852 +--- a/net/ipv4/ping.c
11853 ++++ b/net/ipv4/ping.c
11854 +@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11855 +
11856 + if (msg->msg_controllen) {
11857 + err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
11858 +- if (err)
11859 ++ if (unlikely(err)) {
11860 ++ kfree(ipc.opt);
11861 + return err;
11862 ++ }
11863 + if (ipc.opt)
11864 + free = 1;
11865 + }
11866 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
11867 +index bc35f1842512..7113bae4e6a0 100644
11868 +--- a/net/ipv4/raw.c
11869 ++++ b/net/ipv4/raw.c
11870 +@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11871 +
11872 + if (msg->msg_controllen) {
11873 + err = ip_cmsg_send(net, msg, &ipc, false);
11874 +- if (err)
11875 ++ if (unlikely(err)) {
11876 ++ kfree(ipc.opt);
11877 + goto out;
11878 ++ }
11879 + if (ipc.opt)
11880 + free = 1;
11881 + }
11882 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11883 +index 85f184e429c6..02c62299d717 100644
11884 +--- a/net/ipv4/route.c
11885 ++++ b/net/ipv4/route.c
11886 +@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
11887 + static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
11888 + static int ip_rt_min_advmss __read_mostly = 256;
11889 +
11890 ++static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
11891 + /*
11892 + * Interface to generic destination cache.
11893 + */
11894 +@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
11895 + struct fib_nh *nh = &FIB_RES_NH(res);
11896 +
11897 + update_or_create_fnhe(nh, fl4->daddr, new_gw,
11898 +- 0, 0);
11899 ++ 0, jiffies + ip_rt_gc_timeout);
11900 + }
11901 + if (kill_route)
11902 + rt->dst.obsolete = DST_OBSOLETE_KILL;
11903 +@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
11904 + #endif
11905 + }
11906 +
11907 ++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
11908 ++{
11909 ++ struct fnhe_hash_bucket *hash;
11910 ++ struct fib_nh_exception *fnhe, __rcu **fnhe_p;
11911 ++ u32 hval = fnhe_hashfun(daddr);
11912 ++
11913 ++ spin_lock_bh(&fnhe_lock);
11914 ++
11915 ++ hash = rcu_dereference_protected(nh->nh_exceptions,
11916 ++ lockdep_is_held(&fnhe_lock));
11917 ++ hash += hval;
11918 ++
11919 ++ fnhe_p = &hash->chain;
11920 ++ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
11921 ++ while (fnhe) {
11922 ++ if (fnhe->fnhe_daddr == daddr) {
11923 ++ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
11924 ++ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
11925 ++ fnhe_flush_routes(fnhe);
11926 ++ kfree_rcu(fnhe, rcu);
11927 ++ break;
11928 ++ }
11929 ++ fnhe_p = &fnhe->fnhe_next;
11930 ++ fnhe = rcu_dereference_protected(fnhe->fnhe_next,
11931 ++ lockdep_is_held(&fnhe_lock));
11932 ++ }
11933 ++
11934 ++ spin_unlock_bh(&fnhe_lock);
11935 ++}
11936 ++
11937 + /* called in rcu_read_lock() section */
11938 + static int __mkroute_input(struct sk_buff *skb,
11939 + const struct fib_result *res,
11940 +@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
11941 +
11942 + fnhe = find_exception(&FIB_RES_NH(*res), daddr);
11943 + if (do_cache) {
11944 +- if (fnhe)
11945 ++ if (fnhe) {
11946 + rth = rcu_dereference(fnhe->fnhe_rth_input);
11947 +- else
11948 +- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
11949 ++ if (rth && rth->dst.expires &&
11950 ++ time_after(jiffies, rth->dst.expires)) {
11951 ++ ip_del_fnhe(&FIB_RES_NH(*res), daddr);
11952 ++ fnhe = NULL;
11953 ++ } else {
11954 ++ goto rt_cache;
11955 ++ }
11956 ++ }
11957 ++
11958 ++ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
11959 +
11960 ++rt_cache:
11961 + if (rt_cache_valid(rth)) {
11962 + skb_dst_set_noref(skb, &rth->dst);
11963 + goto out;
11964 +@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
11965 + struct fib_nh *nh = &FIB_RES_NH(*res);
11966 +
11967 + fnhe = find_exception(nh, fl4->daddr);
11968 +- if (fnhe)
11969 ++ if (fnhe) {
11970 + prth = &fnhe->fnhe_rth_output;
11971 +- else {
11972 +- if (unlikely(fl4->flowi4_flags &
11973 +- FLOWI_FLAG_KNOWN_NH &&
11974 +- !(nh->nh_gw &&
11975 +- nh->nh_scope == RT_SCOPE_LINK))) {
11976 +- do_cache = false;
11977 +- goto add;
11978 ++ rth = rcu_dereference(*prth);
11979 ++ if (rth && rth->dst.expires &&
11980 ++ time_after(jiffies, rth->dst.expires)) {
11981 ++ ip_del_fnhe(nh, fl4->daddr);
11982 ++ fnhe = NULL;
11983 ++ } else {
11984 ++ goto rt_cache;
11985 + }
11986 +- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
11987 + }
11988 ++
11989 ++ if (unlikely(fl4->flowi4_flags &
11990 ++ FLOWI_FLAG_KNOWN_NH &&
11991 ++ !(nh->nh_gw &&
11992 ++ nh->nh_scope == RT_SCOPE_LINK))) {
11993 ++ do_cache = false;
11994 ++ goto add;
11995 ++ }
11996 ++ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
11997 + rth = rcu_dereference(*prth);
11998 ++
11999 ++rt_cache:
12000 + if (rt_cache_valid(rth)) {
12001 + dst_hold(&rth->dst);
12002 + return rth;
12003 +@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
12004 + }
12005 +
12006 + #ifdef CONFIG_SYSCTL
12007 +-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
12008 + static int ip_rt_gc_interval __read_mostly = 60 * HZ;
12009 + static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
12010 + static int ip_rt_gc_elasticity __read_mostly = 8;
12011 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
12012 +index c82cca18c90f..036a76ba2ac2 100644
12013 +--- a/net/ipv4/tcp.c
12014 ++++ b/net/ipv4/tcp.c
12015 +@@ -279,6 +279,7 @@
12016 +
12017 + #include <asm/uaccess.h>
12018 + #include <asm/ioctls.h>
12019 ++#include <asm/unaligned.h>
12020 + #include <net/busy_poll.h>
12021 +
12022 + int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
12023 +@@ -938,7 +939,7 @@ new_segment:
12024 +
12025 + i = skb_shinfo(skb)->nr_frags;
12026 + can_coalesce = skb_can_coalesce(skb, i, page, offset);
12027 +- if (!can_coalesce && i >= MAX_SKB_FRAGS) {
12028 ++ if (!can_coalesce && i >= sysctl_max_skb_frags) {
12029 + tcp_mark_push(tp, skb);
12030 + goto new_segment;
12031 + }
12032 +@@ -1211,7 +1212,7 @@ new_segment:
12033 +
12034 + if (!skb_can_coalesce(skb, i, pfrag->page,
12035 + pfrag->offset)) {
12036 +- if (i == MAX_SKB_FRAGS || !sg) {
12037 ++ if (i == sysctl_max_skb_frags || !sg) {
12038 + tcp_mark_push(tp, skb);
12039 + goto new_segment;
12040 + }
12041 +@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
12042 + const struct inet_connection_sock *icsk = inet_csk(sk);
12043 + u32 now = tcp_time_stamp;
12044 + unsigned int start;
12045 ++ u64 rate64;
12046 + u32 rate;
12047 +
12048 + memset(info, 0, sizeof(*info));
12049 +@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
12050 + info->tcpi_total_retrans = tp->total_retrans;
12051 +
12052 + rate = READ_ONCE(sk->sk_pacing_rate);
12053 +- info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
12054 ++ rate64 = rate != ~0U ? rate : ~0ULL;
12055 ++ put_unaligned(rate64, &info->tcpi_pacing_rate);
12056 +
12057 + rate = READ_ONCE(sk->sk_max_pacing_rate);
12058 +- info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
12059 ++ rate64 = rate != ~0U ? rate : ~0ULL;
12060 ++ put_unaligned(rate64, &info->tcpi_max_pacing_rate);
12061 +
12062 + do {
12063 + start = u64_stats_fetch_begin_irq(&tp->syncp);
12064 +- info->tcpi_bytes_acked = tp->bytes_acked;
12065 +- info->tcpi_bytes_received = tp->bytes_received;
12066 ++ put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
12067 ++ put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
12068 + } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
12069 + info->tcpi_segs_out = tp->segs_out;
12070 + info->tcpi_segs_in = tp->segs_in;
12071 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
12072 +index d8841a2f1569..8c7e63163e92 100644
12073 +--- a/net/ipv4/tcp_ipv4.c
12074 ++++ b/net/ipv4/tcp_ipv4.c
12075 +@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
12076 +
12077 +
12078 + /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
12079 +-void tcp_req_err(struct sock *sk, u32 seq)
12080 ++void tcp_req_err(struct sock *sk, u32 seq, bool abort)
12081 + {
12082 + struct request_sock *req = inet_reqsk(sk);
12083 + struct net *net = sock_net(sk);
12084 +@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
12085 +
12086 + if (seq != tcp_rsk(req)->snt_isn) {
12087 + NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
12088 +- } else {
12089 ++ } else if (abort) {
12090 + /*
12091 + * Still in SYN_RECV, just remove it silently.
12092 + * There is no good way to pass the error to the newly
12093 +@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
12094 + }
12095 + seq = ntohl(th->seq);
12096 + if (sk->sk_state == TCP_NEW_SYN_RECV)
12097 +- return tcp_req_err(sk, seq);
12098 ++ return tcp_req_err(sk, seq,
12099 ++ type == ICMP_PARAMETERPROB ||
12100 ++ type == ICMP_TIME_EXCEEDED ||
12101 ++ (type == ICMP_DEST_UNREACH &&
12102 ++ (code == ICMP_NET_UNREACH ||
12103 ++ code == ICMP_HOST_UNREACH)));
12104 +
12105 + bh_lock_sock(sk);
12106 + /* If too many ICMPs get dropped on busy
12107 +@@ -705,7 +710,8 @@ release_sk1:
12108 + outside socket context is ugly, certainly. What can I do?
12109 + */
12110 +
12111 +-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
12112 ++static void tcp_v4_send_ack(struct net *net,
12113 ++ struct sk_buff *skb, u32 seq, u32 ack,
12114 + u32 win, u32 tsval, u32 tsecr, int oif,
12115 + struct tcp_md5sig_key *key,
12116 + int reply_flags, u8 tos)
12117 +@@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
12118 + ];
12119 + } rep;
12120 + struct ip_reply_arg arg;
12121 +- struct net *net = dev_net(skb_dst(skb)->dev);
12122 +
12123 + memset(&rep.th, 0, sizeof(struct tcphdr));
12124 + memset(&arg, 0, sizeof(arg));
12125 +@@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
12126 + struct inet_timewait_sock *tw = inet_twsk(sk);
12127 + struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
12128 +
12129 +- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
12130 ++ tcp_v4_send_ack(sock_net(sk), skb,
12131 ++ tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
12132 + tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
12133 + tcp_time_stamp + tcptw->tw_ts_offset,
12134 + tcptw->tw_ts_recent,
12135 +@@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
12136 + /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
12137 + * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
12138 + */
12139 +- tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
12140 +- tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
12141 ++ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
12142 ++ tcp_sk(sk)->snd_nxt;
12143 ++
12144 ++ tcp_v4_send_ack(sock_net(sk), skb, seq,
12145 + tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
12146 + tcp_time_stamp,
12147 + req->ts_recent,
12148 +@@ -1586,28 +1594,30 @@ process:
12149 +
12150 + if (sk->sk_state == TCP_NEW_SYN_RECV) {
12151 + struct request_sock *req = inet_reqsk(sk);
12152 +- struct sock *nsk = NULL;
12153 ++ struct sock *nsk;
12154 +
12155 + sk = req->rsk_listener;
12156 +- if (tcp_v4_inbound_md5_hash(sk, skb))
12157 +- goto discard_and_relse;
12158 +- if (likely(sk->sk_state == TCP_LISTEN)) {
12159 +- nsk = tcp_check_req(sk, skb, req, false);
12160 +- } else {
12161 ++ if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
12162 ++ reqsk_put(req);
12163 ++ goto discard_it;
12164 ++ }
12165 ++ if (unlikely(sk->sk_state != TCP_LISTEN)) {
12166 + inet_csk_reqsk_queue_drop_and_put(sk, req);
12167 + goto lookup;
12168 + }
12169 ++ sock_hold(sk);
12170 ++ nsk = tcp_check_req(sk, skb, req, false);
12171 + if (!nsk) {
12172 + reqsk_put(req);
12173 +- goto discard_it;
12174 ++ goto discard_and_relse;
12175 + }
12176 + if (nsk == sk) {
12177 +- sock_hold(sk);
12178 + reqsk_put(req);
12179 + } else if (tcp_child_process(sk, nsk, skb)) {
12180 + tcp_v4_send_reset(nsk, skb);
12181 +- goto discard_it;
12182 ++ goto discard_and_relse;
12183 + } else {
12184 ++ sock_put(sk);
12185 + return 0;
12186 + }
12187 + }
12188 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
12189 +index c43890848641..7f8ab46adf61 100644
12190 +--- a/net/ipv4/udp.c
12191 ++++ b/net/ipv4/udp.c
12192 +@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
12193 + if (msg->msg_controllen) {
12194 + err = ip_cmsg_send(sock_net(sk), msg, &ipc,
12195 + sk->sk_family == AF_INET6);
12196 +- if (err)
12197 ++ if (unlikely(err)) {
12198 ++ kfree(ipc.opt);
12199 + return err;
12200 ++ }
12201 + if (ipc.opt)
12202 + free = 1;
12203 + connected = 0;
12204 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
12205 +index 1f21087accab..e8d3da0817d3 100644
12206 +--- a/net/ipv6/addrconf.c
12207 ++++ b/net/ipv6/addrconf.c
12208 +@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
12209 + if (err < 0)
12210 + goto errout;
12211 +
12212 +- err = EINVAL;
12213 ++ err = -EINVAL;
12214 + if (!tb[NETCONFA_IFINDEX])
12215 + goto errout;
12216 +
12217 +@@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12218 + {
12219 + struct inet6_dev *idev = ifp->idev;
12220 + struct net_device *dev = idev->dev;
12221 ++ bool notify = false;
12222 +
12223 + addrconf_join_solict(dev, &ifp->addr);
12224 +
12225 +@@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12226 + /* Because optimistic nodes can use this address,
12227 + * notify listeners. If DAD fails, RTM_DELADDR is sent.
12228 + */
12229 +- ipv6_ifa_notify(RTM_NEWADDR, ifp);
12230 ++ notify = true;
12231 + }
12232 + }
12233 +
12234 +@@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12235 + out:
12236 + spin_unlock(&ifp->lock);
12237 + read_unlock_bh(&idev->lock);
12238 ++ if (notify)
12239 ++ ipv6_ifa_notify(RTM_NEWADDR, ifp);
12240 + }
12241 +
12242 + static void addrconf_dad_start(struct inet6_ifaddr *ifp)
12243 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
12244 +index 517c55b01ba8..428162155280 100644
12245 +--- a/net/ipv6/datagram.c
12246 ++++ b/net/ipv6/datagram.c
12247 +@@ -162,6 +162,9 @@ ipv4_connected:
12248 + fl6.fl6_dport = inet->inet_dport;
12249 + fl6.fl6_sport = inet->inet_sport;
12250 +
12251 ++ if (!fl6.flowi6_oif)
12252 ++ fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
12253 ++
12254 + if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
12255 + fl6.flowi6_oif = np->mcast_oif;
12256 +
12257 +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
12258 +index 1f9ebe3cbb4a..dc2db4f7b182 100644
12259 +--- a/net/ipv6/ip6_flowlabel.c
12260 ++++ b/net/ipv6/ip6_flowlabel.c
12261 +@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
12262 + }
12263 + spin_lock_bh(&ip6_sk_fl_lock);
12264 + for (sflp = &np->ipv6_fl_list;
12265 +- (sfl = rcu_dereference(*sflp)) != NULL;
12266 ++ (sfl = rcu_dereference_protected(*sflp,
12267 ++ lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
12268 + sflp = &sfl->next) {
12269 + if (sfl->fl->label == freq.flr_label) {
12270 + if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
12271 + np->flow_label &= ~IPV6_FLOWLABEL_MASK;
12272 +- *sflp = rcu_dereference(sfl->next);
12273 ++ *sflp = sfl->next;
12274 + spin_unlock_bh(&ip6_sk_fl_lock);
12275 + fl_release(sfl->fl);
12276 + kfree_rcu(sfl, rcu);
12277 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
12278 +index 6473889f1736..31144c486c52 100644
12279 +--- a/net/ipv6/ip6_output.c
12280 ++++ b/net/ipv6/ip6_output.c
12281 +@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
12282 + struct rt6_info *rt;
12283 + #endif
12284 + int err;
12285 ++ int flags = 0;
12286 +
12287 + /* The correct way to handle this would be to do
12288 + * ip6_route_get_saddr, and then ip6_route_output; however,
12289 +@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
12290 + dst_release(*dst);
12291 + *dst = NULL;
12292 + }
12293 ++
12294 ++ if (fl6->flowi6_oif)
12295 ++ flags |= RT6_LOOKUP_F_IFACE;
12296 + }
12297 +
12298 + if (!*dst)
12299 +- *dst = ip6_route_output(net, sk, fl6);
12300 ++ *dst = ip6_route_output_flags(net, sk, fl6, flags);
12301 +
12302 + err = (*dst)->error;
12303 + if (err)
12304 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
12305 +index 826e6aa44f8d..3f164d3aaee2 100644
12306 +--- a/net/ipv6/route.c
12307 ++++ b/net/ipv6/route.c
12308 +@@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
12309 + return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
12310 + }
12311 +
12312 +-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
12313 +- struct flowi6 *fl6)
12314 ++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
12315 ++ struct flowi6 *fl6, int flags)
12316 + {
12317 + struct dst_entry *dst;
12318 +- int flags = 0;
12319 + bool any_src;
12320 +
12321 + dst = l3mdev_rt6_dst_by_oif(net, fl6);
12322 +@@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
12323 +
12324 + return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
12325 + }
12326 +-EXPORT_SYMBOL(ip6_route_output);
12327 ++EXPORT_SYMBOL_GPL(ip6_route_output_flags);
12328 +
12329 + struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
12330 + {
12331 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
12332 +index bd100b47c717..b8d405623f4f 100644
12333 +--- a/net/ipv6/tcp_ipv6.c
12334 ++++ b/net/ipv6/tcp_ipv6.c
12335 +@@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12336 + struct tcp_sock *tp;
12337 + __u32 seq, snd_una;
12338 + struct sock *sk;
12339 ++ bool fatal;
12340 + int err;
12341 +
12342 + sk = __inet6_lookup_established(net, &tcp_hashinfo,
12343 +@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12344 + return;
12345 + }
12346 + seq = ntohl(th->seq);
12347 ++ fatal = icmpv6_err_convert(type, code, &err);
12348 + if (sk->sk_state == TCP_NEW_SYN_RECV)
12349 +- return tcp_req_err(sk, seq);
12350 ++ return tcp_req_err(sk, seq, fatal);
12351 +
12352 + bh_lock_sock(sk);
12353 + if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
12354 +@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12355 + goto out;
12356 + }
12357 +
12358 +- icmpv6_err_convert(type, code, &err);
12359 +
12360 + /* Might be for a request_sock */
12361 + switch (sk->sk_state) {
12362 +@@ -1387,7 +1388,7 @@ process:
12363 +
12364 + if (sk->sk_state == TCP_NEW_SYN_RECV) {
12365 + struct request_sock *req = inet_reqsk(sk);
12366 +- struct sock *nsk = NULL;
12367 ++ struct sock *nsk;
12368 +
12369 + sk = req->rsk_listener;
12370 + tcp_v6_fill_cb(skb, hdr, th);
12371 +@@ -1395,24 +1396,24 @@ process:
12372 + reqsk_put(req);
12373 + goto discard_it;
12374 + }
12375 +- if (likely(sk->sk_state == TCP_LISTEN)) {
12376 +- nsk = tcp_check_req(sk, skb, req, false);
12377 +- } else {
12378 ++ if (unlikely(sk->sk_state != TCP_LISTEN)) {
12379 + inet_csk_reqsk_queue_drop_and_put(sk, req);
12380 + goto lookup;
12381 + }
12382 ++ sock_hold(sk);
12383 ++ nsk = tcp_check_req(sk, skb, req, false);
12384 + if (!nsk) {
12385 + reqsk_put(req);
12386 +- goto discard_it;
12387 ++ goto discard_and_relse;
12388 + }
12389 + if (nsk == sk) {
12390 +- sock_hold(sk);
12391 + reqsk_put(req);
12392 + tcp_v6_restore_cb(skb);
12393 + } else if (tcp_child_process(sk, nsk, skb)) {
12394 + tcp_v6_send_reset(nsk, skb);
12395 +- goto discard_it;
12396 ++ goto discard_and_relse;
12397 + } else {
12398 ++ sock_put(sk);
12399 + return 0;
12400 + }
12401 + }
12402 +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
12403 +index 435608c4306d..20ab7b2ec463 100644
12404 +--- a/net/iucv/af_iucv.c
12405 ++++ b/net/iucv/af_iucv.c
12406 +@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
12407 + if (!addr || addr->sa_family != AF_IUCV)
12408 + return -EINVAL;
12409 +
12410 ++ if (addr_len < sizeof(struct sockaddr_iucv))
12411 ++ return -EINVAL;
12412 ++
12413 + lock_sock(sk);
12414 + if (sk->sk_state != IUCV_OPEN) {
12415 + err = -EBADFD;
12416 +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
12417 +index f93c5be612a7..2caaa84ce92d 100644
12418 +--- a/net/l2tp/l2tp_netlink.c
12419 ++++ b/net/l2tp/l2tp_netlink.c
12420 +@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
12421 + ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
12422 + NLM_F_ACK, tunnel, cmd);
12423 +
12424 +- if (ret >= 0)
12425 +- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12426 ++ if (ret >= 0) {
12427 ++ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12428 ++ /* We don't care if no one is listening */
12429 ++ if (ret == -ESRCH)
12430 ++ ret = 0;
12431 ++ return ret;
12432 ++ }
12433 +
12434 + nlmsg_free(msg);
12435 +
12436 +@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
12437 + ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
12438 + NLM_F_ACK, session, cmd);
12439 +
12440 +- if (ret >= 0)
12441 +- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12442 ++ if (ret >= 0) {
12443 ++ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12444 ++ /* We don't care if no one is listening */
12445 ++ if (ret == -ESRCH)
12446 ++ ret = 0;
12447 ++ return ret;
12448 ++ }
12449 +
12450 + nlmsg_free(msg);
12451 +
12452 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
12453 +index 337bb5d78003..6a12b0f5cac8 100644
12454 +--- a/net/mac80211/ibss.c
12455 ++++ b/net/mac80211/ibss.c
12456 +@@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
12457 + if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
12458 + continue;
12459 + sdata->u.ibss.last_scan_completed = jiffies;
12460 +- ieee80211_queue_work(&local->hw, &sdata->work);
12461 + }
12462 + mutex_unlock(&local->iflist_mtx);
12463 + }
12464 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
12465 +index fa28500f28fd..6f85b6ab8e51 100644
12466 +--- a/net/mac80211/mesh.c
12467 ++++ b/net/mac80211/mesh.c
12468 +@@ -1370,17 +1370,6 @@ out:
12469 + sdata_unlock(sdata);
12470 + }
12471 +
12472 +-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
12473 +-{
12474 +- struct ieee80211_sub_if_data *sdata;
12475 +-
12476 +- rcu_read_lock();
12477 +- list_for_each_entry_rcu(sdata, &local->interfaces, list)
12478 +- if (ieee80211_vif_is_mesh(&sdata->vif) &&
12479 +- ieee80211_sdata_running(sdata))
12480 +- ieee80211_queue_work(&local->hw, &sdata->work);
12481 +- rcu_read_unlock();
12482 +-}
12483 +
12484 + void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
12485 + {
12486 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
12487 +index a1596344c3ba..4a8019f79fb2 100644
12488 +--- a/net/mac80211/mesh.h
12489 ++++ b/net/mac80211/mesh.h
12490 +@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
12491 + return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
12492 + }
12493 +
12494 +-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
12495 +-
12496 + void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
12497 + void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
12498 + void ieee80211s_stop(void);
12499 + #else
12500 +-static inline void
12501 +-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
12502 + static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
12503 + { return false; }
12504 + static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
12505 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
12506 +index 3aa04344942b..83097c3832d1 100644
12507 +--- a/net/mac80211/mlme.c
12508 ++++ b/net/mac80211/mlme.c
12509 +@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
12510 + if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
12511 + ieee80211_queue_work(&sdata->local->hw,
12512 + &sdata->u.mgd.monitor_work);
12513 +- /* and do all the other regular work too */
12514 +- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
12515 + }
12516 + }
12517 +
12518 +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
12519 +index a413e52f7691..acbe182b75d1 100644
12520 +--- a/net/mac80211/scan.c
12521 ++++ b/net/mac80211/scan.c
12522 +@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
12523 + bool was_scanning = local->scanning;
12524 + struct cfg80211_scan_request *scan_req;
12525 + struct ieee80211_sub_if_data *scan_sdata;
12526 ++ struct ieee80211_sub_if_data *sdata;
12527 +
12528 + lockdep_assert_held(&local->mtx);
12529 +
12530 +@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
12531 +
12532 + ieee80211_mlme_notify_scan_completed(local);
12533 + ieee80211_ibss_notify_scan_completed(local);
12534 +- ieee80211_mesh_notify_scan_completed(local);
12535 ++
12536 ++ /* Requeue all the work that might have been ignored while
12537 ++ * the scan was in progress; if there was none, this will
12538 ++ * just be a no-op for the particular interface.
12539 ++ */
12540 ++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
12541 ++ if (ieee80211_sdata_running(sdata))
12542 ++ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
12543 ++ }
12544 ++
12545 + if (was_scanning)
12546 + ieee80211_start_next_roc(local);
12547 + }
12548 +diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
12549 +index 1605691d9414..d933cb89efac 100644
12550 +--- a/net/openvswitch/vport-vxlan.c
12551 ++++ b/net/openvswitch/vport-vxlan.c
12552 +@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
12553 + int err;
12554 + struct vxlan_config conf = {
12555 + .no_share = true,
12556 +- .flags = VXLAN_F_COLLECT_METADATA,
12557 ++ .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
12558 + };
12559 +
12560 + if (!options) {
12561 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
12562 +index f53bf3b6558b..cf5b69ab1829 100644
12563 +--- a/net/rfkill/core.c
12564 ++++ b/net/rfkill/core.c
12565 +@@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
12566 + return res;
12567 + }
12568 +
12569 +-static bool rfkill_readable(struct rfkill_data *data)
12570 +-{
12571 +- bool r;
12572 +-
12573 +- mutex_lock(&data->mtx);
12574 +- r = !list_empty(&data->events);
12575 +- mutex_unlock(&data->mtx);
12576 +-
12577 +- return r;
12578 +-}
12579 +-
12580 + static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
12581 + size_t count, loff_t *pos)
12582 + {
12583 +@@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
12584 + goto out;
12585 + }
12586 + mutex_unlock(&data->mtx);
12587 ++ /* since we re-check and it just compares pointers,
12588 ++ * using !list_empty() without locking isn't a problem
12589 ++ */
12590 + ret = wait_event_interruptible(data->read_wait,
12591 +- rfkill_readable(data));
12592 ++ !list_empty(&data->events));
12593 + mutex_lock(&data->mtx);
12594 +
12595 + if (ret)
12596 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
12597 +index b5c2cf2aa6d4..af1acf009866 100644
12598 +--- a/net/sched/sch_api.c
12599 ++++ b/net/sched/sch_api.c
12600 +@@ -1852,6 +1852,7 @@ reset:
12601 + }
12602 +
12603 + tp = old_tp;
12604 ++ protocol = tc_skb_protocol(skb);
12605 + goto reclassify;
12606 + #endif
12607 + }
12608 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
12609 +index 3d9ea9a48289..8b4ff315695e 100644
12610 +--- a/net/sctp/protocol.c
12611 ++++ b/net/sctp/protocol.c
12612 +@@ -60,6 +60,8 @@
12613 + #include <net/inet_common.h>
12614 + #include <net/inet_ecn.h>
12615 +
12616 ++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
12617 ++
12618 + /* Global data structures. */
12619 + struct sctp_globals sctp_globals __read_mostly;
12620 +
12621 +@@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
12622 + unsigned long limit;
12623 + int max_share;
12624 + int order;
12625 ++ int num_entries;
12626 ++ int max_entry_order;
12627 +
12628 + sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
12629 +
12630 +@@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
12631 +
12632 + /* Size and allocate the association hash table.
12633 + * The methodology is similar to that of the tcp hash tables.
12634 ++ * Though not identical. Start by getting a goal size
12635 + */
12636 + if (totalram_pages >= (128 * 1024))
12637 + goal = totalram_pages >> (22 - PAGE_SHIFT);
12638 + else
12639 + goal = totalram_pages >> (24 - PAGE_SHIFT);
12640 +
12641 +- for (order = 0; (1UL << order) < goal; order++)
12642 +- ;
12643 ++ /* Then compute the page order for said goal */
12644 ++ order = get_order(goal);
12645 ++
12646 ++ /* Now compute the required page order for the maximum sized table we
12647 ++ * want to create
12648 ++ */
12649 ++ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
12650 ++ sizeof(struct sctp_bind_hashbucket));
12651 ++
12652 ++ /* Limit the page order by that maximum hash table size */
12653 ++ order = min(order, max_entry_order);
12654 +
12655 + do {
12656 + sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
12657 +@@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
12658 + INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
12659 + }
12660 +
12661 +- /* Allocate and initialize the SCTP port hash table. */
12662 ++ /* Allocate and initialize the SCTP port hash table.
12663 ++ * Note that order is initialized to start at the max sized
12664 ++ * table we want to support. If we can't get that many pages,
12665 ++ * reduce the order and try again
12666 ++ */
12667 + do {
12668 +- sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
12669 +- sizeof(struct sctp_bind_hashbucket);
12670 +- if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
12671 +- continue;
12672 + sctp_port_hashtable = (struct sctp_bind_hashbucket *)
12673 + __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
12674 + } while (!sctp_port_hashtable && --order > 0);
12675 ++
12676 + if (!sctp_port_hashtable) {
12677 + pr_err("Failed bind hash alloc\n");
12678 + status = -ENOMEM;
12679 + goto err_bhash_alloc;
12680 + }
12681 ++
12682 ++ /* Now compute the number of entries that will fit in the
12683 ++ * port hash space we allocated
12684 ++ */
12685 ++ num_entries = (1UL << order) * PAGE_SIZE /
12686 ++ sizeof(struct sctp_bind_hashbucket);
12687 ++
12688 ++ /* And finish by rounding it down to the nearest power of two.
12689 ++ * This wastes some memory of course, but it's needed because
12690 ++ * the hash function operates based on the assumption that
12691 ++ * the number of entries is a power of two
12692 ++ */
12693 ++ sctp_port_hashsize = rounddown_pow_of_two(num_entries);
12694 ++
12695 + for (i = 0; i < sctp_port_hashsize; i++) {
12696 + spin_lock_init(&sctp_port_hashtable[i].lock);
12697 + INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
12698 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
12699 +index ef1d90fdc773..be1489fc3234 100644
12700 +--- a/net/sctp/socket.c
12701 ++++ b/net/sctp/socket.c
12702 +@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
12703 + struct sctp_hmac_algo_param *hmacs;
12704 + __u16 data_len = 0;
12705 + u32 num_idents;
12706 ++ int i;
12707 +
12708 + if (!ep->auth_enable)
12709 + return -EACCES;
12710 +@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
12711 + return -EFAULT;
12712 + if (put_user(num_idents, &p->shmac_num_idents))
12713 + return -EFAULT;
12714 +- if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
12715 +- return -EFAULT;
12716 ++ for (i = 0; i < num_idents; i++) {
12717 ++ __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
12718 ++
12719 ++ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
12720 ++ return -EFAULT;
12721 ++ }
12722 + return 0;
12723 + }
12724 +
12725 +@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
12726 +
12727 + if (cmsgs->srinfo->sinfo_flags &
12728 + ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
12729 ++ SCTP_SACK_IMMEDIATELY |
12730 + SCTP_ABORT | SCTP_EOF))
12731 + return -EINVAL;
12732 + break;
12733 +@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
12734 +
12735 + if (cmsgs->sinfo->snd_flags &
12736 + ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
12737 ++ SCTP_SACK_IMMEDIATELY |
12738 + SCTP_ABORT | SCTP_EOF))
12739 + return -EINVAL;
12740 + break;
12741 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
12742 +index 5e4f815c2b34..21e20353178e 100644
12743 +--- a/net/sunrpc/cache.c
12744 ++++ b/net/sunrpc/cache.c
12745 +@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
12746 + if (bp[0] == '\\' && bp[1] == 'x') {
12747 + /* HEX STRING */
12748 + bp += 2;
12749 +- while (len < bufsize) {
12750 ++ while (len < bufsize - 1) {
12751 + int h, l;
12752 +
12753 + h = hex_to_bin(bp[0]);
12754 +diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
12755 +index f34e535e93bd..d5d7132ac847 100644
12756 +--- a/net/switchdev/switchdev.c
12757 ++++ b/net/switchdev/switchdev.c
12758 +@@ -20,6 +20,7 @@
12759 + #include <linux/list.h>
12760 + #include <linux/workqueue.h>
12761 + #include <linux/if_vlan.h>
12762 ++#include <linux/rtnetlink.h>
12763 + #include <net/ip_fib.h>
12764 + #include <net/switchdev.h>
12765 +
12766 +@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
12767 + }
12768 + EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
12769 +
12770 +-static DEFINE_MUTEX(switchdev_mutex);
12771 + static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
12772 +
12773 + /**
12774 +@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
12775 + {
12776 + int err;
12777 +
12778 +- mutex_lock(&switchdev_mutex);
12779 ++ rtnl_lock();
12780 + err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
12781 +- mutex_unlock(&switchdev_mutex);
12782 ++ rtnl_unlock();
12783 + return err;
12784 + }
12785 + EXPORT_SYMBOL_GPL(register_switchdev_notifier);
12786 +@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
12787 + {
12788 + int err;
12789 +
12790 +- mutex_lock(&switchdev_mutex);
12791 ++ rtnl_lock();
12792 + err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
12793 +- mutex_unlock(&switchdev_mutex);
12794 ++ rtnl_unlock();
12795 + return err;
12796 + }
12797 + EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
12798 +@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
12799 + * Call all network notifier blocks. This should be called by the driver
12800 + * when it needs to propagate a hardware event.
12801 + * Return values are the same as for atomic_notifier_call_chain().
12802 ++ * rtnl_lock must be held.
12803 + */
12804 + int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
12805 + struct switchdev_notifier_info *info)
12806 + {
12807 + int err;
12808 +
12809 ++ ASSERT_RTNL();
12810 ++
12811 + info->dev = dev;
12812 +- mutex_lock(&switchdev_mutex);
12813 + err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
12814 +- mutex_unlock(&switchdev_mutex);
12815 + return err;
12816 + }
12817 + EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
12818 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
12819 +index 9dc239dfe192..92e367a0a5ce 100644
12820 +--- a/net/tipc/bcast.c
12821 ++++ b/net/tipc/bcast.c
12822 +@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
12823 +
12824 + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
12825 + NLM_F_MULTI, TIPC_NL_LINK_GET);
12826 +- if (!hdr)
12827 ++ if (!hdr) {
12828 ++ tipc_bcast_unlock(net);
12829 + return -EMSGSIZE;
12830 ++ }
12831 +
12832 + attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
12833 + if (!attrs)
12834 +diff --git a/net/tipc/node.c b/net/tipc/node.c
12835 +index 20cddec0a43c..3926b561f873 100644
12836 +--- a/net/tipc/node.c
12837 ++++ b/net/tipc/node.c
12838 +@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
12839 + skb_queue_head_init(&n_ptr->bc_entry.inputq1);
12840 + __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
12841 + skb_queue_head_init(&n_ptr->bc_entry.inputq2);
12842 +- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
12843 +- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
12844 +- if (n_ptr->addr < temp_node->addr)
12845 +- break;
12846 +- }
12847 +- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
12848 + n_ptr->state = SELF_DOWN_PEER_LEAVING;
12849 + n_ptr->signature = INVALID_NODE_SIG;
12850 + n_ptr->active_links[0] = INVALID_BEARER_ID;
12851 +@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
12852 + tipc_node_get(n_ptr);
12853 + setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
12854 + n_ptr->keepalive_intv = U32_MAX;
12855 ++ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
12856 ++ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
12857 ++ if (n_ptr->addr < temp_node->addr)
12858 ++ break;
12859 ++ }
12860 ++ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
12861 + exit:
12862 + spin_unlock_bh(&tn->node_list_lock);
12863 + return n_ptr;
12864 +diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
12865 +index 350cca33ee0a..69ee2eeef968 100644
12866 +--- a/net/tipc/subscr.c
12867 ++++ b/net/tipc/subscr.c
12868 +@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
12869 + struct sockaddr_tipc *addr, void *usr_data,
12870 + void *buf, size_t len)
12871 + {
12872 +- struct tipc_subscriber *subscriber = usr_data;
12873 ++ struct tipc_subscriber *subscrb = usr_data;
12874 + struct tipc_subscription *sub = NULL;
12875 + struct tipc_net *tn = net_generic(net, tipc_net_id);
12876 +
12877 +- tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
12878 +- if (sub)
12879 +- tipc_nametbl_subscribe(sub);
12880 +- else
12881 +- tipc_conn_terminate(tn->topsrv, subscriber->conid);
12882 ++ if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
12883 ++ return tipc_conn_terminate(tn->topsrv, subscrb->conid);
12884 ++
12885 ++ tipc_nametbl_subscribe(sub);
12886 + }
12887 +
12888 + /* Handle one request to establish a new subscriber */
12889 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
12890 +index e3f85bc8b135..898a53a562b8 100644
12891 +--- a/net/unix/af_unix.c
12892 ++++ b/net/unix/af_unix.c
12893 +@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
12894 + UNIXCB(skb).fp = NULL;
12895 +
12896 + for (i = scm->fp->count-1; i >= 0; i--)
12897 +- unix_notinflight(scm->fp->fp[i]);
12898 ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
12899 + }
12900 +
12901 + static void unix_destruct_scm(struct sk_buff *skb)
12902 +@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
12903 + return -ENOMEM;
12904 +
12905 + for (i = scm->fp->count - 1; i >= 0; i--)
12906 +- unix_inflight(scm->fp->fp[i]);
12907 ++ unix_inflight(scm->fp->user, scm->fp->fp[i]);
12908 + return max_level;
12909 + }
12910 +
12911 +@@ -1781,7 +1781,12 @@ restart_locked:
12912 + goto out_unlock;
12913 + }
12914 +
12915 +- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
12916 ++ /* other == sk && unix_peer(other) != sk if
12917 ++ * - unix_peer(sk) == NULL, destination address bound to sk
12918 ++ * - unix_peer(sk) == sk by time of get but disconnected before lock
12919 ++ */
12920 ++ if (other != sk &&
12921 ++ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
12922 + if (timeo) {
12923 + timeo = unix_wait_for_peer(other, timeo);
12924 +
12925 +@@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
12926 + size_t size = state->size;
12927 + unsigned int last_len;
12928 +
12929 +- err = -EINVAL;
12930 +- if (sk->sk_state != TCP_ESTABLISHED)
12931 ++ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
12932 ++ err = -EINVAL;
12933 + goto out;
12934 ++ }
12935 +
12936 +- err = -EOPNOTSUPP;
12937 +- if (flags & MSG_OOB)
12938 ++ if (unlikely(flags & MSG_OOB)) {
12939 ++ err = -EOPNOTSUPP;
12940 + goto out;
12941 ++ }
12942 +
12943 + target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
12944 + timeo = sock_rcvtimeo(sk, noblock);
12945 +@@ -2322,9 +2329,11 @@ again:
12946 + goto unlock;
12947 +
12948 + unix_state_unlock(sk);
12949 +- err = -EAGAIN;
12950 +- if (!timeo)
12951 ++ if (!timeo) {
12952 ++ err = -EAGAIN;
12953 + break;
12954 ++ }
12955 ++
12956 + mutex_unlock(&u->readlock);
12957 +
12958 + timeo = unix_stream_data_wait(sk, timeo, last,
12959 +@@ -2332,6 +2341,7 @@ again:
12960 +
12961 + if (signal_pending(current)) {
12962 + err = sock_intr_errno(timeo);
12963 ++ scm_destroy(&scm);
12964 + goto out;
12965 + }
12966 +
12967 +diff --git a/net/unix/diag.c b/net/unix/diag.c
12968 +index c512f64d5287..4d9679701a6d 100644
12969 +--- a/net/unix/diag.c
12970 ++++ b/net/unix/diag.c
12971 +@@ -220,7 +220,7 @@ done:
12972 + return skb->len;
12973 + }
12974 +
12975 +-static struct sock *unix_lookup_by_ino(int ino)
12976 ++static struct sock *unix_lookup_by_ino(unsigned int ino)
12977 + {
12978 + int i;
12979 + struct sock *sk;
12980 +diff --git a/net/unix/garbage.c b/net/unix/garbage.c
12981 +index 8fcdc2283af5..6a0d48525fcf 100644
12982 +--- a/net/unix/garbage.c
12983 ++++ b/net/unix/garbage.c
12984 +@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
12985 + * descriptor if it is for an AF_UNIX socket.
12986 + */
12987 +
12988 +-void unix_inflight(struct file *fp)
12989 ++void unix_inflight(struct user_struct *user, struct file *fp)
12990 + {
12991 + struct sock *s = unix_get_socket(fp);
12992 +
12993 +@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
12994 + }
12995 + unix_tot_inflight++;
12996 + }
12997 +- fp->f_cred->user->unix_inflight++;
12998 ++ user->unix_inflight++;
12999 + spin_unlock(&unix_gc_lock);
13000 + }
13001 +
13002 +-void unix_notinflight(struct file *fp)
13003 ++void unix_notinflight(struct user_struct *user, struct file *fp)
13004 + {
13005 + struct sock *s = unix_get_socket(fp);
13006 +
13007 +@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
13008 + list_del_init(&u->link);
13009 + unix_tot_inflight--;
13010 + }
13011 +- fp->f_cred->user->unix_inflight--;
13012 ++ user->unix_inflight--;
13013 + spin_unlock(&unix_gc_lock);
13014 + }
13015 +
13016 +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
13017 +index dacf71a43ad4..ba6c34ea5429 100755
13018 +--- a/scripts/link-vmlinux.sh
13019 ++++ b/scripts/link-vmlinux.sh
13020 +@@ -62,7 +62,7 @@ vmlinux_link()
13021 + -Wl,--start-group \
13022 + ${KBUILD_VMLINUX_MAIN} \
13023 + -Wl,--end-group \
13024 +- -lutil -lrt ${1}
13025 ++ -lutil -lrt -lpthread ${1}
13026 + rm -f linux
13027 + fi
13028 + }
13029 +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
13030 +index ff81026f6ddb..7c57c7fcf5a2 100644
13031 +--- a/security/smack/smack_lsm.c
13032 ++++ b/security/smack/smack_lsm.c
13033 +@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
13034 + */
13035 + static inline unsigned int smk_ptrace_mode(unsigned int mode)
13036 + {
13037 +- switch (mode) {
13038 +- case PTRACE_MODE_READ:
13039 +- return MAY_READ;
13040 +- case PTRACE_MODE_ATTACH:
13041 ++ if (mode & PTRACE_MODE_ATTACH)
13042 + return MAY_READWRITE;
13043 +- }
13044 ++ if (mode & PTRACE_MODE_READ)
13045 ++ return MAY_READ;
13046 +
13047 + return 0;
13048 + }
13049 +diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
13050 +index d3c19c970a06..cb6ed10816d4 100644
13051 +--- a/security/yama/yama_lsm.c
13052 ++++ b/security/yama/yama_lsm.c
13053 +@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
13054 + int rc = 0;
13055 +
13056 + /* require ptrace target be a child of ptracer on attach */
13057 +- if (mode == PTRACE_MODE_ATTACH) {
13058 ++ if (mode & PTRACE_MODE_ATTACH) {
13059 + switch (ptrace_scope) {
13060 + case YAMA_SCOPE_DISABLED:
13061 + /* No additional restrictions. */
13062 +@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
13063 + }
13064 + }
13065 +
13066 +- if (rc) {
13067 ++ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
13068 + printk_ratelimited(KERN_NOTICE
13069 + "ptrace of pid %d was attempted by: %s (pid %d)\n",
13070 + child->pid, current->comm, current->pid);
13071 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
13072 +index 2c13298e80b7..2ff692dd2c5f 100644
13073 +--- a/sound/pci/hda/hda_intel.c
13074 ++++ b/sound/pci/hda/hda_intel.c
13075 +@@ -357,7 +357,10 @@ enum {
13076 + ((pci)->device == 0x0d0c) || \
13077 + ((pci)->device == 0x160c))
13078 +
13079 +-#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
13080 ++#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
13081 ++#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
13082 ++#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
13083 ++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
13084 +
13085 + static char *driver_short_names[] = {
13086 + [AZX_DRIVER_ICH] = "HDA Intel",
13087 +@@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
13088 +
13089 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
13090 + snd_hdac_set_codec_wakeup(bus, true);
13091 +- if (IS_BROXTON(pci)) {
13092 ++ if (IS_SKL_PLUS(pci)) {
13093 + pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
13094 + val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
13095 + pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
13096 + }
13097 + azx_init_chip(chip, full_reset);
13098 +- if (IS_BROXTON(pci)) {
13099 ++ if (IS_SKL_PLUS(pci)) {
13100 + pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
13101 + val = val | INTEL_HDA_CGCTL_MISCBDCGE;
13102 + pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
13103 +@@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
13104 + snd_hdac_set_codec_wakeup(bus, false);
13105 +
13106 + /* reduce dma latency to avoid noise */
13107 +- if (IS_BROXTON(pci))
13108 ++ if (IS_BXT(pci))
13109 + bxt_reduce_dma_latency(chip);
13110 + }
13111 +
13112 +@@ -971,11 +974,6 @@ static int azx_resume(struct device *dev)
13113 + /* put codec down to D3 at hibernation for Intel SKL+;
13114 + * otherwise BIOS may still access the codec and screw up the driver
13115 + */
13116 +-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
13117 +-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
13118 +-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
13119 +-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
13120 +-
13121 + static int azx_freeze_noirq(struct device *dev)
13122 + {
13123 + struct pci_dev *pci = to_pci_dev(dev);
13124 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
13125 +index efd4980cffb8..72fa58dd7723 100644
13126 +--- a/sound/pci/hda/patch_realtek.c
13127 ++++ b/sound/pci/hda/patch_realtek.c
13128 +@@ -4749,6 +4749,7 @@ enum {
13129 + ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
13130 + ALC293_FIXUP_LENOVO_SPK_NOISE,
13131 + ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
13132 ++ ALC255_FIXUP_DELL_SPK_NOISE,
13133 + };
13134 +
13135 + static const struct hda_fixup alc269_fixups[] = {
13136 +@@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = {
13137 + .type = HDA_FIXUP_FUNC,
13138 + .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
13139 + },
13140 ++ [ALC255_FIXUP_DELL_SPK_NOISE] = {
13141 ++ .type = HDA_FIXUP_FUNC,
13142 ++ .v.func = alc_fixup_disable_aamix,
13143 ++ .chained = true,
13144 ++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
13145 ++ },
13146 + };
13147 +
13148 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
13149 +@@ -5410,6 +5417,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
13150 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
13151 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
13152 + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
13153 ++ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
13154 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
13155 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
13156 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
13157 +diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
13158 +index 96234b638249..5d51d6ff08e6 100644
13159 +--- a/tools/hv/hv_vss_daemon.c
13160 ++++ b/tools/hv/hv_vss_daemon.c
13161 +@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
13162 + syslog(LOG_ERR, "Illegal op:%d\n", op);
13163 + }
13164 + vss_msg->error = error;
13165 +- len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
13166 ++ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
13167 + if (len != sizeof(struct hv_vss_msg)) {
13168 + syslog(LOG_ERR, "write failed; error: %d %s", errno,
13169 + strerror(errno));
13170 +diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
13171 +index 2d9d8306dbd3..4a3a72cb5805 100644
13172 +--- a/tools/perf/util/stat.c
13173 ++++ b/tools/perf/util/stat.c
13174 +@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
13175 + int i, ret;
13176 +
13177 + aggr->val = aggr->ena = aggr->run = 0;
13178 +- init_stats(ps->res_stats);
13179 +
13180 + if (counter->per_pkg)
13181 + zero_per_pkg(counter);
13182 +diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
13183 +index 77edcdcc016b..057278448515 100755
13184 +--- a/tools/testing/selftests/efivarfs/efivarfs.sh
13185 ++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
13186 +@@ -88,7 +88,11 @@ test_delete()
13187 + exit 1
13188 + fi
13189 +
13190 +- rm $file
13191 ++ rm $file 2>/dev/null
13192 ++ if [ $? -ne 0 ]; then
13193 ++ chattr -i $file
13194 ++ rm $file
13195 ++ fi
13196 +
13197 + if [ -e $file ]; then
13198 + echo "$file couldn't be deleted" >&2
13199 +@@ -111,6 +115,7 @@ test_zero_size_delete()
13200 + exit 1
13201 + fi
13202 +
13203 ++ chattr -i $file
13204 + printf "$attrs" > $file
13205 +
13206 + if [ -e $file ]; then
13207 +@@ -141,7 +146,11 @@ test_valid_filenames()
13208 + echo "$file could not be created" >&2
13209 + ret=1
13210 + else
13211 +- rm $file
13212 ++ rm $file 2>/dev/null
13213 ++ if [ $? -ne 0 ]; then
13214 ++ chattr -i $file
13215 ++ rm $file
13216 ++ fi
13217 + fi
13218 + done
13219 +
13220 +@@ -174,7 +183,11 @@ test_invalid_filenames()
13221 +
13222 + if [ -e $file ]; then
13223 + echo "Creating $file should have failed" >&2
13224 +- rm $file
13225 ++ rm $file 2>/dev/null
13226 ++ if [ $? -ne 0 ]; then
13227 ++ chattr -i $file
13228 ++ rm $file
13229 ++ fi
13230 + ret=1
13231 + fi
13232 + done
13233 +diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
13234 +index 8c0764407b3c..4af74f733036 100644
13235 +--- a/tools/testing/selftests/efivarfs/open-unlink.c
13236 ++++ b/tools/testing/selftests/efivarfs/open-unlink.c
13237 +@@ -1,10 +1,68 @@
13238 ++#include <errno.h>
13239 + #include <stdio.h>
13240 + #include <stdint.h>
13241 + #include <stdlib.h>
13242 + #include <unistd.h>
13243 ++#include <sys/ioctl.h>
13244 + #include <sys/types.h>
13245 + #include <sys/stat.h>
13246 + #include <fcntl.h>
13247 ++#include <linux/fs.h>
13248 ++
13249 ++static int set_immutable(const char *path, int immutable)
13250 ++{
13251 ++ unsigned int flags;
13252 ++ int fd;
13253 ++ int rc;
13254 ++ int error;
13255 ++
13256 ++ fd = open(path, O_RDONLY);
13257 ++ if (fd < 0)
13258 ++ return fd;
13259 ++
13260 ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
13261 ++ if (rc < 0) {
13262 ++ error = errno;
13263 ++ close(fd);
13264 ++ errno = error;
13265 ++ return rc;
13266 ++ }
13267 ++
13268 ++ if (immutable)
13269 ++ flags |= FS_IMMUTABLE_FL;
13270 ++ else
13271 ++ flags &= ~FS_IMMUTABLE_FL;
13272 ++
13273 ++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
13274 ++ error = errno;
13275 ++ close(fd);
13276 ++ errno = error;
13277 ++ return rc;
13278 ++}
13279 ++
13280 ++static int get_immutable(const char *path)
13281 ++{
13282 ++ unsigned int flags;
13283 ++ int fd;
13284 ++ int rc;
13285 ++ int error;
13286 ++
13287 ++ fd = open(path, O_RDONLY);
13288 ++ if (fd < 0)
13289 ++ return fd;
13290 ++
13291 ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
13292 ++ if (rc < 0) {
13293 ++ error = errno;
13294 ++ close(fd);
13295 ++ errno = error;
13296 ++ return rc;
13297 ++ }
13298 ++ close(fd);
13299 ++ if (flags & FS_IMMUTABLE_FL)
13300 ++ return 1;
13301 ++ return 0;
13302 ++}
13303 +
13304 + int main(int argc, char **argv)
13305 + {
13306 +@@ -27,7 +85,7 @@ int main(int argc, char **argv)
13307 + buf[4] = 0;
13308 +
13309 + /* create a test variable */
13310 +- fd = open(path, O_WRONLY | O_CREAT);
13311 ++ fd = open(path, O_WRONLY | O_CREAT, 0600);
13312 + if (fd < 0) {
13313 + perror("open(O_WRONLY)");
13314 + return EXIT_FAILURE;
13315 +@@ -41,6 +99,18 @@ int main(int argc, char **argv)
13316 +
13317 + close(fd);
13318 +
13319 ++ rc = get_immutable(path);
13320 ++ if (rc < 0) {
13321 ++ perror("ioctl(FS_IOC_GETFLAGS)");
13322 ++ return EXIT_FAILURE;
13323 ++ } else if (rc) {
13324 ++ rc = set_immutable(path, 0);
13325 ++ if (rc < 0) {
13326 ++ perror("ioctl(FS_IOC_SETFLAGS)");
13327 ++ return EXIT_FAILURE;
13328 ++ }
13329 ++ }
13330 ++
13331 + fd = open(path, O_RDONLY);
13332 + if (fd < 0) {
13333 + perror("open");
13334 +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
13335 +index 7a2f449bd85d..5d10f104f3eb 100644
13336 +--- a/virt/kvm/arm/vgic.c
13337 ++++ b/virt/kvm/arm/vgic.c
13338 +@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
13339 + static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
13340 + {
13341 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
13342 +-
13343 +- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
13344 ++ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
13345 ++ int sz = nr_longs * sizeof(unsigned long);
13346 + vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
13347 + vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
13348 + vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
13349 +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
13350 +index 77d42be6970e..4f70d12e392d 100644
13351 +--- a/virt/kvm/async_pf.c
13352 ++++ b/virt/kvm/async_pf.c
13353 +@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
13354 + * do alloc nowait since if we are going to sleep anyway we
13355 + * may as well sleep faulting in page
13356 + */
13357 +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
13358 ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
13359 + if (!work)
13360 + return 0;
13361 +