Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 4.4.3/, 4.4.4/
Date: Fri, 04 Mar 2016 12:03:54
Message-Id: 1457093598.774d82a71af823de5172c7cbb11ae4355c27aabb.blueness@gentoo
commit: 774d82a71af823de5172c7cbb11ae4355c27aabb
Author: Anthony G. Basile <blueness@gentoo.org>
AuthorDate: Fri Mar 4 12:13:18 2016 +0000
Commit: Anthony G. Basile <blueness@gentoo.org>
CommitDate: Fri Mar 4 12:13:18 2016 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=774d82a7

grsecurity-3.1-4.4.4-201603032158

{4.4.3 => 4.4.4}/0000_README | 6 +-
4.4.4/1003_linux-4.4.4.patch | 13326 +++++++++++++++++++
.../4420_grsecurity-3.1-4.4.4-201603032158.patch | 1352 +-
{4.4.3 => 4.4.4}/4425_grsec_remove_EI_PAX.patch | 0
{4.4.3 => 4.4.4}/4427_force_XATTR_PAX_tmpfs.patch | 0
.../4430_grsec-remove-localversion-grsec.patch | 0
{4.4.3 => 4.4.4}/4435_grsec-mute-warnings.patch | 0
.../4440_grsec-remove-protected-paths.patch | 0
.../4450_grsec-kconfig-default-gids.patch | 0
.../4465_selinux-avc_audit-log-curr_ip.patch | 0
{4.4.3 => 4.4.4}/4470_disable-compat_vdso.patch | 0
{4.4.3 => 4.4.4}/4475_emutramp_default_on.patch | 0
12 files changed, 13987 insertions(+), 697 deletions(-)

diff --git a/4.4.3/0000_README b/4.4.4/0000_README
similarity index 92%
rename from 4.4.3/0000_README
rename to 4.4.4/0000_README
index 25f9ab4..5fcf793 100644
--- a/4.4.3/0000_README
+++ b/4.4.4/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.4.3-201602282149.patch
+Patch: 1003_linux-4.4.4.patch
+From: https://www.kernel.org/
+Desc: Linux 4.4.4
+
+Patch: 4420_grsecurity-3.1-4.4.4-201603032158.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/4.4.4/1003_linux-4.4.4.patch b/4.4.4/1003_linux-4.4.4.patch
new file mode 100644
index 0000000..57fd383
--- /dev/null
+++ b/4.4.4/1003_linux-4.4.4.patch
@@ -0,0 +1,13326 @@
+diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
+index c477af0..686a64b 100644
+--- a/Documentation/filesystems/efivarfs.txt
++++ b/Documentation/filesystems/efivarfs.txt
+@@ -14,3 +14,10 @@ filesystem.
+ efivarfs is typically mounted like this,
+
+ mount -t efivarfs none /sys/firmware/efi/efivars
++
++Due to the presence of numerous firmware bugs where removing non-standard
++UEFI variables causes the system firmware to fail to POST, efivarfs
++files that are not well-known standardized variables are created
++as immutable files. This doesn't prevent removal - "chattr -i" will work -
++but it does prevent this kind of failure from being accomplished
++accidentally.
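
As context for the documentation hunk above: "chattr -i" clears the FS_IMMUTABLE_FL inode flag through the standard file-flag ioctls, so the same effect can be achieved programmatically before deleting a variable. A minimal standalone sketch (not part of the patch; the variable name is a made-up example):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        /* Hypothetical efivarfs entry, for illustration only. */
        const char *path =
                "/sys/firmware/efi/efivars/Example-8be4df61-93ca-11d2-aa0d-00e098032b8c";
        int fd = open(path, O_RDONLY);
        int flags;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                flags &= ~FS_IMMUTABLE_FL;              /* what "chattr -i" does */
                if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
                        perror("FS_IOC_SETFLAGS");
        }
        close(fd);      /* unlink(path) would now be permitted */
        return 0;
}
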
+diff --git a/Makefile b/Makefile
+index 802be10..344bc6f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
+index 258b0e5..68b60923 100644
+--- a/arch/arc/include/asm/irqflags-arcv2.h
++++ b/arch/arc/include/asm/irqflags-arcv2.h
+@@ -22,6 +22,7 @@
+ #define AUX_IRQ_CTRL 0x00E
+ #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
+ #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
++#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+ #define AUX_IRQ_PRIORITY 0x206
+ #define ICAUSE 0x40a
+ #define AUX_IRQ_SELECT 0x40b
+@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+ }
+
++static inline void arc_softirq_trigger(int irq)
++{
++ write_aux_reg(AUX_IRQ_HINT, irq);
++}
++
++static inline void arc_softirq_clear(int irq)
++{
++ write_aux_reg(AUX_IRQ_HINT, 0);
++}
++
+ #else
+
+ .macro IRQ_DISABLE scratch
+diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
+index cbfec79..c126460 100644
+--- a/arch/arc/kernel/entry-arcv2.S
++++ b/arch/arc/kernel/entry-arcv2.S
+@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
+ VECTOR handle_interrupt ; (16) Timer0
+ VECTOR handle_interrupt ; unused (Timer1)
+ VECTOR handle_interrupt ; unused (WDT)
+-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt ; (23) End of fixed IRQs
++VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
++VECTOR handle_interrupt ; (20) perf Interrupt
++VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
++VECTOR handle_interrupt ; unused
++VECTOR handle_interrupt ; (23) unused
++# End of fixed IRQs
+
+ .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
+ VECTOR handle_interrupt
+@@ -211,7 +212,11 @@ debug_marker_syscall:
+ ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
+ ; entry was via Exception in DS which got preempted in kernel).
+ ;
+-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
++; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
++;
++; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
++; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
++
+ .Lintr_ret_to_delay_slot:
+ debug_marker_ds:
+
+@@ -222,18 +227,23 @@ debug_marker_ds:
+ ld r2, [sp, PT_ret]
+ ld r3, [sp, PT_status32]
+
++ ; STAT32 for Int return created from scratch
++ ; (No delay dlot, disable Further intr in trampoline)
++
+ bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
+ st r0, [sp, PT_status32]
+
+ mov r1, .Lintr_ret_to_delay_slot_2
+ st r1, [sp, PT_ret]
+
++ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
+ st r2, [sp, 0]
+ st r3, [sp, 4]
+
+ b .Lisr_ret_fast_path
+
+ .Lintr_ret_to_delay_slot_2:
++ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
+ sub sp, sp, SZ_PT_REGS
+ st r9, [sp, -4]
+
+@@ -243,11 +253,19 @@ debug_marker_ds:
+ ld r9, [sp, 4]
+ sr r9, [erstatus]
+
++ ; restore AUX_USER_SP if returning to U mode
++ bbit0 r9, STATUS_U_BIT, 1f
++ ld r9, [sp, PT_sp]
++ sr r9, [AUX_USER_SP]
++
++1:
+ ld r9, [sp, 8]
+ sr r9, [erbta]
+
+ ld r9, [sp, -4]
+ add sp, sp, SZ_PT_REGS
++
++ ; return from pure kernel mode to delay slot
+ rtie
+
+ END(ret_from_exception)
+diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
+index bd237ac..30d806c 100644
+--- a/arch/arc/kernel/mcip.c
++++ b/arch/arc/kernel/mcip.c
+@@ -11,9 +11,12 @@
+ #include <linux/smp.h>
+ #include <linux/irq.h>
+ #include <linux/spinlock.h>
++#include <asm/irqflags-arcv2.h>
+ #include <asm/mcip.h>
+ #include <asm/setup.h>
+
++#define SOFTIRQ_IRQ 21
++
+ static char smp_cpuinfo_buf[128];
+ static int idu_detected;
+
+@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
+ static void mcip_setup_per_cpu(int cpu)
+ {
+ smp_ipi_irq_setup(cpu, IPI_IRQ);
++ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+ }
+
+ static void mcip_ipi_send(int cpu)
+@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
+ unsigned long flags;
+ int ipi_was_pending;
+
++ /* ARConnect can only send IPI to others */
++ if (unlikely(cpu == raw_smp_processor_id())) {
++ arc_softirq_trigger(SOFTIRQ_IRQ);
++ return;
++ }
++
+ /*
+ * NOTE: We must spin here if the other cpu hasn't yet
+ * serviced a previous message. This can burn lots
+@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
+ unsigned long flags;
+ unsigned int __maybe_unused copy;
+
++ if (unlikely(irq == SOFTIRQ_IRQ)) {
++ arc_softirq_clear(irq);
++ return;
++ }
++
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ /* Who sent the IPI */
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index 259c0ca..ddbb361 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -162,10 +162,9 @@ choice
+ mobile SoCs in the Kona family of chips (e.g. bcm28155,
+ bcm11351, etc...)
+
+- config DEBUG_BCM63XX
++ config DEBUG_BCM63XX_UART
+ bool "Kernel low-level debugging on BCM63XX UART"
+ depends on ARCH_BCM_63XX
+- select DEBUG_UART_BCM63XX
+
+ config DEBUG_BERLIN_UART
+ bool "Marvell Berlin SoC Debug UART"
+@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
+ default "debug/vf.S" if DEBUG_VF_UART
+ default "debug/vt8500.S" if DEBUG_VT8500_UART0
+ default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+- default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
++ default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
+ default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ default "mach/debug-macro.S"
+
+@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
+ ARCH_IOP33X || ARCH_IXP4XX || \
+ ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+
+-# Compatibility options for BCM63xx
+-config DEBUG_UART_BCM63XX
+- def_bool ARCH_BCM_63XX
+-
+ config DEBUG_UART_PHYS
+ hex "Physical base address of debug UART"
+ default 0x00100a00 if DEBUG_NETX_UART
+@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
+ default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
+- default 0xfffe8600 if DEBUG_UART_BCM63XX
++ default 0xfffe8600 if DEBUG_BCM63XX_UART
+ default 0xfffff700 if ARCH_IOP33X
+ depends on ARCH_EP93XX || \
+ DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
+ DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
+ DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
+ DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
+ DEBUG_AT91_UART
+
+@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
+ default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0xfc40ab00 if DEBUG_BRCMSTB_UART
+ default 0xfc705000 if DEBUG_ZTE_ZX
+- default 0xfcfe8600 if DEBUG_UART_BCM63XX
++ default 0xfcfe8600 if DEBUG_BCM63XX_UART
+ default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ default 0xfd000000 if ARCH_SPEAR13XX
+ default 0xfd012000 if ARCH_MV78XX0
+@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
+ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
+ DEBUG_NETX_UART || \
+ DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+
+ config DEBUG_UART_8250_SHIFT
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 1afe246..b0c912fe 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -90,7 +90,7 @@
+ #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
+ #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
+ #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
+-#define PIN_PA15 14
++#define PIN_PA15 15
+ #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
+ #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
+ #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index 68ee3ce..b4c6d99 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -16,7 +16,7 @@
+
+ extern struct smp_operations psci_smp_ops;
+
+-#ifdef CONFIG_ARM_PSCI
++#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
+ bool psci_smp_available(void);
+ #else
+ static inline bool psci_smp_available(void) { return false; }
+diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
+index 0375c8c..9408a99 100644
+--- a/arch/arm/include/asm/xen/page-coherent.h
++++ b/arch/arm/include/asm/xen/page-coherent.h
+@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
++ unsigned long page_pfn = page_to_xen_pfn(page);
++ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
++ unsigned long compound_pages =
++ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
++ bool local = (page_pfn <= dev_pfn) &&
++ (dev_pfn - page_pfn < compound_pages);
++
+ /*
+- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+- * multiple Xen page, it's not possible to have a mix of local and
+- * foreign Xen page. So if the first xen_pfn == mfn the page is local
+- * otherwise it's a foreign page grant-mapped in dom0. If the page is
+- * local we can safely call the native dma_ops function, otherwise we
+- * call the xen specific function.
++ * Dom0 is mapped 1:1, while the Linux page can span across
++ * multiple Xen pages, it's not possible for it to contain a
++ * mix of local and foreign Xen pages. So if the first xen_pfn
++ * == mfn the page is local otherwise it's a foreign page
++ * grant-mapped in dom0. If the page is local we can safely
++ * call the native dma_ops function, otherwise we call the xen
++ * specific function.
+ */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
+index 7b76ce0..8633c70 100644
+--- a/arch/arm/mach-omap2/gpmc-onenand.c
++++ b/arch/arm/mach-omap2/gpmc-onenand.c
+@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
+
+ static void set_onenand_cfg(void __iomem *onenand_base)
+ {
+- u32 reg;
++ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
+ reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
+ ONENAND_SYS_CFG1_BL_16;
+ if (onenand_flags & ONENAND_FLAG_SYNCREAD)
+@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ else
+ reg &= ~ONENAND_SYS_CFG1_VHF;
++
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+ }
+
+@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
+ }
+ }
+
++ onenand_async.sync_write = true;
+ omap2_onenand_calc_async_timings(&t);
+
+ ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index cd822d8..b6c90e5 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils)
+ endif
+
+ KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
++KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
+ KBUILD_AFLAGS += $(lseinstr)
+
+ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 2046c02..21ed715 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -33,7 +33,7 @@
+ #define PAGE_SHIFT 16
+ #endif
+ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+-#define PAGE_MASK (~(PAGE_SIZE - 1))
++#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+ /*
+ * This is used for calculating the real page sizes
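
A plausible reading of the PAGE_MASK hunk above (an assumption on our part; the patch carries no changelog here) is sign extension: a plain-int mask sign-extends when combined with types wider than 32 bits, so high physical-address bits survive the masking, while the old unsigned-long form zero-extends on a 32-bit kernel and clears them. A host-side demonstration, with uint32_t standing in for a 32-bit kernel's unsigned long:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* PAGE_SHIFT = 16, as in the 64 kB page case above. */
        uint64_t phys = 0x10001f000ULL;         /* physical address above 4 GB */
        uint32_t mask_old = ~((1UL << 16) - 1); /* models unsigned long on 32-bit */
        int32_t mask_new = ~((1 << 16) - 1);    /* plain int, as in the fix */

        /* Zero-extension drops bit 32; sign-extension keeps it. */
        printf("old: %#llx\n", (unsigned long long)(phys & mask_old));
        printf("new: %#llx\n", (unsigned long long)(phys & (uint64_t)mask_new));
        return 0;
}

This prints old: 0x10000 but new: 0x100010000.
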
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 8957f15..18826aa 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ static inline pte_t pte_mkyoung(pte_t pte)
+ {
+ pte_val(pte) |= _PAGE_ACCESSED;
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ else
+@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ else
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 6499d93..47bc45a 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ if ((config_enabled(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+- (regs->regs[2] == __NR_syscall)) {
++ (regs->regs[2] == __NR_syscall))
+ i++;
+- n++;
+- }
+
+ while (n--)
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
+diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
+index bf9f1a7..a2631a5 100644
+--- a/arch/mips/loongson64/loongson-3/hpet.c
++++ b/arch/mips/loongson64/loongson-3/hpet.c
+@@ -13,6 +13,9 @@
+ #define SMBUS_PCI_REG64 0x64
+ #define SMBUS_PCI_REGB4 0xb4
+
++#define HPET_MIN_CYCLES 64
++#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
++
+ static DEFINE_SPINLOCK(hpet_lock);
+ DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+
+@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
+ cnt += delta;
+ hpet_write(HPET_T0_CMP, cnt);
+
+- res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
+- return res;
++ res = (int)(cnt - hpet_read(HPET_COUNTER));
++
++ return res < HPET_MIN_CYCLES ? -ETIME : 0;
+ }
+
+ static irqreturn_t hpet_irq_handler(int irq, void *data)
+@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, HPET_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+- cd->min_delta_ns = 5000;
++ cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
+
+ clockevents_register_device(cd);
+ setup_irq(HPET_T0_IRQ, &hpet_irq);
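
The hpet_next_event() change above flips the test from "has the counter already passed the compare value?" to "is there at least HPET_MIN_CYCLES of margin left?", which is wrap-safe and also rejects deltas too small to program reliably. A standalone sketch of that pattern (HPET_MIN_CYCLES mirrors the hunk; everything else is assumed):

#include <stdio.h>
#include <stdint.h>

#define HPET_MIN_CYCLES 64

/* Signed 32-bit distance handles counter wraparound correctly. */
static int hpet_event_ok(uint32_t cmp, uint32_t now)
{
        int32_t res = (int32_t)(cmp - now);

        return res < HPET_MIN_CYCLES ? -1 /* -ETIME: caller retries */ : 0;
}

int main(void)
{
        printf("%d\n", hpet_event_ok(100, 90));          /* 10 cycles left: too close */
        printf("%d\n", hpet_event_ok(1000, 90));         /* ample margin: ok */
        printf("%d\n", hpet_event_ok(200, 0xfffffff0u)); /* across the wrap: 216 left, ok */
        return 0;
}
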
+diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
+index 1a4738a..509832a9 100644
+--- a/arch/mips/loongson64/loongson-3/smp.c
++++ b/arch/mips/loongson64/loongson-3/smp.c
+@@ -30,13 +30,13 @@
+ #include "smp.h"
+
+ DEFINE_PER_CPU(int, cpu_state);
+-DEFINE_PER_CPU(uint32_t, core0_c0count);
+
+ static void *ipi_set0_regs[16];
+ static void *ipi_clear0_regs[16];
+ static void *ipi_status0_regs[16];
+ static void *ipi_en0_regs[16];
+ static void *ipi_mailbox_buf[16];
++static uint32_t core0_c0count[NR_CPUS];
+
+ /* read a 32bit value from ipi register */
+ #define loongson3_ipi_read32(addr) readl(addr)
+@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
+ if (action & SMP_ASK_C0COUNT) {
+ BUG_ON(cpu != 0);
+ c0count = read_c0_count();
+- for (i = 1; i < num_possible_cpus(); i++)
+- per_cpu(core0_c0count, i) = c0count;
++ c0count = c0count ? c0count : 1;
++ for (i = 1; i < nr_cpu_ids; i++)
++ core0_c0count[i] = c0count;
++ __wbflush(); /* Let others see the result ASAP */
+ }
+ }
+
+-#define MAX_LOOPS 1111
++#define MAX_LOOPS 800
+ /*
+ * SMP init and finish on secondary CPUs
+ */
+@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+
+ i = 0;
+- __this_cpu_write(core0_c0count, 0);
++ core0_c0count[cpu] = 0;
+ loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
+- while (!__this_cpu_read(core0_c0count)) {
++ while (!core0_c0count[cpu]) {
+ i++;
+ cpu_relax();
+ }
+
+ if (i > MAX_LOOPS)
+ i = MAX_LOOPS;
+- initcount = __this_cpu_read(core0_c0count) + i;
++ if (cpu_data[cpu].package)
++ initcount = core0_c0count[cpu] + i;
++ else /* Local access is faster for loops */
++ initcount = core0_c0count[cpu] + i/2;
++
+ write_c0_count(initcount);
+ }
+
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 32e0be2..29f73e0 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+ #endif
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_rixi) {
+ #ifdef _PAGE_NO_EXEC_SHIFT
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index f69ecaa..52c1e27 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
+ eeh_pcid_put(dev);
+ if (driver->err_handler &&
+ driver->err_handler->error_detected &&
+- driver->err_handler->slot_reset &&
+- driver->err_handler->resume)
++ driver->err_handler->slot_reset)
+ return NULL;
+ }
+
+diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
+index 2559b16..17d9dcd 100644
+--- a/arch/s390/include/asm/fpu/internal.h
++++ b/arch/s390/include/asm/fpu/internal.h
+@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
+ fpregs->pad = 0;
++ fpregs->fpc = fpu->fpc;
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+ else
+@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+
+ static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
++ fpu->fpc = fpregs->fpc;
+ if (MACHINE_HAS_VX)
+ convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+ else
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index efaac2c..e9a983f 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
+ struct kvm_s390_sie_block *sie_block;
+ unsigned int host_acrs[NUM_ACRS];
+ struct fpu host_fpregs;
+- struct fpu guest_fpregs;
+ struct kvm_s390_local_interrupt local_int;
+ struct hrtimer ckc_timer;
+ struct kvm_s390_pgm_info pgm;
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 9cd248f..dc6c9c6 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -181,6 +181,7 @@ int main(void)
+ OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
+ OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
+ OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
++ OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
+ OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
+ OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
+ OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 66c9441..4af6037 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
+
+ /* Restore high gprs from signal stack */
+ if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+- sizeof(&sregs_ext->gprs_high)))
++ sizeof(sregs_ext->gprs_high)))
+ return -EFAULT;
+ for (i = 0; i < NUM_GPRS; i++)
+ *(__u32 *)&regs->gprs[i] = gprs_high[i];
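
The one-character compat_signal.c fix above is the classic sizeof-of-a-pointer bug: sizeof(&sregs_ext->gprs_high) is the size of a pointer (8 bytes on s390x), not of the array, so the copy was silently truncated. In miniature:

#include <stdio.h>

int main(void)
{
        unsigned int gprs_high[16];

        /* On a 64-bit build: 8 (pointer) vs 64 (whole array). */
        printf("sizeof(&gprs_high) = %zu\n", sizeof(&gprs_high));
        printf("sizeof(gprs_high)  = %zu\n", sizeof(gprs_high));
        return 0;
}
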
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 8465892..a08d0af 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+-/*
+- * Backs up the current FP/VX register save area on a particular
+- * destination. Used to switch between different register save
+- * areas.
+- */
+-static inline void save_fpu_to(struct fpu *dst)
+-{
+- dst->fpc = current->thread.fpu.fpc;
+- dst->regs = current->thread.fpu.regs;
+-}
+-
+-/*
+- * Switches the FP/VX register save area from which to lazy
+- * restore register contents.
+- */
+-static inline void load_fpu_from(struct fpu *from)
+-{
+- current->thread.fpu.fpc = from->fpc;
+- current->thread.fpu.regs = from->regs;
+-}
+-
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ /* Save host register state */
+ save_fpu_regs();
+- save_fpu_to(&vcpu->arch.host_fpregs);
+-
+- if (test_kvm_facility(vcpu->kvm, 129)) {
+- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+- /*
+- * Use the register save area in the SIE-control block
+- * for register restore and save in kvm_arch_vcpu_put()
+- */
+- current->thread.fpu.vxrs =
+- (__vector128 *)&vcpu->run->s.regs.vrs;
+- } else
+- load_fpu_from(&vcpu->arch.guest_fpregs);
++ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
++ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+
++ /* Depending on MACHINE_HAS_VX, data stored to vrs either
++ * has vector register or floating point register format.
++ */
++ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
++ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ if (test_fp_ctl(current->thread.fpu.fpc))
+ /* User space provided an invalid FPC, let's clear it */
+ current->thread.fpu.fpc = 0;
+@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ gmap_disable(vcpu->arch.gmap);
+
++ /* Save guest register state */
+ save_fpu_regs();
++ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+
+- if (test_kvm_facility(vcpu->kvm, 129))
+- /*
+- * kvm_arch_vcpu_load() set up the register save area to
+- * the &vcpu->run->s.regs.vrs and, thus, the vector registers
+- * are already saved. Only the floating-point control must be
+- * copied.
+- */
+- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+- else
+- save_fpu_to(&vcpu->arch.guest_fpregs);
+- load_fpu_from(&vcpu->arch.host_fpregs);
++ /* Restore host register state */
++ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
++ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ restore_access_regs(vcpu->arch.host_acrs);
+@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+ memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+ vcpu->arch.sie_block->gcr[0] = 0xE0UL;
+ vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+- vcpu->arch.guest_fpregs.fpc = 0;
+- asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
++ /* make sure the new fpc will be lazily loaded */
++ save_fpu_regs();
++ current->thread.fpu.fpc = 0;
+ vcpu->arch.sie_block->gbea = 1;
+ vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ vcpu->arch.local_int.wq = &vcpu->wq;
+ vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+
+- /*
+- * Allocate a save area for floating-point registers. If the vector
+- * extension is available, register contents are saved in the SIE
+- * control block. The allocated save area is still required in
+- * particular places, for example, in kvm_s390_vcpu_store_status().
+- */
+- vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
+- GFP_KERNEL);
+- if (!vcpu->arch.guest_fpregs.fprs) {
+- rc = -ENOMEM;
+- goto out_free_sie_block;
+- }
+-
+ rc = kvm_vcpu_init(vcpu, kvm, id);
+ if (rc)
+ goto out_free_sie_block;
+@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
++ /* make sure the new values will be lazily loaded */
++ save_fpu_regs();
+ if (test_fp_ctl(fpu->fpc))
+ return -EINVAL;
+- memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+- vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+- save_fpu_regs();
+- load_fpu_from(&vcpu->arch.guest_fpregs);
++ current->thread.fpu.fpc = fpu->fpc;
++ if (MACHINE_HAS_VX)
++ convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
++ else
++ memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ return 0;
+ }
+
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+- memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+- fpu->fpc = vcpu->arch.guest_fpregs.fpc;
++ /* make sure we have the latest values */
++ save_fpu_regs();
++ if (MACHINE_HAS_VX)
++ convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
++ else
++ memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
++ fpu->fpc = current->thread.fpu.fpc;
+ return 0;
+ }
+
+@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
+ {
+ unsigned char archmode = 1;
++ freg_t fprs[NUM_FPRS];
+ unsigned int px;
+ u64 clkcomp;
+ int rc;
+
++ px = kvm_s390_get_prefix(vcpu);
+ if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
+ if (write_guest_abs(vcpu, 163, &archmode, 1))
+ return -EFAULT;
+- gpa = SAVE_AREA_BASE;
++ gpa = 0;
+ } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
+ if (write_guest_real(vcpu, 163, &archmode, 1))
+ return -EFAULT;
+- gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
++ gpa = px;
++ } else
++ gpa -= __LC_FPREGS_SAVE_AREA;
++
++ /* manually convert vector registers if necessary */
++ if (MACHINE_HAS_VX) {
++ convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
++ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++ fprs, 128);
++ } else {
++ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++ vcpu->run->s.regs.vrs, 128);
+ }
+- rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
+- vcpu->arch.guest_fpregs.fprs, 128);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
+ vcpu->run->s.regs.gprs, 128);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
++ rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
+ &vcpu->arch.sie_block->gpsw, 16);
+- px = kvm_s390_get_prefix(vcpu);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
++ rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
+ &px, 4);
+- rc |= write_guest_abs(vcpu,
+- gpa + offsetof(struct save_area, fp_ctrl_reg),
+- &vcpu->arch.guest_fpregs.fpc, 4);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
++ rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
++ &vcpu->run->s.regs.fpc, 4);
++ rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
+ &vcpu->arch.sie_block->todpr, 4);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
+ &vcpu->arch.sie_block->cputm, 8);
+ clkcomp = vcpu->arch.sie_block->ckc >> 8;
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
+ &clkcomp, 8);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
+ &vcpu->run->s.regs.acrs, 64);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
+ &vcpu->arch.sie_block->gcr, 128);
+ return rc ? -EFAULT : 0;
+ }
+@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ * it into the save area
+ */
+ save_fpu_regs();
+- if (test_kvm_facility(vcpu->kvm, 129)) {
+- /*
+- * If the vector extension is available, the vector registers
+- * which overlaps with floating-point registers are saved in
+- * the SIE-control block. Hence, extract the floating-point
+- * registers and the FPC value and store them in the
+- * guest_fpregs structure.
+- */
+- vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
+- convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
+- current->thread.fpu.vxrs);
+- } else
+- save_fpu_to(&vcpu->arch.guest_fpregs);
++ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ save_access_regs(vcpu->run->s.regs.acrs);
+
+ return kvm_s390_store_status_unloaded(vcpu, addr);
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88..18c8b81 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ int i;
+
+ /* Normalize entries to being relative to the start of the section */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn += i;
++ p->fixup += i + 4;
++ }
+ sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ /* Denormalize all entries */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn -= i;
++ p->fixup -= i + 4;
++ }
+ }
+
+ #ifdef CONFIG_MODULES
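
The extable.c fix adds the fixup adjustment because each s390 exception-table entry holds two self-relative 32-bit offsets, insn at byte 0 and fixup at byte 4 of an 8-byte entry; sorting moves entries, so both fields must be rebased, not just insn. A minimal model of the normalize/denormalize idea (the struct and offsets are assumptions for illustration, not kernel code):

#include <stdio.h>
#include <stdint.h>

struct extable_entry {
        int32_t insn;   /* target = (address of this field) + insn */
        int32_t fixup;  /* target = (address of this field) + fixup */
};

/* Rebase both fields to be relative to the table start so entries can
 * be sorted; the denormalize pass subtracts the same amounts again. */
static void normalize(struct extable_entry *start, struct extable_entry *finish)
{
        struct extable_entry *p;
        int i;

        for (p = start, i = 0; p < finish; p++, i += 8) {
                p->insn += i;           /* field lives at entry offset 0 */
                p->fixup += i + 4;      /* field lives at entry offset 4 */
        }
}

int main(void)
{
        struct extable_entry tab[2] = { { 100, 200 }, { 50, 60 } };

        normalize(tab, tab + 2);
        printf("%d %d\n", tab[1].insn, tab[1].fixup);   /* 58 72 */
        return 0;
}
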
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb..c690c8e 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -413,7 +413,7 @@ out:
+
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+- int ret;
++ long ret;
+
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
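
Widening ret from int to long matters because the syscall returns the previous personality as a full register value; with an int, a result such as 0xffffffff (the usual query idiom is personality(0xffffffff)) sign-extends and is indistinguishable from an error return. A demonstration of the truncation on an LP64 host (the failure mode is inferred, not spelled out in the hunk):

#include <stdio.h>

int main(void)
{
        long raw = 0xffffffffL; /* e.g. a personality query result */
        int truncated = (int)raw;

        /* Prints 4294967295 vs -1: the int form looks like an errno. */
        printf("long: %ld, int: %d\n", raw, truncated);
        return 0;
}
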
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 47f1ff0..22a358e 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
+ {
+ int pid, n, status;
+
++ fflush(stdout);
++
+ pid = fork();
+ if (pid == 0)
+ ptrace_child();
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 6a1ae37..15cfeba 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
+ * Interrupts are off on entry.
+ */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++ ASM_CLAC /* Do this early to minimize exposure */
+ SWAPGS
+
+ /*
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 881b476..e7de5c9 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
+
+ #define __ARCH_HAS_DO_SOFTIRQ
+
++struct irq_desc;
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
+ extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+-extern void irq_force_complete_move(int);
++extern void irq_force_complete_move(struct irq_desc *desc);
+ #endif
+
+ #ifdef CONFIG_HAVE_KVM
+@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+ extern void (*x86_platform_ipi_callback)(void);
+ extern void native_init_IRQ(void);
+
+-struct irq_desc;
+ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+
+ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index f253218..fdb0fbf 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
+ {
+ int pin, ioapic, irq, irq_entry;
+ const struct cpumask *mask;
++ struct irq_desc *desc;
+ struct irq_data *idata;
+ struct irq_chip *chip;
+
+@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
+ if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
+ continue;
+
+- idata = irq_get_irq_data(irq);
++ desc = irq_to_desc(irq);
++ raw_spin_lock_irq(&desc->lock);
++ idata = irq_desc_get_irq_data(desc);
+
+ /*
+ * Honour affinities which have been set in early boot
+@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
+ /* Might be lapic_chip for irq 0 */
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(idata, mask, false);
++ raw_spin_unlock_irq(&desc->lock);
+ }
+ }
+ #endif
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 861bc59..a35f6b5 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -30,7 +30,7 @@ struct apic_chip_data {
+
+ struct irq_domain *x86_vector_domain;
+ static DEFINE_RAW_SPINLOCK(vector_lock);
+-static cpumask_var_t vector_cpumask;
++static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
+ static struct irq_chip lapic_controller;
+ #ifdef CONFIG_X86_IO_APIC
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+- int cpu, err;
++ int cpu, vector;
+
+- if (d->move_in_progress)
++ /*
++ * If there is still a move in progress or the previous move has not
++ * been cleaned up completely, tell the caller to come back later.
++ */
++ if (d->move_in_progress ||
++ cpumask_intersects(d->old_domain, cpu_online_mask))
+ return -EBUSY;
+
+ /* Only try and allocate irqs on cpus that are present */
+- err = -ENOSPC;
+ cpumask_clear(d->old_domain);
++ cpumask_clear(searched_cpumask);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+- int new_cpu, vector, offset;
++ int new_cpu, offset;
+
++ /* Get the possible target cpus for @mask/@cpu from the apic */
+ apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+
++ /*
++ * Clear the offline cpus from @vector_cpumask for searching
++ * and verify whether the result overlaps with @mask. If true,
++ * then the call to apic->cpu_mask_to_apicid_and() will
++ * succeed as well. If not, no point in trying to find a
++ * vector in this mask.
++ */
++ cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
++ if (!cpumask_intersects(vector_searchmask, mask))
++ goto next_cpu;
++
+ if (cpumask_subset(vector_cpumask, d->domain)) {
+- err = 0;
+ if (cpumask_equal(vector_cpumask, d->domain))
+- break;
++ goto success;
+ /*
+- * New cpumask using the vector is a proper subset of
+- * the current in use mask. So cleanup the vector
+- * allocation for the members that are not used anymore.
++ * Mark the cpus which are not longer in the mask for
++ * cleanup.
+ */
+- cpumask_andnot(d->old_domain, d->domain,
+- vector_cpumask);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- cpumask_and(d->domain, d->domain, vector_cpumask);
+- break;
++ cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
++ vector = d->cfg.vector;
++ goto update;
+ }
+
+ vector = current_vector;
+@@ -156,45 +168,60 @@ next:
+ vector = FIRST_EXTERNAL_VECTOR + offset;
+ }
+
+- if (unlikely(current_vector == vector)) {
+- cpumask_or(d->old_domain, d->old_domain,
+- vector_cpumask);
+- cpumask_andnot(vector_cpumask, mask, d->old_domain);
+- cpu = cpumask_first_and(vector_cpumask,
+- cpu_online_mask);
+- continue;
+- }
++ /* If the search wrapped around, try the next cpu */
++ if (unlikely(current_vector == vector))
++ goto next_cpu;
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
++ for_each_cpu(new_cpu, vector_searchmask) {
+ if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
+ goto next;
+ }
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+- if (d->cfg.vector) {
++ /* Schedule the old vector for cleanup on all cpus */
++ if (d->cfg.vector)
+ cpumask_copy(d->old_domain, d->domain);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- }
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
++ for_each_cpu(new_cpu, vector_searchmask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+- d->cfg.vector = vector;
+- cpumask_copy(d->domain, vector_cpumask);
+- err = 0;
+- break;
+- }
++ goto update;
+
+- if (!err) {
+- /* cache destination APIC IDs into cfg->dest_apicid */
+- err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+- &d->cfg.dest_apicid);
++next_cpu:
++ /*
++ * We exclude the current @vector_cpumask from the requested
++ * @mask and try again with the next online cpu in the
++ * result. We cannot modify @mask, so we use @vector_cpumask
++ * as a temporary buffer here as it will be reassigned when
++ * calling apic->vector_allocation_domain() above.
++ */
++ cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
++ cpumask_andnot(vector_cpumask, mask, searched_cpumask);
++ cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
++ continue;
+ }
++ return -ENOSPC;
+
+- return err;
++update:
++ /*
++ * Exclude offline cpus from the cleanup mask and set the
++ * move_in_progress flag when the result is not empty.
++ */
++ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
++ d->move_in_progress = !cpumask_empty(d->old_domain);
++ d->cfg.vector = vector;
++ cpumask_copy(d->domain, vector_cpumask);
++success:
++ /*
++ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
++ * as we already established, that mask & d->domain & cpu_online_mask
++ * is not empty.
++ */
++ BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
++ &d->cfg.dest_apicid));
++ return 0;
+ }
+
+ static int assign_irq_vector(int irq, struct apic_chip_data *data,
+@@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
+ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ {
+ struct irq_desc *desc;
+- unsigned long flags;
+ int cpu, vector;
+
+- raw_spin_lock_irqsave(&vector_lock, flags);
+ BUG_ON(!data->cfg.vector);
+
+ vector = data->cfg.vector;
+@@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ data->cfg.vector = 0;
+ cpumask_clear(data->domain);
+
+- if (likely(!data->move_in_progress)) {
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
++ /*
++ * If move is in progress or the old_domain mask is not empty,
++ * i.e. the cleanup IPI has not been processed yet, we need to remove
++ * the old references to desc from all cpus vector tables.
++ */
++ if (!data->move_in_progress && cpumask_empty(data->old_domain))
+ return;
+- }
+
+ desc = irq_to_desc(irq);
+ for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
+@@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ }
+ }
+ data->move_in_progress = 0;
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
+ }
+
+ void init_irq_alloc_info(struct irq_alloc_info *info,
+@@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
+ static void x86_vector_free_irqs(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+ {
++ struct apic_chip_data *apic_data;
+ struct irq_data *irq_data;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+ if (irq_data && irq_data->chip_data) {
++ raw_spin_lock_irqsave(&vector_lock, flags);
+ clear_irq_vector(virq + i, irq_data->chip_data);
+- free_apic_chip_data(irq_data->chip_data);
++ apic_data = irq_data->chip_data;
++ irq_domain_reset_irq_data(irq_data);
++ raw_spin_unlock_irqrestore(&vector_lock, flags);
++ free_apic_chip_data(apic_data);
+ #ifdef CONFIG_X86_IO_APIC
+ if (virq + i < nr_legacy_irqs())
+ legacy_irq_data[virq + i] = NULL;
+ #endif
+- irq_domain_reset_irq_data(irq_data);
+ }
+ }
+ }
+@@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
+ arch_init_htirq_domain(x86_vector_domain);
+
+ BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+
+ return arch_early_ioapic_init();
+ }
+@@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, data, dest);
+- if (err) {
+- if (assign_irq_vector(irq, data,
+- irq_data_get_affinity_mask(irq_data)))
+- pr_err("Failed to recover vector for irq %d\n", irq);
+- return err;
+- }
+-
+- return IRQ_SET_MASK_OK;
++ return err ? err : IRQ_SET_MASK_OK;
+ }
+
+ static struct irq_chip lapic_controller = {
+@@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
+ #ifdef CONFIG_SMP
+ static void __send_cleanup_vector(struct apic_chip_data *data)
+ {
+- cpumask_var_t cleanup_mask;
+-
+- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+- unsigned int i;
+-
+- for_each_cpu_and(i, data->old_domain, cpu_online_mask)
+- apic->send_IPI_mask(cpumask_of(i),
+- IRQ_MOVE_CLEANUP_VECTOR);
+- } else {
+- cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
+- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+- free_cpumask_var(cleanup_mask);
+- }
++ raw_spin_lock(&vector_lock);
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+ data->move_in_progress = 0;
++ if (!cpumask_empty(data->old_domain))
++ apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
++ raw_spin_unlock(&vector_lock);
+ }
+
+ void send_cleanup_vector(struct irq_cfg *cfg)
+@@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ goto unlock;
+
+ /*
+- * Check if the irq migration is in progress. If so, we
+- * haven't received the cleanup request yet for this irq.
++ * Nothing to cleanup if irq migration is in progress
++ * or this cpu is not set in the cleanup mask.
+ */
+- if (data->move_in_progress)
++ if (data->move_in_progress ||
++ !cpumask_test_cpu(me, data->old_domain))
+ goto unlock;
+
++ /*
++ * We have two cases to handle here:
++ * 1) vector is unchanged but the target mask got reduced
++ * 2) vector and the target mask has changed
++ *
++ * #1 is obvious, but in #2 we have two vectors with the same
++ * irq descriptor: the old and the new vector. So we need to
++ * make sure that we only cleanup the old vector. The new
++ * vector has the current @vector number in the config and
++ * this cpu is part of the target mask. We better leave that
++ * one alone.
++ */
+ if (vector == data->cfg.vector &&
+ cpumask_test_cpu(me, data->domain))
+ goto unlock;
+@@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ goto unlock;
+ }
+ __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++ cpumask_clear_cpu(me, data->old_domain);
+ unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+@@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
+ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+ }
+
+-void irq_force_complete_move(int irq)
++/*
++ * Called with @desc->lock held and interrupts disabled.
++ */
++void irq_force_complete_move(struct irq_desc *desc)
+ {
+- struct irq_cfg *cfg = irq_cfg(irq);
++ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
++ struct apic_chip_data *data = apic_chip_data(irqdata);
++ struct irq_cfg *cfg = data ? &data->cfg : NULL;
+
+- if (cfg)
+- __irq_complete_move(cfg, cfg->vector);
++ if (!cfg)
++ return;
++
++ __irq_complete_move(cfg, cfg->vector);
++
++ /*
++ * This is tricky. If the cleanup of @data->old_domain has not been
++ * done yet, then the following setaffinity call will fail with
++ * -EBUSY. This can leave the interrupt in a stale state.
++ *
++ * The cleanup cannot make progress because we hold @desc->lock. So in
++ * case @data->old_domain is not yet cleaned up, we need to drop the
++ * lock and acquire it again. @desc cannot go away, because the
++ * hotplug code holds the sparse irq lock.
++ */
++ raw_spin_lock(&vector_lock);
++ /* Clean out all offline cpus (including ourself) first. */
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
++ while (!cpumask_empty(data->old_domain)) {
++ raw_spin_unlock(&vector_lock);
++ raw_spin_unlock(&desc->lock);
++ cpu_relax();
++ raw_spin_lock(&desc->lock);
++ /*
++ * Reevaluate apic_chip_data. It might have been cleared after
++ * we dropped @desc->lock.
++ */
++ data = apic_chip_data(irqdata);
++ if (!data)
++ return;
++ raw_spin_lock(&vector_lock);
++ }
++ raw_spin_unlock(&vector_lock);
+ }
+ #endif
+
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index f8062aa..61521dc 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -462,7 +462,7 @@ void fixup_irqs(void)
+ * non intr-remapping case, we can't wait till this interrupt
+ * arrives at this cpu before completing the irq move.
+ */
+- irq_force_complete_move(irq);
++ irq_force_complete_move(desc);
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ break_affinity = 1;
+@@ -470,6 +470,15 @@ void fixup_irqs(void)
+ }
+
+ chip = irq_data_get_irq_chip(data);
++ /*
++ * The interrupt descriptor might have been cleaned up
++ * already, but it is not yet removed from the radix tree
++ */
++ if (!chip) {
++ raw_spin_unlock(&desc->lock);
++ continue;
++ }
++
+ if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
+ chip->irq_mask(data);
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 1505587..b9b09fe 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ u16 sel;
+
+ la = seg_base(ctxt, addr.seg) + addr.ea;
+- *linear = la;
+ *max_size = 0;
+ switch (mode) {
+ case X86EMUL_MODE_PROT64:
++ *linear = la;
+ if (is_noncanonical_address(la))
+ goto bad;
+
+@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ goto bad;
+ break;
+ default:
++ *linear = la = (u32)la;
+ usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+ addr.seg);
+ if (!usable)
+@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ if (size > *max_size)
+ goto bad;
+ }
+- la &= (u32)-1;
+ break;
+ }
+ if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 3058a22..7be8a25 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ return ret;
+
+ kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
+- walker->ptes[level] = pte;
++ walker->ptes[level - 1] = pte;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9a2ed89..6ef3856 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ }
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index b2fd67d..ef05755 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ break;
+ }
+
+- if (regno > nr_registers) {
++ if (regno >= nr_registers) {
+ WARN_ONCE(1, "decoded an instruction with an invalid register");
+ return -EINVAL;
+ }
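
The mpx.c change from > to >= is a textbook off-by-one bounds check: an array with nr_registers entries has valid indices 0 through nr_registers - 1, so regno == nr_registers must be rejected too. In miniature (names are illustrative):

#include <stdio.h>

#define NR_REGISTERS 16

static const int regoff[NR_REGISTERS];

static int get_reg_offset(int regno)
{
        if (regno >= NR_REGISTERS)      /* ">" would let regno == 16 through */
                return -1;
        return regoff[regno];
}

int main(void)
{
        printf("%d\n", get_reg_offset(16));     /* -1: rejected after the fix */
        return 0;
}
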
1460 +diff --git a/block/bio.c b/block/bio.c
1461 +index 4f184d9..d4d1443 100644
1462 +--- a/block/bio.c
1463 ++++ b/block/bio.c
1464 +@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
1465 + if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1466 + /*
1467 + * if we're in a workqueue, the request is orphaned, so
1468 +- * don't copy into a random user address space, just free.
1469 ++ * don't copy into a random user address space, just free
1470 ++ * and return -EINTR so user space doesn't expect any data.
1471 + */
1472 +- if (current->mm && bio_data_dir(bio) == READ)
1473 ++ if (!current->mm)
1474 ++ ret = -EINTR;
1475 ++ else if (bio_data_dir(bio) == READ)
1476 + ret = bio_copy_to_iter(bio, bmd->iter);
1477 + if (bmd->is_our_pages)
1478 + bio_free_pages(bio);
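The bio_uncopy_user() hunk separates the orphaned-request case (no current->mm, i.e. completion runs from a workqueue) from the normal path: the pages are still freed, but the caller now gets -EINTR so user space does not assume data arrived. As a sketch, the decision reduces to:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { READ_DIR, WRITE_DIR };  /* stand-in for bio_data_dir() */

static int uncopy(bool has_mm, int dir)
{
    int ret = 0;

    if (!has_mm)
        ret = -EINTR;   /* orphaned: free only, report no data */
    else if (dir == READ_DIR)
        ret = 0;        /* copy back to the user iterator */
    return ret;         /* pages freed in both cases */
}

int main(void)
{
    printf("%d %d\n", uncopy(false, READ_DIR), uncopy(true, READ_DIR));
    return 0;
}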
1479 +diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1480 +index 3405f7a..5fdac39 100644
1481 +--- a/drivers/acpi/acpi_video.c
1482 ++++ b/drivers/acpi/acpi_video.c
1483 +@@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = {
1484 + * as brightness control does not work.
1485 + */
1486 + {
1487 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
1488 ++ .callback = video_disable_backlight_sysfs_if,
1489 ++ .ident = "Toshiba Portege R700",
1490 ++ .matches = {
1491 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1492 ++ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
1493 ++ },
1494 ++ },
1495 ++ {
1496 + /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
1497 + .callback = video_disable_backlight_sysfs_if,
1498 + .ident = "Toshiba Portege R830",
1499 +@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
1500 + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
1501 + },
1502 + },
1503 ++ {
1504 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
1505 ++ .callback = video_disable_backlight_sysfs_if,
1506 ++ .ident = "Toshiba Satellite R830",
1507 ++ .matches = {
1508 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1509 ++ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
1510 ++ },
1511 ++ },
1512 + /*
1513 + * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
1514 + * but the IDs actually follow the Device ID Scheme.
1515 +diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
1516 +index aa45d48..11d8209 100644
1517 +--- a/drivers/acpi/nfit.c
1518 ++++ b/drivers/acpi/nfit.c
1519 +@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
1520 + nfit_mem->bdw = NULL;
1521 + }
1522 +
1523 +-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1524 ++static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
1525 + struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
1526 + {
1527 + u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
1528 + struct nfit_memdev *nfit_memdev;
1529 + struct nfit_flush *nfit_flush;
1530 +- struct nfit_dcr *nfit_dcr;
1531 + struct nfit_bdw *nfit_bdw;
1532 + struct nfit_idt *nfit_idt;
1533 + u16 idt_idx, range_index;
1534 +
1535 +- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1536 +- if (nfit_dcr->dcr->region_index != dcr)
1537 +- continue;
1538 +- nfit_mem->dcr = nfit_dcr->dcr;
1539 +- break;
1540 +- }
1541 +-
1542 +- if (!nfit_mem->dcr) {
1543 +- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
1544 +- spa->range_index, __to_nfit_memdev(nfit_mem)
1545 +- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
1546 +- return -ENODEV;
1547 +- }
1548 +-
1549 +- /*
1550 +- * We've found enough to create an nvdimm, optionally
1551 +- * find an associated BDW
1552 +- */
1553 +- list_add(&nfit_mem->list, &acpi_desc->dimms);
1554 +-
1555 + list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
1556 + if (nfit_bdw->bdw->region_index != dcr)
1557 + continue;
1558 +@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1559 + }
1560 +
1561 + if (!nfit_mem->bdw)
1562 +- return 0;
1563 ++ return;
1564 +
1565 + nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
1566 +
1567 + if (!nfit_mem->spa_bdw)
1568 +- return 0;
1569 ++ return;
1570 +
1571 + range_index = nfit_mem->spa_bdw->range_index;
1572 + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1573 +@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1574 + }
1575 + break;
1576 + }
1577 +-
1578 +- return 0;
1579 + }
1580 +
1581 + static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1582 +@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1583 + struct nfit_mem *nfit_mem, *found;
1584 + struct nfit_memdev *nfit_memdev;
1585 + int type = nfit_spa_type(spa);
1586 +- u16 dcr;
1587 +
1588 + switch (type) {
1589 + case NFIT_SPA_DCR:
1590 +@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1591 + }
1592 +
1593 + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1594 +- int rc;
1595 ++ struct nfit_dcr *nfit_dcr;
1596 ++ u32 device_handle;
1597 ++ u16 dcr;
1598 +
1599 + if (nfit_memdev->memdev->range_index != spa->range_index)
1600 + continue;
1601 + found = NULL;
1602 + dcr = nfit_memdev->memdev->region_index;
1603 ++ device_handle = nfit_memdev->memdev->device_handle;
1604 + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1605 +- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
1606 ++ if (__to_nfit_memdev(nfit_mem)->device_handle
1607 ++ == device_handle) {
1608 + found = nfit_mem;
1609 + break;
1610 + }
1611 +@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1612 + if (!nfit_mem)
1613 + return -ENOMEM;
1614 + INIT_LIST_HEAD(&nfit_mem->list);
1615 ++ list_add(&nfit_mem->list, &acpi_desc->dimms);
1616 ++ }
1617 ++
1618 ++ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1619 ++ if (nfit_dcr->dcr->region_index != dcr)
1620 ++ continue;
1621 ++ /*
1622 ++ * Record the control region for the dimm. For
1623 ++ * the ACPI 6.1 case, where there are separate
1624 ++ * control regions for the pmem vs blk
1625 ++ * interfaces, be sure to record the extended
1626 ++ * blk details.
1627 ++ */
1628 ++ if (!nfit_mem->dcr)
1629 ++ nfit_mem->dcr = nfit_dcr->dcr;
1630 ++ else if (nfit_mem->dcr->windows == 0
1631 ++ && nfit_dcr->dcr->windows)
1632 ++ nfit_mem->dcr = nfit_dcr->dcr;
1633 ++ break;
1634 ++ }
1635 ++
1636 ++ if (dcr && !nfit_mem->dcr) {
1637 ++ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
1638 ++ spa->range_index, dcr);
1639 ++ return -ENODEV;
1640 + }
1641 +
1642 + if (type == NFIT_SPA_DCR) {
1643 +@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1644 + nfit_mem->idt_dcr = nfit_idt->idt;
1645 + break;
1646 + }
1647 ++ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
1648 + } else {
1649 + /*
1650 + * A single dimm may belong to multiple SPA-PM
1651 +@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1652 + */
1653 + nfit_mem->memdev_pmem = nfit_memdev->memdev;
1654 + }
1655 +-
1656 +- if (found)
1657 +- continue;
1658 +-
1659 +- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
1660 +- if (rc)
1661 +- return rc;
1662 + }
1663 +
1664 + return 0;
1665 +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1666 +index daaf1c4..80e55cb 100644
1667 +--- a/drivers/acpi/video_detect.c
1668 ++++ b/drivers/acpi/video_detect.c
1669 +@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
1670 + DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
1671 + },
1672 + },
1673 +- {
1674 +- .callback = video_detect_force_vendor,
1675 +- .ident = "Dell Inspiron 5737",
1676 +- .matches = {
1677 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1678 +- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
1679 +- },
1680 +- },
1681 +
1682 + /*
1683 + * These models have a working acpi_video backlight control, and using
1684 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1685 +index a39e85f..7d00b7a 100644
1686 +--- a/drivers/android/binder.c
1687 ++++ b/drivers/android/binder.c
1688 +@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
1689 + if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1690 + return -EFAULT;
1691 +
1692 +- ptr += sizeof(void *);
1693 ++ ptr += sizeof(cookie);
1694 + list_for_each_entry(w, &proc->delivered_death, entry) {
1695 + struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
1696 +
1697 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1698 +index cdf6215..7dbba38 100644
1699 +--- a/drivers/ata/libata-sff.c
1700 ++++ b/drivers/ata/libata-sff.c
1701 +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
1702 + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1703 + {
1704 + struct ata_port *ap = qc->ap;
1705 +- unsigned long flags;
1706 +
1707 + if (ap->ops->error_handler) {
1708 + if (in_wq) {
1709 +- spin_lock_irqsave(ap->lock, flags);
1710 +-
1711 + /* EH might have kicked in while host lock is
1712 + * released.
1713 + */
1714 +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1715 + } else
1716 + ata_port_freeze(ap);
1717 + }
1718 +-
1719 +- spin_unlock_irqrestore(ap->lock, flags);
1720 + } else {
1721 + if (likely(!(qc->err_mask & AC_ERR_HSM)))
1722 + ata_qc_complete(qc);
1723 +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1724 + }
1725 + } else {
1726 + if (in_wq) {
1727 +- spin_lock_irqsave(ap->lock, flags);
1728 + ata_sff_irq_on(ap);
1729 + ata_qc_complete(qc);
1730 +- spin_unlock_irqrestore(ap->lock, flags);
1731 + } else
1732 + ata_qc_complete(qc);
1733 + }
1734 +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1735 + {
1736 + struct ata_link *link = qc->dev->link;
1737 + struct ata_eh_info *ehi = &link->eh_info;
1738 +- unsigned long flags = 0;
1739 + int poll_next;
1740 +
1741 ++ lockdep_assert_held(ap->lock);
1742 ++
1743 + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1744 +
1745 + /* Make sure ata_sff_qc_issue() does not throw things
1746 +@@ -1112,14 +1106,6 @@ fsm_start:
1747 + }
1748 + }
1749 +
1750 +- /* Send the CDB (atapi) or the first data block (ata pio out).
1751 +- * During the state transition, interrupt handler shouldn't
1752 +- * be invoked before the data transfer is complete and
1753 +- * hsm_task_state is changed. Hence, the following locking.
1754 +- */
1755 +- if (in_wq)
1756 +- spin_lock_irqsave(ap->lock, flags);
1757 +-
1758 + if (qc->tf.protocol == ATA_PROT_PIO) {
1759 + /* PIO data out protocol.
1760 + * send first data block.
1761 +@@ -1135,9 +1121,6 @@ fsm_start:
1762 + /* send CDB */
1763 + atapi_send_cdb(ap, qc);
1764 +
1765 +- if (in_wq)
1766 +- spin_unlock_irqrestore(ap->lock, flags);
1767 +-
1768 + /* if polling, ata_sff_pio_task() handles the rest.
1769 + * otherwise, interrupt handler takes over from here.
1770 + */
1771 +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1772 + u8 status;
1773 + int poll_next;
1774 +
1775 ++ spin_lock_irq(ap->lock);
1776 ++
1777 + BUG_ON(ap->sff_pio_task_link == NULL);
1778 + /* qc can be NULL if timeout occurred */
1779 + qc = ata_qc_from_tag(ap, link->active_tag);
1780 + if (!qc) {
1781 + ap->sff_pio_task_link = NULL;
1782 +- return;
1783 ++ goto out_unlock;
1784 + }
1785 +
1786 + fsm_start:
1787 +@@ -1381,11 +1366,14 @@ fsm_start:
1788 + */
1789 + status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1790 + if (status & ATA_BUSY) {
1791 ++ spin_unlock_irq(ap->lock);
1792 + ata_msleep(ap, 2);
1793 ++ spin_lock_irq(ap->lock);
1794 ++
1795 + status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1796 + if (status & ATA_BUSY) {
1797 + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1798 +- return;
1799 ++ goto out_unlock;
1800 + }
1801 + }
1802 +
1803 +@@ -1402,6 +1390,8 @@ fsm_start:
1804 + */
1805 + if (poll_next)
1806 + goto fsm_start;
1807 ++out_unlock:
1808 ++ spin_unlock_irq(ap->lock);
1809 + }
1810 +
1811 + /**
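The libata-sff rework hoists the locking out of the helpers: ata_sff_pio_task() now holds ap->lock for the whole state-machine run (enforced by the new lockdep_assert_held()) and drops it only around the 2 ms sleep, instead of each helper taking the lock when in_wq is set. A userspace sketch of the drop-lock-around-sleep shape, using a pthread mutex as a stand-in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t ap_lock = PTHREAD_MUTEX_INITIALIZER;

static void pio_task(void)
{
    pthread_mutex_lock(&ap_lock);
    /* ... check BSY under the lock ... */
    pthread_mutex_unlock(&ap_lock);  /* never sleep while holding it */
    usleep(2000);                    /* ata_msleep(ap, 2) analog */
    pthread_mutex_lock(&ap_lock);
    /* ... re-check status, run the HSM ... */
    pthread_mutex_unlock(&ap_lock);
}

int main(void)
{
    pio_task();
    puts("done");
    return 0;
}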
1812 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1813 +index 92f0ee3..9688971 100644
1814 +--- a/drivers/bluetooth/btusb.c
1815 ++++ b/drivers/bluetooth/btusb.c
1816 +@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
1817 + { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
1818 + .driver_info = BTUSB_BCM_PATCHRAM },
1819 +
1820 ++ /* Toshiba Corp - Broadcom based */
1821 ++ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
1822 ++ .driver_info = BTUSB_BCM_PATCHRAM },
1823 ++
1824 + /* Intel Bluetooth USB Bootloader (RAM module) */
1825 + { USB_DEVICE(0x8087, 0x0a5a),
1826 + .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
1827 +diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
1828 +index 2fe37f7..813003d 100644
1829 +--- a/drivers/clk/samsung/clk-cpu.c
1830 ++++ b/drivers/clk/samsung/clk-cpu.c
1831 +@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1832 + unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
1833 + unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
1834 + unsigned long div0, div1 = 0, mux_reg;
1835 ++ unsigned long flags;
1836 +
1837 + /* find out the divider values to use for clock data */
1838 + while ((cfg_data->prate * 1000) != ndata->new_rate) {
1839 +@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1840 + cfg_data++;
1841 + }
1842 +
1843 +- spin_lock(cpuclk->lock);
1844 ++ spin_lock_irqsave(cpuclk->lock, flags);
1845 +
1846 + /*
1847 + * For the selected PLL clock frequency, get the pre-defined divider
1848 +@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1849 + DIV_MASK_ALL);
1850 + }
1851 +
1852 +- spin_unlock(cpuclk->lock);
1853 ++ spin_unlock_irqrestore(cpuclk->lock, flags);
1854 + return 0;
1855 + }
1856 +
1857 +@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1858 + const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
1859 + unsigned long div = 0, div_mask = DIV_MASK;
1860 + unsigned long mux_reg;
1861 ++ unsigned long flags;
1862 +
1863 + /* find out the divider values to use for clock data */
1864 + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
1865 +@@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1866 + }
1867 + }
1868 +
1869 +- spin_lock(cpuclk->lock);
1870 ++ spin_lock_irqsave(cpuclk->lock, flags);
1871 +
1872 + /* select mout_apll as the alternate parent */
1873 + mux_reg = readl(base + E4210_SRC_CPU);
1874 +@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1875 + }
1876 +
1877 + exynos_set_safe_div(base, div, div_mask);
1878 +- spin_unlock(cpuclk->lock);
1879 ++ spin_unlock_irqrestore(cpuclk->lock, flags);
1880 + return 0;
1881 + }
1882 +
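Both Samsung clk notifier paths switch to spin_lock_irqsave() because rate changes can arrive from contexts where local interrupts are already disabled; the flags word records the prior interrupt state so unlock restores it rather than unconditionally re-enabling. A toy model of the save/restore semantics (not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static unsigned long lock_irqsave(void)
{
    unsigned long flags = irqs_enabled;
    irqs_enabled = false;  /* disable local interrupts */
    /* ... take the spinlock ... */
    return flags;
}

static void unlock_irqrestore(unsigned long flags)
{
    /* ... release the spinlock ... */
    irqs_enabled = flags;  /* restore, don't force-enable */
}

int main(void)
{
    unsigned long flags = lock_irqsave();
    unlock_irqrestore(flags);
    printf("irqs_enabled=%d\n", irqs_enabled);
    return 0;
}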
1883 +diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
1884 +index 6ee9140..4da2af9 100644
1885 +--- a/drivers/clocksource/tcb_clksrc.c
1886 ++++ b/drivers/clocksource/tcb_clksrc.c
1887 +@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
1888 +
1889 + __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
1890 + __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
1891 +- clk_disable(tcd->clk);
1892 ++ if (!clockevent_state_detached(d))
1893 ++ clk_disable(tcd->clk);
1894 +
1895 + return 0;
1896 + }
1897 +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
1898 +index a92e94b..dfc3bb4 100644
1899 +--- a/drivers/clocksource/vt8500_timer.c
1900 ++++ b/drivers/clocksource/vt8500_timer.c
1901 +@@ -50,6 +50,8 @@
1902 +
1903 + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
1904 +
1905 ++#define MIN_OSCR_DELTA 16
1906 ++
1907 + static void __iomem *regbase;
1908 +
1909 + static cycle_t vt8500_timer_read(struct clocksource *cs)
1910 +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
1911 + cpu_relax();
1912 + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
1913 +
1914 +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
1915 ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
1916 + return -ETIME;
1917 +
1918 + writel(1, regbase + TIMER_IER_VAL);
1919 +@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
1920 + pr_err("%s: setup_irq failed for %s\n", __func__,
1921 + clockevent.name);
1922 + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
1923 +- 4, 0xf0000000);
1924 ++ MIN_OSCR_DELTA * 2, 0xf0000000);
1925 + }
1926 +
1927 + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
1928 +diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
1929 +index b260576..d994b0f 100644
1930 +--- a/drivers/cpufreq/cpufreq_governor.c
1931 ++++ b/drivers/cpufreq/cpufreq_governor.c
1932 +@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
1933 + if (!have_governor_per_policy())
1934 + cdata->gdbs_data = dbs_data;
1935 +
1936 ++ policy->governor_data = dbs_data;
1937 ++
1938 + ret = sysfs_create_group(get_governor_parent_kobj(policy),
1939 + get_sysfs_attr(dbs_data));
1940 + if (ret)
1941 + goto reset_gdbs_data;
1942 +
1943 +- policy->governor_data = dbs_data;
1944 +-
1945 + return 0;
1946 +
1947 + reset_gdbs_data:
1948 ++ policy->governor_data = NULL;
1949 ++
1950 + if (!have_governor_per_policy())
1951 + cdata->gdbs_data = NULL;
1952 + cdata->exit(dbs_data, !policy->governor->initialized);
1953 +@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
1954 + if (!cdbs->shared || cdbs->shared->policy)
1955 + return -EBUSY;
1956 +
1957 +- policy->governor_data = NULL;
1958 + if (!--dbs_data->usage_count) {
1959 + sysfs_remove_group(get_governor_parent_kobj(policy),
1960 + get_sysfs_attr(dbs_data));
1961 +
1962 ++ policy->governor_data = NULL;
1963 ++
1964 + if (!have_governor_per_policy())
1965 + cdata->gdbs_data = NULL;
1966 +
1967 + cdata->exit(dbs_data, policy->governor->initialized == 1);
1968 + kfree(dbs_data);
1969 ++ } else {
1970 ++ policy->governor_data = NULL;
1971 + }
1972 +
1973 + free_common_dbs_info(policy, cdata);
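The cpufreq governor hunks are an ordering fix: policy->governor_data must be populated before sysfs_create_group() exposes the attributes, since a read can race in the moment the files appear, and it must be reset on the failure path and before the group is removed on exit. The shape, sketched with stub types:

#include <stdio.h>

struct policy { void *governor_data; };

static int sysfs_create_group_stub(void) { return 0; /* or -ENOMEM */ }

static int governor_init(struct policy *p, void *dbs_data)
{
    p->governor_data = dbs_data;     /* publish before sysfs exists */
    if (sysfs_create_group_stub()) {
        p->governor_data = NULL;     /* unpublish on failure */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct policy p;
    int data;

    printf("%d\n", governor_init(&p, &data));
    return 0;
}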
1974 +diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
1975 +index 1d99c97..0963772 100644
1976 +--- a/drivers/cpufreq/pxa2xx-cpufreq.c
1977 ++++ b/drivers/cpufreq/pxa2xx-cpufreq.c
1978 +@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
1979 + }
1980 + }
1981 + #else
1982 +-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
1983 ++static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
1984 + {
1985 + return 0;
1986 + }
1987 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1988 +index 370c661..fa00f3a 100644
1989 +--- a/drivers/dma/at_xdmac.c
1990 ++++ b/drivers/dma/at_xdmac.c
1991 +@@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1992 + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1993 + at_xdmac_remove_xfer(atchan, desc);
1994 +
1995 ++ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1996 + clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1997 + spin_unlock_irqrestore(&atchan->lock, flags);
1998 +
1999 +@@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
2000 + atchan = to_at_xdmac_chan(chan);
2001 + at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
2002 + if (at_xdmac_chan_is_cyclic(atchan)) {
2003 ++ if (at_xdmac_chan_is_paused(atchan))
2004 ++ at_xdmac_device_resume(chan);
2005 + at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2006 + at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2007 + at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
2008 +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
2009 +index 7067b6d..4f099ea 100644
2010 +--- a/drivers/dma/dw/core.c
2011 ++++ b/drivers/dma/dw/core.c
2012 +@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
2013 +
2014 + /* Called with dwc->lock held and all DMAC interrupts disabled */
2015 + static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
2016 +- u32 status_err, u32 status_xfer)
2017 ++ u32 status_block, u32 status_err, u32 status_xfer)
2018 + {
2019 + unsigned long flags;
2020 +
2021 +- if (dwc->mask) {
2022 ++ if (status_block & dwc->mask) {
2023 + void (*callback)(void *param);
2024 + void *callback_param;
2025 +
2026 + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
2027 + channel_readl(dwc, LLP));
2028 ++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2029 +
2030 + callback = dwc->cdesc->period_callback;
2031 + callback_param = dwc->cdesc->period_callback_param;
2032 +@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
2033 + channel_writel(dwc, CTL_LO, 0);
2034 + channel_writel(dwc, CTL_HI, 0);
2035 +
2036 ++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2037 + dma_writel(dw, CLEAR.ERROR, dwc->mask);
2038 + dma_writel(dw, CLEAR.XFER, dwc->mask);
2039 +
2040 +@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
2041 +
2042 + spin_unlock_irqrestore(&dwc->lock, flags);
2043 + }
2044 ++
2045 ++ /* Re-enable interrupts */
2046 ++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
2047 + }
2048 +
2049 + /* ------------------------------------------------------------------------- */
2050 +@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
2051 + {
2052 + struct dw_dma *dw = (struct dw_dma *)data;
2053 + struct dw_dma_chan *dwc;
2054 ++ u32 status_block;
2055 + u32 status_xfer;
2056 + u32 status_err;
2057 + int i;
2058 +
2059 ++ status_block = dma_readl(dw, RAW.BLOCK);
2060 + status_xfer = dma_readl(dw, RAW.XFER);
2061 + status_err = dma_readl(dw, RAW.ERROR);
2062 +
2063 +@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
2064 + for (i = 0; i < dw->dma.chancnt; i++) {
2065 + dwc = &dw->chan[i];
2066 + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
2067 +- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
2068 ++ dwc_handle_cyclic(dw, dwc, status_block, status_err,
2069 ++ status_xfer);
2070 + else if (status_err & (1 << i))
2071 + dwc_handle_error(dw, dwc);
2072 + else if (status_xfer & (1 << i))
2073 + dwc_scan_descriptors(dw, dwc);
2074 + }
2075 +
2076 +- /*
2077 +- * Re-enable interrupts.
2078 +- */
2079 ++ /* Re-enable interrupts */
2080 + channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
2081 + channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
2082 + }
2083 +@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
2084 + * softirq handler.
2085 + */
2086 + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
2087 ++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2088 + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
2089 +
2090 + status = dma_readl(dw, STATUS_INT);
2091 +@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
2092 +
2093 + /* Try to recover */
2094 + channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
2095 ++ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
2096 + channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
2097 + channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
2098 + channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
2099 +@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
2100 + dma_writel(dw, CFG, 0);
2101 +
2102 + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
2103 ++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2104 + channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
2105 + channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
2106 + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
2107 +@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
2108 +
2109 + /* Disable interrupts */
2110 + channel_clear_bit(dw, MASK.XFER, dwc->mask);
2111 ++ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
2112 + channel_clear_bit(dw, MASK.ERROR, dwc->mask);
2113 +
2114 + spin_unlock_irqrestore(&dwc->lock, flags);
2115 +@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
2116 + int dw_dma_cyclic_start(struct dma_chan *chan)
2117 + {
2118 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
2119 +- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
2120 ++ struct dw_dma *dw = to_dw_dma(chan->device);
2121 + unsigned long flags;
2122 +
2123 + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
2124 +@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
2125 +
2126 + spin_lock_irqsave(&dwc->lock, flags);
2127 +
2128 +- /* Assert channel is idle */
2129 +- if (dma_readl(dw, CH_EN) & dwc->mask) {
2130 +- dev_err(chan2dev(&dwc->chan),
2131 +- "%s: BUG: Attempted to start non-idle channel\n",
2132 +- __func__);
2133 +- dwc_dump_chan_regs(dwc);
2134 +- spin_unlock_irqrestore(&dwc->lock, flags);
2135 +- return -EBUSY;
2136 +- }
2137 +-
2138 +- dma_writel(dw, CLEAR.ERROR, dwc->mask);
2139 +- dma_writel(dw, CLEAR.XFER, dwc->mask);
2140 ++ /* Enable interrupts to perform cyclic transfer */
2141 ++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
2142 +
2143 +- /* Setup DMAC channel registers */
2144 +- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
2145 +- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
2146 +- channel_writel(dwc, CTL_HI, 0);
2147 +-
2148 +- channel_set_bit(dw, CH_EN, dwc->mask);
2149 ++ dwc_dostart(dwc, dwc->cdesc->desc[0]);
2150 +
2151 + spin_unlock_irqrestore(&dwc->lock, flags);
2152 +
2153 +@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
2154 +
2155 + dwc_chan_disable(dw, dwc);
2156 +
2157 ++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2158 + dma_writel(dw, CLEAR.ERROR, dwc->mask);
2159 + dma_writel(dw, CLEAR.XFER, dwc->mask);
2160 +
2161 +@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2162 + /* Force dma off, just in case */
2163 + dw_dma_off(dw);
2164 +
2165 +- /* Disable BLOCK interrupts as well */
2166 +- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2167 +-
2168 + /* Create a pool of consistent memory blocks for hardware descriptors */
2169 + dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
2170 + sizeof(struct dw_desc), 4, 0);
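For cyclic transfers the dw_dmac driver is now driven by the per-channel BLOCK interrupt: unmasked in dw_dma_cyclic_start(), read from RAW.BLOCK in the tasklet, cleared per period, and masked again everywhere else. A register-image sketch of the bit handling, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mask_block = 0;   /* MASK.BLOCK image */
    uint32_t raw_block = 0;    /* RAW.BLOCK image */
    unsigned int chan = 3;

    mask_block |= 1u << chan;  /* cyclic_start: unmask channel */
    raw_block |= 1u << chan;   /* hw: block boundary reached */

    if (raw_block & mask_block & (1u << chan)) {
        puts("cyclic period complete");
        raw_block &= ~(1u << chan);  /* CLEAR.BLOCK write */
    }
    return 0;
}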
2171 +diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
2172 +index 592af5f..5358737 100644
2173 +--- a/drivers/edac/edac_device.c
2174 ++++ b/drivers/edac/edac_device.c
2175 +@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
2176 + */
2177 + void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
2178 + {
2179 +- int status;
2180 +-
2181 + if (!edac_dev->edac_check)
2182 + return;
2183 +
2184 +- status = cancel_delayed_work(&edac_dev->work);
2185 +- if (status == 0) {
2186 +- /* workq instance might be running, wait for it */
2187 +- flush_workqueue(edac_workqueue);
2188 +- }
2189 ++ edac_dev->op_state = OP_OFFLINE;
2190 ++
2191 ++ cancel_delayed_work_sync(&edac_dev->work);
2192 ++ flush_workqueue(edac_workqueue);
2193 + }
2194 +
2195 + /*
2196 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
2197 +index 77ecd6a..1b2c218 100644
2198 +--- a/drivers/edac/edac_mc.c
2199 ++++ b/drivers/edac/edac_mc.c
2200 +@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
2201 + */
2202 + static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
2203 + {
2204 +- int status;
2205 +-
2206 +- if (mci->op_state != OP_RUNNING_POLL)
2207 +- return;
2208 +-
2209 +- status = cancel_delayed_work(&mci->work);
2210 +- if (status == 0) {
2211 +- edac_dbg(0, "not canceled, flush the queue\n");
2212 ++ mci->op_state = OP_OFFLINE;
2213 +
2214 +- /* workq instance might be running, wait for it */
2215 +- flush_workqueue(edac_workqueue);
2216 +- }
2217 ++ cancel_delayed_work_sync(&mci->work);
2218 ++ flush_workqueue(edac_workqueue);
2219 + }
2220 +
2221 + /*
2222 +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
2223 +index a75acea..58aed67 100644
2224 +--- a/drivers/edac/edac_mc_sysfs.c
2225 ++++ b/drivers/edac/edac_mc_sysfs.c
2226 +@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
2227 + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
2228 + const struct attribute_group **groups)
2229 + {
2230 ++ char *name;
2231 + int i, err;
2232 +
2233 + /*
2234 + * The memory controller needs its own bus, in order to avoid
2235 + * namespace conflicts at /sys/bus/edac.
2236 + */
2237 +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
2238 +- if (!mci->bus->name)
2239 ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
2240 ++ if (!name)
2241 + return -ENOMEM;
2242 +
2243 ++ mci->bus->name = name;
2244 ++
2245 + edac_dbg(0, "creating bus %s\n", mci->bus->name);
2246 +
2247 + err = bus_register(mci->bus);
2248 +- if (err < 0)
2249 +- goto fail_free_name;
2250 ++ if (err < 0) {
2251 ++ kfree(name);
2252 ++ return err;
2253 ++ }
2254 +
2255 + /* get the /sys/devices/system/edac subsys reference */
2256 + mci->dev.type = &mci_attr_type;
2257 +@@ -961,8 +966,8 @@ fail_unregister_dimm:
2258 + device_unregister(&mci->dev);
2259 + fail_unregister_bus:
2260 + bus_unregister(mci->bus);
2261 +-fail_free_name:
2262 +- kfree(mci->bus->name);
2263 ++ kfree(name);
2264 ++
2265 + return err;
2266 + }
2267 +
2268 +@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
2269 +
2270 + void edac_unregister_sysfs(struct mem_ctl_info *mci)
2271 + {
2272 ++ const char *name = mci->bus->name;
2273 ++
2274 + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
2275 + device_unregister(&mci->dev);
2276 + bus_unregister(mci->bus);
2277 +- kfree(mci->bus->name);
2278 ++ kfree(name);
2279 + }
2280 +
2281 + static void mc_attr_release(struct device *dev)
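The edac_mc_sysfs hunks fix a use-after-free on the bus name: device_unregister() can drop the last reference to mci, so the name pointer is captured in a local before teardown and freed afterwards, never read back through mci->bus. The lifetime rule, sketched:

#include <stdlib.h>
#include <string.h>

struct bus { char *name; };

static void unregister_bus(struct bus *b)
{
    const char *name = b->name;  /* capture before teardown */
    /* ... device_unregister()/bus_unregister() analogs run here,
     * after which b must not be dereferenced ... */
    free((void *)name);
}

int main(void)
{
    struct bus b = { .name = strdup("mc0") };

    unregister_bus(&b);
    return 0;
}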
2282 +diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
2283 +index 2cf44b4d..b4b3860 100644
2284 +--- a/drivers/edac/edac_pci.c
2285 ++++ b/drivers/edac/edac_pci.c
2286 +@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
2287 + */
2288 + static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
2289 + {
2290 +- int status;
2291 +-
2292 + edac_dbg(0, "\n");
2293 +
2294 +- status = cancel_delayed_work(&pci->work);
2295 +- if (status == 0)
2296 +- flush_workqueue(edac_workqueue);
2297 ++ pci->op_state = OP_OFFLINE;
2298 ++
2299 ++ cancel_delayed_work_sync(&pci->work);
2300 ++ flush_workqueue(edac_workqueue);
2301 + }
2302 +
2303 + /*
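All three EDAC teardown paths now follow the same recipe: mark the poller OP_OFFLINE first, then cancel_delayed_work_sync() so a running instance is waited for rather than only future ones cancelled, then flush the queue. A pthread analog of the stop-flag-then-join ordering:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static volatile bool op_offline;

static void *poll_worker(void *arg)
{
    (void)arg;
    while (!op_offline)
        ;  /* ... periodic error check ... */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, poll_worker, NULL);
    op_offline = true;      /* 1: no new iterations */
    pthread_join(t, NULL);  /* 2: wait out the running one */
    puts("teardown complete");
    return 0;
}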
2304 +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
2305 +index 756eca8..10e6774 100644
2306 +--- a/drivers/firmware/efi/efivars.c
2307 ++++ b/drivers/firmware/efi/efivars.c
2308 +@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
2309 + }
2310 +
2311 + if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
2312 +- efivar_validate(name, data, size) == false) {
2313 ++ efivar_validate(vendor, name, data, size) == false) {
2314 + printk(KERN_ERR "efivars: Malformed variable content\n");
2315 + return -EINVAL;
2316 + }
2317 +@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
2318 + }
2319 +
2320 + if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
2321 +- efivar_validate(name, data, size) == false) {
2322 ++ efivar_validate(new_var->VendorGuid, name, data,
2323 ++ size) == false) {
2324 + printk(KERN_ERR "efivars: Malformed variable content\n");
2325 + return -EINVAL;
2326 + }
2327 +@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
2328 + static int
2329 + efivar_create_sysfs_entry(struct efivar_entry *new_var)
2330 + {
2331 +- int i, short_name_size;
2332 ++ int short_name_size;
2333 + char *short_name;
2334 +- unsigned long variable_name_size;
2335 +- efi_char16_t *variable_name;
2336 ++ unsigned long utf8_name_size;
2337 ++ efi_char16_t *variable_name = new_var->var.VariableName;
2338 + int ret;
2339 +
2340 +- variable_name = new_var->var.VariableName;
2341 +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
2342 +-
2343 + /*
2344 +- * Length of the variable bytes in ASCII, plus the '-' separator,
2345 ++ * Length of the variable bytes in UTF8, plus the '-' separator,
2346 + * plus the GUID, plus trailing NUL
2347 + */
2348 +- short_name_size = variable_name_size / sizeof(efi_char16_t)
2349 +- + 1 + EFI_VARIABLE_GUID_LEN + 1;
2350 +-
2351 +- short_name = kzalloc(short_name_size, GFP_KERNEL);
2352 ++ utf8_name_size = ucs2_utf8size(variable_name);
2353 ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
2354 +
2355 ++ short_name = kmalloc(short_name_size, GFP_KERNEL);
2356 + if (!short_name)
2357 + return -ENOMEM;
2358 +
2359 +- /* Convert Unicode to normal chars (assume top bits are 0),
2360 +- ala UTF-8 */
2361 +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
2362 +- short_name[i] = variable_name[i] & 0xFF;
2363 +- }
2364 ++ ucs2_as_utf8(short_name, variable_name, short_name_size);
2365 ++
2366 + /* This is ugly, but necessary to separate one vendor's
2367 + private variables from another's. */
2368 +-
2369 +- *(short_name + strlen(short_name)) = '-';
2370 ++ short_name[utf8_name_size] = '-';
2371 + efi_guid_to_str(&new_var->var.VendorGuid,
2372 +- short_name + strlen(short_name));
2373 ++ short_name + utf8_name_size + 1);
2374 +
2375 + new_var->kobj.kset = efivars_kset;
2376 +
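The sysfs entry name is now built with the shared ucs2_utf8size()/ucs2_as_utf8() helpers instead of the old truncate-to-low-byte loop, so the buffer has to be sized for the UTF-8 encoding plus the '-' separator, the GUID text, and a NUL. The arithmetic, with an invented name length:

#include <stdio.h>

#define EFI_VARIABLE_GUID_LEN 36  /* strlen of a textual GUID */

int main(void)
{
    unsigned long utf8_name_size = 8;  /* e.g. "BootNext" */
    int short_name_size = utf8_name_size + 1          /* '-' */
                          + EFI_VARIABLE_GUID_LEN + 1;  /* NUL */

    printf("buffer bytes needed: %d\n", short_name_size);
    return 0;
}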
2377 +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
2378 +index 70a0fb1..7f2ea21 100644
2379 +--- a/drivers/firmware/efi/vars.c
2380 ++++ b/drivers/firmware/efi/vars.c
2381 +@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
2382 + }
2383 +
2384 + struct variable_validate {
2385 ++ efi_guid_t vendor;
2386 + char *name;
2387 + bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
2388 + unsigned long len);
2389 + };
2390 +
2391 ++/*
2392 ++ * This is the list of variables we need to validate, as well as the
2393 ++ * whitelist for what we think is safe not to default to immutable.
2394 ++ *
2395 ++ * If it has a validate() method that's not NULL, it'll go into the
2396 ++ * validation routine. If not, it is assumed valid, but still used for
2397 ++ * whitelisting.
2398 ++ *
2399 ++ * Note that it's sorted by {vendor,name}, but globbed names must come after
2400 ++ * any other name with the same prefix.
2401 ++ */
2402 + static const struct variable_validate variable_validate[] = {
2403 +- { "BootNext", validate_uint16 },
2404 +- { "BootOrder", validate_boot_order },
2405 +- { "DriverOrder", validate_boot_order },
2406 +- { "Boot*", validate_load_option },
2407 +- { "Driver*", validate_load_option },
2408 +- { "ConIn", validate_device_path },
2409 +- { "ConInDev", validate_device_path },
2410 +- { "ConOut", validate_device_path },
2411 +- { "ConOutDev", validate_device_path },
2412 +- { "ErrOut", validate_device_path },
2413 +- { "ErrOutDev", validate_device_path },
2414 +- { "Timeout", validate_uint16 },
2415 +- { "Lang", validate_ascii_string },
2416 +- { "PlatformLang", validate_ascii_string },
2417 +- { "", NULL },
2418 ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
2419 ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
2420 ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
2421 ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
2422 ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
2423 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
2424 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
2425 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
2426 ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
2427 ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
2428 ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
2429 ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
2430 ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
2431 ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
2432 ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
2433 ++ { LINUX_EFI_CRASH_GUID, "*", NULL },
2434 ++ { NULL_GUID, "", NULL },
2435 + };
2436 +
2437 ++static bool
2438 ++variable_matches(const char *var_name, size_t len, const char *match_name,
2439 ++ int *match)
2440 ++{
2441 ++ for (*match = 0; ; (*match)++) {
2442 ++ char c = match_name[*match];
2443 ++ char u = var_name[*match];
2444 ++
2445 ++ /* Wildcard in the matching name means we've matched */
2446 ++ if (c == '*')
2447 ++ return true;
2448 ++
2449 ++ /* Case sensitive match */
2450 ++ if (!c && *match == len)
2451 ++ return true;
2452 ++
2453 ++ if (c != u)
2454 ++ return false;
2455 ++
2456 ++ if (!c)
2457 ++ return true;
2458 ++ }
2459 ++ return true;
2460 ++}
2461 ++
2462 + bool
2463 +-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
2464 ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
2465 ++ unsigned long data_size)
2466 + {
2467 + int i;
2468 +- u16 *unicode_name = var_name;
2469 ++ unsigned long utf8_size;
2470 ++ u8 *utf8_name;
2471 +
2472 +- for (i = 0; variable_validate[i].validate != NULL; i++) {
2473 +- const char *name = variable_validate[i].name;
2474 +- int match;
2475 ++ utf8_size = ucs2_utf8size(var_name);
2476 ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
2477 ++ if (!utf8_name)
2478 ++ return false;
2479 +
2480 +- for (match = 0; ; match++) {
2481 +- char c = name[match];
2482 +- u16 u = unicode_name[match];
2483 ++ ucs2_as_utf8(utf8_name, var_name, utf8_size);
2484 ++ utf8_name[utf8_size] = '\0';
2485 +
2486 +- /* All special variables are plain ascii */
2487 +- if (u > 127)
2488 +- return true;
2489 ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
2490 ++ const char *name = variable_validate[i].name;
2491 ++ int match = 0;
2492 +
2493 +- /* Wildcard in the matching name means we've matched */
2494 +- if (c == '*')
2495 +- return variable_validate[i].validate(var_name,
2496 +- match, data, len);
2497 ++ if (efi_guidcmp(vendor, variable_validate[i].vendor))
2498 ++ continue;
2499 +
2500 +- /* Case sensitive match */
2501 +- if (c != u)
2502 ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
2503 ++ if (variable_validate[i].validate == NULL)
2504 + break;
2505 +-
2506 +- /* Reached the end of the string while matching */
2507 +- if (!c)
2508 +- return variable_validate[i].validate(var_name,
2509 +- match, data, len);
2510 ++ kfree(utf8_name);
2511 ++ return variable_validate[i].validate(var_name, match,
2512 ++ data, data_size);
2513 + }
2514 + }
2515 +-
2516 ++ kfree(utf8_name);
2517 + return true;
2518 + }
2519 + EXPORT_SYMBOL_GPL(efivar_validate);
2520 +
2521 ++bool
2522 ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
2523 ++ size_t len)
2524 ++{
2525 ++ int i;
2526 ++ bool found = false;
2527 ++ int match = 0;
2528 ++
2529 ++ /*
2530 ++ * Check if our variable is in the validated variables list
2531 ++ */
2532 ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
2533 ++ if (efi_guidcmp(variable_validate[i].vendor, vendor))
2534 ++ continue;
2535 ++
2536 ++ if (variable_matches(var_name, len,
2537 ++ variable_validate[i].name, &match)) {
2538 ++ found = true;
2539 ++ break;
2540 ++ }
2541 ++ }
2542 ++
2543 ++ /*
2544 ++ * If it's in our list, it is removable.
2545 ++ */
2546 ++ return found;
2547 ++}
2548 ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
2549 ++
2550 + static efi_status_t
2551 + check_var_size(u32 attributes, unsigned long size)
2552 + {
2553 +@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
2554 +
2555 + *set = false;
2556 +
2557 +- if (efivar_validate(name, data, *size) == false)
2558 ++ if (efivar_validate(*vendor, name, data, *size) == false)
2559 + return -EINVAL;
2560 +
2561 + /*
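The new variable_matches() helper implements the table's globbing rule: a '*' in the match name accepts any suffix, otherwise the comparison is byte-for-byte and case sensitive. It can be lifted out and exercised in userspace as-is:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool variable_matches(const char *var_name, size_t len,
                             const char *match_name, int *match)
{
    for (*match = 0; ; (*match)++) {
        char c = match_name[*match];
        char u = var_name[*match];

        if (c == '*')
            return true;
        if (!c && (size_t)*match == len)
            return true;
        if (c != u)
            return false;
        if (!c)
            return true;
    }
}

int main(void)
{
    const char *name = "Boot0001";
    int m;

    /* prints 1 ("Boot*" globs) then 0 ("BootOrder" does not match) */
    printf("%d\n", variable_matches(name, strlen(name) + 1, "Boot*", &m));
    printf("%d\n", variable_matches(name, strlen(name) + 1, "BootOrder", &m));
    return 0;
}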
2562 +diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
2563 +index 04c2707..ca06601 100644
2564 +--- a/drivers/gpu/drm/amd/amdgpu/Makefile
2565 ++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
2566 +@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
2567 + amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
2568 +
2569 + # add asic specific block
2570 +-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
2571 ++amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
2572 + ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
2573 + amdgpu_amdkfd_gfx_v7.o
2574 +
2575 +@@ -31,6 +31,7 @@ amdgpu-y += \
2576 +
2577 + # add GMC block
2578 + amdgpu-y += \
2579 ++ gmc_v7_0.o \
2580 + gmc_v8_0.o
2581 +
2582 + # add IH block
2583 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2584 +index 048cfe0..bb1099c 100644
2585 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2586 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2587 +@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
2588 + uint32_t align;
2589 + };
2590 +
2591 +-struct amdgpu_sa_bo;
2592 +-
2593 + /* sub-allocation buffer */
2594 + struct amdgpu_sa_bo {
2595 + struct list_head olist;
2596 +@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2597 + int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2598 + uint32_t flags);
2599 + bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2600 ++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2601 ++ unsigned long end);
2602 + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2603 + uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2604 + struct ttm_mem_reg *mem);
2605 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2606 +index d5b4213..c961fe0 100644
2607 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2608 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2609 +@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2610 + }
2611 +
2612 + /* post card */
2613 +- amdgpu_atom_asic_init(adev->mode_info.atom_context);
2614 ++ if (!amdgpu_card_posted(adev))
2615 ++ amdgpu_atom_asic_init(adev->mode_info.atom_context);
2616 +
2617 + r = amdgpu_resume(adev);
2618 ++ if (r)
2619 ++ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2620 +
2621 + amdgpu_fence_driver_resume(adev);
2622 +
2623 +- r = amdgpu_ib_ring_tests(adev);
2624 +- if (r)
2625 +- DRM_ERROR("ib ring test failed (%d).\n", r);
2626 ++ if (resume) {
2627 ++ r = amdgpu_ib_ring_tests(adev);
2628 ++ if (r)
2629 ++ DRM_ERROR("ib ring test failed (%d).\n", r);
2630 ++ }
2631 +
2632 + r = amdgpu_late_init(adev);
2633 + if (r)
2634 +@@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2635 + }
2636 +
2637 + drm_kms_helper_poll_enable(dev);
2638 ++ drm_helper_hpd_irq_event(dev);
2639 +
2640 + if (fbcon) {
2641 + amdgpu_fbdev_set_suspend(adev, 0);
2642 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2643 +index 5580d34..0c713a9 100644
2644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2645 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2646 +@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2647 +
2648 + struct drm_crtc *crtc = &amdgpuCrtc->base;
2649 + unsigned long flags;
2650 +- unsigned i;
2651 +- int vpos, hpos, stat, min_udelay;
2652 ++ unsigned i, repcnt = 4;
2653 ++ int vpos, hpos, stat, min_udelay = 0;
2654 + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
2655 +
2656 + amdgpu_flip_wait_fence(adev, &work->excl);
2657 +@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2658 + * In practice this won't execute very often unless on very fast
2659 + * machines because the time window for this to happen is very small.
2660 + */
2661 +- for (;;) {
2662 ++ while (amdgpuCrtc->enabled && repcnt--) {
2663 + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
2664 + * start in hpos, and to the "fudged earlier" vblank start in
2665 + * vpos.
2666 +@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2667 + /* Sleep at least until estimated real start of hw vblank */
2668 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
2669 + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
2670 ++ if (min_udelay > vblank->framedur_ns / 2000) {
2671 ++ /* Don't wait ridiculously long - something is wrong */
2672 ++ repcnt = 0;
2673 ++ break;
2674 ++ }
2675 + usleep_range(min_udelay, 2 * min_udelay);
2676 + spin_lock_irqsave(&crtc->dev->event_lock, flags);
2677 + };
2678 +
2679 ++ if (!repcnt)
2680 ++ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
2681 ++ "framedur %d, linedur %d, stat %d, vpos %d, "
2682 ++ "hpos %d\n", work->crtc_id, min_udelay,
2683 ++ vblank->framedur_ns / 1000,
2684 ++ vblank->linedur_ns / 1000, stat, vpos, hpos);
2685 ++
2686 + /* do the flip (mmio) */
2687 + adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
2688 + /* set the flip status */
2689 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2690 +index 0508c5c..8d6668c 100644
2691 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2692 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2693 +@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
2694 + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
2695 + #endif
2696 + /* topaz */
2697 +- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2698 +- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2699 +- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2700 +- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2701 +- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2702 ++ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2703 ++ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2704 ++ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2705 ++ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2706 ++ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2707 + /* tonga */
2708 + {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
2709 + {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
2710 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2711 +index b1969f2..d4e2780 100644
2712 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2713 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2714 +@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
2715 +
2716 + list_for_each_entry(bo, &node->bos, mn_list) {
2717 +
2718 +- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
2719 ++ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
2720 ++ end))
2721 + continue;
2722 +
2723 + r = amdgpu_bo_reserve(bo, true);
2724 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2725 +index c3ce103..a2a16ac 100644
2726 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2727 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2728 +@@ -399,7 +399,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
2729 + }
2730 + if (fpfn > bo->placements[i].fpfn)
2731 + bo->placements[i].fpfn = fpfn;
2732 +- if (lpfn && lpfn < bo->placements[i].lpfn)
2733 ++ if (!bo->placements[i].lpfn ||
2734 ++ (lpfn && lpfn < bo->placements[i].lpfn))
2735 + bo->placements[i].lpfn = lpfn;
2736 + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
2737 + }
2738 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2739 +index 22a8c7d..03fe251 100644
2740 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2741 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2742 +@@ -595,8 +595,6 @@ force:
2743 +
2744 + /* update display watermarks based on new power state */
2745 + amdgpu_display_bandwidth_update(adev);
2746 +- /* update displays */
2747 +- amdgpu_dpm_display_configuration_changed(adev);
2748 +
2749 + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
2750 + adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
2751 +@@ -616,6 +614,9 @@ force:
2752 +
2753 + amdgpu_dpm_post_set_power_state(adev);
2754 +
2755 ++ /* update displays */
2756 ++ amdgpu_dpm_display_configuration_changed(adev);
2757 ++
2758 + if (adev->pm.funcs->force_performance_level) {
2759 + if (adev->pm.dpm.thermal_active) {
2760 + enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
2761 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2762 +index 8b88edb..ca72a2e 100644
2763 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2764 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2765 +@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
2766 +
2767 + for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
2768 + if (fences[i])
2769 +- fences[count++] = fences[i];
2770 ++ fences[count++] = fence_get(fences[i]);
2771 +
2772 + if (count) {
2773 + spin_unlock(&sa_manager->wq.lock);
2774 + t = fence_wait_any_timeout(fences, count, false,
2775 + MAX_SCHEDULE_TIMEOUT);
2776 ++ for (i = 0; i < count; ++i)
2777 ++ fence_put(fences[i]);
2778 ++
2779 + r = (t > 0) ? 0 : t;
2780 + spin_lock(&sa_manager->wq.lock);
2781 + } else {
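The amdgpu_sa hunk closes a reference race: each fence to be waited on is pinned with fence_get() before the lock is dropped and released with fence_put() after the wait, so a concurrent free cannot leave the array holding dangling pointers. The pin-wait-unpin shape, with stub helpers standing in for the kernel's fence API:

#include <stdio.h>

struct fence { int refcount; };

static struct fence *fence_get_stub(struct fence *f)
{
    f->refcount++;
    return f;
}

static void fence_put_stub(struct fence *f)
{
    f->refcount--;
}

int main(void)
{
    struct fence f = { .refcount = 1 };
    struct fence *held = fence_get_stub(&f);  /* before unlock+wait */

    /* ... spin_unlock(); fence_wait_any_timeout(); spin_lock(); ... */
    fence_put_stub(held);                     /* after the wait */
    printf("refcount=%d\n", f.refcount);
    return 0;
}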
2782 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2783 +index dd005c3..181ce39 100644
2784 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2785 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2786 +@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
2787 + fence = to_amdgpu_fence(sync->sync_to[i]);
2788 +
2789 + /* check if we really need to sync */
2790 +- if (!amdgpu_fence_need_sync(fence, ring))
2791 ++ if (!amdgpu_enable_scheduler &&
2792 ++ !amdgpu_fence_need_sync(fence, ring))
2793 + continue;
2794 +
2795 + /* prevent GPU deadlocks */
2796 +@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
2797 + }
2798 +
2799 + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
2800 +- r = fence_wait(&fence->base, true);
2801 ++ r = fence_wait(sync->sync_to[i], true);
2802 + if (r)
2803 + return r;
2804 + continue;
2805 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2806 +index 8a1752f..1cbb16e 100644
2807 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2808 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2809 +@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
2810 + 0, PAGE_SIZE,
2811 + PCI_DMA_BIDIRECTIONAL);
2812 + if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
2813 +- while (--i) {
2814 ++ while (i--) {
2815 + pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
2816 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
2817 + gtt->ttm.dma_address[i] = 0;
2818 +@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
2819 + return !!gtt->userptr;
2820 + }
2821 +
2822 ++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2823 ++ unsigned long end)
2824 ++{
2825 ++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
2826 ++ unsigned long size;
2827 ++
2828 ++ if (gtt == NULL)
2829 ++ return false;
2830 ++
2831 ++ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
2832 ++ return false;
2833 ++
2834 ++ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
2835 ++ if (gtt->userptr > end || gtt->userptr + size <= start)
2836 ++ return false;
2837 ++
2838 ++ return true;
2839 ++}
2840 ++
2841 + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
2842 + {
2843 + struct amdgpu_ttm_tt *gtt = (void *)ttm;
2844 +@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2845 + flags |= AMDGPU_PTE_SNOOPED;
2846 + }
2847 +
2848 +- if (adev->asic_type >= CHIP_TOPAZ)
2849 ++ if (adev->asic_type >= CHIP_TONGA)
2850 + flags |= AMDGPU_PTE_EXECUTABLE;
2851 +
2852 + flags |= AMDGPU_PTE_READABLE;
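amdgpu_ttm_tt_affect_userptr() replaces the coarse "any bound userptr" test in the MMU notifier with a real interval-overlap check between the invalidated range and the userptr mapping. The core predicate, extracted:

#include <stdbool.h>
#include <stdio.h>

static bool affects_range(unsigned long userptr, unsigned long size,
                          unsigned long start, unsigned long end)
{
    /* same test as the new helper: disjoint iff the mapping starts
     * past end or finishes at or before start */
    if (userptr > end || userptr + size <= start)
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", affects_range(0x1800, 0x1000, 0x1000, 0x2000));  /* 1 */
    printf("%d\n", affects_range(0x4000, 0x1000, 0x1000, 0x2000));  /* 0 */
    return 0;
}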
2853 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2854 +index b53d273..39adbb6 100644
2855 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2856 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2857 +@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2858 + return -EINVAL;
2859 +
2860 + /* make sure object fit at this offset */
2861 +- eaddr = saddr + size;
2862 ++ eaddr = saddr + size - 1;
2863 + if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
2864 + return -EINVAL;
2865 +
2866 + last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
2867 +- if (last_pfn > adev->vm_manager.max_pfn) {
2868 +- dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
2869 ++ if (last_pfn >= adev->vm_manager.max_pfn) {
2870 ++ dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
2871 + last_pfn, adev->vm_manager.max_pfn);
2872 + return -EINVAL;
2873 + }
2874 +@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2875 + eaddr /= AMDGPU_GPU_PAGE_SIZE;
2876 +
2877 + spin_lock(&vm->it_lock);
2878 +- it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
2879 ++ it = interval_tree_iter_first(&vm->va, saddr, eaddr);
2880 + spin_unlock(&vm->it_lock);
2881 + if (it) {
2882 + struct amdgpu_bo_va_mapping *tmp;
2883 +@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2884 +
2885 + INIT_LIST_HEAD(&mapping->list);
2886 + mapping->it.start = saddr;
2887 +- mapping->it.last = eaddr - 1;
2888 ++ mapping->it.last = eaddr;
2889 + mapping->offset = offset;
2890 + mapping->flags = flags;
2891 +
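The VM mapping hunk switches the interval-tree bookkeeping to inclusive end addresses: a size-byte mapping at saddr last touches saddr + size - 1, which both fixes the off-by-one in the max_pfn limit check and matches interval_tree_iter_first()'s inclusive semantics. The endpoint arithmetic:

#include <stdio.h>

#define GPU_PAGE_SIZE 4096UL

int main(void)
{
    unsigned long saddr = 0x100000, size = 0x1000;
    unsigned long eaddr = saddr + size - 1;  /* inclusive last byte */

    printf("last_pfn=%lu (must be < max_pfn)\n", eaddr / GPU_PAGE_SIZE);
    return 0;
}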
2892 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2893 +index e1dcab9..4cb45f4 100644
2894 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2895 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2896 +@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
2897 + MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
2898 + MODULE_FIRMWARE("amdgpu/topaz_me.bin");
2899 + MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
2900 +-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
2901 + MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
2902 +
2903 + MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
2904 +@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
2905 + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
2906 + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
2907 +
2908 +- if (adev->asic_type != CHIP_STONEY) {
2909 ++ if ((adev->asic_type != CHIP_STONEY) &&
2910 ++ (adev->asic_type != CHIP_TOPAZ)) {
2911 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
2912 + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
2913 + if (!err) {
2914 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2915 +index ed8abb5..272110c 100644
2916 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2917 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2918 +@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
2919 +
2920 + MODULE_FIRMWARE("radeon/bonaire_mc.bin");
2921 + MODULE_FIRMWARE("radeon/hawaii_mc.bin");
2922 ++MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
2923 ++
2924 ++static const u32 golden_settings_iceland_a11[] =
2925 ++{
2926 ++ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2927 ++ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2928 ++ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2929 ++ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
2930 ++};
2931 ++
2932 ++static const u32 iceland_mgcg_cgcg_init[] =
2933 ++{
2934 ++ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2935 ++};
2936 ++
2937 ++static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
2938 ++{
2939 ++ switch (adev->asic_type) {
2940 ++ case CHIP_TOPAZ:
2941 ++ amdgpu_program_register_sequence(adev,
2942 ++ iceland_mgcg_cgcg_init,
2943 ++ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
2944 ++ amdgpu_program_register_sequence(adev,
2945 ++ golden_settings_iceland_a11,
2946 ++ (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
2947 ++ break;
2948 ++ default:
2949 ++ break;
2950 ++ }
2951 ++}
2952 +
2953 + /**
2954 +- * gmc8_mc_wait_for_idle - wait for MC idle callback.
2955 ++ * gmc7_mc_wait_for_idle - wait for MC idle callback.
2956 + *
2957 + * @adev: amdgpu_device pointer
2958 + *
2959 +@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
2960 + case CHIP_HAWAII:
2961 + chip_name = "hawaii";
2962 + break;
2963 ++ case CHIP_TOPAZ:
2964 ++ chip_name = "topaz";
2965 ++ break;
2966 + case CHIP_KAVERI:
2967 + case CHIP_KABINI:
2968 + return 0;
2969 + default: BUG();
2970 + }
2971 +
2972 +- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2973 ++ if (adev->asic_type == CHIP_TOPAZ)
2974 ++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
2975 ++ else
2976 ++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2977 ++
2978 + err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
2979 + if (err)
2980 + goto out;
2981 +@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
2982 + int r;
2983 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2984 +
2985 ++ gmc_v7_0_init_golden_registers(adev);
2986 ++
2987 + gmc_v7_0_mc_program(adev);
2988 +
2989 + if (!(adev->flags & AMD_IS_APU)) {
2990 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2991 +index d390284..ba4ad00 100644
2992 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2993 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2994 +@@ -42,9 +42,7 @@
2995 + static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
2996 + static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
2997 +
2998 +-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
2999 + MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
3000 +-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
3001 +
3002 + static const u32 golden_settings_tonga_a11[] =
3003 + {
3004 +@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
3005 + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
3006 + };
3007 +
3008 +-static const u32 golden_settings_iceland_a11[] =
3009 +-{
3010 +- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
3011 +- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
3012 +- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
3013 +- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
3014 +-};
3015 +-
3016 +-static const u32 iceland_mgcg_cgcg_init[] =
3017 +-{
3018 +- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
3019 +-};
3020 +-
3021 + static const u32 cz_mgcg_cgcg_init[] =
3022 + {
3023 + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
3024 +@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
3025 + static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
3026 + {
3027 + switch (adev->asic_type) {
3028 +- case CHIP_TOPAZ:
3029 +- amdgpu_program_register_sequence(adev,
3030 +- iceland_mgcg_cgcg_init,
3031 +- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
3032 +- amdgpu_program_register_sequence(adev,
3033 +- golden_settings_iceland_a11,
3034 +- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
3035 +- break;
3036 + case CHIP_FIJI:
3037 + amdgpu_program_register_sequence(adev,
3038 + fiji_mgcg_cgcg_init,
3039 +@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
3040 + DRM_DEBUG("\n");
3041 +
3042 + switch (adev->asic_type) {
3043 +- case CHIP_TOPAZ:
3044 +- chip_name = "topaz";
3045 +- break;
3046 + case CHIP_TONGA:
3047 + chip_name = "tonga";
3048 + break;
3049 + case CHIP_FIJI:
3050 +- chip_name = "fiji";
3051 +- break;
3052 + case CHIP_CARRIZO:
3053 + case CHIP_STONEY:
3054 + return 0;
3055 +@@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
3056 +
3057 + gmc_v8_0_mc_program(adev);
3058 +
3059 +- if (!(adev->flags & AMD_IS_APU)) {
3060 ++ if (adev->asic_type == CHIP_TONGA) {
3061 + r = gmc_v8_0_mc_load_microcode(adev);
3062 + if (r) {
3063 + DRM_ERROR("Failed to load MC firmware!\n");
3064 +diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3065 +index 966d4b2..090486c 100644
3066 +--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3067 ++++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3068 +@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
3069 + case AMDGPU_UCODE_ID_CP_ME:
3070 + return UCODE_ID_CP_ME_MASK;
3071 + case AMDGPU_UCODE_ID_CP_MEC1:
3072 +- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
3073 ++ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
3074 + case AMDGPU_UCODE_ID_CP_MEC2:
3075 + return UCODE_ID_CP_MEC_MASK;
3076 + case AMDGPU_UCODE_ID_RLC_G:
3077 +@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
3078 + return -EINVAL;
3079 + }
3080 +
3081 +- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
3082 +- &toc->entry[toc->num_entries++])) {
3083 +- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
3084 +- return -EINVAL;
3085 +- }
3086 +-
3087 + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
3088 + &toc->entry[toc->num_entries++])) {
3089 + DRM_ERROR("Failed to get firmware entry for SDMA0\n");
3090 +@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
3091 + UCODE_ID_CP_ME_MASK |
3092 + UCODE_ID_CP_PFP_MASK |
3093 + UCODE_ID_CP_MEC_MASK |
3094 +- UCODE_ID_CP_MEC_JT1_MASK |
3095 +- UCODE_ID_CP_MEC_JT2_MASK;
3096 ++ UCODE_ID_CP_MEC_JT1_MASK;
3097 ++
3098 +
3099 + if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
3100 + DRM_ERROR("Fail to request SMU load ucode\n");
3101 +diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3102 +index 2049038..63d6cb3 100644
3103 +--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3104 ++++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3105 +@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
3106 +
3107 + static int tonga_dpm_suspend(void *handle)
3108 + {
3109 +- return 0;
3110 ++ return tonga_dpm_hw_fini(handle);
3111 + }
3112 +
3113 + static int tonga_dpm_resume(void *handle)
3114 + {
3115 +- int ret;
3116 +- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3117 +-
3118 +- mutex_lock(&adev->pm.mutex);
3119 +-
3120 +- ret = tonga_smu_start(adev);
3121 +- if (ret) {
3122 +- DRM_ERROR("SMU start failed\n");
3123 +- goto fail;
3124 +- }
3125 +-
3126 +-fail:
3127 +- mutex_unlock(&adev->pm.mutex);
3128 +- return ret;
3129 ++ return tonga_dpm_hw_init(handle);
3130 + }
3131 +
3132 + static int tonga_dpm_set_clockgating_state(void *handle,
3133 +diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
3134 +index 2adc1c8..7628eb4 100644
3135 +--- a/drivers/gpu/drm/amd/amdgpu/vi.c
3136 ++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
3137 +@@ -60,6 +60,7 @@
3138 + #include "vi.h"
3139 + #include "vi_dpm.h"
3140 + #include "gmc_v8_0.h"
3141 ++#include "gmc_v7_0.h"
3142 + #include "gfx_v8_0.h"
3143 + #include "sdma_v2_4.h"
3144 + #include "sdma_v3_0.h"
3145 +@@ -1128,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
3146 + },
3147 + {
3148 + .type = AMD_IP_BLOCK_TYPE_GMC,
3149 +- .major = 8,
3150 +- .minor = 0,
3151 ++ .major = 7,
3152 ++ .minor = 4,
3153 + .rev = 0,
3154 +- .funcs = &gmc_v8_0_ip_funcs,
3155 ++ .funcs = &gmc_v7_0_ip_funcs,
3156 + },
3157 + {
3158 + .type = AMD_IP_BLOCK_TYPE_IH,
3159 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
3160 +index 809959d..39d7e2e 100644
3161 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
3162 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
3163 +@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
3164 + return mstb;
3165 + }
3166 +
3167 ++static void drm_dp_free_mst_port(struct kref *kref);
3168 ++
3169 ++static void drm_dp_free_mst_branch_device(struct kref *kref)
3170 ++{
3171 ++ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
3172 ++ if (mstb->port_parent) {
3173 ++ if (list_empty(&mstb->port_parent->next))
3174 ++ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
3175 ++ }
3176 ++ kfree(mstb);
3177 ++}
3178 ++
3179 + static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3180 + {
3181 + struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
3182 +@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3183 + bool wake_tx = false;
3184 +
3185 + /*
3186 ++ * init kref again to be used by ports to remove mst branch when it is
3187 ++ * not needed anymore
3188 ++ */
3189 ++ kref_init(kref);
3190 ++
3191 ++ if (mstb->port_parent && list_empty(&mstb->port_parent->next))
3192 ++ kref_get(&mstb->port_parent->kref);
3193 ++
3194 ++ /*
3195 + * destroy all ports - don't need lock
3196 + * as there are no more references to the mst branch
3197 + * device at this point.
3198 +@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3199 +
3200 + if (wake_tx)
3201 + wake_up(&mstb->mgr->tx_waitq);
3202 +- kfree(mstb);
3203 ++
3204 ++ kref_put(kref, drm_dp_free_mst_branch_device);
3205 + }
3206 +
3207 + static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
3208 +@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
3209 + * from an EDID retrieval */
3210 +
3211 + mutex_lock(&mgr->destroy_connector_lock);
3212 ++ kref_get(&port->parent->kref);
3213 + list_add(&port->next, &mgr->destroy_connector_list);
3214 + mutex_unlock(&mgr->destroy_connector_lock);
3215 + schedule_work(&mgr->destroy_connector_work);
3216 +@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
3217 + static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
3218 + u8 *rad)
3219 + {
3220 +- int lct = port->parent->lct;
3221 ++ int parent_lct = port->parent->lct;
3222 + int shift = 4;
3223 +- int idx = lct / 2;
3224 +- if (lct > 1) {
3225 +- memcpy(rad, port->parent->rad, idx);
3226 +- shift = (lct % 2) ? 4 : 0;
3227 ++ int idx = (parent_lct - 1) / 2;
3228 ++ if (parent_lct > 1) {
3229 ++ memcpy(rad, port->parent->rad, idx + 1);
3230 ++ shift = (parent_lct % 2) ? 4 : 0;
3231 + } else
3232 + rad[0] = 0;
3233 +
3234 + rad[idx] |= port->port_num << shift;
3235 +- return lct + 1;
3236 ++ return parent_lct + 1;
3237 + }
3238 +
3239 + /*
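To make the new RAD packing concrete, here is a small worked example, derived only from the code above and from the readers below that decode with "(rad[i / 2] >> shift) & 0xf" (my own illustration, not part of the patch):

    /* Worked example (illustration only):
     * parent branch: lct = 2, rad[0] = 0x20 (reached via root port 2)
     * drm_dp_calculate_rad() for port_num = 3:
     *   idx   = (2 - 1) / 2 = 0
     *   shift = (2 % 2) ? 4 : 0 = 0
     *   rad[0] |= 3 << 0;      rad[0] = 0x23, returned lct = 3
     * The readers walk the nibbles high-to-low:
     *   i = 0: (0x23 >> 4) & 0xf = 2  (port on the root branch)
     *   i = 1: (0x23 >> 0) & 0xf = 3  (port on the lct-2 branch)
     * Without the new "& 0xf" mask, the i = 1 read would return
     * 0x23 and the port lookup would fail. */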
3240 +@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
3241 + return send_link;
3242 + }
3243 +
3244 +-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
3245 +- struct drm_dp_mst_port *port)
3246 ++static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
3247 + {
3248 + int ret;
3249 +- if (port->dpcd_rev >= 0x12) {
3250 +- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
3251 +- if (!port->guid_valid) {
3252 +- ret = drm_dp_send_dpcd_write(mstb->mgr,
3253 +- port,
3254 +- DP_GUID,
3255 +- 16, port->guid);
3256 +- port->guid_valid = true;
3257 ++
3258 ++ memcpy(mstb->guid, guid, 16);
3259 ++
3260 ++ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
3261 ++ if (mstb->port_parent) {
3262 ++ ret = drm_dp_send_dpcd_write(
3263 ++ mstb->mgr,
3264 ++ mstb->port_parent,
3265 ++ DP_GUID,
3266 ++ 16,
3267 ++ mstb->guid);
3268 ++ } else {
3269 ++
3270 ++ ret = drm_dp_dpcd_write(
3271 ++ mstb->mgr->aux,
3272 ++ DP_GUID,
3273 ++ mstb->guid,
3274 ++ 16);
3275 + }
3276 + }
3277 + }
3278 +@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
3279 + snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
3280 + for (i = 0; i < (mstb->lct - 1); i++) {
3281 + int shift = (i % 2) ? 0 : 4;
3282 +- int port_num = mstb->rad[i / 2] >> shift;
3283 ++ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
3284 + snprintf(temp, sizeof(temp), "-%d", port_num);
3285 + strlcat(proppath, temp, proppath_size);
3286 + }
3287 +@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
3288 + port->dpcd_rev = port_msg->dpcd_revision;
3289 + port->num_sdp_streams = port_msg->num_sdp_streams;
3290 + port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
3291 +- memcpy(port->guid, port_msg->peer_guid, 16);
3292 +
3293 + /* manage mstb port lists with mgr lock - take a reference
3294 + for this list */
3295 +@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
3296 +
3297 + if (old_ddps != port->ddps) {
3298 + if (port->ddps) {
3299 +- drm_dp_check_port_guid(mstb, port);
3300 + if (!port->input)
3301 + drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
3302 + } else {
3303 +- port->guid_valid = false;
3304 + port->available_pbn = 0;
3305 + }
3306 + }
3307 +@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
3308 +
3309 + if (old_ddps != port->ddps) {
3310 + if (port->ddps) {
3311 +- drm_dp_check_port_guid(mstb, port);
3312 + dowork = true;
3313 + } else {
3314 +- port->guid_valid = false;
3315 + port->available_pbn = 0;
3316 + }
3317 + }
3318 +@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
3319 +
3320 + for (i = 0; i < lct - 1; i++) {
3321 + int shift = (i % 2) ? 0 : 4;
3322 +- int port_num = rad[i / 2] >> shift;
3323 ++ int port_num = (rad[i / 2] >> shift) & 0xf;
3324 +
3325 + list_for_each_entry(port, &mstb->ports, next) {
3326 + if (port->port_num == port_num) {
3327 +@@ -1210,6 +1237,48 @@ out:
3328 + return mstb;
3329 + }
3330 +
3331 ++static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
3332 ++ struct drm_dp_mst_branch *mstb,
3333 ++ uint8_t *guid)
3334 ++{
3335 ++ struct drm_dp_mst_branch *found_mstb;
3336 ++ struct drm_dp_mst_port *port;
3337 ++
3338 ++ if (memcmp(mstb->guid, guid, 16) == 0)
3339 ++ return mstb;
3340 ++
3341 ++
3342 ++ list_for_each_entry(port, &mstb->ports, next) {
3343 ++ if (!port->mstb)
3344 ++ continue;
3345 ++
3346 ++ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
3347 ++
3348 ++ if (found_mstb)
3349 ++ return found_mstb;
3350 ++ }
3351 ++
3352 ++ return NULL;
3353 ++}
3354 ++
3355 ++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
3356 ++ struct drm_dp_mst_topology_mgr *mgr,
3357 ++ uint8_t *guid)
3358 ++{
3359 ++ struct drm_dp_mst_branch *mstb;
3360 ++
3361 ++ /* find the port by iterating down */
3362 ++ mutex_lock(&mgr->lock);
3363 ++
3364 ++ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
3365 ++
3366 ++ if (mstb)
3367 ++ kref_get(&mstb->kref);
3368 ++
3369 ++ mutex_unlock(&mgr->lock);
3370 ++ return mstb;
3371 ++}
3372 ++
3373 + static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
3374 + struct drm_dp_mst_branch *mstb)
3375 + {
3376 +@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
3377 + struct drm_dp_sideband_msg_tx *txmsg)
3378 + {
3379 + struct drm_dp_mst_branch *mstb = txmsg->dst;
3380 ++ u8 req_type;
3381 +
3382 + /* both msg slots are full */
3383 + if (txmsg->seqno == -1) {
3384 +@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
3385 + txmsg->seqno = 1;
3386 + mstb->tx_slots[txmsg->seqno] = txmsg;
3387 + }
3388 +- hdr->broadcast = 0;
3389 ++
3390 ++ req_type = txmsg->msg[0] & 0x7f;
3391 ++ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
3392 ++ req_type == DP_RESOURCE_STATUS_NOTIFY)
3393 ++ hdr->broadcast = 1;
3394 ++ else
3395 ++ hdr->broadcast = 0;
3396 + hdr->path_msg = txmsg->path_msg;
3397 + hdr->lct = mstb->lct;
3398 + hdr->lcr = mstb->lct - 1;
3399 +@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
3400 + }
3401 +
3402 + /* called holding qlock */
3403 +-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
3404 ++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
3405 ++ struct drm_dp_sideband_msg_tx *txmsg)
3406 + {
3407 +- struct drm_dp_sideband_msg_tx *txmsg;
3408 + int ret;
3409 +
3410 + /* construct a chunk from the first msg in the tx_msg queue */
3411 +- if (list_empty(&mgr->tx_msg_upq)) {
3412 +- mgr->tx_up_in_progress = false;
3413 +- return;
3414 +- }
3415 +-
3416 +- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
3417 + ret = process_single_tx_qlock(mgr, txmsg, true);
3418 +- if (ret == 1) {
3419 +- /* up txmsgs aren't put in slots - so free after we send it */
3420 +- list_del(&txmsg->next);
3421 +- kfree(txmsg);
3422 +- } else if (ret)
3423 ++
3424 ++ if (ret != 1)
3425 + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
3426 +- mgr->tx_up_in_progress = true;
3427 ++
3428 ++ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
3429 + }
3430 +
3431 + static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
3432 +@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
3433 + txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
3434 + txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
3435 + }
3436 ++
3437 ++ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
3438 ++
3439 + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
3440 + drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
3441 + }
3442 +@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3443 + return 0;
3444 + }
3445 +
3446 ++static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3447 ++{
3448 ++ if (!mstb->port_parent)
3449 ++ return NULL;
3450 ++
3451 ++ if (mstb->port_parent->mstb != mstb)
3452 ++ return mstb->port_parent;
3453 ++
3454 ++ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3455 ++}
3456 ++
3457 ++static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3458 ++ struct drm_dp_mst_branch *mstb,
3459 ++ int *port_num)
3460 ++{
3461 ++ struct drm_dp_mst_branch *rmstb = NULL;
3462 ++ struct drm_dp_mst_port *found_port;
3463 ++ mutex_lock(&mgr->lock);
3464 ++ if (mgr->mst_primary) {
3465 ++ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3466 ++
3467 ++ if (found_port) {
3468 ++ rmstb = found_port->parent;
3469 ++ kref_get(&rmstb->kref);
3470 ++ *port_num = found_port->port_num;
3471 ++ }
3472 ++ }
3473 ++ mutex_unlock(&mgr->lock);
3474 ++ return rmstb;
3475 ++}
3476 ++
3477 + static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3478 + struct drm_dp_mst_port *port,
3479 + int id,
3480 +@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3481 + {
3482 + struct drm_dp_sideband_msg_tx *txmsg;
3483 + struct drm_dp_mst_branch *mstb;
3484 +- int len, ret;
3485 ++ int len, ret, port_num;
3486 +
3487 ++ port_num = port->port_num;
3488 + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3489 +- if (!mstb)
3490 +- return -EINVAL;
3491 ++ if (!mstb) {
3492 ++ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
3493 ++
3494 ++ if (!mstb)
3495 ++ return -EINVAL;
3496 ++ }
3497 +
3498 + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3499 + if (!txmsg) {
3500 +@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3501 + }
3502 +
3503 + txmsg->dst = mstb;
3504 +- len = build_allocate_payload(txmsg, port->port_num,
3505 ++ len = build_allocate_payload(txmsg, port_num,
3506 + id,
3507 + pbn);
3508 +
3509 +@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3510 + drm_dp_encode_up_ack_reply(txmsg, req_type);
3511 +
3512 + mutex_lock(&mgr->qlock);
3513 +- list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
3514 +- if (!mgr->tx_up_in_progress) {
3515 +- process_single_up_tx_qlock(mgr);
3516 +- }
3517 ++
3518 ++ process_single_up_tx_qlock(mgr, txmsg);
3519 ++
3520 + mutex_unlock(&mgr->qlock);
3521 ++
3522 ++ kfree(txmsg);
3523 + return 0;
3524 + }
3525 +
3526 +@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
3527 + mgr->mst_primary = mstb;
3528 + kref_get(&mgr->mst_primary->kref);
3529 +
3530 +- {
3531 +- struct drm_dp_payload reset_pay;
3532 +- reset_pay.start_slot = 0;
3533 +- reset_pay.num_slots = 0x3f;
3534 +- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3535 +- }
3536 +-
3537 + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3538 +- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3539 ++ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3540 + if (ret < 0) {
3541 + goto out_unlock;
3542 + }
3543 +
3544 +-
3545 +- /* sort out guid */
3546 +- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
3547 +- if (ret != 16) {
3548 +- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
3549 +- goto out_unlock;
3550 +- }
3551 +-
3552 +- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
3553 +- if (!mgr->guid_valid) {
3554 +- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
3555 +- mgr->guid_valid = true;
3556 ++ {
3557 ++ struct drm_dp_payload reset_pay;
3558 ++ reset_pay.start_slot = 0;
3559 ++ reset_pay.num_slots = 0x3f;
3560 ++ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3561 + }
3562 +
3563 + queue_work(system_long_wq, &mgr->work);
3564 +@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3565 +
3566 + if (mgr->up_req_recv.have_eomt) {
3567 + struct drm_dp_sideband_msg_req_body msg;
3568 +- struct drm_dp_mst_branch *mstb;
3569 ++ struct drm_dp_mst_branch *mstb = NULL;
3570 + bool seqno;
3571 +- mstb = drm_dp_get_mst_branch_device(mgr,
3572 +- mgr->up_req_recv.initial_hdr.lct,
3573 +- mgr->up_req_recv.initial_hdr.rad);
3574 +- if (!mstb) {
3575 +- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3576 +- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3577 +- return 0;
3578 ++
3579 ++ if (!mgr->up_req_recv.initial_hdr.broadcast) {
3580 ++ mstb = drm_dp_get_mst_branch_device(mgr,
3581 ++ mgr->up_req_recv.initial_hdr.lct,
3582 ++ mgr->up_req_recv.initial_hdr.rad);
3583 ++ if (!mstb) {
3584 ++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3585 ++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3586 ++ return 0;
3587 ++ }
3588 + }
3589 +
3590 + seqno = mgr->up_req_recv.initial_hdr.seqno;
3591 + drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
3592 +
3593 + if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3594 +- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
3595 ++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3596 ++
3597 ++ if (!mstb)
3598 ++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
3599 ++
3600 ++ if (!mstb) {
3601 ++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3602 ++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3603 ++ return 0;
3604 ++ }
3605 ++
3606 + drm_dp_update_port(mstb, &msg.u.conn_stat);
3607 ++
3608 + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
3609 + (*mgr->cbs->hotplug)(mgr);
3610 +
3611 + } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3612 +- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
3613 ++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3614 ++ if (!mstb)
3615 ++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
3616 ++
3617 ++ if (!mstb) {
3618 ++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3619 ++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3620 ++ return 0;
3621 ++ }
3622 ++
3623 + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
3624 + }
3625 +
3626 +@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
3627 + DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
3628 + if (pbn == port->vcpi.pbn) {
3629 + *slots = port->vcpi.num_slots;
3630 ++ drm_dp_put_port(port);
3631 + return true;
3632 + }
3633 + }
3634 +@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
3635 + */
3636 + int drm_dp_calc_pbn_mode(int clock, int bpp)
3637 + {
3638 +- fixed20_12 pix_bw;
3639 +- fixed20_12 fbpp;
3640 +- fixed20_12 result;
3641 +- fixed20_12 margin, tmp;
3642 +- u32 res;
3643 +-
3644 +- pix_bw.full = dfixed_const(clock);
3645 +- fbpp.full = dfixed_const(bpp);
3646 +- tmp.full = dfixed_const(8);
3647 +- fbpp.full = dfixed_div(fbpp, tmp);
3648 +-
3649 +- result.full = dfixed_mul(pix_bw, fbpp);
3650 +- margin.full = dfixed_const(54);
3651 +- tmp.full = dfixed_const(64);
3652 +- margin.full = dfixed_div(margin, tmp);
3653 +- result.full = dfixed_div(result, margin);
3654 +-
3655 +- margin.full = dfixed_const(1006);
3656 +- tmp.full = dfixed_const(1000);
3657 +- margin.full = dfixed_div(margin, tmp);
3658 +- result.full = dfixed_mul(result, margin);
3659 +-
3660 +- result.full = dfixed_div(result, tmp);
3661 +- result.full = dfixed_ceil(result);
3662 +- res = dfixed_trunc(result);
3663 +- return res;
3664 ++ u64 kbps;
3665 ++ s64 peak_kbps;
3666 ++ u32 numerator;
3667 ++ u32 denominator;
3668 ++
3669 ++ kbps = clock * bpp;
3670 ++
3671 ++ /*
3672 ++ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3673 ++ * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
3674 ++ * common multiplier so that the PBN comes out as an integer for
3675 ++ * all link-rate/lane-count combinations.
3676 ++ * The calculation is:
3677 ++ * peak_kbps *= (1006/1000)
3678 ++ * peak_kbps *= (64/54)
3679 ++ * peak_kbps *= 8 to convert to bytes
3680 ++ */
3681 ++
3682 ++ numerator = 64 * 1006;
3683 ++ denominator = 54 * 8 * 1000 * 1000;
3684 ++
3685 ++ kbps *= numerator;
3686 ++ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3687 ++
3688 ++ return drm_fixp2int_ceil(peak_kbps);
3689 + }
3690 + EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
3691 +
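The new fixed-point path can be sanity-checked by hand against the first expected value in the test hunk just below (my arithmetic, not from the patch):

    /* drm_dp_calc_pbn_mode(154000, 30):
     *   kbps        = 154000 * 30          = 4,620,000
     *   numerator   = 64 * 1006            = 64,384
     *   denominator = 54 * 8 * 1000 * 1000 = 432,000,000
     *   4,620,000 * 64,384 / 432,000,000   = 688.55...
     *   drm_fixp2int_ceil()                -> 689 PBN
     * matching the value test_calc_pbn_mode() expects. */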
3692 +@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
3693 + {
3694 + int ret;
3695 + ret = drm_dp_calc_pbn_mode(154000, 30);
3696 +- if (ret != 689)
3697 ++ if (ret != 689) {
3698 ++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3699 ++ 154000, 30, 689, ret);
3700 + return -EINVAL;
3701 ++ }
3702 + ret = drm_dp_calc_pbn_mode(234000, 30);
3703 +- if (ret != 1047)
3704 ++ if (ret != 1047) {
3705 ++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3706 ++ 234000, 30, 1047, ret);
3707 ++ return -EINVAL;
3708 ++ }
3709 ++ ret = drm_dp_calc_pbn_mode(297000, 24);
3710 ++ if (ret != 1063) {
3711 ++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3712 ++ 297000, 24, 1063, ret);
3713 + return -EINVAL;
3714 ++ }
3715 + return 0;
3716 + }
3717 +
3718 +@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
3719 + mutex_unlock(&mgr->qlock);
3720 + }
3721 +
3722 ++static void drm_dp_free_mst_port(struct kref *kref)
3723 ++{
3724 ++ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
3725 ++ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
3726 ++ kfree(port);
3727 ++}
3728 ++
3729 + static void drm_dp_destroy_connector_work(struct work_struct *work)
3730 + {
3731 + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3732 +@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3733 + list_del(&port->next);
3734 + mutex_unlock(&mgr->destroy_connector_lock);
3735 +
3736 ++ kref_init(&port->kref);
3737 ++ INIT_LIST_HEAD(&port->next);
3738 ++
3739 + mgr->cbs->destroy_connector(mgr, port->connector);
3740 +
3741 + drm_dp_port_teardown_pdt(port, port->pdt);
3742 +
3743 +- if (!port->input && port->vcpi.vcpi > 0)
3744 +- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3745 +- kfree(port);
3746 ++ if (!port->input && port->vcpi.vcpi > 0) {
3747 ++ if (mgr->mst_state) {
3748 ++ drm_dp_mst_reset_vcpi_slots(mgr, port);
3749 ++ drm_dp_update_payload_part1(mgr);
3750 ++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3751 ++ }
3752 ++ }
3753 ++
3754 ++ kref_put(&port->kref, drm_dp_free_mst_port);
3755 + send_hotplug = true;
3756 + }
3757 + if (send_hotplug)
3758 +@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3759 + mutex_init(&mgr->qlock);
3760 + mutex_init(&mgr->payload_lock);
3761 + mutex_init(&mgr->destroy_connector_lock);
3762 +- INIT_LIST_HEAD(&mgr->tx_msg_upq);
3763 + INIT_LIST_HEAD(&mgr->tx_msg_downq);
3764 + INIT_LIST_HEAD(&mgr->destroy_connector_list);
3765 + INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
3766 +diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
3767 +index 607f493..8090989 100644
3768 +--- a/drivers/gpu/drm/drm_irq.c
3769 ++++ b/drivers/gpu/drm/drm_irq.c
3770 +@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
3771 + diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
3772 + }
3773 +
3774 ++ /*
3775 ++ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
3776 ++ * interval? If so then vblank irqs keep running and it will likely
3777 ++ * happen that the hardware vblank counter is not trustworthy as it
3778 ++ * might reset at some point in that interval and vblank timestamps
3779 ++ * are not trustworthy either in that interval. IOW, this can result
3780 ++ * in a bogus diff >> 1 which must be avoided as it would cause
3781 ++ * random large forward jumps of the software vblank counter.
3782 ++ */
3783 ++ if (diff > 1 && (vblank->inmodeset & 0x2)) {
3784 ++ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
3785 ++ " due to pre-modeset.\n", pipe, diff);
3786 ++ diff = 1;
3787 ++ }
3788 ++
3789 ++ /*
3790 ++ * FIXME: Need to replace this hack with proper seqlocks.
3791 ++ *
3792 ++ * Restrict the bump of the software vblank counter to a safe maximum
3793 ++ * value of +1 whenever there is the possibility that concurrent readers
3794 ++ * of vblank timestamps could be active at the moment, as the current
3795 ++ * implementation of the timestamp caching and updating is not safe
3796 ++ * against concurrent readers for calls to store_vblank() with a bump
3797 ++ * of anything but +1. A bump != 1 would very likely return corrupted
3798 ++ * timestamps to userspace, because the same slot in the cache could
3799 ++ * be concurrently written by store_vblank() and read by one of those
3800 ++ * readers without the read-retry logic detecting the collision.
3801 ++ *
3802 ++ * Concurrent readers can exist when we are called from the
3803 ++ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
3804 ++ * irq callers. However, all those calls to us are happening with the
3805 ++ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
3806 ++ * can't increase while we are executing. Therefore a zero refcount at
3807 ++ * this point is safe for arbitrary counter bumps if we are called
3808 ++ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
3809 ++ * we must also accept a refcount of 1, as whenever we are called from
3810 ++ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
3811 ++ * we must let that one pass through in order to not lose vblank counts
3812 ++ * during vblank irq off - which would completely defeat the whole
3813 ++ * point of this routine.
3814 ++ *
3815 ++ * Whenever we are called from vblank irq, we have to assume concurrent
3816 ++ * readers exist or can show up any time during our execution, even if
3817 ++ * the refcount is currently zero, as vblank irqs are usually only
3818 ++ * enabled due to the presence of readers, and because when we are called
3819 ++ * from vblank irq we can't hold the vbl_lock to protect us from sudden
3820 ++ * bumps in vblank refcount. Therefore also restrict bumps to +1 when
3821 ++ * called from vblank irq.
3822 ++ */
3823 ++ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
3824 ++ (flags & DRM_CALLED_FROM_VBLIRQ))) {
3825 ++ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
3826 ++ "refcount %u, vblirq %u\n", pipe, diff,
3827 ++ atomic_read(&vblank->refcount),
3828 ++ (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
3829 ++ diff = 1;
3830 ++ }
3831 ++
3832 + DRM_DEBUG_VBL("updating vblank count on crtc %u:"
3833 + " current=%u, diff=%u, hw=%u hw_last=%u\n",
3834 + pipe, vblank->count, diff, cur_vblank, vblank->last);
3835 +@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
3836 + spin_lock_irqsave(&dev->event_lock, irqflags);
3837 +
3838 + spin_lock(&dev->vbl_lock);
3839 +- vblank_disable_and_save(dev, pipe);
3840 ++ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
3841 ++ pipe, vblank->enabled, vblank->inmodeset);
3842 ++
3843 ++ /* Avoid redundant vblank disables without previous drm_vblank_on(). */
3844 ++ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
3845 ++ vblank_disable_and_save(dev, pipe);
3846 ++
3847 + wake_up(&vblank->queue);
3848 +
3849 + /*
3850 +@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
3851 + return;
3852 +
3853 + spin_lock_irqsave(&dev->vbl_lock, irqflags);
3854 ++ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
3855 ++ pipe, vblank->enabled, vblank->inmodeset);
3856 ++
3857 + /* Drop our private "prevent drm_vblank_get" refcount */
3858 + if (vblank->inmodeset) {
3859 + atomic_dec(&vblank->refcount);
3860 +@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
3861 + * re-enable interrupts if there are users left, or the
3862 + * user wishes vblank interrupts to be enabled all the time.
3863 + */
3864 +- if (atomic_read(&vblank->refcount) != 0 ||
3865 +- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
3866 ++ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
3867 + WARN_ON(drm_vblank_enable(dev, pipe));
3868 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
3869 + }
3870 +@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
3871 + if (vblank->inmodeset) {
3872 + spin_lock_irqsave(&dev->vbl_lock, irqflags);
3873 + dev->vblank_disable_allowed = true;
3874 ++ drm_reset_vblank_timestamp(dev, pipe);
3875 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
3876 +
3877 + if (vblank->inmodeset & 0x2)
3878 +diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
3879 +index c707fa6..e3bdc8b 100644
3880 +--- a/drivers/gpu/drm/gma500/gem.c
3881 ++++ b/drivers/gpu/drm/gma500/gem.c
3882 +@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
3883 + return ret;
3884 + }
3885 + /* We have the initial and handle reference but need only one now */
3886 +- drm_gem_object_unreference(&r->gem);
3887 ++ drm_gem_object_unreference_unlocked(&r->gem);
3888 + *handlep = handle;
3889 + return 0;
3890 + }
3891 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
3892 +index b4741d1..61fcb3b 100644
3893 +--- a/drivers/gpu/drm/i915/i915_dma.c
3894 ++++ b/drivers/gpu/drm/i915/i915_dma.c
3895 +@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
3896 + if (ret)
3897 + goto cleanup_gem_stolen;
3898 +
3899 ++ intel_setup_gmbus(dev);
3900 ++
3901 + /* Important: The output setup functions called by modeset_init need
3902 + * working irqs for e.g. gmbus and dp aux transfers. */
3903 + intel_modeset_init(dev);
3904 +@@ -451,6 +453,7 @@ cleanup_gem:
3905 + cleanup_irq:
3906 + intel_guc_ucode_fini(dev);
3907 + drm_irq_uninstall(dev);
3908 ++ intel_teardown_gmbus(dev);
3909 + cleanup_gem_stolen:
3910 + i915_gem_cleanup_stolen(dev);
3911 + cleanup_vga_switcheroo:
3912 +@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
3913 +
3914 + /* Try to make sure MCHBAR is enabled before poking at it */
3915 + intel_setup_mchbar(dev);
3916 +- intel_setup_gmbus(dev);
3917 + intel_opregion_setup(dev);
3918 +
3919 + i915_gem_load(dev);
3920 +@@ -1099,7 +1101,6 @@ out_gem_unload:
3921 + if (dev->pdev->msi_enabled)
3922 + pci_disable_msi(dev->pdev);
3923 +
3924 +- intel_teardown_gmbus(dev);
3925 + intel_teardown_mchbar(dev);
3926 + pm_qos_remove_request(&dev_priv->pm_qos);
3927 + destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
3928 +@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
3929 +
3930 + intel_csr_ucode_fini(dev);
3931 +
3932 +- intel_teardown_gmbus(dev);
3933 + intel_teardown_mchbar(dev);
3934 +
3935 + destroy_workqueue(dev_priv->hotplug.dp_wq);
3936 +diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
3937 +index 02ceb7a..0433d25 100644
3938 +--- a/drivers/gpu/drm/i915/i915_gem_context.c
3939 ++++ b/drivers/gpu/drm/i915/i915_gem_context.c
3940 +@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
3941 + i915_gem_context_unreference(lctx);
3942 + ring->last_context = NULL;
3943 + }
3944 ++
3945 ++ /* Force the GPU state to be reinitialised on enabling */
3946 ++ if (ring->default_context)
3947 ++ ring->default_context->legacy_hw_ctx.initialized = false;
3948 + }
3949 + }
3950 +
3951 +@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
3952 + if (ret)
3953 + goto unpin_out;
3954 +
3955 +- if (!to->legacy_hw_ctx.initialized) {
3956 ++ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
3957 + hw_flags |= MI_RESTORE_INHIBIT;
3958 + /* NB: If we inhibit the restore, the context is not allowed to
3959 + * die because future work may end up depending on valid address
3960 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
3961 +index 0d228f9..0f42a27 100644
3962 +--- a/drivers/gpu/drm/i915/i915_irq.c
3963 ++++ b/drivers/gpu/drm/i915/i915_irq.c
3964 +@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
3965 + spt_irq_handler(dev, pch_iir);
3966 + else
3967 + cpt_irq_handler(dev, pch_iir);
3968 +- } else
3969 +- DRM_ERROR("The master control interrupt lied (SDE)!\n");
3970 +-
3971 ++ } else {
3972 ++ /*
3973 ++ * Like on previous PCH there seems to be something
3974 ++ * fishy going on with forwarding PCH interrupts.
3975 ++ */
3976 ++ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
3977 ++ }
3978 + }
3979 +
3980 + I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3981 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
3982 +index a6752a6..7e6158b 100644
3983 +--- a/drivers/gpu/drm/i915/intel_ddi.c
3984 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
3985 +@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
3986 + DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
3987 + DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
3988 + wrpll_params.central_freq;
3989 +- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
3990 ++ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3991 ++ intel_encoder->type == INTEL_OUTPUT_DP_MST) {
3992 + switch (crtc_state->port_clock / 2) {
3993 + case 81000:
3994 + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
3995 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3996 +index 32cf973..f859a5b 100644
3997 +--- a/drivers/gpu/drm/i915/intel_display.c
3998 ++++ b/drivers/gpu/drm/i915/intel_display.c
3999 +@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
4000 + pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
4001 + }
4002 +
4003 +- /* Clamp bpp to 8 on screens without EDID 1.4 */
4004 +- if (connector->base.display_info.bpc == 0 && bpp > 24) {
4005 +- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
4006 +- bpp);
4007 +- pipe_config->pipe_bpp = 24;
4008 ++ /* Clamp bpp to default limit on screens without EDID 1.4 */
4009 ++ if (connector->base.display_info.bpc == 0) {
4010 ++ int type = connector->base.connector_type;
4011 ++ int clamp_bpp = 24;
4012 ++
4013 ++ /* Fall back to 18 bpp when DP sink capability is unknown. */
4014 ++ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
4015 ++ type == DRM_MODE_CONNECTOR_eDP)
4016 ++ clamp_bpp = 18;
4017 ++
4018 ++ if (bpp > clamp_bpp) {
4019 ++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
4020 ++ bpp, clamp_bpp);
4021 ++ pipe_config->pipe_bpp = clamp_bpp;
4022 ++ }
4023 + }
4024 + }
4025 +
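The 18 bpp figure follows from the bpc arithmetic earlier in this function; a short note on the units (my reading of the code, not an authoritative statement of the DP requirement):

    /* pipe_bpp = bpc * 3, so clamp_bpp = 18 corresponds to 6 bpc,
     * presumably the lowest depth a DP/eDP sink can be assumed to
     * accept when its EDID reports no bit depth; other connector
     * types keep the old 24 bpp (8 bpc) default. */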
4026 +@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
4027 + int max_scale = DRM_PLANE_HELPER_NO_SCALING;
4028 + bool can_position = false;
4029 +
4030 +- /* use scaler when colorkey is not required */
4031 +- if (INTEL_INFO(plane->dev)->gen >= 9 &&
4032 +- state->ckey.flags == I915_SET_COLORKEY_NONE) {
4033 +- min_scale = 1;
4034 +- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
4035 ++ if (INTEL_INFO(plane->dev)->gen >= 9) {
4036 ++ /* use scaler when colorkey is not required */
4037 ++ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
4038 ++ min_scale = 1;
4039 ++ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
4040 ++ }
4041 + can_position = true;
4042 + }
4043 +
4044 +@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
4045 + mutex_lock(&dev->struct_mutex);
4046 + intel_cleanup_gt_powersave(dev);
4047 + mutex_unlock(&dev->struct_mutex);
4048 ++
4049 ++ intel_teardown_gmbus(dev);
4050 + }
4051 +
4052 + /*
4053 +diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4054 +index a5e99ac..a8912ae 100644
4055 +--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4056 ++++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4057 +@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
4058 + gpio = *data++;
4059 +
4060 + /* pull up/down */
4061 +- action = *data++;
4062 ++ action = *data++ & 1;
4063 ++
4064 ++ if (gpio >= ARRAY_SIZE(gtable)) {
4065 ++ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
4066 ++ goto out;
4067 ++ }
4068 +
4069 + function = gtable[gpio].function_reg;
4070 + pad = gtable[gpio].pad_reg;
4071 +@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
4072 + vlv_gpio_nc_write(dev_priv, pad, val);
4073 + mutex_unlock(&dev_priv->sb_lock);
4074 +
4075 ++out:
4076 + return data;
4077 + }
4078 +
4079 +diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
4080 +index b177857..d7a6437 100644
4081 +--- a/drivers/gpu/drm/i915/intel_hotplug.c
4082 ++++ b/drivers/gpu/drm/i915/intel_hotplug.c
4083 +@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
4084 + list_for_each_entry(connector, &mode_config->connector_list, head) {
4085 + struct intel_connector *intel_connector = to_intel_connector(connector);
4086 + connector->polled = intel_connector->polled;
4087 +- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4088 +- connector->polled = DRM_CONNECTOR_POLL_HPD;
4089 ++
4090 ++ /* MST has a dynamic intel_connector->encoder and its reprobing
4091 ++ * is handled entirely by the MST helpers. */
4092 + if (intel_connector->mst_port)
4093 ++ continue;
4094 ++
4095 ++ if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
4096 ++ intel_connector->encoder->hpd_pin > HPD_NONE)
4097 + connector->polled = DRM_CONNECTOR_POLL_HPD;
4098 + }
4099 +
4100 +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
4101 +index 8324654..f3bee54 100644
4102 +--- a/drivers/gpu/drm/i915/intel_i2c.c
4103 ++++ b/drivers/gpu/drm/i915/intel_i2c.c
4104 +@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
4105 + return 0;
4106 +
4107 + err:
4108 +- while (--pin) {
4109 ++ while (pin--) {
4110 + if (!intel_gmbus_is_valid_pin(dev_priv, pin))
4111 + continue;
4112 +
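The one-character loop fix above is easy to misread; a minimal standalone sketch of the two unwind idioms (illustration only, not i915 code):

    /* Suppose setup fails at pin == 3, after pins 0..2 were set up.
     *   while (--pin) { ... }   visits pins 2, 1   - pin 0 is leaked
     *   while (pin--) { ... }   visits pins 2, 1, 0 - full unwind
     * The post-decrement form is also safe when the very first pin
     * (pin == 0) fails, since the body then never runs. */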
4113 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
4114 +index 88e12bd..d69547a 100644
4115 +--- a/drivers/gpu/drm/i915/intel_lrc.c
4116 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
4117 +@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
4118 + if (flush_domains) {
4119 + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4120 + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4121 ++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4122 + flags |= PIPE_CONTROL_FLUSH_ENABLE;
4123 + }
4124 +
4125 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
4126 +index 9461a23..f6b2a81 100644
4127 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
4128 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
4129 +@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
4130 + if (flush_domains) {
4131 + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4132 + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4133 ++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4134 + flags |= PIPE_CONTROL_FLUSH_ENABLE;
4135 + }
4136 + if (invalidate_domains) {
4137 +@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
4138 + if (flush_domains) {
4139 + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4140 + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4141 ++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4142 + flags |= PIPE_CONTROL_FLUSH_ENABLE;
4143 + }
4144 + if (invalidate_domains) {
4145 +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
4146 +index 2e7cbe9..2a5ed74 100644
4147 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
4148 ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
4149 +@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
4150 +
4151 + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
4152 +
4153 ++ mutex_lock(&drm->dev->mode_config.mutex);
4154 + if (plugged)
4155 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
4156 + else
4157 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
4158 ++ mutex_unlock(&drm->dev->mode_config.mutex);
4159 ++
4160 + drm_helper_hpd_irq_event(connector->dev);
4161 + }
4162 +
4163 +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
4164 +index 64c8d93..58a3f7c 100644
4165 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c
4166 ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
4167 +@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
4168 + nv_crtc->lut.depth = 0;
4169 + }
4170 +
4171 +- /* Make sure that drm and hw vblank irqs get resumed if needed. */
4172 +- for (head = 0; head < dev->mode_config.num_crtc; head++)
4173 +- drm_vblank_on(dev, head);
4174 +-
4175 + /* This should ensure we don't hit a locking problem when someone
4176 + * wakes us up via a connector. We should never go into suspend
4177 + * while the display is on anyways.
4178 +@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
4179 +
4180 + drm_helper_resume_force_mode(dev);
4181 +
4182 ++ /* Make sure that drm and hw vblank irqs get resumed if needed. */
4183 ++ for (head = 0; head < dev->mode_config.num_crtc; head++)
4184 ++ drm_vblank_on(dev, head);
4185 ++
4186 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4187 + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
4188 +
4189 +diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
4190 +index 60e32c4..35ecc0d 100644
4191 +--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
4192 ++++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
4193 +@@ -24,7 +24,7 @@
4194 + static int nouveau_platform_probe(struct platform_device *pdev)
4195 + {
4196 + const struct nvkm_device_tegra_func *func;
4197 +- struct nvkm_device *device;
4198 ++ struct nvkm_device *device = NULL;
4199 + struct drm_device *drm;
4200 + int ret;
4201 +
4202 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4203 +index 7f8a427..e7e581d 100644
4204 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4205 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4206 +@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
4207 +
4208 + if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
4209 + return -ENOMEM;
4210 +- *pdevice = &tdev->device;
4211 ++
4212 + tdev->func = func;
4213 + tdev->pdev = pdev;
4214 + tdev->irq = -1;
4215 +
4216 + tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
4217 +- if (IS_ERR(tdev->vdd))
4218 +- return PTR_ERR(tdev->vdd);
4219 ++ if (IS_ERR(tdev->vdd)) {
4220 ++ ret = PTR_ERR(tdev->vdd);
4221 ++ goto free;
4222 ++ }
4223 +
4224 + tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
4225 +- if (IS_ERR(tdev->rst))
4226 +- return PTR_ERR(tdev->rst);
4227 ++ if (IS_ERR(tdev->rst)) {
4228 ++ ret = PTR_ERR(tdev->rst);
4229 ++ goto free;
4230 ++ }
4231 +
4232 + tdev->clk = devm_clk_get(&pdev->dev, "gpu");
4233 +- if (IS_ERR(tdev->clk))
4234 +- return PTR_ERR(tdev->clk);
4235 ++ if (IS_ERR(tdev->clk)) {
4236 ++ ret = PTR_ERR(tdev->clk);
4237 ++ goto free;
4238 ++ }
4239 +
4240 + tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
4241 +- if (IS_ERR(tdev->clk_pwr))
4242 +- return PTR_ERR(tdev->clk_pwr);
4243 ++ if (IS_ERR(tdev->clk_pwr)) {
4244 ++ ret = PTR_ERR(tdev->clk_pwr);
4245 ++ goto free;
4246 ++ }
4247 +
4248 + nvkm_device_tegra_probe_iommu(tdev);
4249 +
4250 + ret = nvkm_device_tegra_power_up(tdev);
4251 + if (ret)
4252 +- return ret;
4253 ++ goto remove;
4254 +
4255 + tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
4256 + ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
4257 +@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
4258 + cfg, dbg, detect, mmio, subdev_mask,
4259 + &tdev->device);
4260 + if (ret)
4261 +- return ret;
4262 ++ goto powerdown;
4263 ++
4264 ++ *pdevice = &tdev->device;
4265 +
4266 + return 0;
4267 ++
4268 ++powerdown:
4269 ++ nvkm_device_tegra_power_down(tdev);
4270 ++remove:
4271 ++ nvkm_device_tegra_remove_iommu(tdev);
4272 ++free:
4273 ++ kfree(tdev);
4274 ++ return ret;
4275 + }
4276 + #else
4277 + int
4278 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4279 +index 74e2f7c..9688970 100644
4280 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4281 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4282 +@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
4283 + .outp = outp,
4284 + }, *dp = &_dp;
4285 + u32 datarate = 0;
4286 ++ u8 pwr;
4287 + int ret;
4288 +
4289 + if (!outp->base.info.location && disp->func->sor.magic)
4290 +@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
4291 + /* disable link interrupt handling during link training */
4292 + nvkm_notify_put(&outp->irq);
4293 +
4294 ++ /* ensure sink is not in a low-power state */
4295 ++ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
4296 ++ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
4297 ++ pwr &= ~DPCD_SC00_SET_POWER;
4298 ++ pwr |= DPCD_SC00_SET_POWER_D0;
4299 ++ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
4300 ++ }
4301 ++ }
4302 ++
4303 + /* enable down-spreading and execute pre-train script from vbios */
4304 + dp_link_train_init(dp, outp->dpcd[3] & 0x01);
4305 +
4306 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4307 +index 9596290..6e10c5e 100644
4308 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4309 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4310 +@@ -71,5 +71,11 @@
4311 + #define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
4312 + #define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
4313 +
4314 ++/* DPCD Sink Control */
4315 ++#define DPCD_SC00 0x00600
4316 ++#define DPCD_SC00_SET_POWER 0x03
4317 ++#define DPCD_SC00_SET_POWER_D0 0x01
4318 ++#define DPCD_SC00_SET_POWER_D3 0x03
4319 ++
4320 + void nvkm_dp_train(struct work_struct *);
4321 + #endif
4322 +diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
4323 +index 2ae8577..7c2e782 100644
4324 +--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
4325 ++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
4326 +@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
4327 + cmd->command_size))
4328 + return -EFAULT;
4329 +
4330 +- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
4331 ++ reloc_info = kmalloc_array(cmd->relocs_num,
4332 ++ sizeof(struct qxl_reloc_info), GFP_KERNEL);
4333 + if (!reloc_info)
4334 + return -ENOMEM;
4335 +
4336 +diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
4337 +index 7520727..367a916 100644
4338 +--- a/drivers/gpu/drm/radeon/dce6_afmt.c
4339 ++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
4340 +@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
4341 + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
4342 + */
4343 + if (ASIC_IS_DCE8(rdev)) {
4344 ++ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
4345 ++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
4346 ++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
4347 ++ div = radeon_audio_decode_dfs_div(div);
4348 ++
4349 ++ if (div)
4350 ++ clock = clock * 100 / div;
4351 ++
4352 + WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
4353 + WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
4354 + } else {
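A note on the divider math above, hedged because radeon_audio_decode_dfs_div() is implemented elsewhere in this patch and its body is not shown here: the expression clock = clock * 100 / div only makes sense if the decoded value is the DFS divider scaled by 100.

    /* Assumption (mine), for illustration:
     *   div == 150  ->  1.5x divider  ->  clock = clock * 100 / 150
     * The "if (div)" guard skips the correction, and avoids a
     * divide by zero, when the register value cannot be decoded. */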
4355 +diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
4356 +index 9953356..3cf04a2 100644
4357 +--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
4358 ++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
4359 +@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
4360 + * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
4361 + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
4362 + */
4363 ++ if (ASIC_IS_DCE41(rdev)) {
4364 ++ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
4365 ++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
4366 ++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
4367 ++ div = radeon_audio_decode_dfs_div(div);
4368 ++
4369 ++ if (div)
4370 ++ clock = 100 * clock / div;
4371 ++ }
4372 ++
4373 + WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
4374 + WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
4375 + }
4376 +diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
4377 +index 4aa5f75..13b6029 100644
4378 +--- a/drivers/gpu/drm/radeon/evergreend.h
4379 ++++ b/drivers/gpu/drm/radeon/evergreend.h
4380 +@@ -511,6 +511,11 @@
4381 + #define DCCG_AUDIO_DTO1_CNTL 0x05cc
4382 + # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
4383 +
4384 ++#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
4385 ++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
4386 ++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
4387 ++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
4388 ++
4389 + /* DCE 4.0 AFMT */
4390 + #define HDMI_CONTROL 0x7030
4391 + # define HDMI_KEEPOUT_MODE (1 << 0)
4392 +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
4393 +index 87db649..5580568 100644
4394 +--- a/drivers/gpu/drm/radeon/radeon.h
4395 ++++ b/drivers/gpu/drm/radeon/radeon.h
4396 +@@ -268,6 +268,7 @@ struct radeon_clock {
4397 + uint32_t current_dispclk;
4398 + uint32_t dp_extclk;
4399 + uint32_t max_pixel_clock;
4400 ++ uint32_t vco_freq;
4401 + };
4402 +
4403 + /*
4404 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
4405 +index 8f28524..de9a2ff 100644
4406 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
4407 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
4408 +@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
4409 + }
4410 +
4411 + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
4412 +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
4413 ++ if (((dev->pdev->device == 0x9802) ||
4414 ++ (dev->pdev->device == 0x9805) ||
4415 ++ (dev->pdev->device == 0x9806)) &&
4416 + (dev->pdev->subsystem_vendor == 0x1734) &&
4417 + (dev->pdev->subsystem_device == 0x11bd)) {
4418 + if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
4419 +@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
4420 + }
4421 + }
4422 +
4423 +- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
4424 +- if ((dev->pdev->device == 0x9805) &&
4425 +- (dev->pdev->subsystem_vendor == 0x1734) &&
4426 +- (dev->pdev->subsystem_device == 0x11bd)) {
4427 +- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
4428 +- return false;
4429 +- }
4430 +-
4431 + return true;
4432 + }
4433 +
4434 +@@ -1112,6 +1106,31 @@ union firmware_info {
4435 + ATOM_FIRMWARE_INFO_V2_2 info_22;
4436 + };
4437 +
4438 ++union igp_info {
4439 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
4440 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
4441 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
4442 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
4443 ++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
4444 ++};
4445 ++
4446 ++static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
4447 ++{
4448 ++ struct radeon_mode_info *mode_info = &rdev->mode_info;
4449 ++ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
4450 ++ union igp_info *igp_info;
4451 ++ u8 frev, crev;
4452 ++ u16 data_offset;
4453 ++
4454 ++ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4455 ++ &frev, &crev, &data_offset)) {
4456 ++ igp_info = (union igp_info *)(mode_info->atom_context->bios +
4457 ++ data_offset);
4458 ++ rdev->clock.vco_freq =
4459 ++ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
4460 ++ }
4461 ++}
4462 ++
4463 + bool radeon_atom_get_clock_info(struct drm_device *dev)
4464 + {
4465 + struct radeon_device *rdev = dev->dev_private;
4466 +@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
4467 + rdev->mode_info.firmware_flags =
4468 + le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
4469 +
4470 ++ if (ASIC_IS_DCE8(rdev))
4471 ++ rdev->clock.vco_freq =
4472 ++ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
4473 ++ else if (ASIC_IS_DCE5(rdev))
4474 ++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
4475 ++ else if (ASIC_IS_DCE41(rdev))
4476 ++ radeon_atombios_get_dentist_vco_freq(rdev);
4477 ++ else
4478 ++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
4479 ++
4480 ++ if (rdev->clock.vco_freq == 0)
4481 ++ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
4482 ++
4483 + return true;
4484 + }
4485 +
4486 + return false;
4487 + }
4488 +
4489 +-union igp_info {
4490 +- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
4491 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
4492 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
4493 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
4494 +- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
4495 +-};
4496 +-
4497 + bool radeon_atombios_sideport_present(struct radeon_device *rdev)
4498 + {
4499 + struct radeon_mode_info *mode_info = &rdev->mode_info;
4500 +diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
4501 +index 2c02e99..b214663 100644
4502 +--- a/drivers/gpu/drm/radeon/radeon_audio.c
4503 ++++ b/drivers/gpu/drm/radeon/radeon_audio.c
4504 +@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
4505 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
4506 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
4507 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
4508 +- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
4509 +- struct radeon_connector_atom_dig *dig_connector =
4510 +- radeon_connector->con_priv;
4511 +
4512 + if (!dig || !dig->afmt)
4513 + return;
4514 +@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
4515 + radeon_audio_write_speaker_allocation(encoder);
4516 + radeon_audio_write_sad_regs(encoder);
4517 + radeon_audio_write_latency_fields(encoder, mode);
4518 +- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
4519 +- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
4520 +- else
4521 +- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
4522 ++ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
4523 + radeon_audio_set_audio_packet(encoder);
4524 + radeon_audio_select_pin(encoder);
4525 +
4526 +@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
4527 + if (radeon_encoder->audio && radeon_encoder->audio->dpms)
4528 + radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
4529 + }
4530 ++
4531 ++unsigned int radeon_audio_decode_dfs_div(unsigned int div)
4532 ++{
4533 ++ if (div >= 8 && div < 64)
4534 ++ return (div - 8) * 25 + 200;
4535 ++ else if (div >= 64 && div < 96)
4536 ++ return (div - 64) * 50 + 1600;
4537 ++ else if (div >= 96 && div < 128)
4538 ++ return (div - 96) * 100 + 3200;
4539 ++ else
4540 ++ return 0;
4541 ++}
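
The decode table just added turns the 7-bit DENTIST_DPREFCLK_WDIVIDER field into a divider scaled by 100: raw values 8-63 step by 0.25, 64-95 by 0.5 and 96-127 by 1.0, and the callers divide with clock * 100 / div so no floating point is needed. A standalone sketch of the same decode, assuming nothing beyond the boundaries visible in the hunk (the clock value below is illustrative):

    #include <stdio.h>

    /* Same piecewise decode as radeon_audio_decode_dfs_div():
     * returns divider * 100, or 0 for an out-of-range raw field. */
    static unsigned int decode_dfs_div(unsigned int raw)
    {
        if (raw >= 8 && raw < 64)
            return (raw - 8) * 25 + 200;    /* 2.00 .. 15.75, 0.25 steps */
        if (raw >= 64 && raw < 96)
            return (raw - 64) * 50 + 1600;  /* 16.0 .. 31.5, 0.5 steps */
        if (raw >= 96 && raw < 128)
            return (raw - 96) * 100 + 3200; /* 32 .. 63, 1.0 steps */
        return 0;
    }

    int main(void)
    {
        unsigned int clock = 24000;           /* illustrative clock units */
        unsigned int div = decode_dfs_div(9); /* 225 == divider 2.25 */

        if (div)
            clock = clock * 100 / div;        /* fixed-point divide */
        printf("div=%u clock=%u\n", div, clock);
        return 0;
    }
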
4542 +diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
4543 +index 059cc30..5c70cce 100644
4544 +--- a/drivers/gpu/drm/radeon/radeon_audio.h
4545 ++++ b/drivers/gpu/drm/radeon/radeon_audio.h
4546 +@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
4547 + void radeon_audio_mode_set(struct drm_encoder *encoder,
4548 + struct drm_display_mode *mode);
4549 + void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
4550 ++unsigned int radeon_audio_decode_dfs_div(unsigned int div);
4551 +
4552 + #endif
4553 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
4554 +index c566993..d690df5 100644
4555 +--- a/drivers/gpu/drm/radeon/radeon_device.c
4556 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
4557 +@@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
4558 + }
4559 +
4560 + drm_kms_helper_poll_enable(dev);
4561 ++ drm_helper_hpd_irq_event(dev);
4562 +
4563 + /* set the power state here in case we are a PX system or headless */
4564 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
4565 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
4566 +index 1eca0ac..13767d2 100644
4567 +--- a/drivers/gpu/drm/radeon/radeon_display.c
4568 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
4569 +@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
4570 + struct drm_crtc *crtc = &radeon_crtc->base;
4571 + unsigned long flags;
4572 + int r;
4573 +- int vpos, hpos, stat, min_udelay;
4574 ++ int vpos, hpos, stat, min_udelay = 0;
4575 ++ unsigned repcnt = 4;
4576 + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
4577 +
4578 + down_read(&rdev->exclusive_lock);
4579 +@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
4580 + * In practice this won't execute very often unless on very fast
4581 + * machines because the time window for this to happen is very small.
4582 + */
4583 +- for (;;) {
4584 ++ while (radeon_crtc->enabled && repcnt--) {
4585 + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
4586 + * start in hpos, and to the "fudged earlier" vblank start in
4587 + * vpos.
4588 +@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
4589 + /* Sleep at least until estimated real start of hw vblank */
4590 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4591 + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
4592 ++ if (min_udelay > vblank->framedur_ns / 2000) {
4593 ++ /* Don't wait ridiculously long - something is wrong */
4594 ++ repcnt = 0;
4595 ++ break;
4596 ++ }
4597 + usleep_range(min_udelay, 2 * min_udelay);
4598 + spin_lock_irqsave(&crtc->dev->event_lock, flags);
4599 + };
4600 +
4601 ++ if (!repcnt)
4602 ++ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
4603 ++ "framedur %d, linedur %d, stat %d, vpos %d, "
4604 ++ "hpos %d\n", work->crtc_id, min_udelay,
4605 ++ vblank->framedur_ns / 1000,
4606 ++ vblank->linedur_ns / 1000, stat, vpos, hpos);
4607 ++
4608 + /* do the flip (mmio) */
4609 + radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
4610 +
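
The flip worker now retries at most four times, only while the CRTC is enabled, and treats any computed sleep longer than half a frame as a sign of bad timing data. A compilable sketch of that clamped wait loop, assuming illustrative 60 Hz timings:

    #include <stdio.h>

    int main(void)
    {
        int framedur_ns = 16666667, linedur_ns = 12000; /* ~60 Hz, made up */
        int repcnt = 4, hpos = -40, min_udelay = 0, enabled = 1;

        while (enabled && repcnt--) {
            int line_us = linedur_ns / 1000 > 5 ? linedur_ns / 1000 : 5;

            min_udelay = (-hpos + 1) * line_us;
            if (min_udelay > framedur_ns / 2000) { /* > half a frame */
                repcnt = 0;                        /* bail out loudly */
                break;
            }
            printf("would usleep %d..%d us\n", min_udelay, 2 * min_udelay);
            break; /* driver: sleep, re-sample hpos, maybe loop again */
        }
        if (!repcnt)
            printf("delay problem: min_udelay %d\n", min_udelay);
        return 0;
    }
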
4611 +diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
4612 +index 84d4563..fb6ad14 100644
4613 +--- a/drivers/gpu/drm/radeon/radeon_object.c
4614 ++++ b/drivers/gpu/drm/radeon/radeon_object.c
4615 +@@ -33,6 +33,7 @@
4616 + #include <linux/slab.h>
4617 + #include <drm/drmP.h>
4618 + #include <drm/radeon_drm.h>
4619 ++#include <drm/drm_cache.h>
4620 + #include "radeon.h"
4621 + #include "radeon_trace.h"
4622 +
4623 +@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
4624 + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
4625 + "better performance thanks to write-combining\n");
4626 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
4627 ++#else
4628 ++ /* For architectures that don't support WC memory,
4629 ++ * mask out the WC flag from the BO
4630 ++ */
4631 ++ if (!drm_arch_can_wc_memory())
4632 ++ bo->flags &= ~RADEON_GEM_GTT_WC;
4633 + #endif
4634 +
4635 + radeon_ttm_placement_from_domain(bo, domain);
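
Outside the x86/PAT case, the new #else branch asks drm_arch_can_wc_memory() whether the architecture can map system memory write-combined at all and quietly strips the WC flag when it cannot, degrading to cached GTT rather than requesting an unsupported mapping. A minimal sketch of that masking, with made-up flag bits and capability probe:

    #include <stdbool.h>
    #include <stdio.h>

    #define GEM_GTT_WC 0x1u  /* illustrative flag bits */
    #define GEM_GTT_UC 0x2u

    static bool arch_can_wc_memory(void) { return false; } /* pretend: no WC */

    int main(void)
    {
        unsigned int flags = GEM_GTT_WC;

        if (!arch_can_wc_memory())
            flags &= ~GEM_GTT_WC; /* fall back to cached mappings */
        printf("flags: 0x%x\n", flags);
        return 0;
    }
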
4636 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
4637 +index 59abebd..2081a60 100644
4638 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
4639 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
4640 +@@ -1075,8 +1075,6 @@ force:
4641 +
4642 + /* update display watermarks based on new power state */
4643 + radeon_bandwidth_update(rdev);
4644 +- /* update displays */
4645 +- radeon_dpm_display_configuration_changed(rdev);
4646 +
4647 + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
4648 + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
4649 +@@ -1097,6 +1095,9 @@ force:
4650 +
4651 + radeon_dpm_post_set_power_state(rdev);
4652 +
4653 ++ /* update displays */
4654 ++ radeon_dpm_display_configuration_changed(rdev);
4655 ++
4656 + if (rdev->asic->dpm.force_performance_level) {
4657 + if (rdev->pm.dpm.thermal_active) {
4658 + enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
4659 +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
4660 +index c507896..197b157 100644
4661 +--- a/drivers/gpu/drm/radeon/radeon_sa.c
4662 ++++ b/drivers/gpu/drm/radeon/radeon_sa.c
4663 +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
4664 + /* see if we can skip over some allocations */
4665 + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
4666 +
4667 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
4668 ++ radeon_fence_ref(fences[i]);
4669 ++
4670 + spin_unlock(&sa_manager->wq.lock);
4671 + r = radeon_fence_wait_any(rdev, fences, false);
4672 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
4673 ++ radeon_fence_unref(&fences[i]);
4674 + spin_lock(&sa_manager->wq.lock);
4675 + /* if we have nothing to wait for block */
4676 + if (r == -ENOENT) {
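
radeon_fence_wait_any() runs with the spinlock dropped, so the fence array gathered under the lock could otherwise point at fences freed by another ring mid-wait; the fix pins each fence with radeon_fence_ref() before unlocking and releases the pins once the wait returns. The shape of the pattern, sketched with a generic refcounted object (obj_get/obj_put are illustrative stand-ins, not radeon API):

    #include <stdio.h>

    struct obj { int refs; };

    static void obj_get(struct obj *o) { if (o) o->refs++; }

    static void obj_put(struct obj **o)
    {
        if (*o && --(*o)->refs == 0)
            printf("freed\n");
        *o = NULL;
    }

    int main(void)
    {
        struct obj fence = { .refs = 1 };
        struct obj *set[1] = { &fence };

        /* lock held: pin everything we are about to wait on */
        obj_get(set[0]);
        /* unlock, sleep in wait_any(set, ...), re-lock */
        printf("waiting with refs=%d\n", fence.refs);
        /* the pins guarantee the objects outlive the unlocked wait */
        obj_put(&set[0]);
        return 0;
    }
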
4677 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
4678 +index e343074..e06ac54 100644
4679 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
4680 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
4681 +@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
4682 + 0, PAGE_SIZE,
4683 + PCI_DMA_BIDIRECTIONAL);
4684 + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
4685 +- while (--i) {
4686 ++ while (i--) {
4687 + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
4688 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
4689 + gtt->ttm.dma_address[i] = 0;
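
`while (--i)` is the classic cleanup off-by-one: it never unmaps entry 0, and if the very first mapping fails it decrements straight past zero; `while (i--)` walks back over exactly the entries 0..i-1 that were mapped. A tiny demo of the corrected idiom with stand-in acquire/release calls (the real ones here are pci_map_page()/pci_unmap_page()):

    #include <stdio.h>

    #define N 4

    static int acquire(int i) { return i < 3 ? 0 : -1; } /* fail on entry 3 */
    static void release(int i) { printf("release %d\n", i); }

    int main(void)
    {
        int i;

        for (i = 0; i < N; i++) {
            if (acquire(i)) {
                while (i--)     /* unwinds i-1 .. 0, safe even for i == 0 */
                    release(i);
                return 1;
            }
        }
        return 0;
    }
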
4690 +diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
4691 +index 48d97c0..3979632 100644
4692 +--- a/drivers/gpu/drm/radeon/radeon_vm.c
4693 ++++ b/drivers/gpu/drm/radeon/radeon_vm.c
4694 +@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4695 +
4696 + if (soffset) {
4697 + /* make sure object fit at this offset */
4698 +- eoffset = soffset + size;
4699 ++ eoffset = soffset + size - 1;
4700 + if (soffset >= eoffset) {
4701 + r = -EINVAL;
4702 + goto error_unreserve;
4703 + }
4704 +
4705 + last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
4706 +- if (last_pfn > rdev->vm_manager.max_pfn) {
4707 +- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
4708 ++ if (last_pfn >= rdev->vm_manager.max_pfn) {
4709 ++ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
4710 + last_pfn, rdev->vm_manager.max_pfn);
4711 + r = -EINVAL;
4712 + goto error_unreserve;
4713 +@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4714 + eoffset /= RADEON_GPU_PAGE_SIZE;
4715 + if (soffset || eoffset) {
4716 + struct interval_tree_node *it;
4717 +- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
4718 ++ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
4719 + if (it && it != &bo_va->it) {
4720 + struct radeon_bo_va *tmp;
4721 + tmp = container_of(it, struct radeon_bo_va, it);
4722 +@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4723 + if (soffset || eoffset) {
4724 + spin_lock(&vm->status_lock);
4725 + bo_va->it.start = soffset;
4726 +- bo_va->it.last = eoffset - 1;
4727 ++ bo_va->it.last = eoffset;
4728 + list_add(&bo_va->vm_status, &vm->cleared);
4729 + spin_unlock(&vm->status_lock);
4730 + interval_tree_insert(&bo_va->it, &vm->va);
4731 +@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
4732 + unsigned i;
4733 +
4734 + start >>= radeon_vm_block_size;
4735 +- end >>= radeon_vm_block_size;
4736 ++ end = (end - 1) >> radeon_vm_block_size;
4737 +
4738 + for (i = start; i <= end; ++i)
4739 + radeon_bo_fence(vm->page_tables[i].bo, fence, true);
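
Storing the range end as soffset + size - 1 makes it an inclusive last byte, so a mapping that ends exactly at the top of the VM space no longer computes an end equal to max_pfn and fails the bounds check; the interval-tree lookup and the page-table fence loop are converted to the same inclusive convention. A sketch of the inclusive arithmetic, assuming illustrative page and page-table sizes:

    #include <stdio.h>

    #define PAGE_SHIFT  12 /* illustrative 4 KiB GPU pages */
    #define BLOCK_SHIFT 9  /* illustrative 512 PTEs per page table */

    int main(void)
    {
        unsigned long long soffset = 0x100000ULL, size = 0x200000ULL;
        unsigned long long last = soffset + size - 1; /* inclusive end */
        unsigned long long first_pfn = soffset >> PAGE_SHIFT;
        unsigned long long last_pfn = last >> PAGE_SHIFT;

        printf("pfns %llu..%llu, page tables %llu..%llu\n",
               first_pfn, last_pfn,
               first_pfn >> BLOCK_SHIFT, last_pfn >> BLOCK_SHIFT);
        return 0;
    }
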
4740 +diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
4741 +index 4c4a721..d1a7b58 100644
4742 +--- a/drivers/gpu/drm/radeon/sid.h
4743 ++++ b/drivers/gpu/drm/radeon/sid.h
4744 +@@ -915,6 +915,11 @@
4745 + #define DCCG_AUDIO_DTO1_PHASE 0x05c0
4746 + #define DCCG_AUDIO_DTO1_MODULE 0x05c4
4747 +
4748 ++#define DENTIST_DISPCLK_CNTL 0x0490
4749 ++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
4750 ++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
4751 ++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
4752 ++
4753 + #define AFMT_AUDIO_SRC_CONTROL 0x713c
4754 + #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
4755 + /* AFMT_AUDIO_SRC_SELECT
4756 +diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
4757 +index 07a0d37..a01efe3 100644
4758 +--- a/drivers/gpu/drm/radeon/vce_v1_0.c
4759 ++++ b/drivers/gpu/drm/radeon/vce_v1_0.c
4760 +@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
4761 + return -EINVAL;
4762 + }
4763 +
4764 +- for (i = 0; i < sign->num; ++i) {
4765 +- if (sign->val[i].chip_id == chip_id)
4766 ++ for (i = 0; i < le32_to_cpu(sign->num); ++i) {
4767 ++ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
4768 + break;
4769 + }
4770 +
4771 +- if (i == sign->num)
4772 ++ if (i == le32_to_cpu(sign->num))
4773 + return -EINVAL;
4774 +
4775 + data += (256 - 64) / 4;
4776 +@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
4777 + data[1] = sign->val[i].nonce[1];
4778 + data[2] = sign->val[i].nonce[2];
4779 + data[3] = sign->val[i].nonce[3];
4780 +- data[4] = sign->len + 64;
4781 ++ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
4782 +
4783 + memset(&data[5], 0, 44);
4784 + memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
4785 +
4786 +- data += data[4] / 4;
4787 ++ data += le32_to_cpu(data[4]) / 4;
4788 + data[0] = sign->val[i].sigval[0];
4789 + data[1] = sign->val[i].sigval[1];
4790 + data[2] = sign->val[i].sigval[2];
4791 + data[3] = sign->val[i].sigval[3];
4792 +
4793 +- rdev->vce.keyselect = sign->val[i].keyselect;
4794 ++ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
4795 +
4796 + return 0;
4797 + }
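
The VCE firmware signature lives in the blob in little-endian form, so the entry count, length and key select have to pass through le32_to_cpu()/cpu_to_le32() to behave on big-endian hosts. A portable userspace sketch of reading such a field (hand-rolled conversion; the kernel naturally uses its own helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a u32 from 4 little-endian bytes, on any host endianness. */
    static uint32_t le32_to_host(const uint8_t b[4])
    {
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    int main(void)
    {
        const uint8_t blob[4] = { 0x03, 0x00, 0x00, 0x00 }; /* num == 3 */

        printf("entries: %u\n", le32_to_host(blob));
        return 0;
    }
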
4798 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4799 +index 6377e81..67cebb2 100644
4800 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4801 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4802 +@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
4803 + {
4804 + struct vmw_cmdbuf_man *man = header->man;
4805 +
4806 +- BUG_ON(!spin_is_locked(&man->lock));
4807 ++ lockdep_assert_held_once(&man->lock);
4808 +
4809 + if (header->inline_space) {
4810 + vmw_cmdbuf_header_inline_free(header);
4811 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4812 +index c49812b..24fb348 100644
4813 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4814 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4815 +@@ -25,6 +25,7 @@
4816 + *
4817 + **************************************************************************/
4818 + #include <linux/module.h>
4819 ++#include <linux/console.h>
4820 +
4821 + #include <drm/drmP.h>
4822 + #include "vmwgfx_drv.h"
4823 +@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4824 + static int __init vmwgfx_init(void)
4825 + {
4826 + int ret;
4827 ++
4828 ++#ifdef CONFIG_VGA_CONSOLE
4829 ++ if (vgacon_text_force())
4830 ++ return -EINVAL;
4831 ++#endif
4832 ++
4833 + ret = drm_pci_init(&driver, &vmw_pci_driver);
4834 + if (ret)
4835 + DRM_ERROR("Failed initializing DRM.\n");
4836 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4837 +index 9b4bb9e..7c2e118 100644
4838 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4839 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4840 +@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
4841 + uint32_t format;
4842 + struct drm_vmw_size content_base_size;
4843 + struct vmw_resource *res;
4844 ++ unsigned int bytes_pp;
4845 + int ret;
4846 +
4847 + switch (mode_cmd->depth) {
4848 + case 32:
4849 + case 24:
4850 + format = SVGA3D_X8R8G8B8;
4851 ++ bytes_pp = 4;
4852 + break;
4853 +
4854 + case 16:
4855 + case 15:
4856 + format = SVGA3D_R5G6B5;
4857 ++ bytes_pp = 2;
4858 + break;
4859 +
4860 + case 8:
4861 + format = SVGA3D_P8;
4862 ++ bytes_pp = 1;
4863 + break;
4864 +
4865 + default:
4866 +@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
4867 + return -EINVAL;
4868 + }
4869 +
4870 +- content_base_size.width = mode_cmd->width;
4871 ++ content_base_size.width = mode_cmd->pitch / bytes_pp;
4872 + content_base_size.height = mode_cmd->height;
4873 + content_base_size.depth = 1;
4874 +
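
Building the proxy surface with mode_cmd->width broke framebuffers whose pitch is padded wider than the visible width; the surface has to cover the full pitch, so its width is now derived from pitch / bytes-per-pixel for the chosen SVGA3D format. A small demo of the difference, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int width = 1366;    /* visible width */
        unsigned int bytes_pp = 4;    /* e.g. SVGA3D_X8R8G8B8 */
        unsigned int pitch = 5504;    /* stride rounded up by the stack */

        printf("logical width: %u px\n", width);
        printf("surface width: %u px (pitch %u / %u Bpp)\n",
               pitch / bytes_pp, pitch, bytes_pp);
        return 0;
    }
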
4875 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
4876 +index c4dcab0..9098f13 100644
4877 +--- a/drivers/hv/channel.c
4878 ++++ b/drivers/hv/channel.c
4879 +@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
4880 + * on the ring. We will not signal if more data is
4881 + * to be placed.
4882 + *
4883 ++ * Based on the channel signal state, we will decide
4884 ++ * which signaling policy will be applied.
4885 ++ *
4886 + * If we cannot write to the ring-buffer; signal the host
4887 + * even if we may not have written anything. This is a rare
4888 + * enough condition that it should not matter.
4889 + */
4890 ++
4891 ++ if (channel->signal_policy)
4892 ++ signal = true;
4893 ++ else
4894 ++ kick_q = true;
4895 ++
4896 + if (((ret == 0) && kick_q && signal) || (ret))
4897 + vmbus_setevent(channel);
4898 +
4899 +@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
4900 + * on the ring. We will not signal if more data is
4901 + * to be placed.
4902 + *
4903 ++ * Based on the channel signal state, we will decide
4904 ++ * which signaling policy will be applied.
4905 ++ *
4906 + * If we cannot write to the ring-buffer; signal the host
4907 + * even if we may not have written anything. This is a rare
4908 + * enough condition that it should not matter.
4909 + */
4910 ++
4911 ++ if (channel->signal_policy)
4912 ++ signal = true;
4913 ++ else
4914 ++ kick_q = true;
4915 ++
4916 + if (((ret == 0) && kick_q && signal) || (ret))
4917 + vmbus_setevent(channel);
4918 +
4919 +diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
4920 +index f155b83..2b3105c 100644
4921 +--- a/drivers/hwmon/ads1015.c
4922 ++++ b/drivers/hwmon/ads1015.c
4923 +@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
4924 + struct ads1015_data *data = i2c_get_clientdata(client);
4925 + unsigned int pga = data->channel_data[channel].pga;
4926 + int fullscale = fullscale_table[pga];
4927 +- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
4928 ++ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
4929 +
4930 + return DIV_ROUND_CLOSEST(reg * fullscale, mask);
4931 + }
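
With an unsigned mask, the usual arithmetic conversions turn the possibly negative reg * fullscale into a huge unsigned value before DIV_ROUND_CLOSEST divides, so negative ADC readings decoded to garbage; a plain int mask keeps the whole division signed. A userspace demo, with a stand-in for the kernel's DIV_ROUND_CLOSEST macro:

    #include <stdio.h>

    /* Stand-in for the kernel macro, for signed operands. */
    #define DIV_ROUND_CLOSEST(x, d) \
        (((x) + ((x) < 0 ? -((d) / 2) : (d) / 2)) / (d))

    int main(void)
    {
        int reg = -16, fullscale = 2048;
        unsigned int umask = 0x7fff;
        int smask = 0x7fff;

        /* unsigned divisor: the negative numerator wraps first */
        printf("unsigned: %u\n", (unsigned int)((reg * fullscale) / umask));
        /* signed divisor: the sign survives */
        printf("signed:   %d\n", DIV_ROUND_CLOSEST(reg * fullscale, smask));
        return 0;
    }
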
4932 +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
4933 +index c848789..c43318d 100644
4934 +--- a/drivers/hwmon/dell-smm-hwmon.c
4935 ++++ b/drivers/hwmon/dell-smm-hwmon.c
4936 +@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
4937 + static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
4938 + {
4939 + /*
4940 ++ * CPU fan speed going up and down on Dell Studio XPS 8000
4941 ++ * for unknown reasons.
4942 ++ */
4943 ++ .ident = "Dell Studio XPS 8000",
4944 ++ .matches = {
4945 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
4946 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
4947 ++ },
4948 ++ },
4949 ++ {
4950 ++ /*
4951 + * CPU fan speed going up and down on Dell Studio XPS 8100
4952 + * for unknown reasons.
4953 + */
4954 +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
4955 +index 82de3de..685568b 100644
4956 +--- a/drivers/hwmon/gpio-fan.c
4957 ++++ b/drivers/hwmon/gpio-fan.c
4958 +@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
4959 + unsigned long *state)
4960 + {
4961 + struct gpio_fan_data *fan_data = cdev->devdata;
4962 +- int r;
4963 +
4964 + if (!fan_data)
4965 + return -EINVAL;
4966 +
4967 +- r = get_fan_speed_index(fan_data);
4968 +- if (r < 0)
4969 +- return r;
4970 +-
4971 +- *state = r;
4972 ++ *state = fan_data->speed_index;
4973 + return 0;
4974 + }
4975 +
4976 +diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
4977 +index e254921..93738df 100644
4978 +--- a/drivers/hwtracing/coresight/coresight.c
4979 ++++ b/drivers/hwtracing/coresight/coresight.c
4980 +@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
4981 + to_match = data;
4982 + i_csdev = to_coresight_device(dev);
4983 +
4984 +- if (!strcmp(to_match, dev_name(&i_csdev->dev)))
4985 ++ if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
4986 + return 1;
4987 +
4988 + return 0;
4989 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
4990 +index f62d697..27fa0cb 100644
4991 +--- a/drivers/i2c/busses/i2c-i801.c
4992 ++++ b/drivers/i2c/busses/i2c-i801.c
4993 +@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
4994 + switch (dev->device) {
4995 + case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
4996 + case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
4997 ++ case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
4998 ++ case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
4999 + case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
5000 + priv->features |= FEATURE_I2C_BLOCK_READ;
5001 + priv->features |= FEATURE_IRQ;
5002 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
5003 +index 0a26dd6..d6d2b35 100644
5004 +--- a/drivers/infiniband/core/cm.c
5005 ++++ b/drivers/infiniband/core/cm.c
5006 +@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
5007 + wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
5008 +
5009 + /* Check if the device started its remove_one */
5010 +- spin_lock_irq(&cm.lock);
5011 ++ spin_lock_irqsave(&cm.lock, flags);
5012 + if (!cm_dev->going_down)
5013 + queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
5014 + msecs_to_jiffies(wait_time));
5015 +- spin_unlock_irq(&cm.lock);
5016 ++ spin_unlock_irqrestore(&cm.lock, flags);
5017 +
5018 + cm_id_priv->timewait_info = NULL;
5019 + }
5020 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
5021 +index 2d762a2..17a15c5 100644
5022 +--- a/drivers/infiniband/core/cma.c
5023 ++++ b/drivers/infiniband/core/cma.c
5024 +@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
5025 + if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
5026 + return ret;
5027 +
5028 +- if (dev_type == ARPHRD_ETHER)
5029 ++ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
5030 + ndev = dev_get_by_index(&init_net, bound_if_index);
5031 +
5032 + ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
5033 +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
5034 +index cb78b1e..f504ba7 100644
5035 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
5036 ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
5037 +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
5038 + error = l2t_send(tdev, skb, l2e);
5039 + if (error < 0)
5040 + kfree_skb(skb);
5041 +- return error;
5042 ++ return error < 0 ? error : 0;
5043 + }
5044 +
5045 + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
5046 +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
5047 + error = cxgb3_ofld_send(tdev, skb);
5048 + if (error < 0)
5049 + kfree_skb(skb);
5050 +- return error;
5051 ++ return error < 0 ? error : 0;
5052 + }
5053 +
5054 + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
5055 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
5056 +index 7e97cb5..c4e0915 100644
5057 +--- a/drivers/infiniband/hw/mlx5/main.c
5058 ++++ b/drivers/infiniband/hw/mlx5/main.c
5059 +@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
5060 + props->max_sge = min(max_rq_sg, max_sq_sg);
5061 + props->max_sge_rd = props->max_sge;
5062 + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
5063 +- props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
5064 ++ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
5065 + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
5066 + props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
5067 + props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
5068 +diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
5069 +index 40f85bb..3eff35c 100644
5070 +--- a/drivers/infiniband/hw/qib/qib_qp.c
5071 ++++ b/drivers/infiniband/hw/qib/qib_qp.c
5072 +@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
5073 + 32768 /* 1E */
5074 + };
5075 +
5076 +-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
5077 ++static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
5078 ++ gfp_t gfp)
5079 + {
5080 +- unsigned long page = get_zeroed_page(GFP_KERNEL);
5081 ++ unsigned long page = get_zeroed_page(gfp);
5082 +
5083 + /*
5084 + * Free the page if someone raced with us installing it.
5085 +@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
5086 + * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
5087 + */
5088 + static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
5089 +- enum ib_qp_type type, u8 port)
5090 ++ enum ib_qp_type type, u8 port, gfp_t gfp)
5091 + {
5092 + u32 i, offset, max_scan, qpn;
5093 + struct qpn_map *map;
5094 +@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
5095 + max_scan = qpt->nmaps - !offset;
5096 + for (i = 0;;) {
5097 + if (unlikely(!map->page)) {
5098 +- get_map_page(qpt, map);
5099 ++ get_map_page(qpt, map, gfp);
5100 + if (unlikely(!map->page))
5101 + break;
5102 + }
5103 +@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5104 + size_t sz;
5105 + size_t sg_list_sz;
5106 + struct ib_qp *ret;
5107 ++ gfp_t gfp;
5108 ++
5109 +
5110 + if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
5111 + init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
5112 +- init_attr->create_flags) {
5113 +- ret = ERR_PTR(-EINVAL);
5114 +- goto bail;
5115 +- }
5116 ++ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
5117 ++ return ERR_PTR(-EINVAL);
5118 ++
5119 ++ /* GFP_NOIO is applicable in RC QPs only */
5120 ++ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
5121 ++ init_attr->qp_type != IB_QPT_RC)
5122 ++ return ERR_PTR(-EINVAL);
5123 ++
5124 ++ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
5125 ++ GFP_NOIO : GFP_KERNEL;
5126 +
5127 + /* Check receive queue parameters if no SRQ is specified. */
5128 + if (!init_attr->srq) {
5129 +@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5130 + sz = sizeof(struct qib_sge) *
5131 + init_attr->cap.max_send_sge +
5132 + sizeof(struct qib_swqe);
5133 +- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
5134 ++ swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
5135 ++ gfp, PAGE_KERNEL);
5136 + if (swq == NULL) {
5137 + ret = ERR_PTR(-ENOMEM);
5138 + goto bail;
5139 +@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5140 + } else if (init_attr->cap.max_recv_sge > 1)
5141 + sg_list_sz = sizeof(*qp->r_sg_list) *
5142 + (init_attr->cap.max_recv_sge - 1);
5143 +- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
5144 ++ qp = kzalloc(sz + sg_list_sz, gfp);
5145 + if (!qp) {
5146 + ret = ERR_PTR(-ENOMEM);
5147 + goto bail_swq;
5148 + }
5149 + RCU_INIT_POINTER(qp->next, NULL);
5150 +- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
5151 ++ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
5152 + if (!qp->s_hdr) {
5153 + ret = ERR_PTR(-ENOMEM);
5154 + goto bail_qp;
5155 +@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5156 + qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
5157 + sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
5158 + sizeof(struct qib_rwqe);
5159 +- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
5160 +- qp->r_rq.size * sz);
5161 ++ if (gfp != GFP_NOIO)
5162 ++ qp->r_rq.wq = vmalloc_user(
5163 ++ sizeof(struct qib_rwq) +
5164 ++ qp->r_rq.size * sz);
5165 ++ else
5166 ++ qp->r_rq.wq = __vmalloc(
5167 ++ sizeof(struct qib_rwq) +
5168 ++ qp->r_rq.size * sz,
5169 ++ gfp, PAGE_KERNEL);
5170 ++
5171 + if (!qp->r_rq.wq) {
5172 + ret = ERR_PTR(-ENOMEM);
5173 + goto bail_qp;
5174 +@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5175 + dev = to_idev(ibpd->device);
5176 + dd = dd_from_dev(dev);
5177 + err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
5178 +- init_attr->port_num);
5179 ++ init_attr->port_num, gfp);
5180 + if (err < 0) {
5181 + ret = ERR_PTR(err);
5182 + vfree(qp->r_rq.wq);
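
The qib rework lets RC QPs created with IB_QP_CREATE_USE_GFP_NOIO do every allocation with GFP_NOIO, so storage stacks running over IB cannot recurse into I/O while the kernel is reclaiming memory; any other create flag is rejected, and vmalloc_user() is swapped for __vmalloc(..., gfp, PAGE_KERNEL) on that path. A sketch of the flag validation and gfp selection (the flag bit and gfp values below are illustrative):

    #include <stdio.h>

    #define QP_CREATE_USE_GFP_NOIO 0x1u /* illustrative bit for the IB flag */
    #define MY_GFP_KERNEL 0             /* illustrative gfp stand-ins */
    #define MY_GFP_NOIO   1

    /* Reject unknown flags, allow NOIO only for RC QPs, then pick the gfp. */
    static int pick_gfp(unsigned int create_flags, int is_rc, int *gfp)
    {
        if (create_flags & ~QP_CREATE_USE_GFP_NOIO)
            return -1;
        if ((create_flags & QP_CREATE_USE_GFP_NOIO) && !is_rc)
            return -1;
        *gfp = (create_flags & QP_CREATE_USE_GFP_NOIO) ?
               MY_GFP_NOIO : MY_GFP_KERNEL;
        return 0;
    }

    int main(void)
    {
        int gfp;

        if (!pick_gfp(QP_CREATE_USE_GFP_NOIO, 1, &gfp))
            printf("RC QP: %s\n", gfp == MY_GFP_NOIO ? "GFP_NOIO" : "GFP_KERNEL");
        if (pick_gfp(QP_CREATE_USE_GFP_NOIO, 0, &gfp))
            printf("non-RC QP with NOIO: rejected\n");
        return 0;
    }
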
5183 +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5184 +index f8ea069..b2fb528 100644
5185 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5186 ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5187 +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5188 + struct qib_ibdev *dev = to_idev(ibqp->device);
5189 + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
5190 + struct qib_mcast *mcast = NULL;
5191 +- struct qib_mcast_qp *p, *tmp;
5192 ++ struct qib_mcast_qp *p, *tmp, *delp = NULL;
5193 + struct rb_node *n;
5194 + int last = 0;
5195 + int ret;
5196 +
5197 +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
5198 +- ret = -EINVAL;
5199 +- goto bail;
5200 +- }
5201 ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
5202 ++ return -EINVAL;
5203 +
5204 + spin_lock_irq(&ibp->lock);
5205 +
5206 +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5207 + while (1) {
5208 + if (n == NULL) {
5209 + spin_unlock_irq(&ibp->lock);
5210 +- ret = -EINVAL;
5211 +- goto bail;
5212 ++ return -EINVAL;
5213 + }
5214 +
5215 + mcast = rb_entry(n, struct qib_mcast, rb_node);
5216 +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5217 + */
5218 + list_del_rcu(&p->list);
5219 + mcast->n_attached--;
5220 ++ delp = p;
5221 +
5222 + /* If this was the last attached QP, remove the GID too. */
5223 + if (list_empty(&mcast->qp_list)) {
5224 +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5225 + }
5226 +
5227 + spin_unlock_irq(&ibp->lock);
5228 ++ /* QP not attached */
5229 ++ if (!delp)
5230 ++ return -EINVAL;
5231 ++ /*
5232 ++ * Wait for any list walkers to finish before freeing the
5233 ++ * list element.
5234 ++ */
5235 ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
5236 ++ qib_mcast_qp_free(delp);
5237 +
5238 +- if (p) {
5239 +- /*
5240 +- * Wait for any list walkers to finish before freeing the
5241 +- * list element.
5242 +- */
5243 +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
5244 +- qib_mcast_qp_free(p);
5245 +- }
5246 + if (last) {
5247 + atomic_dec(&mcast->refcount);
5248 + wait_event(mcast->wait, !atomic_read(&mcast->refcount));
5249 +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5250 + dev->n_mcast_grps_allocated--;
5251 + spin_unlock_irq(&dev->n_mcast_grps_lock);
5252 + }
5253 +-
5254 +- ret = 0;
5255 +-
5256 +-bail:
5257 +- return ret;
5258 ++ return 0;
5259 + }
5260 +
5261 + int qib_mcast_tree_empty(struct qib_ibport *ibp)
5262 +diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
5263 +index b12a5d5..37199b9 100644
5264 +--- a/drivers/irqchip/irq-atmel-aic-common.c
5265 ++++ b/drivers/irqchip/irq-atmel-aic-common.c
5266 +@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
5267 + priority > AT91_AIC_IRQ_MAX_PRIORITY)
5268 + return -EINVAL;
5269 +
5270 +- *val &= AT91_AIC_PRIOR;
5271 ++ *val &= ~AT91_AIC_PRIOR;
5272 + *val |= priority;
5273 +
5274 + return 0;
5275 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
5276 +index e23d1d1..a159529f 100644
5277 +--- a/drivers/irqchip/irq-gic-v3-its.c
5278 ++++ b/drivers/irqchip/irq-gic-v3-its.c
5279 +@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
5280 + lpi_set_config(d, true);
5281 + }
5282 +
5283 +-static void its_eoi_irq(struct irq_data *d)
5284 +-{
5285 +- gic_write_eoir(d->hwirq);
5286 +-}
5287 +-
5288 + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
5289 + bool force)
5290 + {
5291 +@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
5292 + .name = "ITS",
5293 + .irq_mask = its_mask_irq,
5294 + .irq_unmask = its_unmask_irq,
5295 +- .irq_eoi = its_eoi_irq,
5296 ++ .irq_eoi = irq_chip_eoi_parent,
5297 + .irq_set_affinity = its_set_affinity,
5298 + .irq_compose_msi_msg = its_irq_compose_msi_msg,
5299 + };
5300 +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
5301 +index c22e2d4..efe5084 100644
5302 +--- a/drivers/irqchip/irq-mxs.c
5303 ++++ b/drivers/irqchip/irq-mxs.c
5304 +@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
5305 + writel(0, icoll_priv.intr + i);
5306 +
5307 + icoll_add_domain(np, ASM9260_NUM_IRQS);
5308 ++ set_handle_irq(icoll_handle_irq);
5309 +
5310 + return 0;
5311 + }
5312 +diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
5313 +index 8587d0f..f6cb1b8 100644
5314 +--- a/drivers/irqchip/irq-omap-intc.c
5315 ++++ b/drivers/irqchip/irq-omap-intc.c
5316 +@@ -47,6 +47,7 @@
5317 + #define INTC_ILR0 0x0100
5318 +
5319 + #define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
5320 ++#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
5321 + #define INTCPS_NR_ILR_REGS 128
5322 + #define INTCPS_NR_MIR_REGS 4
5323 +
5324 +@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
5325 + static asmlinkage void __exception_irq_entry
5326 + omap_intc_handle_irq(struct pt_regs *regs)
5327 + {
5328 ++ extern unsigned long irq_err_count;
5329 + u32 irqnr;
5330 +
5331 + irqnr = intc_readl(INTC_SIR);
5332 ++
5333 ++ /*
5334 ++ * A spurious IRQ can result if the interrupt that triggered the
5335 ++ * sorting is no longer active during the sorting (10 INTC
5336 ++ * functional clock cycles after interrupt assertion), or if a
5337 ++ * change in the interrupt mask affected the result during the
5338 ++ * sorting. There is no special handling required except ignoring
5339 ++ * the SIR register value just read and retrying.
5340 ++ * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
5341 ++ *
5342 ++ * Many times, a spurious interrupt situation has been fixed
5343 ++ * by adding a flush for the posted write acking the IRQ in
5344 ++ * the device driver. Typically, this is going to be the device
5345 ++ * driver whose interrupt was handled just before the spurious
5346 ++ * IRQ occurred. Pay attention to those device drivers if you
5347 ++ * run into hitting the spurious IRQ condition below.
5348 ++ */
5349 ++ if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
5350 ++ pr_err_once("%s: spurious irq!\n", __func__);
5351 ++ irq_err_count++;
5352 ++ omap_ack_irq(NULL);
5353 ++ return;
5354 ++ }
5355 ++
5356 + irqnr &= ACTIVEIRQ_MASK;
5357 +- WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
5358 + handle_domain_irq(domain, irqnr, regs);
5359 + }
5360 +
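
When the INTC's priority sort fails, the SIR read returns a value with bits 31:7 all set, which the handler now recognizes and drops (ack plus retry) instead of decoding a nonsense IRQ number. A sketch of that classification, with made-up register values:

    #include <stdio.h>

    #define ACTIVEIRQ_MASK   0x7fu             /* omap2/3 active irq bits */
    #define SPURIOUSIRQ_MASK (0x1ffffffu << 7) /* bits 31:7 set on failure */

    static const char *classify_sir(unsigned int sir)
    {
        if ((sir & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)
            return "spurious";
        return "valid";
    }

    int main(void)
    {
        unsigned int good = 0x25u, bad = 0xffffff80u | 0x25u;

        printf("0x%08x -> %s (irq %u)\n", good, classify_sir(good),
               good & ACTIVEIRQ_MASK);
        printf("0x%08x -> %s\n", bad, classify_sir(bad));
        return 0;
    }
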
5361 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
5362 +index 83392f8..22b9e34 100644
5363 +--- a/drivers/md/bcache/btree.c
5364 ++++ b/drivers/md/bcache/btree.c
5365 +@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
5366 + do {
5367 + ret = btree_root(gc_root, c, &op, &writes, &stats);
5368 + closure_sync(&writes);
5369 ++ cond_resched();
5370 +
5371 + if (ret && ret != -EAGAIN)
5372 + pr_warn("gc failed!");
5373 +@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
5374 + rw_lock(true, b, b->level);
5375 +
5376 + if (b->key.ptr[0] != btree_ptr ||
5377 +- b->seq != seq + 1)
5378 ++ b->seq != seq + 1) {
5379 ++ op->lock = b->level;
5380 + goto out;
5381 ++ }
5382 + }
5383 +
5384 + SET_KEY_PTRS(check_key, 1);
5385 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5386 +index 679a093..8d0ead9 100644
5387 +--- a/drivers/md/bcache/super.c
5388 ++++ b/drivers/md/bcache/super.c
5389 +@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
5390 + WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
5391 + sysfs_create_link(&c->kobj, &d->kobj, d->name),
5392 + "Couldn't create device <-> cache set symlinks");
5393 ++
5394 ++ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
5395 + }
5396 +
5397 + static void bcache_device_detach(struct bcache_device *d)
5398 +@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
5399 + buf[SB_LABEL_SIZE] = '\0';
5400 + env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
5401 +
5402 +- if (atomic_xchg(&dc->running, 1))
5403 ++ if (atomic_xchg(&dc->running, 1)) {
5404 ++ kfree(env[1]);
5405 ++ kfree(env[2]);
5406 + return;
5407 ++ }
5408 +
5409 + if (!d->c &&
5410 + BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
5411 +@@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
5412 + else
5413 + err = "device busy";
5414 + mutex_unlock(&bch_register_lock);
5415 ++ if (attr == &ksysfs_register_quiet)
5416 ++ goto out;
5417 + }
5418 + goto err;
5419 + }
5420 +@@ -1971,8 +1978,7 @@ out:
5421 + err_close:
5422 + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
5423 + err:
5424 +- if (attr != &ksysfs_register_quiet)
5425 +- pr_info("error opening %s: %s", path, err);
5426 ++ pr_info("error opening %s: %s", path, err);
5427 + ret = -EINVAL;
5428 + goto out;
5429 + }
5430 +@@ -2066,8 +2072,10 @@ static int __init bcache_init(void)
5431 + closure_debug_init();
5432 +
5433 + bcache_major = register_blkdev(0, "bcache");
5434 +- if (bcache_major < 0)
5435 ++ if (bcache_major < 0) {
5436 ++ unregister_reboot_notifier(&reboot);
5437 + return bcache_major;
5438 ++ }
5439 +
5440 + if (!(bcache_wq = create_workqueue("bcache")) ||
5441 + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
5442 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
5443 +index b23f88d..b9346cd 100644
5444 +--- a/drivers/md/bcache/writeback.c
5445 ++++ b/drivers/md/bcache/writeback.c
5446 +@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
5447 +
5448 + static bool dirty_pred(struct keybuf *buf, struct bkey *k)
5449 + {
5450 ++ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
5451 ++
5452 ++ BUG_ON(KEY_INODE(k) != dc->disk.id);
5453 ++
5454 + return KEY_DIRTY(k);
5455 + }
5456 +
5457 +@@ -372,11 +376,24 @@ next:
5458 + }
5459 + }
5460 +
5461 ++/*
5462 ++ * Returns true if we scanned the entire disk
5463 ++ */
5464 + static bool refill_dirty(struct cached_dev *dc)
5465 + {
5466 + struct keybuf *buf = &dc->writeback_keys;
5467 ++ struct bkey start = KEY(dc->disk.id, 0, 0);
5468 + struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
5469 +- bool searched_from_start = false;
5470 ++ struct bkey start_pos;
5471 ++
5472 ++ /*
5473 ++ * make sure the keybuf position is inside the range for this disk; at
5474 ++ * bringup we might not be attached yet, so this disk's inode number
5475 ++ * isn't initialized yet
5476 ++ */
5477 ++ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
5478 ++ bkey_cmp(&buf->last_scanned, &end) > 0)
5479 ++ buf->last_scanned = start;
5480 +
5481 + if (dc->partial_stripes_expensive) {
5482 + refill_full_stripes(dc);
5483 +@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
5484 + return false;
5485 + }
5486 +
5487 +- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
5488 +- buf->last_scanned = KEY(dc->disk.id, 0, 0);
5489 +- searched_from_start = true;
5490 +- }
5491 +-
5492 ++ start_pos = buf->last_scanned;
5493 + bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
5494 +
5495 +- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
5496 ++ if (bkey_cmp(&buf->last_scanned, &end) < 0)
5497 ++ return false;
5498 ++
5499 ++ /*
5500 ++ * If we get to the end, start scanning again from the beginning, and
5501 ++ * only scan up to where we initially started scanning from:
5502 ++ */
5503 ++ buf->last_scanned = start;
5504 ++ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
5505 ++
5506 ++ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
5507 + }
5508 +
5509 + static int bch_writeback_thread(void *arg)
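
refill_dirty() now clamps the scan cursor into this disk's key range, scans forward to the end, and on reaching it wraps to the start and scans only up to the original cursor, so one call covers the whole disk at most once. A sketch of that wraparound traversal over a plain array (a stand-in for the keybuf):

    #include <stdio.h>

    #define N 8

    /* Visit cur..N-1, then wrap and visit 0..start-1: every slot is
     * seen exactly once per call, mirroring the patched refill_dirty(). */
    static void scan_once_around(const int keys[N], int cur)
    {
        int start = cur, i;

        for (i = cur; i < N; i++)
            printf("visit %d (key %d)\n", i, keys[i]);
        for (i = 0; i < start; i++)
            printf("visit %d (key %d)\n", i, keys[i]);
    }

    int main(void)
    {
        int keys[N] = { 10, 11, 12, 13, 14, 15, 16, 17 };

        scan_once_around(keys, 5);
        return 0;
    }
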
5510 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
5511 +index 0a9dab1..073a042 100644
5512 +--- a/drivers/md/bcache/writeback.h
5513 ++++ b/drivers/md/bcache/writeback.h
5514 +@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
5515 +
5516 + static inline void bch_writeback_queue(struct cached_dev *dc)
5517 + {
5518 +- wake_up_process(dc->writeback_thread);
5519 ++ if (!IS_ERR_OR_NULL(dc->writeback_thread))
5520 ++ wake_up_process(dc->writeback_thread);
5521 + }
5522 +
5523 + static inline void bch_writeback_add(struct cached_dev *dc)
5524 +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
5525 +index fae34e7..12b5216 100644
5526 +--- a/drivers/md/dm-exception-store.h
5527 ++++ b/drivers/md/dm-exception-store.h
5528 +@@ -69,7 +69,7 @@ struct dm_exception_store_type {
5529 + * Update the metadata with this exception.
5530 + */
5531 + void (*commit_exception) (struct dm_exception_store *store,
5532 +- struct dm_exception *e,
5533 ++ struct dm_exception *e, int valid,
5534 + void (*callback) (void *, int success),
5535 + void *callback_context);
5536 +
5537 +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
5538 +index 3164b8b..4d39093 100644
5539 +--- a/drivers/md/dm-snap-persistent.c
5540 ++++ b/drivers/md/dm-snap-persistent.c
5541 +@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
5542 + }
5543 +
5544 + static void persistent_commit_exception(struct dm_exception_store *store,
5545 +- struct dm_exception *e,
5546 ++ struct dm_exception *e, int valid,
5547 + void (*callback) (void *, int success),
5548 + void *callback_context)
5549 + {
5550 +@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
5551 + struct core_exception ce;
5552 + struct commit_callback *cb;
5553 +
5554 ++ if (!valid)
5555 ++ ps->valid = 0;
5556 ++
5557 + ce.old_chunk = e->old_chunk;
5558 + ce.new_chunk = e->new_chunk;
5559 + write_exception(ps, ps->current_committed++, &ce);
5560 +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
5561 +index 9b7c8c8..4d50a12 100644
5562 +--- a/drivers/md/dm-snap-transient.c
5563 ++++ b/drivers/md/dm-snap-transient.c
5564 +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
5565 + }
5566 +
5567 + static void transient_commit_exception(struct dm_exception_store *store,
5568 +- struct dm_exception *e,
5569 ++ struct dm_exception *e, int valid,
5570 + void (*callback) (void *, int success),
5571 + void *callback_context)
5572 + {
5573 + /* Just succeed */
5574 +- callback(callback_context, 1);
5575 ++ callback(callback_context, valid);
5576 + }
5577 +
5578 + static void transient_usage(struct dm_exception_store *store,
5579 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
5580 +index c06b74e..61f184a 100644
5581 +--- a/drivers/md/dm-snap.c
5582 ++++ b/drivers/md/dm-snap.c
5583 +@@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
5584 + dm_table_event(s->ti->table);
5585 + }
5586 +
5587 +-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
5588 ++static void pending_complete(void *context, int success)
5589 + {
5590 ++ struct dm_snap_pending_exception *pe = context;
5591 + struct dm_exception *e;
5592 + struct dm_snapshot *s = pe->snap;
5593 + struct bio *origin_bios = NULL;
5594 +@@ -1509,24 +1510,13 @@ out:
5595 + free_pending_exception(pe);
5596 + }
5597 +
5598 +-static void commit_callback(void *context, int success)
5599 +-{
5600 +- struct dm_snap_pending_exception *pe = context;
5601 +-
5602 +- pending_complete(pe, success);
5603 +-}
5604 +-
5605 + static void complete_exception(struct dm_snap_pending_exception *pe)
5606 + {
5607 + struct dm_snapshot *s = pe->snap;
5608 +
5609 +- if (unlikely(pe->copy_error))
5610 +- pending_complete(pe, 0);
5611 +-
5612 +- else
5613 +- /* Update the metadata if we are persistent */
5614 +- s->store->type->commit_exception(s->store, &pe->e,
5615 +- commit_callback, pe);
5616 ++ /* Update the metadata if we are persistent */
5617 ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
5618 ++ pending_complete, pe);
5619 + }
5620 +
5621 + /*
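
The copy result is now threaded through commit_exception() as a valid flag, and both store types funnel into the single pending_complete() callback; the persistent store uses valid == 0 to mark itself invalid before completion runs. A compilable sketch of the callback plumbing, with heavily simplified types:

    #include <stdio.h>

    typedef void (*commit_cb)(void *context, int success);

    static void commit_exception(int valid, commit_cb cb, void *ctx)
    {
        if (!valid)
            printf("store marked invalid\n"); /* persistent store path */
        cb(ctx, valid);                       /* transient store just reports */
    }

    static void pending_complete(void *context, int success)
    {
        printf("pending_complete(%s): success=%d\n",
               (const char *)context, success);
    }

    int main(void)
    {
        commit_exception(1, pending_complete, "ok-copy");
        commit_exception(0, pending_complete, "failed-copy");
        return 0;
    }
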
5622 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
5623 +index 63903a5..a1cc797 100644
5624 +--- a/drivers/md/dm-thin.c
5625 ++++ b/drivers/md/dm-thin.c
5626 +@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
5627 + struct pool_c *pt = ti->private;
5628 + struct pool *pool = pt->pool;
5629 +
5630 +- cancel_delayed_work(&pool->waker);
5631 +- cancel_delayed_work(&pool->no_space_timeout);
5632 ++ cancel_delayed_work_sync(&pool->waker);
5633 ++ cancel_delayed_work_sync(&pool->no_space_timeout);
5634 + flush_workqueue(pool->wq);
5635 + (void) commit(pool);
5636 + }
5637 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
5638 +index 5df4048..dd83492 100644
5639 +--- a/drivers/md/dm.c
5640 ++++ b/drivers/md/dm.c
5641 +@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
5642 +
5643 + if (clone)
5644 + free_rq_clone(clone);
5645 ++ else if (!tio->md->queue->mq_ops)
5646 ++ free_rq_tio(tio);
5647 + }
5648 +
5649 + /*
5650 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
5651 +index fca6dbc..7e44005 100644
5652 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
5653 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
5654 +@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
5655 +
5656 + static int brb_pop(struct bop_ring_buffer *brb)
5657 + {
5658 +- struct block_op *bop;
5659 +-
5660 + if (brb_empty(brb))
5661 + return -ENODATA;
5662 +
5663 +- bop = brb->bops + brb->begin;
5664 + brb->begin = brb_next(brb, brb->begin);
5665 +
5666 + return 0;
5667 +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
5668 +index c38ef1a..e2a3833 100644
5669 +--- a/drivers/media/dvb-core/dvb_frontend.c
5670 ++++ b/drivers/media/dvb-core/dvb_frontend.c
5671 +@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
5672 + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
5673 + __func__, c->delivery_system, fe->ops.info.type);
5674 +
5675 +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
5676 +- * do it, it is done for it. */
5677 +- info->caps |= FE_CAN_INVERSION_AUTO;
5678 ++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
5679 ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
5680 ++ info->caps |= FE_CAN_INVERSION_AUTO;
5681 + err = 0;
5682 + break;
5683 + }
5684 +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
5685 +index 0e209b5..c6abeb4 100644
5686 +--- a/drivers/media/dvb-frontends/tda1004x.c
5687 ++++ b/drivers/media/dvb-frontends/tda1004x.c
5688 +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
5689 + {
5690 + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
5691 + struct tda1004x_state* state = fe->demodulator_priv;
5692 ++ int status;
5693 +
5694 + dprintk("%s\n", __func__);
5695 +
5696 ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
5697 ++ if (status == -1)
5698 ++ return -EIO;
5699 ++
5700 ++ /* Only update the properties cache if device is locked */
5701 ++ if (!(status & 8))
5702 ++ return 0;
5703 ++
5704 + // inversion status
5705 + fe_params->inversion = INVERSION_OFF;
5706 + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
5707 +diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
5708 +index 7830aef..40f7768 100644
5709 +--- a/drivers/media/rc/sunxi-cir.c
5710 ++++ b/drivers/media/rc/sunxi-cir.c
5711 +@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
5712 + if (!ir)
5713 + return -ENOMEM;
5714 +
5715 ++ spin_lock_init(&ir->ir_lock);
5716 ++
5717 + if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
5718 + ir->fifo_size = 64;
5719 + else
5720 +diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
5721 +index ce157ed..0e1ca2b 100644
5722 +--- a/drivers/media/tuners/si2157.c
5723 ++++ b/drivers/media/tuners/si2157.c
5724 +@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
5725 + len = fw->data[fw->size - remaining];
5726 + if (len > SI2157_ARGLEN) {
5727 + dev_err(&client->dev, "Bad firmware length\n");
5728 ++ ret = -EINVAL;
5729 + goto err_release_firmware;
5730 + }
5731 + memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
5732 +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
5733 +index 146071b..bfff1d1 100644
5734 +--- a/drivers/media/usb/gspca/ov534.c
5735 ++++ b/drivers/media/usb/gspca/ov534.c
5736 +@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
5737 + struct v4l2_fract *tpf = &cp->timeperframe;
5738 + struct sd *sd = (struct sd *) gspca_dev;
5739 +
5740 +- /* Set requested framerate */
5741 +- sd->frame_rate = tpf->denominator / tpf->numerator;
5742 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
5743 ++ /* Set default framerate */
5744 ++ sd->frame_rate = 30;
5745 ++ else
5746 ++ /* Set requested framerate */
5747 ++ sd->frame_rate = tpf->denominator / tpf->numerator;
5748 ++
5749 + if (gspca_dev->streaming)
5750 + set_frame_rate(gspca_dev);
5751 +
5752 +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
5753 +index c70ff40..c028a5c 100644
5754 +--- a/drivers/media/usb/gspca/topro.c
5755 ++++ b/drivers/media/usb/gspca/topro.c
5756 +@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
5757 + struct v4l2_fract *tpf = &cp->timeperframe;
5758 + int fr, i;
5759 +
5760 +- sd->framerate = tpf->denominator / tpf->numerator;
5761 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
5762 ++ sd->framerate = 30;
5763 ++ else
5764 ++ sd->framerate = tpf->denominator / tpf->numerator;
5765 ++
5766 + if (gspca_dev->streaming)
5767 + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
5768 +
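
Both gspca hunks close the same hole: VIDIOC_S_PARM may hand the driver a zeroed timeperframe, and denominator / numerator then divides by zero in the kernel; the fix substitutes a 30 fps default. A tiny sketch of the guard:

    #include <stdio.h>

    struct fract { unsigned int numerator, denominator; };

    /* Mirror of the patched logic: 0/0 means "use the default", 30 fps. */
    static unsigned int frame_rate(struct fract tpf)
    {
        if (tpf.numerator == 0 || tpf.denominator == 0)
            return 30;
        return tpf.denominator / tpf.numerator;
    }

    int main(void)
    {
        struct fract dflt = { 0, 0 }, ntsc = { 1001, 30000 };

        printf("%u fps, %u fps\n", frame_rate(dflt), frame_rate(ntsc));
        return 0;
    }
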
5769 +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
5770 +index 27b4b9e..502984c 100644
5771 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
5772 ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
5773 +@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
5774 + return res | POLLERR;
5775 +
5776 + /*
5777 +- * For output streams you can write as long as there are fewer buffers
5778 +- * queued than there are buffers available.
5779 ++ * For output streams you can call write() as long as there are fewer
5780 ++ * buffers queued than there are buffers available.
5781 + */
5782 +- if (q->is_output && q->queued_count < q->num_buffers)
5783 ++ if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
5784 + return res | POLLOUT | POLLWRNORM;
5785 +
5786 + if (list_empty(&q->done_list)) {
5787 +diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
5788 +index c241e15..cbd4331 100644
5789 +--- a/drivers/misc/cxl/vphb.c
5790 ++++ b/drivers/misc/cxl/vphb.c
5791 +@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
5792 + mask <<= shift;
5793 + val <<= shift;
5794 +
5795 +- v = (in_le32(ioaddr) & ~mask) || (val & mask);
5796 ++ v = (in_le32(ioaddr) & ~mask) | (val & mask);
5797 +
5798 + out_le32(ioaddr, v);
5799 + return PCIBIOS_SUCCESSFUL;
5800 +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
5801 +index b2f2486..80f9afc 100644
5802 +--- a/drivers/misc/mei/main.c
5803 ++++ b/drivers/misc/mei/main.c
5804 +@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
5805 + {
5806 + struct mei_cl *cl = file->private_data;
5807 +
5808 +- return mei_cl_notify_request(cl, file, request);
5809 ++ if (request != MEI_HBM_NOTIFICATION_START &&
5810 ++ request != MEI_HBM_NOTIFICATION_STOP)
5811 ++ return -EINVAL;
5812 ++
5813 ++ return mei_cl_notify_request(cl, file, (u8)request);
5814 + }
5815 +
5816 + /**
5817 +@@ -657,7 +661,9 @@ out:
5818 + * @file: pointer to file structure
5819 + * @band: band bitmap
5820 + *
5821 +- * Return: poll mask
5822 ++ * Return: negative on error,
5823 ++ * 0 if it made no changes,
5824 ++ * and positive if a process was added or deleted
5825 + */
5826 + static int mei_fasync(int fd, struct file *file, int band)
5827 + {
5828 +@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
5829 + struct mei_cl *cl = file->private_data;
5830 +
5831 + if (!mei_cl_is_connected(cl))
5832 +- return POLLERR;
5833 ++ return -ENODEV;
5834 +
5835 + return fasync_helper(fd, file, band, &cl->ev_async);
5836 + }
5837 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
5838 +index 3a9a79e..3d5087b 100644
5839 +--- a/drivers/mmc/core/mmc.c
5840 ++++ b/drivers/mmc/core/mmc.c
5841 +@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
5842 + mmc_set_clock(host, max_dtr);
5843 +
5844 + /* Switch card to HS mode */
5845 +- val = EXT_CSD_TIMING_HS |
5846 +- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
5847 ++ val = EXT_CSD_TIMING_HS;
5848 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
5849 + EXT_CSD_HS_TIMING, val,
5850 + card->ext_csd.generic_cmd6_time,
5851 +@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
5852 + mmc_set_clock(host, max_dtr);
5853 +
5854 + /* Switch HS400 to HS DDR */
5855 +- val = EXT_CSD_TIMING_HS |
5856 +- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
5857 ++ val = EXT_CSD_TIMING_HS;
5858 + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
5859 + val, card->ext_csd.generic_cmd6_time,
5860 + true, send_status, true);
5861 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
5862 +index 141eaa9..967535d 100644
5863 +--- a/drivers/mmc/core/sd.c
5864 ++++ b/drivers/mmc/core/sd.c
5865 +@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
5866 + * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
5867 + */
5868 + if (!mmc_host_is_spi(card->host) &&
5869 +- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
5870 +- card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
5871 +- card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
5872 ++ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
5873 ++ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
5874 ++ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
5875 + err = mmc_execute_tuning(card);
5876 +
5877 + /*
5878 +@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
5879 + * difference between v3.00 and 3.01 spec means that CMD19
5880 + * tuning is also available for DDR50 mode.
5881 + */
5882 +- if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
5883 ++ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
5884 + pr_warn("%s: ddr50 tuning failed\n",
5885 + mmc_hostname(card->host));
5886 + err = 0;
5887 +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
5888 +index 16d838e..467b3cf 100644
5889 +--- a/drivers/mmc/core/sdio.c
5890 ++++ b/drivers/mmc/core/sdio.c
5891 +@@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
5892 + * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
5893 + */
5894 + if (!mmc_host_is_spi(card->host) &&
5895 +- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
5896 +- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
5897 ++ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
5898 ++ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
5899 + err = mmc_execute_tuning(card);
5900 + out:
5901 + return err;
5902 +@@ -630,7 +630,7 @@ try_again:
5903 + */
5904 + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
5905 + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
5906 +- ocr);
5907 ++ ocr_card);
5908 + if (err == -EAGAIN) {
5909 + sdio_reset(host);
5910 + mmc_go_idle(host);
5911 +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
5912 +index fb26674..acece32 100644
5913 +--- a/drivers/mmc/host/mmci.c
5914 ++++ b/drivers/mmc/host/mmci.c
5915 +@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
5916 + {
5917 + .id = 0x00280180,
5918 + .mask = 0x00ffffff,
5919 +- .data = &variant_u300,
5920 ++ .data = &variant_nomadik,
5921 + },
5922 + {
5923 + .id = 0x00480180,
5924 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
5925 +index ce08896..28a057f 100644
5926 +--- a/drivers/mmc/host/pxamci.c
5927 ++++ b/drivers/mmc/host/pxamci.c
5928 +@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
5929 + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
5930 + goto out;
5931 + } else {
5932 +- mmc->caps |= host->pdata->gpio_card_ro_invert ?
5933 ++ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
5934 + 0 : MMC_CAP2_RO_ACTIVE_HIGH;
5935 + }
5936 +
5937 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
5938 +index f6047fc..a5cda92 100644
5939 +--- a/drivers/mmc/host/sdhci-acpi.c
5940 ++++ b/drivers/mmc/host/sdhci-acpi.c
5941 +@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
5942 + .ops = &sdhci_acpi_ops_int,
5943 + };
5944 +
5945 ++static int bxt_get_cd(struct mmc_host *mmc)
5946 ++{
5947 ++ int gpio_cd = mmc_gpio_get_cd(mmc);
5948 ++ struct sdhci_host *host = mmc_priv(mmc);
5949 ++ unsigned long flags;
5950 ++ int ret = 0;
5951 ++
5952 ++ if (!gpio_cd)
5953 ++ return 0;
5954 ++
5955 ++ pm_runtime_get_sync(mmc->parent);
5956 ++
5957 ++ spin_lock_irqsave(&host->lock, flags);
5958 ++
5959 ++ if (host->flags & SDHCI_DEVICE_DEAD)
5960 ++ goto out;
5961 ++
5962 ++ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
5963 ++out:
5964 ++ spin_unlock_irqrestore(&host->lock, flags);
5965 ++
5966 ++ pm_runtime_mark_last_busy(mmc->parent);
5967 ++ pm_runtime_put_autosuspend(mmc->parent);
5968 ++
5969 ++ return ret;
5970 ++}
5971 ++
5972 + static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
5973 + const char *hid, const char *uid)
5974 + {
5975 +@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
5976 +
5977 + /* Platform specific code during sd probe slot goes here */
5978 +
5979 ++ if (hid && !strcmp(hid, "80865ACA"))
5980 ++ host->mmc_host_ops.get_cd = bxt_get_cd;
5981 ++
5982 + return 0;
5983 + }
5984 +
5985 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
5986 +index cf7ad45..45ee07d 100644
5987 +--- a/drivers/mmc/host/sdhci-pci-core.c
5988 ++++ b/drivers/mmc/host/sdhci-pci-core.c
5989 +@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
5990 + if (sdhci_pci_spt_drive_strength > 0)
5991 + drive_strength = sdhci_pci_spt_drive_strength & 0xf;
5992 + else
5993 +- drive_strength = 1; /* 33-ohm */
5994 ++ drive_strength = 0; /* Default 50-ohm */
5995 +
5996 + if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
5997 + drive_strength = 0; /* Default 50-ohm */
5998 +@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
5999 + sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
6000 + }
6001 +
6002 ++static int bxt_get_cd(struct mmc_host *mmc)
6003 ++{
6004 ++ int gpio_cd = mmc_gpio_get_cd(mmc);
6005 ++ struct sdhci_host *host = mmc_priv(mmc);
6006 ++ unsigned long flags;
6007 ++ int ret = 0;
6008 ++
6009 ++ if (!gpio_cd)
6010 ++ return 0;
6011 ++
6012 ++ pm_runtime_get_sync(mmc->parent);
6013 ++
6014 ++ spin_lock_irqsave(&host->lock, flags);
6015 ++
6016 ++ if (host->flags & SDHCI_DEVICE_DEAD)
6017 ++ goto out;
6018 ++
6019 ++ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
6020 ++out:
6021 ++ spin_unlock_irqrestore(&host->lock, flags);
6022 ++
6023 ++ pm_runtime_mark_last_busy(mmc->parent);
6024 ++ pm_runtime_put_autosuspend(mmc->parent);
6025 ++
6026 ++ return ret;
6027 ++}
6028 ++
6029 + static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
6030 + {
6031 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
6032 +@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
6033 + slot->cd_con_id = NULL;
6034 + slot->cd_idx = 0;
6035 + slot->cd_override_level = true;
6036 ++ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
6037 ++ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
6038 ++ slot->host->mmc_host_ops.get_cd = bxt_get_cd;
6039 ++
6040 + return 0;
6041 + }
6042 +
6043 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
6044 +index b48565e..8814eb6 100644
6045 +--- a/drivers/mmc/host/sdhci.c
6046 ++++ b/drivers/mmc/host/sdhci.c
6047 +@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
6048 +
6049 + BUG_ON(len > 65536);
6050 +
6051 +- /* tran, valid */
6052 +- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
6053 +- desc += host->desc_sz;
6054 ++ if (len) {
6055 ++ /* tran, valid */
6056 ++ sdhci_adma_write_desc(host, desc, addr, len,
6057 ++ ADMA2_TRAN_VALID);
6058 ++ desc += host->desc_sz;
6059 ++ }
6060 +
6061 + /*
6062 + * If this triggers then we have a calculation bug
6063 +@@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
6064 + sdhci_runtime_pm_get(host);
6065 +
6066 + /* Firstly check card presence */
6067 +- present = sdhci_do_get_cd(host);
6068 ++ present = mmc->ops->get_cd(mmc);
6069 +
6070 + spin_lock_irqsave(&host->lock, flags);
6071 +
6072 +@@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
6073 +
6074 + static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
6075 + {
6076 +- if (host->runtime_suspended || host->bus_on)
6077 ++ if (host->bus_on)
6078 + return;
6079 + host->bus_on = true;
6080 + pm_runtime_get_noresume(host->mmc->parent);
6081 +@@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
6082 +
6083 + static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
6084 + {
6085 +- if (host->runtime_suspended || !host->bus_on)
6086 ++ if (!host->bus_on)
6087 + return;
6088 + host->bus_on = false;
6089 + pm_runtime_put_noidle(host->mmc->parent);
6090 +@@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
6091 +
6092 + host = mmc_priv(mmc);
6093 + host->mmc = mmc;
6094 ++ host->mmc_host_ops = sdhci_ops;
6095 ++ mmc->ops = &host->mmc_host_ops;
6096 +
6097 + return host;
6098 + }
6099 +@@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host)
6100 + /*
6101 + * Set host parameters.
6102 + */
6103 +- mmc->ops = &sdhci_ops;
6104 + max_clk = host->max_clk;
6105 +
6106 + if (host->ops->get_min_clock)
6107 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
6108 +index 9d4aa31..9c331ac 100644
6109 +--- a/drivers/mmc/host/sdhci.h
6110 ++++ b/drivers/mmc/host/sdhci.h
6111 +@@ -425,6 +425,7 @@ struct sdhci_host {
6112 +
6113 + /* Internal data */
6114 + struct mmc_host *mmc; /* MMC structure */
6115 ++ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
6116 + u64 dma_mask; /* custom DMA mask */
6117 +
6118 + #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
6119 +diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
6120 +index 4498e92..b47122d 100644
6121 +--- a/drivers/mmc/host/usdhi6rol0.c
6122 ++++ b/drivers/mmc/host/usdhi6rol0.c
6123 +@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
6124 + struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
6125 + struct mmc_request *mrq = host->mrq;
6126 + struct mmc_data *data = mrq ? mrq->data : NULL;
6127 +- struct scatterlist *sg = host->sg ?: data->sg;
6128 ++ struct scatterlist *sg;
6129 +
6130 + dev_warn(mmc_dev(host->mmc),
6131 + "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
6132 +@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
6133 + case USDHI6_WAIT_FOR_MWRITE:
6134 + case USDHI6_WAIT_FOR_READ:
6135 + case USDHI6_WAIT_FOR_WRITE:
6136 ++ sg = host->sg ?: data->sg;
6137 + dev_dbg(mmc_dev(host->mmc),
6138 + "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
6139 + data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
6140 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
6141 +index f1692e4..28bbca0 100644
6142 +--- a/drivers/net/bonding/bond_main.c
6143 ++++ b/drivers/net/bonding/bond_main.c
6144 +@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
6145 + static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
6146 + struct rtnl_link_stats64 *stats);
6147 + static void bond_slave_arr_handler(struct work_struct *work);
6148 ++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
6149 ++ int mod);
6150 +
6151 + /*---------------------------- General routines -----------------------------*/
6152 +
6153 +@@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
6154 + struct slave *slave)
6155 + {
6156 + struct arphdr *arp = (struct arphdr *)skb->data;
6157 +- struct slave *curr_active_slave;
6158 ++ struct slave *curr_active_slave, *curr_arp_slave;
6159 + unsigned char *arp_ptr;
6160 + __be32 sip, tip;
6161 + int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
6162 +@@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
6163 + &sip, &tip);
6164 +
6165 + curr_active_slave = rcu_dereference(bond->curr_active_slave);
6166 ++ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
6167 +
6168 +- /* Backup slaves won't see the ARP reply, but do come through
6169 +- * here for each ARP probe (so we swap the sip/tip to validate
6170 +- * the probe). In a "redundant switch, common router" type of
6171 +- * configuration, the ARP probe will (hopefully) travel from
6172 +- * the active, through one switch, the router, then the other
6173 +- * switch before reaching the backup.
6174 ++ /* We 'trust' the received ARP enough to validate it if:
6175 ++ *
6176 ++ * (a) the slave receiving the ARP is active (which includes the
6177 ++ * current ARP slave, if any), or
6178 ++ *
6179 ++ * (b) the receiving slave isn't active, but there is a currently
6180 ++ * active slave and it received valid arp reply(s) after it became
6181 ++ * the currently active slave, or
6182 ++ *
6183 ++ * (c) there is an ARP slave that sent an ARP during the prior ARP
6184 ++ * interval, and we receive an ARP reply on any slave. We accept
6185 ++ * these because switch FDB update delays may deliver the ARP
6186 ++ * reply to a slave other than the sender of the ARP request.
6187 + *
6188 +- * We 'trust' the arp requests if there is an active slave and
6189 +- * it received valid arp reply(s) after it became active. This
6190 +- * is done to avoid endless looping when we can't reach the
6191 ++ * Note: for (b), backup slaves are receiving the broadcast ARP
6192 ++ * request, not a reply. This request passes from the sending
6193 ++ * slave through the L2 switch(es) to the receiving slave. Since
6194 ++ * this is checking the request, sip/tip are swapped for
6195 ++ * validation.
6196 ++ *
6197 ++ * This is done to avoid endless looping when we can't reach the
6198 + * arp_ip_target and fool ourselves with our own arp requests.
6199 + */
6200 +-
6201 + if (bond_is_active_slave(slave))
6202 + bond_validate_arp(bond, slave, sip, tip);
6203 + else if (curr_active_slave &&
6204 + time_after(slave_last_rx(bond, curr_active_slave),
6205 + curr_active_slave->last_link_up))
6206 + bond_validate_arp(bond, slave, tip, sip);
6207 ++ else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
6208 ++ bond_time_in_interval(bond,
6209 ++ dev_trans_start(curr_arp_slave->dev), 1))
6210 ++ bond_validate_arp(bond, slave, sip, tip);
6211 +
6212 + out_unlock:
6213 + if (arp != (struct arphdr *)skb->data)
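
The rewritten comment above enumerates three cases in which a received ARP is trusted. As a reading aid only, here is a minimal C sketch of that decision — the kernel state is flattened into plain flags, and nothing here comes from the driver itself — including when sip/tip are validated swapped:

#include <stdbool.h>
#include <stdio.h>

/* Flattened view of cases (a), (b), (c) from the comment above. */
struct arp_ctx {
    bool slave_is_active;        /* (a) receiving slave is active         */
    bool curr_active_validated;  /* (b) active slave saw valid ARP later  */
    bool arp_slave_probing;      /* (c) probe sent within last interval   */
    bool pkt_is_reply;
};

/* Returns true if the ARP should be validated; *swap tells the caller to
 * validate with sip/tip exchanged (the backup slave sees the request). */
static bool should_validate(const struct arp_ctx *c, bool *swap)
{
    *swap = false;
    if (c->slave_is_active)
        return true;                          /* (a) */
    if (c->curr_active_validated) {
        *swap = true;                         /* (b): checking the request */
        return true;
    }
    if (c->arp_slave_probing && c->pkt_is_reply)
        return true;                          /* (c): FDB-delayed reply    */
    return false;
}

int main(void)
{
    struct arp_ctx c = { false, false, true, true };
    bool swap;
    printf("validate=%d swap=%d\n", should_validate(&c, &swap), swap);
    return 0;
}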
6214 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
6215 +index fc5b756..eb7192f 100644
6216 +--- a/drivers/net/can/usb/ems_usb.c
6217 ++++ b/drivers/net/can/usb/ems_usb.c
6218 +@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
6219 + */
6220 + #define EMS_USB_ARM7_CLOCK 8000000
6221 +
6222 ++#define CPC_TX_QUEUE_TRIGGER_LOW 25
6223 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35
6224 ++
6225 + /*
6226 + * CAN-Message representation in a CPC_MSG. Message object type is
6227 + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
6228 +@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
6229 + switch (urb->status) {
6230 + case 0:
6231 + dev->free_slots = dev->intr_in_buffer[1];
6232 ++ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
6233 ++ if (netif_queue_stopped(netdev)){
6234 ++ netif_wake_queue(netdev);
6235 ++ }
6236 ++ }
6237 + break;
6238 +
6239 + case -ECONNRESET: /* unlink */
6240 +@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
6241 + /* Release context */
6242 + context->echo_index = MAX_TX_URBS;
6243 +
6244 +- if (netif_queue_stopped(netdev))
6245 +- netif_wake_queue(netdev);
6246 + }
6247 +
6248 + /*
6249 +@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
6250 + int err, i;
6251 +
6252 + dev->intr_in_buffer[0] = 0;
6253 +- dev->free_slots = 15; /* initial size */
6254 ++ dev->free_slots = 50; /* initial size */
6255 +
6256 + for (i = 0; i < MAX_RX_URBS; i++) {
6257 + struct urb *urb = NULL;
6258 +@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
6259 +
6260 + /* Slow down tx path */
6261 + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
6262 +- dev->free_slots < 5) {
6263 ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
6264 + netif_stop_queue(netdev);
6265 + }
6266 + }
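
The ems_usb change above stops waking the queue from the TX-completion path and instead wakes it from the interrupt URB that reports free_slots, with a low/high watermark pair so the queue does not thrash around a single threshold. A minimal sketch of that hysteresis pattern (thresholds taken from the patch, everything else stubbed):

#include <stdbool.h>
#include <stdio.h>

#define TRIGGER_LOW  25
#define TRIGGER_HIGH 35

static bool queue_stopped;

static void on_transmit(int free_slots)
{
    if (free_slots < TRIGGER_LOW) {     /* stop before the device runs dry */
        queue_stopped = true;
        printf("free=%d -> stop\n", free_slots);
    }
}

static void on_status_irq(int free_slots)
{
    /* restart only once there is real headroom again */
    if (queue_stopped && free_slots > TRIGGER_HIGH) {
        queue_stopped = false;
        printf("free=%d -> wake\n", free_slots);
    }
}

int main(void)
{
    on_transmit(24);    /* dips below low watermark: queue stops      */
    on_status_irq(30);  /* between watermarks: stays stopped          */
    on_status_irq(40);  /* above high watermark: queue wakes          */
    return 0;
}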
6267 +diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
6268 +index b06dba0..2dea39b 100644
6269 +--- a/drivers/net/dsa/mv88e6xxx.c
6270 ++++ b/drivers/net/dsa/mv88e6xxx.c
6271 +@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
6272 +
6273 + /* no PVID with ranges, otherwise it's a bug */
6274 + if (pvid)
6275 +- err = _mv88e6xxx_port_pvid_set(ds, port, vid);
6276 ++ err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
6277 + unlock:
6278 + mutex_unlock(&ps->smi_mutex);
6279 +
6280 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
6281 +index 79789d8..ca5ac5d 100644
6282 +--- a/drivers/net/ethernet/broadcom/tg3.c
6283 ++++ b/drivers/net/ethernet/broadcom/tg3.c
6284 +@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6285 + return ret;
6286 + }
6287 +
6288 ++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
6289 ++{
6290 ++ /* Check if we will never have enough descriptors,
6291 ++ * as gso_segs can be more than current ring size
6292 ++ */
6293 ++ return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
6294 ++}
6295 ++
6296 + static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6297 +
6298 + /* Use GSO to workaround all TSO packets that meet HW bug conditions
6299 +@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6300 + * vlan encapsulated.
6301 + */
6302 + if (skb->protocol == htons(ETH_P_8021Q) ||
6303 +- skb->protocol == htons(ETH_P_8021AD))
6304 +- return tg3_tso_bug(tp, tnapi, txq, skb);
6305 ++ skb->protocol == htons(ETH_P_8021AD)) {
6306 ++ if (tg3_tso_bug_gso_check(tnapi, skb))
6307 ++ return tg3_tso_bug(tp, tnapi, txq, skb);
6308 ++ goto drop;
6309 ++ }
6310 +
6311 + if (!skb_is_gso_v6(skb)) {
6312 + if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6313 +- tg3_flag(tp, TSO_BUG))
6314 +- return tg3_tso_bug(tp, tnapi, txq, skb);
6315 +-
6316 ++ tg3_flag(tp, TSO_BUG)) {
6317 ++ if (tg3_tso_bug_gso_check(tnapi, skb))
6318 ++ return tg3_tso_bug(tp, tnapi, txq, skb);
6319 ++ goto drop;
6320 ++ }
6321 + ip_csum = iph->check;
6322 + ip_tot_len = iph->tot_len;
6323 + iph->check = 0;
6324 +@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6325 + if (would_hit_hwbug) {
6326 + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6327 +
6328 +- if (mss) {
6329 ++ if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
6330 + /* If it's a TSO packet, do GSO instead of
6331 + * allocating and copying to a large linear SKB
6332 + */
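
tg3_tso_bug_gso_check() above gates the software-GSO workaround on the TX ring ever being able to hold the segments; packets that can never fit are dropped rather than retried forever. A toy version of the budget check (the ring size below is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the heuristic added above: the GSO fallback only helps if the
 * segment count can plausibly fit within a third of the pending ring. */
static bool gso_fallback_possible(unsigned int gso_segs,
                                  unsigned int tx_pending)
{
    return gso_segs < tx_pending / 3;
}

int main(void)
{
    /* with a 511-entry ring, up to 169 segments take the workaround */
    printf("%d %d\n",
           gso_fallback_possible(100, 511),   /* 1: GSO workaround */
           gso_fallback_possible(200, 511));  /* 0: drop instead   */
    return 0;
}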
6333 +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
6334 +index 1671fa3..7ba6d53 100644
6335 +--- a/drivers/net/ethernet/cisco/enic/enic.h
6336 ++++ b/drivers/net/ethernet/cisco/enic/enic.h
6337 +@@ -33,7 +33,7 @@
6338 +
6339 + #define DRV_NAME "enic"
6340 + #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
6341 +-#define DRV_VERSION "2.3.0.12"
6342 ++#define DRV_VERSION "2.3.0.20"
6343 + #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
6344 +
6345 + #define ENIC_BARS_MAX 6
6346 +diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
6347 +index 1ffd105..1fdf5fe 100644
6348 +--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
6349 ++++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
6350 +@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
6351 + int wait)
6352 + {
6353 + struct devcmd2_controller *dc2c = vdev->devcmd2;
6354 +- struct devcmd2_result *result = dc2c->result + dc2c->next_result;
6355 ++ struct devcmd2_result *result;
6356 ++ u8 color;
6357 + unsigned int i;
6358 + int delay, err;
6359 + u32 fetch_index, new_posted;
6360 +@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
6361 + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
6362 + return 0;
6363 +
6364 ++ result = dc2c->result + dc2c->next_result;
6365 ++ color = dc2c->color;
6366 ++
6367 ++ dc2c->next_result++;
6368 ++ if (dc2c->next_result == dc2c->result_size) {
6369 ++ dc2c->next_result = 0;
6370 ++ dc2c->color = dc2c->color ? 0 : 1;
6371 ++ }
6372 ++
6373 + for (delay = 0; delay < wait; delay++) {
6374 +- if (result->color == dc2c->color) {
6375 +- dc2c->next_result++;
6376 +- if (dc2c->next_result == dc2c->result_size) {
6377 +- dc2c->next_result = 0;
6378 +- dc2c->color = dc2c->color ? 0 : 1;
6379 +- }
6380 ++ if (result->color == color) {
6381 + if (result->error) {
6382 + err = result->error;
6383 + if (err != ERR_ECMDUNKNOWN ||
6384 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6385 +index 038f9ce..1494997 100644
6386 +--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6387 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6388 +@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
6389 + .enable = mlx4_en_phc_enable,
6390 + };
6391 +
6392 ++#define MLX4_EN_WRAP_AROUND_SEC 10ULL
6393 ++
6394 ++/* This function calculates the max shift that enables the user range
6395 ++ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
6396 ++ */
6397 ++static u32 freq_to_shift(u16 freq)
6398 ++{
6399 ++ u32 freq_khz = freq * 1000;
6400 ++ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
6401 ++ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
6402 ++ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
6403 ++ /* calculate max possible multiplier in order to fit in 64bit */
6404 ++ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
6405 ++
6406 ++ /* This comes from the reverse of clocksource_khz2mult */
6407 ++ return ilog2(div_u64(max_mul * freq_khz, 1000000));
6408 ++}
6409 ++
6410 + void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
6411 + {
6412 + struct mlx4_dev *dev = mdev->dev;
6413 +@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
6414 + memset(&mdev->cycles, 0, sizeof(mdev->cycles));
6415 + mdev->cycles.read = mlx4_en_read_clock;
6416 + mdev->cycles.mask = CLOCKSOURCE_MASK(48);
6417 +- /* Using shift to make calculation more accurate. Since current HW
6418 +- * clock frequency is 427 MHz, and cycles are given using a 48 bits
6419 +- * register, the biggest shift when calculating using u64, is 14
6420 +- * (max_cycles * multiplier < 2^64)
6421 +- */
6422 +- mdev->cycles.shift = 14;
6423 ++ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
6424 + mdev->cycles.mult =
6425 + clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
6426 + mdev->nominal_c_mult = mdev->cycles.mult;
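
freq_to_shift() above replaces the hard-coded shift of 14 with the largest shift whose multiplier still keeps MLX4_EN_WRAP_AROUND_SEC worth of cycles below 2^64. A standalone re-derivation — the kernel helpers are reimplemented here, and the is_power_of_2 corner case is omitted — suggests a 427 MHz clock now gets shift 20 rather than 14:

#include <stdio.h>
#include <stdint.h>

#define MLX4_EN_WRAP_AROUND_SEC 10ULL

static int ilog2_u64(uint64_t v)            /* floor(log2(v)) */
{
    int l = -1;
    while (v) { v >>= 1; l++; }
    return l;
}

static uint64_t roundup_pow_of_two_u64(uint64_t v)
{
    uint64_t p = 1;
    while (p < v) p <<= 1;
    return p;
}

static unsigned int freq_to_shift(unsigned int freq_mhz)
{
    uint64_t freq_khz = (uint64_t)freq_mhz * 1000;
    uint64_t max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
    uint64_t rounded = roundup_pow_of_two_u64(max_val_cycles) - 1;
    /* largest multiplier for which rounded * mult stays below 2^64 */
    uint64_t max_mul = UINT64_MAX / rounded;

    return (unsigned int)ilog2_u64(max_mul * freq_khz / 1000000);
}

int main(void)
{
    /* 427 MHz: 4.27e12 cycles per 10 s, rounded to 2^42 - 1, shift 20 */
    printf("shift(427 MHz) = %u\n", freq_to_shift(427));
    return 0;
}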
6427 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6428 +index 7869f97..67e9633 100644
6429 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6430 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6431 +@@ -2381,8 +2381,6 @@ out:
6432 + /* set offloads */
6433 + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
6434 + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
6435 +- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6436 +- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
6437 + }
6438 +
6439 + static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
6440 +@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
6441 + /* unset offloads */
6442 + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
6443 + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
6444 +- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
6445 +- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
6446 +
6447 + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
6448 + VXLAN_STEER_BY_OUTER_MAC, 0);
6449 +@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
6450 + priv->rss_hash_fn = ETH_RSS_HASH_TOP;
6451 + }
6452 +
6453 ++ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
6454 ++ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6455 ++ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
6456 ++ }
6457 ++
6458 + mdev->pndev[port] = dev;
6459 + mdev->upper[port] = NULL;
6460 +
6461 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
6462 +index ee99e67..3904b5f 100644
6463 +--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
6464 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
6465 +@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
6466 + stats->collisions = 0;
6467 + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
6468 + stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
6469 +- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6470 ++ stats->rx_over_errors = 0;
6471 + stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
6472 + stats->rx_frame_errors = 0;
6473 + stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6474 +- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6475 ++ stats->rx_missed_errors = 0;
6476 + stats->tx_aborted_errors = 0;
6477 + stats->tx_carrier_errors = 0;
6478 + stats->tx_fifo_errors = 0;
6479 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6480 +index 617fb22..7dbeafa 100644
6481 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6482 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6483 +@@ -45,6 +45,7 @@
6484 + #include <linux/if_bridge.h>
6485 + #include <linux/workqueue.h>
6486 + #include <linux/jiffies.h>
6487 ++#include <linux/rtnetlink.h>
6488 + #include <net/switchdev.h>
6489 +
6490 + #include "spectrum.h"
6491 +@@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
6492 +
6493 + mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
6494 +
6495 ++ rtnl_lock();
6496 + do {
6497 + mlxsw_reg_sfn_pack(sfn_pl);
6498 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
6499 +@@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
6500 + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
6501 +
6502 + } while (num_rec);
6503 ++ rtnl_unlock();
6504 +
6505 + kfree(sfn_pl);
6506 + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
6507 +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
6508 +index e9f2349..52ec3d6 100644
6509 +--- a/drivers/net/ethernet/rocker/rocker.c
6510 ++++ b/drivers/net/ethernet/rocker/rocker.c
6511 +@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
6512 + info.addr = lw->addr;
6513 + info.vid = lw->vid;
6514 +
6515 ++ rtnl_lock();
6516 + if (learned && removing)
6517 + call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
6518 + lw->rocker_port->dev, &info.info);
6519 + else if (learned && !removing)
6520 + call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
6521 + lw->rocker_port->dev, &info.info);
6522 ++ rtnl_unlock();
6523 +
6524 + rocker_port_kfree(lw->trans, work);
6525 + }
6526 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
6527 +index 47b7117..e6cefd0 100644
6528 +--- a/drivers/net/phy/dp83640.c
6529 ++++ b/drivers/net/phy/dp83640.c
6530 +@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
6531 + struct skb_shared_hwtstamps *shhwtstamps = NULL;
6532 + struct sk_buff *skb;
6533 + unsigned long flags;
6534 ++ u8 overflow;
6535 ++
6536 ++ overflow = (phy_rxts->ns_hi >> 14) & 0x3;
6537 ++ if (overflow)
6538 ++ pr_debug("rx timestamp queue overflow, count %d\n", overflow);
6539 +
6540 + spin_lock_irqsave(&dp83640->rx_lock, flags);
6541 +
6542 +@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
6543 + struct skb_shared_hwtstamps shhwtstamps;
6544 + struct sk_buff *skb;
6545 + u64 ns;
6546 ++ u8 overflow;
6547 +
6548 + /* We must already have the skb that triggered this. */
6549 +
6550 +@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
6551 + pr_debug("have timestamp but tx_queue empty\n");
6552 + return;
6553 + }
6554 ++
6555 ++ overflow = (phy_txts->ns_hi >> 14) & 0x3;
6556 ++ if (overflow) {
6557 ++ pr_debug("tx timestamp queue overflow, count %d\n", overflow);
6558 ++ while (skb) {
6559 ++ skb_complete_tx_timestamp(skb, NULL);
6560 ++ skb = skb_dequeue(&dp83640->tx_queue);
6561 ++ }
6562 ++ return;
6563 ++ }
6564 ++
6565 + ns = phy2txts(phy_txts);
6566 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
6567 + shhwtstamps.hwtstamp = ns_to_ktime(ns);
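
Both dp83640 hunks above read a 2-bit overflow counter out of bits 15:14 of ns_hi. A minimal sketch of that extraction, with the register layout assumed from the shift and mask in the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* ns_hi packs a 2-bit overflow count in bits 15:14 above 14 ns bits */
    uint16_t ns_hi = (2u << 14) | 0x1234;   /* overflow count 2 (assumed) */
    unsigned int overflow = (ns_hi >> 14) & 0x3;
    unsigned int ns_bits  = ns_hi & 0x3FFF;

    printf("overflow=%u ns_hi_bits=0x%04X\n", overflow, ns_bits);
    return 0;
}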
6568 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
6569 +index 0a37f84..4e0068e7 100644
6570 +--- a/drivers/net/ppp/pppoe.c
6571 ++++ b/drivers/net/ppp/pppoe.c
6572 +@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
6573 +
6574 + if (!__pppoe_xmit(sk_pppox(relay_po), skb))
6575 + goto abort_put;
6576 ++
6577 ++ sock_put(sk_pppox(relay_po));
6578 + } else {
6579 + if (sock_queue_rcv_skb(sk, skb))
6580 + goto abort_kfree;
6581 +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
6582 +index 597c53e..f7e8c79 100644
6583 +--- a/drivers/net/ppp/pptp.c
6584 ++++ b/drivers/net/ppp/pptp.c
6585 +@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
6586 + return i < MAX_CALLID;
6587 + }
6588 +
6589 +-static int add_chan(struct pppox_sock *sock)
6590 ++static int add_chan(struct pppox_sock *sock,
6591 ++ struct pptp_addr *sa)
6592 + {
6593 + static int call_id;
6594 +
6595 + spin_lock(&chan_lock);
6596 +- if (!sock->proto.pptp.src_addr.call_id) {
6597 ++ if (!sa->call_id) {
6598 + call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
6599 + if (call_id == MAX_CALLID) {
6600 + call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
6601 + if (call_id == MAX_CALLID)
6602 + goto out_err;
6603 + }
6604 +- sock->proto.pptp.src_addr.call_id = call_id;
6605 +- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
6606 ++ sa->call_id = call_id;
6607 ++ } else if (test_bit(sa->call_id, callid_bitmap)) {
6608 + goto out_err;
6609 ++ }
6610 +
6611 +- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
6612 +- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
6613 ++ sock->proto.pptp.src_addr = *sa;
6614 ++ set_bit(sa->call_id, callid_bitmap);
6615 ++ rcu_assign_pointer(callid_sock[sa->call_id], sock);
6616 + spin_unlock(&chan_lock);
6617 +
6618 + return 0;
6619 +@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
6620 + struct sock *sk = sock->sk;
6621 + struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
6622 + struct pppox_sock *po = pppox_sk(sk);
6623 +- struct pptp_opt *opt = &po->proto.pptp;
6624 + int error = 0;
6625 +
6626 + if (sockaddr_len < sizeof(struct sockaddr_pppox))
6627 +@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
6628 +
6629 + lock_sock(sk);
6630 +
6631 +- opt->src_addr = sp->sa_addr.pptp;
6632 +- if (add_chan(po))
6633 ++ if (sk->sk_state & PPPOX_DEAD) {
6634 ++ error = -EALREADY;
6635 ++ goto out;
6636 ++ }
6637 ++
6638 ++ if (sk->sk_state & PPPOX_BOUND) {
6639 + error = -EBUSY;
6640 ++ goto out;
6641 ++ }
6642 ++
6643 ++ if (add_chan(po, &sp->sa_addr.pptp))
6644 ++ error = -EBUSY;
6645 ++ else
6646 ++ sk->sk_state |= PPPOX_BOUND;
6647 +
6648 ++out:
6649 + release_sock(sk);
6650 + return error;
6651 + }
6652 +@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
6653 + }
6654 +
6655 + opt->dst_addr = sp->sa_addr.pptp;
6656 +- sk->sk_state = PPPOX_CONNECTED;
6657 ++ sk->sk_state |= PPPOX_CONNECTED;
6658 +
6659 + end:
6660 + release_sock(sk);
6661 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
6662 +index 5fccc5a..982e0acd 100644
6663 +--- a/drivers/net/usb/qmi_wwan.c
6664 ++++ b/drivers/net/usb/qmi_wwan.c
6665 +@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
6666 +
6667 + /* 3. Combined interface devices matching on interface number */
6668 + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
6669 ++ {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
6670 + {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
6671 + {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
6672 + {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
6673 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
6674 +index 405a7b6..e0fcda4 100644
6675 +--- a/drivers/net/vxlan.c
6676 ++++ b/drivers/net/vxlan.c
6677 +@@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6678 + vxlan->cfg.port_max, true);
6679 +
6680 + if (info) {
6681 +- if (info->key.tun_flags & TUNNEL_CSUM)
6682 +- flags |= VXLAN_F_UDP_CSUM;
6683 +- else
6684 +- flags &= ~VXLAN_F_UDP_CSUM;
6685 +-
6686 + ttl = info->key.ttl;
6687 + tos = info->key.tos;
6688 +
6689 +@@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6690 + goto drop;
6691 + sk = vxlan->vn4_sock->sock->sk;
6692 +
6693 +- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
6694 +- df = htons(IP_DF);
6695 ++ if (info) {
6696 ++ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
6697 ++ df = htons(IP_DF);
6698 ++
6699 ++ if (info->key.tun_flags & TUNNEL_CSUM)
6700 ++ flags |= VXLAN_F_UDP_CSUM;
6701 ++ else
6702 ++ flags &= ~VXLAN_F_UDP_CSUM;
6703 ++ }
6704 +
6705 + memset(&fl4, 0, sizeof(fl4));
6706 + fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
6707 +@@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6708 + return;
6709 + }
6710 +
6711 ++ if (info) {
6712 ++ if (info->key.tun_flags & TUNNEL_CSUM)
6713 ++ flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
6714 ++ else
6715 ++ flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
6716 ++ }
6717 ++
6718 + ttl = ttl ? : ip6_dst_hoplimit(ndst);
6719 + err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
6720 + 0, ttl, src_port, dst_port, htonl(vni << 8), md,
6721 +diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
6722 +index e18629a..0961f33 100644
6723 +--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
6724 ++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
6725 +@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
6726 +
6727 + priv->ucode_loaded = false;
6728 + iwl_trans_stop_device(priv->trans);
6729 ++ ret = iwl_trans_start_hw(priv->trans);
6730 ++ if (ret)
6731 ++ goto out;
6732 +
6733 + priv->wowlan = true;
6734 +
6735 +diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
6736 +index d6e0c1b..8215d74 100644
6737 +--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
6738 ++++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
6739 +@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
6740 + return -EBUSY;
6741 + }
6742 +
6743 ++ /* we don't support "match all" in the firmware */
6744 ++ if (!req->n_match_sets)
6745 ++ return -EOPNOTSUPP;
6746 ++
6747 + ret = iwl_mvm_check_running_scans(mvm, type);
6748 + if (ret)
6749 + return ret;
6750 +diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
6751 +index 639761f..d58c094 100644
6752 +--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
6753 ++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
6754 +@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6755 + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
6756 + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
6757 + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
6758 ++ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
6759 + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
6760 + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
6761 + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
6762 +@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6763 + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
6764 + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
6765 + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
6766 +- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
6767 ++ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
6768 + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
6769 + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
6770 +- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
6771 ++ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
6772 + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
6773 + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
6774 + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
6775 +diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
6776 +index 9028345..8c72047 100644
6777 +--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
6778 ++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
6779 +@@ -7,6 +7,7 @@
6780 + *
6781 + * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
6782 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6783 ++ * Copyright(c) 2016 Intel Deutschland GmbH
6784 + *
6785 + * This program is free software; you can redistribute it and/or modify
6786 + * it under the terms of version 2 of the GNU General Public License as
6787 +@@ -33,6 +34,7 @@
6788 + *
6789 + * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
6790 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6791 ++ * Copyright(c) 2016 Intel Deutschland GmbH
6792 + * All rights reserved.
6793 + *
6794 + * Redistribution and use in source and binary forms, with or without
6795 +@@ -924,9 +926,16 @@ monitor:
6796 + if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
6797 + iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
6798 + trans_pcie->fw_mon_phys >> dest->base_shift);
6799 +- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6800 +- (trans_pcie->fw_mon_phys +
6801 +- trans_pcie->fw_mon_size) >> dest->end_shift);
6802 ++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
6803 ++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6804 ++ (trans_pcie->fw_mon_phys +
6805 ++ trans_pcie->fw_mon_size - 256) >>
6806 ++ dest->end_shift);
6807 ++ else
6808 ++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6809 ++ (trans_pcie->fw_mon_phys +
6810 ++ trans_pcie->fw_mon_size) >>
6811 ++ dest->end_shift);
6812 + }
6813 + }
6814 +
6815 +diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
6816 +index f46c9d7..7f471bf 100644
6817 +--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
6818 ++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
6819 +@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6820 + hw_queue);
6821 + if (rx_remained_cnt == 0)
6822 + return;
6823 +-
6824 ++ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
6825 ++ rtlpci->rx_ring[rxring_idx].idx];
6826 ++ pdesc = (struct rtl_rx_desc *)skb->data;
6827 + } else { /* rx descriptor */
6828 + pdesc = &rtlpci->rx_ring[rxring_idx].desc[
6829 + rtlpci->rx_ring[rxring_idx].idx];
6830 +@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6831 + new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
6832 + if (unlikely(!new_skb))
6833 + goto no_new;
6834 +- if (rtlpriv->use_new_trx_flow) {
6835 +- buffer_desc =
6836 +- &rtlpci->rx_ring[rxring_idx].buffer_desc
6837 +- [rtlpci->rx_ring[rxring_idx].idx];
6838 +- /*means rx wifi info*/
6839 +- pdesc = (struct rtl_rx_desc *)skb->data;
6840 +- }
6841 + memset(&rx_status , 0 , sizeof(rx_status));
6842 + rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
6843 + &rx_status, (u8 *)pdesc, skb);
6844 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6845 +index 1134412..47e32cb 100644
6846 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6847 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6848 +@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6849 + u8 tid;
6850 +
6851 + rtl8188ee_bt_reg_init(hw);
6852 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6853 +-
6854 + rtlpriv->dm.dm_initialgain_enable = 1;
6855 + rtlpriv->dm.dm_flag = 0;
6856 + rtlpriv->dm.disable_framebursting = 0;
6857 +@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6858 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6859 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6860 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6861 ++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6862 ++ rtlpriv->cfg->mod_params->sw_crypto =
6863 ++ rtlpriv->cfg->mod_params->sw_crypto;
6864 ++ rtlpriv->cfg->mod_params->disable_watchdog =
6865 ++ rtlpriv->cfg->mod_params->disable_watchdog;
6866 + if (rtlpriv->cfg->mod_params->disable_watchdog)
6867 + pr_info("watchdog disabled\n");
6868 + if (!rtlpriv->psc.inactiveps)
6869 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6870 +index de6cb6c..4780bdc 100644
6871 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6872 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6873 +@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
6874 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6875 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6876 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6877 ++ rtlpriv->cfg->mod_params->sw_crypto =
6878 ++ rtlpriv->cfg->mod_params->sw_crypto;
6879 + if (!rtlpriv->psc.inactiveps)
6880 + pr_info("rtl8192ce: Power Save off (module option)\n");
6881 + if (!rtlpriv->psc.fwctrl_lps)
6882 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6883 +index fd4a535..7c6f7f0 100644
6884 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6885 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6886 +@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
6887 + rtlpriv->dm.disable_framebursting = false;
6888 + rtlpriv->dm.thermalvalue = 0;
6889 + rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
6890 ++ rtlpriv->cfg->mod_params->sw_crypto =
6891 ++ rtlpriv->cfg->mod_params->sw_crypto;
6892 +
6893 + /* for firmware buf */
6894 + rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
6895 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6896 +index b19d039..c6e09a1 100644
6897 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6898 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6899 +@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
6900 + module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
6901 + MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6902 + MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6903 +-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6904 +-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6905 ++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6906 ++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6907 + MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6908 +
6909 + static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6910 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6911 +index e1fd27c..31baca41 100644
6912 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6913 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6914 +@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
6915 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6916 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6917 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6918 ++ rtlpriv->cfg->mod_params->sw_crypto =
6919 ++ rtlpriv->cfg->mod_params->sw_crypto;
6920 + if (!rtlpriv->psc.inactiveps)
6921 + pr_info("Power Save off (module option)\n");
6922 + if (!rtlpriv->psc.fwctrl_lps)
6923 +@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
6924 + module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
6925 + MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6926 + MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6927 +-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6928 +-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6929 ++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6930 ++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6931 + MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6932 +
6933 + static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6934 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6935 +index 3859b3e..ff49a8c 100644
6936 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6937 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6938 +@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
6939 + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6940 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6941 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6942 ++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6943 ++ rtlpriv->cfg->mod_params->sw_crypto =
6944 ++ rtlpriv->cfg->mod_params->sw_crypto;
6945 ++ rtlpriv->cfg->mod_params->disable_watchdog =
6946 ++ rtlpriv->cfg->mod_params->disable_watchdog;
6947 + if (rtlpriv->cfg->mod_params->disable_watchdog)
6948 + pr_info("watchdog disabled\n");
6949 + rtlpriv->psc.reg_fwctrl_lps = 3;
6950 +@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
6951 + .swctrl_lps = false,
6952 + .fwctrl_lps = true,
6953 + .debug = DBG_EMERG,
6954 ++ .msi_support = false,
6955 ++ .disable_watchdog = false,
6956 + };
6957 +
6958 + static struct rtl_hal_cfg rtl8723e_hal_cfg = {
6959 +@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
6960 + module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
6961 + module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
6962 + module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
6963 ++module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
6964 + module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
6965 + bool, 0444);
6966 + MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6967 + MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6968 + MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6969 + MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6970 ++MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
6971 + MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6972 + MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
6973 +
6974 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6975 +index d091f1d..a78eaed 100644
6976 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6977 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6978 +@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
6979 + struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
6980 +
6981 + rtl8723be_bt_reg_init(hw);
6982 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6983 + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
6984 +
6985 + rtlpriv->dm.dm_initialgain_enable = 1;
6986 +@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
6987 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6988 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6989 + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6990 ++ rtlpriv->cfg->mod_params->sw_crypto =
6991 ++ rtlpriv->cfg->mod_params->sw_crypto;
6992 ++ rtlpriv->cfg->mod_params->disable_watchdog =
6993 ++ rtlpriv->cfg->mod_params->disable_watchdog;
6994 + if (rtlpriv->cfg->mod_params->disable_watchdog)
6995 + pr_info("watchdog disabled\n");
6996 + rtlpriv->psc.reg_fwctrl_lps = 3;
6997 +@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
6998 + .inactiveps = true,
6999 + .swctrl_lps = false,
7000 + .fwctrl_lps = true,
7001 ++ .msi_support = false,
7002 ++ .disable_watchdog = false,
7003 ++ .debug = DBG_EMERG,
7004 + };
7005 +
7006 + static struct rtl_hal_cfg rtl8723be_hal_cfg = {
7007 +diff --git a/drivers/of/irq.c b/drivers/of/irq.c
7008 +index 4fa916d..72a2c19 100644
7009 +--- a/drivers/of/irq.c
7010 ++++ b/drivers/of/irq.c
7011 +@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
7012 + msi_base = be32_to_cpup(msi_map + 2);
7013 + rid_len = be32_to_cpup(msi_map + 3);
7014 +
7015 ++ if (rid_base & ~map_mask) {
7016 ++ dev_err(parent_dev,
7017 ++ "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
7018 ++ map_mask, rid_base);
7019 ++ return rid_out;
7020 ++ }
7021 ++
7022 + msi_controller_node = of_find_node_by_phandle(phandle);
7023 +
7024 + matched = (masked_rid >= rid_base &&
7025 +@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
7026 + if (!matched)
7027 + return rid_out;
7028 +
7029 +- rid_out = masked_rid + msi_base;
7030 ++ rid_out = masked_rid - rid_base + msi_base;
7031 + dev_dbg(dev,
7032 + "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
7033 + dev_name(parent_dev), map_mask, rid_base, msi_base,
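
The of/irq fix above changes the msi-map translation from "masked_rid + msi_base" to an offset within the window, and additionally rejects entries whose rid-base has bits the mask would discard. A worked standalone example (all map values invented):

#include <stdio.h>

/* Simplified model of one msi-map entry: (rid_base, msi_base, length). */
static unsigned int msi_map_rid(unsigned int rid, unsigned int mask,
                                unsigned int rid_base,
                                unsigned int msi_base, unsigned int len)
{
    unsigned int masked = rid & mask;

    if (rid_base & ~mask)                  /* mask would ignore rid_base */
        return rid;                        /* translation is ill-formed  */
    if (masked < rid_base || masked >= rid_base + len)
        return rid;                        /* outside this entry's window */

    return masked - rid_base + msi_base;   /* fixed: offset within window */
}

int main(void)
{
    /* RID 0x0105 in window [0x0100, 0x0140) mapping to MSI base 0x0200:
     * offset 0x05 -> 0x0205 (the old "+ msi_base" form gave 0x0305). */
    printf("0x%04X\n", msi_map_rid(0x0105, 0xFFFF, 0x0100, 0x0200, 0x40));
    return 0;
}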
7034 +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
7035 +index ff53856..0b3e0bf 100644
7036 +--- a/drivers/pci/hotplug/acpiphp_glue.c
7037 ++++ b/drivers/pci/hotplug/acpiphp_glue.c
7038 +@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
7039 + {
7040 + pci_lock_rescan_remove();
7041 +
7042 +- if (slot->flags & SLOT_IS_GOING_AWAY)
7043 ++ if (slot->flags & SLOT_IS_GOING_AWAY) {
7044 ++ pci_unlock_rescan_remove();
7045 + return -ENODEV;
7046 ++ }
7047 +
7048 + /* configure all functions */
7049 + if (!(slot->flags & SLOT_ENABLED))
7050 +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
7051 +index 0bf82a2..48d21e0 100644
7052 +--- a/drivers/pci/pcie/aer/aerdrv.c
7053 ++++ b/drivers/pci/pcie/aer/aerdrv.c
7054 +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
7055 + rpc->rpd = dev;
7056 + INIT_WORK(&rpc->dpc_handler, aer_isr);
7057 + mutex_init(&rpc->rpc_mutex);
7058 +- init_waitqueue_head(&rpc->wait_release);
7059 +
7060 + /* Use PCIe bus function to store rpc into PCIe device */
7061 + set_service_data(dev, rpc);
7062 +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
7063 + if (rpc->isr)
7064 + free_irq(dev->irq, dev);
7065 +
7066 +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
7067 +-
7068 ++ flush_work(&rpc->dpc_handler);
7069 + aer_disable_rootport(rpc);
7070 + kfree(rpc);
7071 + set_service_data(dev, NULL);
7072 +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
7073 +index 84420b7..945c939 100644
7074 +--- a/drivers/pci/pcie/aer/aerdrv.h
7075 ++++ b/drivers/pci/pcie/aer/aerdrv.h
7076 +@@ -72,7 +72,6 @@ struct aer_rpc {
7077 + * recovery on the same
7078 + * root port hierarchy
7079 + */
7080 +- wait_queue_head_t wait_release;
7081 + };
7082 +
7083 + struct aer_broadcast_data {
7084 +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
7085 +index fba785e..4e14de0 100644
7086 +--- a/drivers/pci/pcie/aer/aerdrv_core.c
7087 ++++ b/drivers/pci/pcie/aer/aerdrv_core.c
7088 +@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
7089 + while (get_e_source(rpc, &e_src))
7090 + aer_isr_one_error(p_device, &e_src);
7091 + mutex_unlock(&rpc->rpc_mutex);
7092 +-
7093 +- wake_up(&rpc->wait_release);
7094 + }
7095 +
7096 + /**
7097 +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
7098 +index c777b97..5f70fee 100644
7099 +--- a/drivers/pci/xen-pcifront.c
7100 ++++ b/drivers/pci/xen-pcifront.c
7101 +@@ -53,7 +53,7 @@ struct pcifront_device {
7102 + };
7103 +
7104 + struct pcifront_sd {
7105 +- int domain;
7106 ++ struct pci_sysdata sd;
7107 + struct pcifront_device *pdev;
7108 + };
7109 +
7110 +@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
7111 + unsigned int domain, unsigned int bus,
7112 + struct pcifront_device *pdev)
7113 + {
7114 +- sd->domain = domain;
7115 ++ /* Because we do not expose that information via XenBus. */
7116 ++ sd->sd.node = first_online_node;
7117 ++ sd->sd.domain = domain;
7118 + sd->pdev = pdev;
7119 + }
7120 +
7121 +@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
7122 + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
7123 + domain, bus);
7124 +
7125 +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
7126 +- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
7127 ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
7128 ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
7129 + if (!bus_entry || !sd) {
7130 + err = -ENOMEM;
7131 + goto err_out;
7132 +diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
7133 +index 8c7f27d..e7e574d 100644
7134 +--- a/drivers/phy/phy-core.c
7135 ++++ b/drivers/phy/phy-core.c
7136 +@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
7137 +
7138 + int phy_power_on(struct phy *phy)
7139 + {
7140 +- int ret;
7141 ++ int ret = 0;
7142 +
7143 + if (!phy)
7144 +- return 0;
7145 ++ goto out;
7146 +
7147 + if (phy->pwr) {
7148 + ret = regulator_enable(phy->pwr);
7149 + if (ret)
7150 +- return ret;
7151 ++ goto out;
7152 + }
7153 +
7154 + ret = phy_pm_runtime_get_sync(phy);
7155 + if (ret < 0 && ret != -ENOTSUPP)
7156 +- return ret;
7157 ++ goto err_pm_sync;
7158 ++
7159 + ret = 0; /* Override possible ret == -ENOTSUPP */
7160 +
7161 + mutex_lock(&phy->mutex);
7162 +@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
7163 + ret = phy->ops->power_on(phy);
7164 + if (ret < 0) {
7165 + dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
7166 +- goto out;
7167 ++ goto err_pwr_on;
7168 + }
7169 + }
7170 + ++phy->power_count;
7171 + mutex_unlock(&phy->mutex);
7172 + return 0;
7173 +
7174 +-out:
7175 ++err_pwr_on:
7176 + mutex_unlock(&phy->mutex);
7177 + phy_pm_runtime_put_sync(phy);
7178 ++err_pm_sync:
7179 + if (phy->pwr)
7180 + regulator_disable(phy->pwr);
7181 +-
7182 ++out:
7183 + return ret;
7184 + }
7185 + EXPORT_SYMBOL_GPL(phy_power_on);
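
The phy_power_on() rework above converts early returns into a goto unwind ladder so that a pm_runtime failure also releases the regulator enabled earlier. A reduced sketch of that shape, with the kernel calls stubbed and the failure simulated:

#include <stdio.h>

/* Each acquired resource gets a label; a failure jumps to the label that
 * releases everything acquired so far, in reverse order. Stubs only. */
static int  enable_regulator(void)  { return 0; }
static void disable_regulator(void) { puts("regulator off"); }
static int  runtime_get(void)       { return -1; }  /* simulate failure */

static int power_on(void)
{
    int ret = enable_regulator();
    if (ret)
        goto out;

    ret = runtime_get();
    if (ret < 0)
        goto err_pm_sync;   /* regulator is already on: undo it */

    return 0;

err_pm_sync:
    disable_regulator();
out:
    return ret;
}

int main(void)
{
    printf("power_on() = %d\n", power_on());
    return 0;
}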
7186 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
7187 +index a313dfc..d78ee15 100644
7188 +--- a/drivers/platform/x86/ideapad-laptop.c
7189 ++++ b/drivers/platform/x86/ideapad-laptop.c
7190 +@@ -865,6 +865,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7191 + },
7192 + },
7193 + {
7194 ++ .ident = "Lenovo ideapad Y700-17ISK",
7195 ++ .matches = {
7196 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7197 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
7198 ++ },
7199 ++ },
7200 ++ {
7201 + .ident = "Lenovo Yoga 2 11 / 13 / Pro",
7202 + .matches = {
7203 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7204 +@@ -893,6 +900,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7205 + },
7206 + },
7207 + {
7208 ++ .ident = "Lenovo Yoga 700",
7209 ++ .matches = {
7210 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7211 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
7212 ++ },
7213 ++ },
7214 ++ {
7215 + .ident = "Lenovo Yoga 900",
7216 + .matches = {
7217 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7218 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
7219 +index c013029..b0f6214 100644
7220 +--- a/drivers/platform/x86/toshiba_acpi.c
7221 ++++ b/drivers/platform/x86/toshiba_acpi.c
7222 +@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
7223 + brightness = __get_lcd_brightness(dev);
7224 + if (brightness < 0)
7225 + return 0;
7226 ++ /*
7227 ++ * If transflective backlight is supported and the brightness is zero
7228 ++ * (lowest brightness level), the set_lcd_brightness function will
7229 ++ * activate the transflective backlight, making the LCD appear to be
7230 ++ * turned off, simply increment the brightness level to avoid that.
7231 ++ */
7232 ++ if (dev->tr_backlight_supported && brightness == 0)
7233 ++ brightness++;
7234 + ret = set_lcd_brightness(dev, brightness);
7235 + if (ret) {
7236 + pr_debug("Backlight method is read-only, disabling backlight support\n");
7237 +diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
7238 +index 8df0b0e..0067620 100644
7239 +--- a/drivers/regulator/Kconfig
7240 ++++ b/drivers/regulator/Kconfig
7241 +@@ -446,6 +446,7 @@ config REGULATOR_MC13892
7242 + config REGULATOR_MT6311
7243 + tristate "MediaTek MT6311 PMIC"
7244 + depends on I2C
7245 ++ select REGMAP_I2C
7246 + help
7247 + Say y here to select this option to enable the power regulator of
7248 + MediaTek MT6311 PMIC.
7249 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
7250 +index 35de22f..f2e1a39 100644
7251 +--- a/drivers/regulator/axp20x-regulator.c
7252 ++++ b/drivers/regulator/axp20x-regulator.c
7253 +@@ -27,8 +27,8 @@
7254 + #define AXP20X_IO_ENABLED 0x03
7255 + #define AXP20X_IO_DISABLED 0x07
7256 +
7257 +-#define AXP22X_IO_ENABLED 0x04
7258 +-#define AXP22X_IO_DISABLED 0x03
7259 ++#define AXP22X_IO_ENABLED 0x03
7260 ++#define AXP22X_IO_DISABLED 0x04
7261 +
7262 + #define AXP20X_WORKMODE_DCDC2_MASK BIT(2)
7263 + #define AXP20X_WORKMODE_DCDC3_MASK BIT(1)
7264 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
7265 +index a263c10..4abfbdb 100644
7266 +--- a/drivers/s390/block/dasd.c
7267 ++++ b/drivers/s390/block/dasd.c
7268 +@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
7269 + max = block->base->discipline->max_blocks << block->s2b_shift;
7270 + }
7271 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
7272 ++ block->request_queue->limits.max_dev_sectors = max;
7273 + blk_queue_logical_block_size(block->request_queue,
7274 + block->bp_block);
7275 + blk_queue_max_hw_sectors(block->request_queue, max);
7276 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
7277 +index 184b1db..286782c 100644
7278 +--- a/drivers/s390/block/dasd_alias.c
7279 ++++ b/drivers/s390/block/dasd_alias.c
7280 +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7281 + spin_unlock_irqrestore(&lcu->lock, flags);
7282 + cancel_work_sync(&lcu->suc_data.worker);
7283 + spin_lock_irqsave(&lcu->lock, flags);
7284 +- if (device == lcu->suc_data.device)
7285 ++ if (device == lcu->suc_data.device) {
7286 ++ dasd_put_device(device);
7287 + lcu->suc_data.device = NULL;
7288 ++ }
7289 + }
7290 + was_pending = 0;
7291 + if (device == lcu->ruac_data.device) {
7292 +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7293 + was_pending = 1;
7294 + cancel_delayed_work_sync(&lcu->ruac_data.dwork);
7295 + spin_lock_irqsave(&lcu->lock, flags);
7296 +- if (device == lcu->ruac_data.device)
7297 ++ if (device == lcu->ruac_data.device) {
7298 ++ dasd_put_device(device);
7299 + lcu->ruac_data.device = NULL;
7300 ++ }
7301 + }
7302 + private->lcu = NULL;
7303 + spin_unlock_irqrestore(&lcu->lock, flags);
7304 +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
7305 + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
7306 + DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
7307 + " alias data in lcu (rc = %d), retry later", rc);
7308 +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
7309 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
7310 ++ dasd_put_device(device);
7311 + } else {
7312 ++ dasd_put_device(device);
7313 + lcu->ruac_data.device = NULL;
7314 + lcu->flags &= ~UPDATE_PENDING;
7315 + }
7316 +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
7317 + */
7318 + if (!usedev)
7319 + return -EINVAL;
7320 ++ dasd_get_device(usedev);
7321 + lcu->ruac_data.device = usedev;
7322 +- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
7323 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
7324 ++ dasd_put_device(usedev);
7325 + return 0;
7326 + }
7327 +
7328 +@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
7329 + ASCEBC((char *) &cqr->magic, 4);
7330 + ccw = cqr->cpaddr;
7331 + ccw->cmd_code = DASD_ECKD_CCW_RSCK;
7332 +- ccw->flags = 0 ;
7333 ++ ccw->flags = CCW_FLAG_SLI;
7334 + ccw->count = 16;
7335 + ccw->cda = (__u32)(addr_t) cqr->data;
7336 + ((char *)cqr->data)[0] = reason;
7337 +@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
7338 + /* 3. read new alias configuration */
7339 + _schedule_lcu_update(lcu, device);
7340 + lcu->suc_data.device = NULL;
7341 ++ dasd_put_device(device);
7342 + spin_unlock_irqrestore(&lcu->lock, flags);
7343 + }
7344 +
7345 +@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
7346 + }
7347 + lcu->suc_data.reason = reason;
7348 + lcu->suc_data.device = device;
7349 ++ dasd_get_device(device);
7350 + spin_unlock(&lcu->lock);
7351 +- schedule_work(&lcu->suc_data.worker);
7352 ++ if (!schedule_work(&lcu->suc_data.worker))
7353 ++ dasd_put_device(device);
7354 + };
7355 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
7356 +index 16a1935c..e197c6f 100644
7357 +--- a/drivers/scsi/qla2xxx/qla_init.c
7358 ++++ b/drivers/scsi/qla2xxx/qla_init.c
7359 +@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
7360 + /* Clear outstanding commands array. */
7361 + for (que = 0; que < ha->max_req_queues; que++) {
7362 + req = ha->req_q_map[que];
7363 +- if (!req)
7364 ++ if (!req || !test_bit(que, ha->req_qid_map))
7365 + continue;
7366 + req->out_ptr = (void *)(req->ring + req->length);
7367 + *req->out_ptr = 0;
7368 +@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
7369 +
7370 + for (que = 0; que < ha->max_rsp_queues; que++) {
7371 + rsp = ha->rsp_q_map[que];
7372 +- if (!rsp)
7373 ++ if (!rsp || !test_bit(que, ha->rsp_qid_map))
7374 + continue;
7375 + rsp->in_ptr = (void *)(rsp->ring + rsp->length);
7376 + *rsp->in_ptr = 0;
7377 +@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
7378 +
7379 + for (i = 1; i < ha->max_rsp_queues; i++) {
7380 + rsp = ha->rsp_q_map[i];
7381 +- if (rsp) {
7382 ++ if (rsp && test_bit(i, ha->rsp_qid_map)) {
7383 + rsp->options &= ~BIT_0;
7384 + ret = qla25xx_init_rsp_que(base_vha, rsp);
7385 + if (ret != QLA_SUCCESS)
7386 +@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
7387 + }
7388 + for (i = 1; i < ha->max_req_queues; i++) {
7389 + req = ha->req_q_map[i];
7390 +- if (req) {
7391 +- /* Clear outstanding commands array. */
7392 ++ if (req && test_bit(i, ha->req_qid_map)) {
7393 ++ /* Clear outstanding commands array. */
7394 + req->options &= ~BIT_0;
7395 + ret = qla25xx_init_req_que(base_vha, req);
7396 + if (ret != QLA_SUCCESS)
7397 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
7398 +index ccf6a7f..0e59731 100644
7399 +--- a/drivers/scsi/qla2xxx/qla_isr.c
7400 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
7401 +@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
7402 + "MSI-X: Failed to enable support "
7403 + "-- %d/%d\n Retry with %d vectors.\n",
7404 + ha->msix_count, ret, ret);
7405 ++ ha->msix_count = ret;
7406 ++ ha->max_rsp_queues = ha->msix_count - 1;
7407 + }
7408 +- ha->msix_count = ret;
7409 +- ha->max_rsp_queues = ha->msix_count - 1;
7410 + ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
7411 + ha->msix_count, GFP_KERNEL);
7412 + if (!ha->msix_entries) {
7413 +diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
7414 +index c5dd594..cf7ba52 100644
7415 +--- a/drivers/scsi/qla2xxx/qla_mid.c
7416 ++++ b/drivers/scsi/qla2xxx/qla_mid.c
7417 +@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
7418 + /* Delete request queues */
7419 + for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
7420 + req = ha->req_q_map[cnt];
7421 +- if (req) {
7422 ++ if (req && test_bit(cnt, ha->req_qid_map)) {
7423 + ret = qla25xx_delete_req_que(vha, req);
7424 + if (ret != QLA_SUCCESS) {
7425 + ql_log(ql_log_warn, vha, 0x00ea,
7426 +@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
7427 + /* Delete response queues */
7428 + for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
7429 + rsp = ha->rsp_q_map[cnt];
7430 +- if (rsp) {
7431 ++ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
7432 + ret = qla25xx_delete_rsp_que(vha, rsp);
7433 + if (ret != QLA_SUCCESS) {
7434 + ql_log(ql_log_warn, vha, 0x00eb,
7435 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
7436 +index bfa9a64..fc6674d 100644
7437 +--- a/drivers/scsi/qla2xxx/qla_os.c
7438 ++++ b/drivers/scsi/qla2xxx/qla_os.c
7439 +@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
7440 + int cnt;
7441 +
7442 + for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
7443 ++ if (!test_bit(cnt, ha->req_qid_map))
7444 ++ continue;
7445 ++
7446 + req = ha->req_q_map[cnt];
7447 + qla2x00_free_req_que(ha, req);
7448 + }
7449 +@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
7450 + ha->req_q_map = NULL;
7451 +
7452 + for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
7453 ++ if (!test_bit(cnt, ha->rsp_qid_map))
7454 ++ continue;
7455 ++
7456 + rsp = ha->rsp_q_map[cnt];
7457 + qla2x00_free_rsp_que(ha, rsp);
7458 + }
7459 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
7460 +index ddbe2e7..c3e6225 100644
7461 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
7462 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
7463 +@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
7464 + if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
7465 + for (i = 0; i < vha->hw->max_req_queues; i++) {
7466 + struct req_que *req = vha->hw->req_q_map[i];
7467 ++
7468 ++ if (!test_bit(i, vha->hw->req_qid_map))
7469 ++ continue;
7470 ++
7471 + if (req || !buf) {
7472 + length = req ?
7473 + req->length : REQUEST_ENTRY_CNT_24XX;
7474 +@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
7475 + } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
7476 + for (i = 0; i < vha->hw->max_rsp_queues; i++) {
7477 + struct rsp_que *rsp = vha->hw->rsp_q_map[i];
7478 ++
7479 ++ if (!test_bit(i, vha->hw->rsp_qid_map))
7480 ++ continue;
7481 ++
7482 + if (rsp || !buf) {
7483 + length = rsp ?
7484 + rsp->length : RESPONSE_ENTRY_CNT_MQ;
7485 +@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
7486 + if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
7487 + for (i = 0; i < vha->hw->max_req_queues; i++) {
7488 + struct req_que *req = vha->hw->req_q_map[i];
7489 ++
7490 ++ if (!test_bit(i, vha->hw->req_qid_map))
7491 ++ continue;
7492 ++
7493 + if (req || !buf) {
7494 + qla27xx_insert16(i, buf, len);
7495 + qla27xx_insert16(1, buf, len);
7496 +@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
7497 + } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
7498 + for (i = 0; i < vha->hw->max_rsp_queues; i++) {
7499 + struct rsp_que *rsp = vha->hw->rsp_q_map[i];
7500 ++
7501 ++ if (!test_bit(i, vha->hw->rsp_qid_map))
7502 ++ continue;
7503 ++
7504 + if (rsp || !buf) {
7505 + qla27xx_insert16(i, buf, len);
7506 + qla27xx_insert16(1, buf, len);
7507 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7508 +index 84fa4c4..bb669d3 100644
7509 +--- a/drivers/scsi/sd.c
7510 ++++ b/drivers/scsi/sd.c
7511 +@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
7512 + sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
7513 + sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
7514 + rw_max = q->limits.io_opt =
7515 +- logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
7516 ++ sdkp->opt_xfer_blocks * sdp->sector_size;
7517 + else
7518 + rw_max = BLK_DEF_MAX_SECTORS;
7519 +
7520 +diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
7521 +index aebad36..8feac59 100644
7522 +--- a/drivers/spi/spi-atmel.c
7523 ++++ b/drivers/spi/spi-atmel.c
7524 +@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
7525 +
7526 + as->use_cs_gpios = true;
7527 + if (atmel_spi_is_v2(as) &&
7528 ++ pdev->dev.of_node &&
7529 + !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
7530 + as->use_cs_gpios = false;
7531 + master->num_chipselect = 4;
7532 +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
7533 +index 1f8903d..ed8283e 100644
7534 +--- a/drivers/spi/spi-omap2-mcspi.c
7535 ++++ b/drivers/spi/spi-omap2-mcspi.c
7536 +@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
7537 + spi->controller_state = cs;
7538 + /* Link this to context save list */
7539 + list_add_tail(&cs->node, &ctx->cs);
7540 ++
7541 ++ if (gpio_is_valid(spi->cs_gpio)) {
7542 ++ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
7543 ++ if (ret) {
7544 ++ dev_err(&spi->dev, "failed to request gpio\n");
7545 ++ return ret;
7546 ++ }
7547 ++ gpio_direction_output(spi->cs_gpio,
7548 ++ !(spi->mode & SPI_CS_HIGH));
7549 ++ }
7550 + }
7551 +
7552 + if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
7553 +@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
7554 + return ret;
7555 + }
7556 +
7557 +- if (gpio_is_valid(spi->cs_gpio)) {
7558 +- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
7559 +- if (ret) {
7560 +- dev_err(&spi->dev, "failed to request gpio\n");
7561 +- return ret;
7562 +- }
7563 +- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
7564 +- }
7565 +-
7566 + ret = pm_runtime_get_sync(mcspi->dev);
7567 + if (ret < 0)
7568 + return ret;
7569 +diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
7570 +index 79ac192..70b8f4f 100644
7571 +--- a/drivers/staging/panel/panel.c
7572 ++++ b/drivers/staging/panel/panel.c
7573 +@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
7574 + lcd_send_serial(0x1F); /* R/W=W, RS=0 */
7575 + lcd_send_serial(cmd & 0x0F);
7576 + lcd_send_serial((cmd >> 4) & 0x0F);
7577 +- /* the shortest command takes at least 40 us */
7578 +- usleep_range(40, 100);
7579 ++ udelay(40); /* the shortest command takes at least 40 us */
7580 + spin_unlock_irq(&pprt_lock);
7581 + }
7582 +
7583 +@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
7584 + lcd_send_serial(0x5F); /* R/W=W, RS=1 */
7585 + lcd_send_serial(data & 0x0F);
7586 + lcd_send_serial((data >> 4) & 0x0F);
7587 +- /* the shortest data takes at least 40 us */
7588 +- usleep_range(40, 100);
7589 ++ udelay(40); /* the shortest data takes at least 40 us */
7590 + spin_unlock_irq(&pprt_lock);
7591 + }
7592 +
7593 +@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
7594 + spin_lock_irq(&pprt_lock);
7595 + /* present the data to the data port */
7596 + w_dtr(pprt, cmd);
7597 +- /* maintain the data during 20 us before the strobe */
7598 +- usleep_range(20, 100);
7599 ++ udelay(20); /* maintain the data during 20 us before the strobe */
7600 +
7601 + bits.e = BIT_SET;
7602 + bits.rs = BIT_CLR;
7603 + bits.rw = BIT_CLR;
7604 + set_ctrl_bits();
7605 +
7606 +- usleep_range(40, 100); /* maintain the strobe during 40 us */
7607 ++ udelay(40); /* maintain the strobe during 40 us */
7608 +
7609 + bits.e = BIT_CLR;
7610 + set_ctrl_bits();
7611 +
7612 +- usleep_range(120, 500); /* the shortest command takes at least 120 us */
7613 ++ udelay(120); /* the shortest command takes at least 120 us */
7614 + spin_unlock_irq(&pprt_lock);
7615 + }
7616 +
7617 +@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
7618 + spin_lock_irq(&pprt_lock);
7619 + /* present the data to the data port */
7620 + w_dtr(pprt, data);
7621 +- /* maintain the data during 20 us before the strobe */
7622 +- usleep_range(20, 100);
7623 ++ udelay(20); /* maintain the data during 20 us before the strobe */
7624 +
7625 + bits.e = BIT_SET;
7626 + bits.rs = BIT_SET;
7627 + bits.rw = BIT_CLR;
7628 + set_ctrl_bits();
7629 +
7630 +- usleep_range(40, 100); /* maintain the strobe during 40 us */
7631 ++ udelay(40); /* maintain the strobe during 40 us */
7632 +
7633 + bits.e = BIT_CLR;
7634 + set_ctrl_bits();
7635 +
7636 +- usleep_range(45, 100); /* the shortest data takes at least 45 us */
7637 ++ udelay(45); /* the shortest data takes at least 45 us */
7638 + spin_unlock_irq(&pprt_lock);
7639 + }
7640 +
7641 +@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
7642 + spin_lock_irq(&pprt_lock);
7643 + /* present the data to the control port */
7644 + w_ctr(pprt, cmd);
7645 +- usleep_range(60, 120);
7646 ++ udelay(60);
7647 + spin_unlock_irq(&pprt_lock);
7648 + }
7649 +
7650 +@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
7651 + spin_lock_irq(&pprt_lock);
7652 + /* present the data to the data port */
7653 + w_dtr(pprt, data);
7654 +- usleep_range(60, 120);
7655 ++ udelay(60);
7656 + spin_unlock_irq(&pprt_lock);
7657 + }
7658 +
7659 +@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
7660 + lcd_send_serial(0x5F); /* R/W=W, RS=1 */
7661 + lcd_send_serial(' ' & 0x0F);
7662 + lcd_send_serial((' ' >> 4) & 0x0F);
7663 +- usleep_range(40, 100); /* the shortest data takes at least 40 us */
7664 ++ udelay(40); /* the shortest data takes at least 40 us */
7665 + }
7666 + spin_unlock_irq(&pprt_lock);
7667 +
7668 +@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
7669 + w_dtr(pprt, ' ');
7670 +
7671 + /* maintain the data during 20 us before the strobe */
7672 +- usleep_range(20, 100);
7673 ++ udelay(20);
7674 +
7675 + bits.e = BIT_SET;
7676 + bits.rs = BIT_SET;
7677 +@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
7678 + set_ctrl_bits();
7679 +
7680 + /* maintain the strobe during 40 us */
7681 +- usleep_range(40, 100);
7682 ++ udelay(40);
7683 +
7684 + bits.e = BIT_CLR;
7685 + set_ctrl_bits();
7686 +
7687 + /* the shortest data takes at least 45 us */
7688 +- usleep_range(45, 100);
7689 ++ udelay(45);
7690 + }
7691 + spin_unlock_irq(&pprt_lock);
7692 +
7693 +@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
7694 + for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
7695 + /* present the data to the data port */
7696 + w_dtr(pprt, ' ');
7697 +- usleep_range(60, 120);
7698 ++ udelay(60);
7699 + }
7700 +
7701 + spin_unlock_irq(&pprt_lock);
7702 +diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
7703 +index 3b5835b..a5bbb33 100644
7704 +--- a/drivers/staging/speakup/serialio.c
7705 ++++ b/drivers/staging/speakup/serialio.c
7706 +@@ -6,6 +6,11 @@
7707 + #include "spk_priv.h"
7708 + #include "serialio.h"
7709 +
7710 ++#include <linux/serial_core.h>
7711 ++/* WARNING: Do not change this to <linux/serial.h> without testing that
7712 ++ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
7713 ++#include <asm/serial.h>
7714 ++
7715 + #ifndef SERIAL_PORT_DFNS
7716 + #define SERIAL_PORT_DFNS
7717 + #endif
7718 +@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
7719 + int baud = 9600, quot = 0;
7720 + unsigned int cval = 0;
7721 + int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
7722 +- const struct old_serial_port *ser = rs_table + index;
7723 ++ const struct old_serial_port *ser;
7724 + int err;
7725 +
7726 ++ if (index >= ARRAY_SIZE(rs_table)) {
7727 ++ pr_info("no port info for ttyS%d\n", index);
7728 ++ return NULL;
7729 ++ }
7730 ++ ser = rs_table + index;
7731 ++
7732 + /* Divisor, bytesize and parity */
7733 + quot = ser->baud_base / baud;
7734 + cval = cflag & (CSIZE | CSTOPB);
7735 +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
7736 +index 28fb301..88029cc 100644
7737 +--- a/drivers/target/target_core_tmr.c
7738 ++++ b/drivers/target/target_core_tmr.c
7739 +@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
7740 +
7741 + if (dev) {
7742 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
7743 +- list_del(&tmr->tmr_list);
7744 ++ list_del_init(&tmr->tmr_list);
7745 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
7746 + }
7747 +
7748 + kfree(tmr);
7749 + }
7750 +
7751 +-static void core_tmr_handle_tas_abort(
7752 +- struct se_node_acl *tmr_nacl,
7753 +- struct se_cmd *cmd,
7754 +- int tas)
7755 ++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
7756 + {
7757 +- bool remove = true;
7758 ++ unsigned long flags;
7759 ++ bool remove = true, send_tas;
7760 + /*
7761 + * TASK ABORTED status (TAS) bit support
7762 + */
7763 +- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
7764 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
7765 ++ send_tas = (cmd->transport_state & CMD_T_TAS);
7766 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
7767 ++
7768 ++ if (send_tas) {
7769 + remove = false;
7770 + transport_send_task_abort(cmd);
7771 + }
7772 +@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
7773 + return 1;
7774 + }
7775 +
7776 ++static bool __target_check_io_state(struct se_cmd *se_cmd,
7777 ++ struct se_session *tmr_sess, int tas)
7778 ++{
7779 ++ struct se_session *sess = se_cmd->se_sess;
7780 ++
7781 ++ assert_spin_locked(&sess->sess_cmd_lock);
7782 ++ WARN_ON_ONCE(!irqs_disabled());
7783 ++ /*
7784 ++ * If command already reached CMD_T_COMPLETE state within
7785 ++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
7786 ++ * this se_cmd has been passed to fabric driver and will
7787 ++ * not be aborted.
7788 ++ *
7789 ++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
7790 ++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing, as
7791 ++ * long as se_cmd->cmd_kref has not already dropped to zero.
7792 ++ */
7793 ++ spin_lock(&se_cmd->t_state_lock);
7794 ++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
7795 ++ pr_debug("Attempted to abort io tag: %llu already complete or"
7796 ++ " fabric stop, skipping\n", se_cmd->tag);
7797 ++ spin_unlock(&se_cmd->t_state_lock);
7798 ++ return false;
7799 ++ }
7800 ++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
7801 ++ pr_debug("Attempted to abort io tag: %llu already shutdown,"
7802 ++ " skipping\n", se_cmd->tag);
7803 ++ spin_unlock(&se_cmd->t_state_lock);
7804 ++ return false;
7805 ++ }
7806 ++ se_cmd->transport_state |= CMD_T_ABORTED;
7807 ++
7808 ++ if ((tmr_sess != se_cmd->se_sess) && tas)
7809 ++ se_cmd->transport_state |= CMD_T_TAS;
7810 ++
7811 ++ spin_unlock(&se_cmd->t_state_lock);
7812 ++
7813 ++ return kref_get_unless_zero(&se_cmd->cmd_kref);
7814 ++}
7815 ++
7816 + void core_tmr_abort_task(
7817 + struct se_device *dev,
7818 + struct se_tmr_req *tmr,
7819 +@@ -130,34 +172,22 @@ void core_tmr_abort_task(
7820 + if (tmr->ref_task_tag != ref_tag)
7821 + continue;
7822 +
7823 +- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
7824 +- continue;
7825 +-
7826 + printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
7827 + se_cmd->se_tfo->get_fabric_name(), ref_tag);
7828 +
7829 +- spin_lock(&se_cmd->t_state_lock);
7830 +- if (se_cmd->transport_state & CMD_T_COMPLETE) {
7831 +- printk("ABORT_TASK: ref_tag: %llu already complete,"
7832 +- " skipping\n", ref_tag);
7833 +- spin_unlock(&se_cmd->t_state_lock);
7834 ++ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
7835 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7836 +-
7837 + target_put_sess_cmd(se_cmd);
7838 +-
7839 + goto out;
7840 + }
7841 +- se_cmd->transport_state |= CMD_T_ABORTED;
7842 +- spin_unlock(&se_cmd->t_state_lock);
7843 +-
7844 + list_del_init(&se_cmd->se_cmd_list);
7845 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7846 +
7847 + cancel_work_sync(&se_cmd->work);
7848 + transport_wait_for_tasks(se_cmd);
7849 +
7850 +- target_put_sess_cmd(se_cmd);
7851 + transport_cmd_finish_abort(se_cmd, true);
7852 ++ target_put_sess_cmd(se_cmd);
7853 +
7854 + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
7855 + " ref_tag: %llu\n", ref_tag);
7856 +@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
7857 + struct list_head *preempt_and_abort_list)
7858 + {
7859 + LIST_HEAD(drain_tmr_list);
7860 ++ struct se_session *sess;
7861 + struct se_tmr_req *tmr_p, *tmr_pp;
7862 + struct se_cmd *cmd;
7863 + unsigned long flags;
7864 ++ bool rc;
7865 + /*
7866 + * Release all pending and outgoing TMRs aside from the received
7867 + * LUN_RESET tmr..
7868 +@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
7869 + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
7870 + continue;
7871 +
7872 ++ sess = cmd->se_sess;
7873 ++ if (WARN_ON_ONCE(!sess))
7874 ++ continue;
7875 ++
7876 ++ spin_lock(&sess->sess_cmd_lock);
7877 + spin_lock(&cmd->t_state_lock);
7878 +- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
7879 ++ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
7880 ++ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
7881 + spin_unlock(&cmd->t_state_lock);
7882 ++ spin_unlock(&sess->sess_cmd_lock);
7883 + continue;
7884 + }
7885 + if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
7886 + spin_unlock(&cmd->t_state_lock);
7887 ++ spin_unlock(&sess->sess_cmd_lock);
7888 + continue;
7889 + }
7890 ++ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
7891 ++ spin_unlock(&cmd->t_state_lock);
7892 ++ spin_unlock(&sess->sess_cmd_lock);
7893 ++ continue;
7894 ++ }
7895 ++ cmd->transport_state |= CMD_T_ABORTED;
7896 + spin_unlock(&cmd->t_state_lock);
7897 +
7898 ++ rc = kref_get_unless_zero(&cmd->cmd_kref);
7899 ++ if (!rc) {
7900 ++ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
7901 ++ spin_unlock(&sess->sess_cmd_lock);
7902 ++ continue;
7903 ++ }
7904 ++ spin_unlock(&sess->sess_cmd_lock);
7905 ++
7906 + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
7907 + }
7908 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
7909 +@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
7910 + (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
7911 + tmr_p->function, tmr_p->response, cmd->t_state);
7912 +
7913 ++ cancel_work_sync(&cmd->work);
7914 ++ transport_wait_for_tasks(cmd);
7915 ++
7916 + transport_cmd_finish_abort(cmd, 1);
7917 ++ target_put_sess_cmd(cmd);
7918 + }
7919 + }
7920 +
7921 + static void core_tmr_drain_state_list(
7922 + struct se_device *dev,
7923 + struct se_cmd *prout_cmd,
7924 +- struct se_node_acl *tmr_nacl,
7925 ++ struct se_session *tmr_sess,
7926 + int tas,
7927 + struct list_head *preempt_and_abort_list)
7928 + {
7929 + LIST_HEAD(drain_task_list);
7930 ++ struct se_session *sess;
7931 + struct se_cmd *cmd, *next;
7932 + unsigned long flags;
7933 ++ int rc;
7934 +
7935 + /*
7936 + * Complete outstanding commands with TASK_ABORTED SAM status.
7937 +@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
7938 + if (prout_cmd == cmd)
7939 + continue;
7940 +
7941 ++ sess = cmd->se_sess;
7942 ++ if (WARN_ON_ONCE(!sess))
7943 ++ continue;
7944 ++
7945 ++ spin_lock(&sess->sess_cmd_lock);
7946 ++ rc = __target_check_io_state(cmd, tmr_sess, tas);
7947 ++ spin_unlock(&sess->sess_cmd_lock);
7948 ++ if (!rc)
7949 ++ continue;
7950 ++
7951 + list_move_tail(&cmd->state_list, &drain_task_list);
7952 + cmd->state_active = false;
7953 + }
7954 +@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
7955 +
7956 + while (!list_empty(&drain_task_list)) {
7957 + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
7958 +- list_del(&cmd->state_list);
7959 ++ list_del_init(&cmd->state_list);
7960 +
7961 + pr_debug("LUN_RESET: %s cmd: %p"
7962 + " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
7963 +@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
7964 + * loop above, but we do it down here given that
7965 + * cancel_work_sync may block.
7966 + */
7967 +- if (cmd->t_state == TRANSPORT_COMPLETE)
7968 +- cancel_work_sync(&cmd->work);
7969 +-
7970 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
7971 +- target_stop_cmd(cmd, &flags);
7972 +-
7973 +- cmd->transport_state |= CMD_T_ABORTED;
7974 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
7975 ++ cancel_work_sync(&cmd->work);
7976 ++ transport_wait_for_tasks(cmd);
7977 +
7978 +- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
7979 ++ core_tmr_handle_tas_abort(cmd, tas);
7980 ++ target_put_sess_cmd(cmd);
7981 + }
7982 + }
7983 +
7984 +@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
7985 + {
7986 + struct se_node_acl *tmr_nacl = NULL;
7987 + struct se_portal_group *tmr_tpg = NULL;
7988 ++ struct se_session *tmr_sess = NULL;
7989 + int tas;
7990 + /*
7991 + * TASK_ABORTED status bit, this is configurable via ConfigFS
7992 +@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
7993 + * or struct se_device passthrough..
7994 + */
7995 + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
7996 +- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
7997 +- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
7998 ++ tmr_sess = tmr->task_cmd->se_sess;
7999 ++ tmr_nacl = tmr_sess->se_node_acl;
8000 ++ tmr_tpg = tmr_sess->se_tpg;
8001 + if (tmr_nacl && tmr_tpg) {
8002 + pr_debug("LUN_RESET: TMR caller fabric: %s"
8003 + " initiator port %s\n",
8004 +@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
8005 + dev->transport->name, tas);
8006 +
8007 + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
8008 +- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
8009 ++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
8010 + preempt_and_abort_list);
8011 +
8012 + /*
8013 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
8014 +index 4fdcee2..94f4ffa 100644
8015 +--- a/drivers/target/target_core_transport.c
8016 ++++ b/drivers/target/target_core_transport.c
8017 +@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
8018 + }
8019 + EXPORT_SYMBOL(transport_deregister_session);
8020 +
8021 +-/*
8022 +- * Called with cmd->t_state_lock held.
8023 +- */
8024 + static void target_remove_from_state_list(struct se_cmd *cmd)
8025 + {
8026 + struct se_device *dev = cmd->se_dev;
8027 +@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
8028 + {
8029 + unsigned long flags;
8030 +
8031 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8032 +- if (write_pending)
8033 +- cmd->t_state = TRANSPORT_WRITE_PENDING;
8034 +-
8035 + if (remove_from_lists) {
8036 + target_remove_from_state_list(cmd);
8037 +
8038 +@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
8039 + cmd->se_lun = NULL;
8040 + }
8041 +
8042 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8043 ++ if (write_pending)
8044 ++ cmd->t_state = TRANSPORT_WRITE_PENDING;
8045 ++
8046 + /*
8047 + * Determine if frontend context caller is requesting the stopping of
8048 + * this command for frontend exceptions.
8049 +@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
8050 +
8051 + void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
8052 + {
8053 ++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
8054 ++
8055 + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
8056 + transport_lun_remove_cmd(cmd);
8057 + /*
8058 +@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
8059 +
8060 + if (transport_cmd_check_stop_to_fabric(cmd))
8061 + return;
8062 +- if (remove)
8063 ++ if (remove && ack_kref)
8064 + transport_put_cmd(cmd);
8065 + }
8066 +
8067 +@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
8068 + * Check for case where an explicit ABORT_TASK has been received
8069 + * and transport_wait_for_tasks() will be waiting for completion..
8070 + */
8071 +- if (cmd->transport_state & CMD_T_ABORTED &&
8072 ++ if (cmd->transport_state & CMD_T_ABORTED ||
8073 + cmd->transport_state & CMD_T_STOP) {
8074 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8075 + complete_all(&cmd->t_transport_stop_comp);
8076 +@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
8077 + return true;
8078 + }
8079 +
8080 ++static int __transport_check_aborted_status(struct se_cmd *, int);
8081 ++
8082 + void target_execute_cmd(struct se_cmd *cmd)
8083 + {
8084 + /*
8085 +- * If the received CDB has aleady been aborted stop processing it here.
8086 +- */
8087 +- if (transport_check_aborted_status(cmd, 1))
8088 +- return;
8089 +-
8090 +- /*
8091 + * Determine if frontend context caller is requesting the stopping of
8092 + * this command for frontend exceptions.
8093 ++ *
8094 ++ * If the received CDB has already been aborted, stop processing it here.
8095 + */
8096 + spin_lock_irq(&cmd->t_state_lock);
8097 ++ if (__transport_check_aborted_status(cmd, 1)) {
8098 ++ spin_unlock_irq(&cmd->t_state_lock);
8099 ++ return;
8100 ++ }
8101 + if (cmd->transport_state & CMD_T_STOP) {
8102 + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
8103 + __func__, __LINE__, cmd->tag);
8104 +@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
8105 + }
8106 +
8107 + /**
8108 +- * transport_release_cmd - free a command
8109 +- * @cmd: command to free
8110 ++ * transport_put_cmd - release a reference to a command
8111 ++ * @cmd: command to release
8112 + *
8113 +- * This routine unconditionally frees a command, and reference counting
8114 +- * or list removal must be done in the caller.
8115 ++ * This routine releases our reference to the command and frees it if possible.
8116 + */
8117 +-static int transport_release_cmd(struct se_cmd *cmd)
8118 ++static int transport_put_cmd(struct se_cmd *cmd)
8119 + {
8120 + BUG_ON(!cmd->se_tfo);
8121 +-
8122 +- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
8123 +- core_tmr_release_req(cmd->se_tmr_req);
8124 +- if (cmd->t_task_cdb != cmd->__t_task_cdb)
8125 +- kfree(cmd->t_task_cdb);
8126 + /*
8127 + * If this cmd has been setup with target_get_sess_cmd(), drop
8128 + * the kref and call ->release_cmd() in kref callback.
8129 +@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
8130 + return target_put_sess_cmd(cmd);
8131 + }
8132 +
8133 +-/**
8134 +- * transport_put_cmd - release a reference to a command
8135 +- * @cmd: command to release
8136 +- *
8137 +- * This routine releases our reference to the command and frees it if possible.
8138 +- */
8139 +-static int transport_put_cmd(struct se_cmd *cmd)
8140 +-{
8141 +- transport_free_pages(cmd);
8142 +- return transport_release_cmd(cmd);
8143 +-}
8144 +-
8145 + void *transport_kmap_data_sg(struct se_cmd *cmd)
8146 + {
8147 + struct scatterlist *sg = cmd->t_data_sg;
8148 +@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
8149 + }
8150 + }
8151 +
8152 +-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
8153 ++static bool
8154 ++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
8155 ++ unsigned long *flags);
8156 ++
8157 ++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
8158 + {
8159 + unsigned long flags;
8160 ++
8161 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8162 ++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
8163 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8164 ++}
8165 ++
8166 ++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
8167 ++{
8168 + int ret = 0;
8169 ++ bool aborted = false, tas = false;
8170 +
8171 + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
8172 + if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8173 +- transport_wait_for_tasks(cmd);
8174 ++ target_wait_free_cmd(cmd, &aborted, &tas);
8175 +
8176 +- ret = transport_release_cmd(cmd);
8177 ++ if (!aborted || tas)
8178 ++ ret = transport_put_cmd(cmd);
8179 + } else {
8180 + if (wait_for_tasks)
8181 +- transport_wait_for_tasks(cmd);
8182 ++ target_wait_free_cmd(cmd, &aborted, &tas);
8183 + /*
8184 + * Handle WRITE failure case where transport_generic_new_cmd()
8185 + * has already added se_cmd to state_list, but fabric has
8186 + * failed command before I/O submission.
8187 + */
8188 +- if (cmd->state_active) {
8189 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8190 ++ if (cmd->state_active)
8191 + target_remove_from_state_list(cmd);
8192 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8193 +- }
8194 +
8195 + if (cmd->se_lun)
8196 + transport_lun_remove_cmd(cmd);
8197 +
8198 +- ret = transport_put_cmd(cmd);
8199 ++ if (!aborted || tas)
8200 ++ ret = transport_put_cmd(cmd);
8201 ++ }
8202 ++ /*
8203 ++ * If the task has been internally aborted due to TMR ABORT_TASK
8204 ++ * or LUN_RESET, target_core_tmr.c is responsible for performing
8205 ++ * the remaining calls to target_put_sess_cmd(), and not the
8206 ++ * callers of this function.
8207 ++ */
8208 ++ if (aborted) {
8209 ++ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
8210 ++ wait_for_completion(&cmd->cmd_wait_comp);
8211 ++ cmd->se_tfo->release_cmd(cmd);
8212 ++ ret = 1;
8213 + }
8214 + return ret;
8215 + }
8216 +@@ -2508,26 +2515,46 @@ out:
8217 + }
8218 + EXPORT_SYMBOL(target_get_sess_cmd);
8219 +
8220 ++static void target_free_cmd_mem(struct se_cmd *cmd)
8221 ++{
8222 ++ transport_free_pages(cmd);
8223 ++
8224 ++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
8225 ++ core_tmr_release_req(cmd->se_tmr_req);
8226 ++ if (cmd->t_task_cdb != cmd->__t_task_cdb)
8227 ++ kfree(cmd->t_task_cdb);
8228 ++}
8229 ++
8230 + static void target_release_cmd_kref(struct kref *kref)
8231 + {
8232 + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
8233 + struct se_session *se_sess = se_cmd->se_sess;
8234 + unsigned long flags;
8235 ++ bool fabric_stop;
8236 +
8237 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
8238 + if (list_empty(&se_cmd->se_cmd_list)) {
8239 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8240 ++ target_free_cmd_mem(se_cmd);
8241 + se_cmd->se_tfo->release_cmd(se_cmd);
8242 + return;
8243 + }
8244 +- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
8245 ++
8246 ++ spin_lock(&se_cmd->t_state_lock);
8247 ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
8248 ++ spin_unlock(&se_cmd->t_state_lock);
8249 ++
8250 ++ if (se_cmd->cmd_wait_set || fabric_stop) {
8251 ++ list_del_init(&se_cmd->se_cmd_list);
8252 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8253 ++ target_free_cmd_mem(se_cmd);
8254 + complete(&se_cmd->cmd_wait_comp);
8255 + return;
8256 + }
8257 +- list_del(&se_cmd->se_cmd_list);
8258 ++ list_del_init(&se_cmd->se_cmd_list);
8259 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8260 +
8261 ++ target_free_cmd_mem(se_cmd);
8262 + se_cmd->se_tfo->release_cmd(se_cmd);
8263 + }
8264 +
8265 +@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
8266 + struct se_session *se_sess = se_cmd->se_sess;
8267 +
8268 + if (!se_sess) {
8269 ++ target_free_cmd_mem(se_cmd);
8270 + se_cmd->se_tfo->release_cmd(se_cmd);
8271 + return 1;
8272 + }
8273 +@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
8274 + {
8275 + struct se_cmd *se_cmd;
8276 + unsigned long flags;
8277 ++ int rc;
8278 +
8279 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
8280 + if (se_sess->sess_tearing_down) {
8281 +@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
8282 + se_sess->sess_tearing_down = 1;
8283 + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
8284 +
8285 +- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
8286 +- se_cmd->cmd_wait_set = 1;
8287 ++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
8288 ++ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
8289 ++ if (rc) {
8290 ++ se_cmd->cmd_wait_set = 1;
8291 ++ spin_lock(&se_cmd->t_state_lock);
8292 ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
8293 ++ spin_unlock(&se_cmd->t_state_lock);
8294 ++ }
8295 ++ }
8296 +
8297 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8298 + }
8299 +@@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
8300 + {
8301 + struct se_cmd *se_cmd, *tmp_cmd;
8302 + unsigned long flags;
8303 ++ bool tas;
8304 +
8305 + list_for_each_entry_safe(se_cmd, tmp_cmd,
8306 + &se_sess->sess_wait_list, se_cmd_list) {
8307 +- list_del(&se_cmd->se_cmd_list);
8308 ++ list_del_init(&se_cmd->se_cmd_list);
8309 +
8310 + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
8311 + " %d\n", se_cmd, se_cmd->t_state,
8312 + se_cmd->se_tfo->get_cmd_state(se_cmd));
8313 +
8314 ++ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
8315 ++ tas = (se_cmd->transport_state & CMD_T_TAS);
8316 ++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
8317 ++
8318 ++ if (!target_put_sess_cmd(se_cmd)) {
8319 ++ if (tas)
8320 ++ target_put_sess_cmd(se_cmd);
8321 ++ }
8322 ++
8323 + wait_for_completion(&se_cmd->cmd_wait_comp);
8324 + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
8325 + " fabric state: %d\n", se_cmd, se_cmd->t_state,
8326 +@@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
8327 + wait_for_completion(&lun->lun_ref_comp);
8328 + }
8329 +
8330 +-/**
8331 +- * transport_wait_for_tasks - wait for completion to occur
8332 +- * @cmd: command to wait
8333 +- *
8334 +- * Called from frontend fabric context to wait for storage engine
8335 +- * to pause and/or release frontend generated struct se_cmd.
8336 +- */
8337 +-bool transport_wait_for_tasks(struct se_cmd *cmd)
8338 ++static bool
8339 ++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
8340 ++ bool *aborted, bool *tas, unsigned long *flags)
8341 ++ __releases(&cmd->t_state_lock)
8342 ++ __acquires(&cmd->t_state_lock)
8343 + {
8344 +- unsigned long flags;
8345 +
8346 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8347 ++ assert_spin_locked(&cmd->t_state_lock);
8348 ++ WARN_ON_ONCE(!irqs_disabled());
8349 ++
8350 ++ if (fabric_stop)
8351 ++ cmd->transport_state |= CMD_T_FABRIC_STOP;
8352 ++
8353 ++ if (cmd->transport_state & CMD_T_ABORTED)
8354 ++ *aborted = true;
8355 ++
8356 ++ if (cmd->transport_state & CMD_T_TAS)
8357 ++ *tas = true;
8358 ++
8359 + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
8360 +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
8361 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8362 ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8363 + return false;
8364 +- }
8365 +
8366 + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
8367 +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
8368 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8369 ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8370 + return false;
8371 +- }
8372 +
8373 +- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
8374 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8375 ++ if (!(cmd->transport_state & CMD_T_ACTIVE))
8376 ++ return false;
8377 ++
8378 ++ if (fabric_stop && *aborted)
8379 + return false;
8380 +- }
8381 +
8382 + cmd->transport_state |= CMD_T_STOP;
8383 +
8384 +- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
8385 +- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
8386 ++ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
8387 ++ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
8388 ++ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
8389 +
8390 +- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8391 ++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
8392 +
8393 + wait_for_completion(&cmd->t_transport_stop_comp);
8394 +
8395 +- spin_lock_irqsave(&cmd->t_state_lock, flags);
8396 ++ spin_lock_irqsave(&cmd->t_state_lock, *flags);
8397 + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
8398 +
8399 +- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
8400 +- cmd->tag);
8401 ++ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
8402 ++ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
8403 ++
8404 ++ return true;
8405 ++}
8406 +
8407 ++/**
8408 ++ * transport_wait_for_tasks - wait for completion to occur
8409 ++ * @cmd: command to wait
8410 ++ *
8411 ++ * Called from frontend fabric context to wait for storage engine
8412 ++ * to pause and/or release frontend generated struct se_cmd.
8413 ++ */
8414 ++bool transport_wait_for_tasks(struct se_cmd *cmd)
8415 ++{
8416 ++ unsigned long flags;
8417 ++ bool ret, aborted = false, tas = false;
8418 ++
8419 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8420 ++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
8421 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8422 +
8423 +- return true;
8424 ++ return ret;
8425 + }
8426 + EXPORT_SYMBOL(transport_wait_for_tasks);
8427 +
8428 +@@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
8429 + }
8430 + EXPORT_SYMBOL(transport_send_check_condition_and_sense);
8431 +
8432 +-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8433 ++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8434 ++ __releases(&cmd->t_state_lock)
8435 ++ __acquires(&cmd->t_state_lock)
8436 + {
8437 ++ assert_spin_locked(&cmd->t_state_lock);
8438 ++ WARN_ON_ONCE(!irqs_disabled());
8439 ++
8440 + if (!(cmd->transport_state & CMD_T_ABORTED))
8441 + return 0;
8442 +-
8443 + /*
8444 + * If cmd has been aborted but either no status is to be sent or it has
8445 + * already been sent, just return
8446 + */
8447 +- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
8448 ++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
8449 ++ if (send_status)
8450 ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
8451 + return 1;
8452 ++ }
8453 +
8454 +- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
8455 +- cmd->t_task_cdb[0], cmd->tag);
8456 ++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
8457 ++ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
8458 +
8459 + cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
8460 + cmd->scsi_status = SAM_STAT_TASK_ABORTED;
8461 + trace_target_cmd_complete(cmd);
8462 ++
8463 ++ spin_unlock_irq(&cmd->t_state_lock);
8464 + cmd->se_tfo->queue_status(cmd);
8465 ++ spin_lock_irq(&cmd->t_state_lock);
8466 +
8467 + return 1;
8468 + }
8469 ++
8470 ++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8471 ++{
8472 ++ int ret;
8473 ++
8474 ++ spin_lock_irq(&cmd->t_state_lock);
8475 ++ ret = __transport_check_aborted_status(cmd, send_status);
8476 ++ spin_unlock_irq(&cmd->t_state_lock);
8477 ++
8478 ++ return ret;
8479 ++}
8480 + EXPORT_SYMBOL(transport_check_aborted_status);
8481 +
8482 + void transport_send_task_abort(struct se_cmd *cmd)
8483 +@@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
8484 + */
8485 + if (cmd->data_direction == DMA_TO_DEVICE) {
8486 + if (cmd->se_tfo->write_pending_status(cmd) != 0) {
8487 +- cmd->transport_state |= CMD_T_ABORTED;
8488 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8489 ++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
8490 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8491 ++ goto send_abort;
8492 ++ }
8493 + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
8494 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8495 + return;
8496 + }
8497 + }
8498 ++send_abort:
8499 + cmd->scsi_status = SAM_STAT_TASK_ABORTED;
8500 +
8501 + transport_lun_remove_cmd(cmd);
8502 +@@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work)
8503 + struct se_cmd *cmd = container_of(work, struct se_cmd, work);
8504 + struct se_device *dev = cmd->se_dev;
8505 + struct se_tmr_req *tmr = cmd->se_tmr_req;
8506 ++ unsigned long flags;
8507 + int ret;
8508 +
8509 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8510 ++ if (cmd->transport_state & CMD_T_ABORTED) {
8511 ++ tmr->response = TMR_FUNCTION_REJECTED;
8512 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8513 ++ goto check_stop;
8514 ++ }
8515 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8516 ++
8517 + switch (tmr->function) {
8518 + case TMR_ABORT_TASK:
8519 + core_tmr_abort_task(dev, tmr, cmd->se_sess);
8520 +@@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work)
8521 + break;
8522 + }
8523 +
8524 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
8525 ++ if (cmd->transport_state & CMD_T_ABORTED) {
8526 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8527 ++ goto check_stop;
8528 ++ }
8529 + cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
8530 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8531 ++
8532 + cmd->se_tfo->queue_tm_rsp(cmd);
8533 +
8534 ++check_stop:
8535 + transport_cmd_check_stop_to_fabric(cmd);
8536 + }
8537 +
8538 +diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
8539 +index 2f9f708..ea9366a 100644
8540 +--- a/drivers/thermal/step_wise.c
8541 ++++ b/drivers/thermal/step_wise.c
8542 +@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
8543 + next_target = instance->target;
8544 + dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
8545 +
8546 ++ if (!instance->initialized) {
8547 ++ if (throttle) {
8548 ++ next_target = (cur_state + 1) >= instance->upper ?
8549 ++ instance->upper :
8550 ++ ((cur_state + 1) < instance->lower ?
8551 ++ instance->lower : (cur_state + 1));
8552 ++ } else {
8553 ++ next_target = THERMAL_NO_TARGET;
8554 ++ }
8555 ++
8556 ++ return next_target;
8557 ++ }
8558 ++
8559 + switch (trend) {
8560 + case THERMAL_TREND_RAISING:
8561 + if (throttle) {
8562 +@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
8563 + dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
8564 + old_target, (int)instance->target);
8565 +
8566 +- if (old_target == instance->target)
8567 ++ if (instance->initialized && old_target == instance->target)
8568 + continue;
8569 +
8570 + /* Activate a passive thermal instance */
8571 +@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
8572 + instance->target == THERMAL_NO_TARGET)
8573 + update_passive_instance(tz, trip_type, -1);
8574 +
8575 +-
8576 ++ instance->initialized = true;
8577 + instance->cdev->updated = false; /* cdev needs update */
8578 + }
8579 +
8580 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
8581 +index d9e525c..ba08b55 100644
8582 +--- a/drivers/thermal/thermal_core.c
8583 ++++ b/drivers/thermal/thermal_core.c
8584 +@@ -37,6 +37,7 @@
8585 + #include <linux/of.h>
8586 + #include <net/netlink.h>
8587 + #include <net/genetlink.h>
8588 ++#include <linux/suspend.h>
8589 +
8590 + #define CREATE_TRACE_POINTS
8591 + #include <trace/events/thermal.h>
8592 +@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
8593 + static DEFINE_MUTEX(thermal_list_lock);
8594 + static DEFINE_MUTEX(thermal_governor_lock);
8595 +
8596 ++static atomic_t in_suspend;
8597 ++
8598 + static struct thermal_governor *def_governor;
8599 +
8600 + static struct thermal_governor *__find_governor(const char *name)
8601 +@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
8602 + mutex_unlock(&tz->lock);
8603 +
8604 + trace_thermal_temperature(tz);
8605 +- dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
8606 +- tz->last_temperature, tz->temperature);
8607 ++ if (tz->last_temperature == THERMAL_TEMP_INVALID)
8608 ++ dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
8609 ++ tz->temperature);
8610 ++ else
8611 ++ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
8612 ++ tz->last_temperature, tz->temperature);
8613 ++}
8614 ++
8615 ++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
8616 ++{
8617 ++ struct thermal_instance *pos;
8618 ++
8619 ++ tz->temperature = THERMAL_TEMP_INVALID;
8620 ++ tz->passive = 0;
8621 ++ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
8622 ++ pos->initialized = false;
8623 + }
8624 +
8625 + void thermal_zone_device_update(struct thermal_zone_device *tz)
8626 + {
8627 + int count;
8628 +
8629 ++ if (atomic_read(&in_suspend))
8630 ++ return;
8631 ++
8632 + if (!tz->ops->get_temp)
8633 + return;
8634 +
8635 +@@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
8636 + if (!result) {
8637 + list_add_tail(&dev->tz_node, &tz->thermal_instances);
8638 + list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
8639 ++ atomic_set(&tz->need_update, 1);
8640 + }
8641 + mutex_unlock(&cdev->lock);
8642 + mutex_unlock(&tz->lock);
8643 +@@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np,
8644 + const struct thermal_cooling_device_ops *ops)
8645 + {
8646 + struct thermal_cooling_device *cdev;
8647 ++ struct thermal_zone_device *pos = NULL;
8648 + int result;
8649 +
8650 + if (type && strlen(type) >= THERMAL_NAME_LENGTH)
8651 +@@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np,
8652 + /* Update binding information for 'this' new cdev */
8653 + bind_cdev(cdev);
8654 +
8655 ++ mutex_lock(&thermal_list_lock);
8656 ++ list_for_each_entry(pos, &thermal_tz_list, node)
8657 ++ if (atomic_cmpxchg(&pos->need_update, 1, 0))
8658 ++ thermal_zone_device_update(pos);
8659 ++ mutex_unlock(&thermal_list_lock);
8660 ++
8661 + return cdev;
8662 + }
8663 +
8664 +@@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
8665 + tz->trips = trips;
8666 + tz->passive_delay = passive_delay;
8667 + tz->polling_delay = polling_delay;
8668 ++ /* A new thermal zone needs to be updated anyway. */
8669 ++ atomic_set(&tz->need_update, 1);
8670 +
8671 + dev_set_name(&tz->device, "thermal_zone%d", tz->id);
8672 + result = device_register(&tz->device);
8673 +@@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
8674 +
8675 + INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
8676 +
8677 +- thermal_zone_device_update(tz);
8678 ++ thermal_zone_device_reset(tz);
8679 ++ /* Update the new thermal zone and mark it as already updated. */
8680 ++ if (atomic_cmpxchg(&tz->need_update, 1, 0))
8681 ++ thermal_zone_device_update(tz);
8682 +
8683 + return tz;
8684 +
8685 +@@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void)
8686 + thermal_gov_power_allocator_unregister();
8687 + }
8688 +
8689 ++static int thermal_pm_notify(struct notifier_block *nb,
8690 ++ unsigned long mode, void *_unused)
8691 ++{
8692 ++ struct thermal_zone_device *tz;
8693 ++
8694 ++ switch (mode) {
8695 ++ case PM_HIBERNATION_PREPARE:
8696 ++ case PM_RESTORE_PREPARE:
8697 ++ case PM_SUSPEND_PREPARE:
8698 ++ atomic_set(&in_suspend, 1);
8699 ++ break;
8700 ++ case PM_POST_HIBERNATION:
8701 ++ case PM_POST_RESTORE:
8702 ++ case PM_POST_SUSPEND:
8703 ++ atomic_set(&in_suspend, 0);
8704 ++ list_for_each_entry(tz, &thermal_tz_list, node) {
8705 ++ thermal_zone_device_reset(tz);
8706 ++ thermal_zone_device_update(tz);
8707 ++ }
8708 ++ break;
8709 ++ default:
8710 ++ break;
8711 ++ }
8712 ++ return 0;
8713 ++}
8714 ++
8715 ++static struct notifier_block thermal_pm_nb = {
8716 ++ .notifier_call = thermal_pm_notify,
8717 ++};
8718 ++
8719 + static int __init thermal_init(void)
8720 + {
8721 + int result;
8722 +@@ -2160,6 +2223,11 @@ static int __init thermal_init(void)
8723 + if (result)
8724 + goto exit_netlink;
8725 +
8726 ++ result = register_pm_notifier(&thermal_pm_nb);
8727 ++ if (result)
8728 ++ pr_warn("Thermal: Can not register suspend notifier, return %d\n",
8729 ++ result);
8730 ++
8731 + return 0;
8732 +
8733 + exit_netlink:
8734 +@@ -2179,6 +2247,7 @@ error:
8735 +
8736 + static void __exit thermal_exit(void)
8737 + {
8738 ++ unregister_pm_notifier(&thermal_pm_nb);
8739 + of_thermal_destroy_zones();
8740 + genetlink_exit();
8741 + class_unregister(&thermal_class);
8742 +diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
8743 +index d7ac1fc..749d41a 100644
8744 +--- a/drivers/thermal/thermal_core.h
8745 ++++ b/drivers/thermal/thermal_core.h
8746 +@@ -41,6 +41,7 @@ struct thermal_instance {
8747 + struct thermal_zone_device *tz;
8748 + struct thermal_cooling_device *cdev;
8749 + int trip;
8750 ++ bool initialized;
8751 + unsigned long upper; /* Highest cooling state for this trip point */
8752 + unsigned long lower; /* Lowest cooling state for this trip point */
8753 + unsigned long target; /* expected cooling state */
8754 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
8755 +index e4c70dc..fa4e239 100644
8756 +--- a/drivers/usb/class/cdc-acm.c
8757 ++++ b/drivers/usb/class/cdc-acm.c
8758 +@@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
8759 + },
8760 + #endif
8761 +
8762 ++ /* Samsung phone in firmware update mode */
8763 ++ { USB_DEVICE(0x04e8, 0x685d),
8764 ++ .driver_info = IGNORE_DEVICE,
8765 ++ },
8766 ++
8767 + /* Exclude Infineon Flash Loader utility */
8768 + { USB_DEVICE(0x058b, 0x0041),
8769 + .driver_info = IGNORE_DEVICE,
8770 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
8771 +index 36f1cb7..78be201 100644
8772 +--- a/drivers/usb/dwc3/core.h
8773 ++++ b/drivers/usb/dwc3/core.h
8774 +@@ -853,7 +853,6 @@ struct dwc3 {
8775 + unsigned pullups_connected:1;
8776 + unsigned resize_fifos:1;
8777 + unsigned setup_packet_pending:1;
8778 +- unsigned start_config_issued:1;
8779 + unsigned three_stage_setup:1;
8780 + unsigned usb3_lpm_capable:1;
8781 +
8782 +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
8783 +index 5320e93..b13912d 100644
8784 +--- a/drivers/usb/dwc3/ep0.c
8785 ++++ b/drivers/usb/dwc3/ep0.c
8786 +@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8787 + int ret;
8788 + u32 reg;
8789 +
8790 +- dwc->start_config_issued = false;
8791 + cfg = le16_to_cpu(ctrl->wValue);
8792 +
8793 + switch (state) {
8794 +@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8795 + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
8796 + ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
8797 + break;
8798 +- case USB_REQ_SET_INTERFACE:
8799 +- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
8800 +- dwc->start_config_issued = false;
8801 +- /* Fall through */
8802 + default:
8803 + dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
8804 + ret = dwc3_ep0_delegate_req(dwc, ctrl);
8805 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
8806 +index a58376f..69ffe6e 100644
8807 +--- a/drivers/usb/dwc3/gadget.c
8808 ++++ b/drivers/usb/dwc3/gadget.c
8809 +@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
8810 + dep->trb_pool_dma = 0;
8811 + }
8812 +
8813 ++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
8814 ++
8815 ++/**
8816 ++ * dwc3_gadget_start_config - Configure EP resources
8817 ++ * @dwc: pointer to our controller context structure
8818 ++ * @dep: endpoint that is being enabled
8819 ++ *
8820 ++ * The assignment of transfer resources cannot perfectly follow the
8821 ++ * databook, because the controller driver does not have
8822 ++ * all knowledge of the configuration in advance. It is given this
8823 ++ * information piecemeal by the composite gadget framework after every
8824 ++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
8825 ++ * programming model in this scenario can cause errors, for two
8826 ++ * reasons:
8827 ++ *
8828 ++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
8829 ++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
8830 ++ * multiple interfaces.
8831 ++ *
8832 ++ * 2) The databook does not mention doing more DEPXFERCFG for a new
8833 ++ * endpoint on an alt setting (8.1.6).
8834 ++ *
8835 ++ * The following simplified method is used instead:
8836 ++ *
8837 ++ * All hardware endpoints can be assigned a transfer resource and this
8838 ++ * setting will stay persistent until either a core reset or
8839 ++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
8840 ++ * do DEPXFERCFG for every hardware endpoint as well. We are
8841 ++ * guaranteed that there are as many transfer resources as endpoints.
8842 ++ *
8843 ++ * This function is called for each endpoint when it is being enabled
8844 ++ * but is triggered only when called for EP0-out, which always happens
8845 ++ * first, and which should only happen in one of the above conditions.
8846 ++ */
8847 + static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
8848 + {
8849 + struct dwc3_gadget_ep_cmd_params params;
8850 + u32 cmd;
8851 ++ int i;
8852 ++ int ret;
8853 ++
8854 ++ if (dep->number)
8855 ++ return 0;
8856 +
8857 + memset(&params, 0x00, sizeof(params));
8858 ++ cmd = DWC3_DEPCMD_DEPSTARTCFG;
8859 +
8860 +- if (dep->number != 1) {
8861 +- cmd = DWC3_DEPCMD_DEPSTARTCFG;
8862 +- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
8863 +- if (dep->number > 1) {
8864 +- if (dwc->start_config_issued)
8865 +- return 0;
8866 +- dwc->start_config_issued = true;
8867 +- cmd |= DWC3_DEPCMD_PARAM(2);
8868 +- }
8869 ++ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
8870 ++ if (ret)
8871 ++ return ret;
8872 +
8873 +- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
8874 ++ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
8875 ++ struct dwc3_ep *dep = dwc->eps[i];
8876 ++
8877 ++ if (!dep)
8878 ++ continue;
8879 ++
8880 ++ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
8881 ++ if (ret)
8882 ++ return ret;
8883 + }
8884 +
8885 + return 0;
8886 +@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
8887 + struct dwc3_trb *trb_st_hw;
8888 + struct dwc3_trb *trb_link;
8889 +
8890 +- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
8891 +- if (ret)
8892 +- return ret;
8893 +-
8894 + dep->endpoint.desc = desc;
8895 + dep->comp_desc = comp_desc;
8896 + dep->type = usb_endpoint_type(desc);
8897 +@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
8898 + }
8899 + dwc3_writel(dwc->regs, DWC3_DCFG, reg);
8900 +
8901 +- dwc->start_config_issued = false;
8902 +-
8903 + /* Start with SuperSpeed Default */
8904 + dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
8905 +
8906 +@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
8907 + dwc3_writel(dwc->regs, DWC3_DCTL, reg);
8908 +
8909 + dwc3_disconnect_gadget(dwc);
8910 +- dwc->start_config_issued = false;
8911 +
8912 + dwc->gadget.speed = USB_SPEED_UNKNOWN;
8913 + dwc->setup_packet_pending = false;
8914 +@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
8915 +
8916 + dwc3_stop_active_transfers(dwc);
8917 + dwc3_clear_stall_all_ep(dwc);
8918 +- dwc->start_config_issued = false;
8919 +
8920 + /* Reset device address to zero */
8921 + reg = dwc3_readl(dwc->regs, DWC3_DCFG);
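Taken together, the dwc3 hunks above replace per-request bookkeeping with a one-shot scheme: DEPSTARTCFG is issued exactly once, when EP0-out is enabled, and a transfer resource is immediately assigned to every hardware endpoint via DEPXFERCFG. That is why the start_config_issued flag, and every site that cleared it, can be deleted throughout the rest of this patch.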
8922 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
8923 +index 1dd9919..a7caf53 100644
8924 +--- a/drivers/usb/serial/cp210x.c
8925 ++++ b/drivers/usb/serial/cp210x.c
8926 +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
8927 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
8928 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
8929 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
8930 ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
8931 ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
8932 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
8933 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
8934 + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
8935 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
8936 +index db86e51..8849439a 100644
8937 +--- a/drivers/usb/serial/option.c
8938 ++++ b/drivers/usb/serial/option.c
8939 +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
8940 + #define TOSHIBA_PRODUCT_G450 0x0d45
8941 +
8942 + #define ALINK_VENDOR_ID 0x1e0e
8943 ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
8944 + #define ALINK_PRODUCT_PH300 0x9100
8945 + #define ALINK_PRODUCT_3GU 0x9200
8946 +
8947 +@@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
8948 + .reserved = BIT(3) | BIT(4),
8949 + };
8950 +
8951 ++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
8952 ++ .reserved = BIT(5) | BIT(6),
8953 ++};
8954 ++
8955 + static const struct option_blacklist_info telit_le910_blacklist = {
8956 + .sendsetup = BIT(0),
8957 + .reserved = BIT(1) | BIT(2),
8958 +@@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = {
8959 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
8960 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
8961 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
8962 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
8963 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
8964 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
8965 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
8966 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
8967 +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
8968 + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
8969 + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
8970 + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
8971 ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
8972 ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
8973 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
8974 + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
8975 + },
8976 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
8977 +index 7efc329..7d3e5d0 100644
8978 +--- a/drivers/virtio/virtio_balloon.c
8979 ++++ b/drivers/virtio/virtio_balloon.c
8980 +@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
8981 + */
8982 + if (vb->num_pfns != 0)
8983 + tell_host(vb, vb->deflate_vq);
8984 +- mutex_unlock(&vb->balloon_lock);
8985 + release_pages_balloon(vb);
8986 ++ mutex_unlock(&vb->balloon_lock);
8987 + return num_freed_pages;
8988 + }
8989 +
8990 +diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
8991 +index 78f804a..2046a68 100644
8992 +--- a/drivers/virtio/virtio_pci_common.c
8993 ++++ b/drivers/virtio/virtio_pci_common.c
8994 +@@ -545,6 +545,7 @@ err_enable_device:
8995 + static void virtio_pci_remove(struct pci_dev *pci_dev)
8996 + {
8997 + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
8998 ++ struct device *dev = get_device(&vp_dev->vdev.dev);
8999 +
9000 + unregister_virtio_device(&vp_dev->vdev);
9001 +
9002 +@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
9003 + virtio_pci_modern_remove(vp_dev);
9004 +
9005 + pci_disable_device(pci_dev);
9006 ++ put_device(dev);
9007 + }
9008 +
9009 + static struct pci_driver virtio_pci_driver = {
9010 +diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
9011 +index 73dafdc..fb02214 100644
9012 +--- a/drivers/xen/xen-pciback/pciback_ops.c
9013 ++++ b/drivers/xen/xen-pciback/pciback_ops.c
9014 +@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
9015 + /*
9016 + * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
9017 + * to access the BARs where the MSI-X entries reside.
9018 ++ * But VF devices are unique in that the PF needs to be checked instead.
9019 + */
9020 +- pci_read_config_word(dev, PCI_COMMAND, &cmd);
9021 ++ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
9022 + if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
9023 + return -ENXIO;
9024 +
9025 +@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
9026 + struct xen_pcibk_dev_data *dev_data = NULL;
9027 + struct xen_pci_op *op = &pdev->op;
9028 + int test_intx = 0;
9029 ++#ifdef CONFIG_PCI_MSI
9030 ++ unsigned int nr = 0;
9031 ++#endif
9032 +
9033 + *op = pdev->sh_info->op;
9034 + barrier();
9035 +@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
9036 + op->err = xen_pcibk_disable_msi(pdev, dev, op);
9037 + break;
9038 + case XEN_PCI_OP_enable_msix:
9039 ++ nr = op->value;
9040 + op->err = xen_pcibk_enable_msix(pdev, dev, op);
9041 + break;
9042 + case XEN_PCI_OP_disable_msix:
9043 +@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
9044 + if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
9045 + unsigned int i;
9046 +
9047 +- for (i = 0; i < op->value; i++)
9048 ++ for (i = 0; i < nr; i++)
9049 + pdev->sh_info->op.msix_entries[i].vector =
9050 + op->msix_entries[i].vector;
9051 + }
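The nr variable introduced above latches op->value before xen_pcibk_enable_msix() runs, because the handler may overwrite op->value on return; bounding the copy loop by the stale field would copy the wrong number of MSI-X entries. A self-contained userspace sketch of the latch-before-call pattern (all names hypothetical):

#include <stdio.h>

struct op {
	unsigned int value;	/* in: entry count; reused as output */
	int err;
};

/* Hypothetical handler that, like xen_pcibk_enable_msix(), clobbers
 * op->value while processing the request. */
static int enable(struct op *op)
{
	op->value = 0;
	return 0;
}

int main(void)
{
	struct op op = { .value = 4 };
	unsigned int nr = op.value;	/* latch the input before the call */
	unsigned int i;

	op.err = enable(&op);
	for (i = 0; i < nr; i++)	/* bounded by the latched count */
		printf("copy entry %u\n", i);
	return 0;
}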
9052 +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
9053 +index ad4eb10..51387d7 100644
9054 +--- a/drivers/xen/xen-scsiback.c
9055 ++++ b/drivers/xen/xen-scsiback.c
9056 +@@ -939,12 +939,12 @@ out:
9057 + spin_unlock_irqrestore(&info->v2p_lock, flags);
9058 +
9059 + out_free:
9060 +- mutex_lock(&tpg->tv_tpg_mutex);
9061 +- tpg->tv_tpg_fe_count--;
9062 +- mutex_unlock(&tpg->tv_tpg_mutex);
9063 +-
9064 +- if (err)
9065 ++ if (err) {
9066 ++ mutex_lock(&tpg->tv_tpg_mutex);
9067 ++ tpg->tv_tpg_fe_count--;
9068 ++ mutex_unlock(&tpg->tv_tpg_mutex);
9069 + kfree(new);
9070 ++ }
9071 +
9072 + return err;
9073 + }
9074 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
9075 +index 0ddca67..4958360 100644
9076 +--- a/fs/btrfs/disk-io.c
9077 ++++ b/fs/btrfs/disk-io.c
9078 +@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
9079 + ret = get_anon_bdev(&root->anon_dev);
9080 + if (ret)
9081 + goto free_writers;
9082 ++
9083 ++ mutex_lock(&root->objectid_mutex);
9084 ++ ret = btrfs_find_highest_objectid(root,
9085 ++ &root->highest_objectid);
9086 ++ if (ret) {
9087 ++ mutex_unlock(&root->objectid_mutex);
9088 ++ goto free_root_dev;
9089 ++ }
9090 ++
9091 ++ ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
9092 ++
9093 ++ mutex_unlock(&root->objectid_mutex);
9094 ++
9095 + return 0;
9096 +
9097 ++free_root_dev:
9098 ++ free_anon_bdev(root->anon_dev);
9099 + free_writers:
9100 + btrfs_free_subvolume_writers(root->subv_writers);
9101 + fail:
9102 +@@ -2667,6 +2682,7 @@ int open_ctree(struct super_block *sb,
9103 + if (btrfs_check_super_csum(bh->b_data)) {
9104 + printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
9105 + err = -EINVAL;
9106 ++ brelse(bh);
9107 + goto fail_alloc;
9108 + }
9109 +
9110 +@@ -2899,6 +2915,18 @@ retry_root_backup:
9111 + tree_root->commit_root = btrfs_root_node(tree_root);
9112 + btrfs_set_root_refs(&tree_root->root_item, 1);
9113 +
9114 ++ mutex_lock(&tree_root->objectid_mutex);
9115 ++ ret = btrfs_find_highest_objectid(tree_root,
9116 ++ &tree_root->highest_objectid);
9117 ++ if (ret) {
9118 ++ mutex_unlock(&tree_root->objectid_mutex);
9119 ++ goto recovery_tree_root;
9120 ++ }
9121 ++
9122 ++ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
9123 ++
9124 ++ mutex_unlock(&tree_root->objectid_mutex);
9125 ++
9126 + ret = btrfs_read_roots(fs_info, tree_root);
9127 + if (ret)
9128 + goto recovery_tree_root;
9129 +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
9130 +index 767a605..07573dc 100644
9131 +--- a/fs/btrfs/inode-map.c
9132 ++++ b/fs/btrfs/inode-map.c
9133 +@@ -515,7 +515,7 @@ out:
9134 + return ret;
9135 + }
9136 +
9137 +-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
9138 ++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
9139 + {
9140 + struct btrfs_path *path;
9141 + int ret;
9142 +@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
9143 + int ret;
9144 + mutex_lock(&root->objectid_mutex);
9145 +
9146 +- if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
9147 +- ret = btrfs_find_highest_objectid(root,
9148 +- &root->highest_objectid);
9149 +- if (ret)
9150 +- goto out;
9151 +- }
9152 +-
9153 + if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
9154 + ret = -ENOSPC;
9155 + goto out;
9156 +diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
9157 +index ddb347b..c8e864b 100644
9158 +--- a/fs/btrfs/inode-map.h
9159 ++++ b/fs/btrfs/inode-map.h
9160 +@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
9161 + struct btrfs_trans_handle *trans);
9162 +
9163 + int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
9164 ++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
9165 +
9166 + #endif
9167 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
9168 +index 54b5f0d..52fc1b5 100644
9169 +--- a/fs/btrfs/inode.c
9170 ++++ b/fs/btrfs/inode.c
9171 +@@ -6493,7 +6493,7 @@ out_unlock_inode:
9172 + static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9173 + struct dentry *dentry)
9174 + {
9175 +- struct btrfs_trans_handle *trans;
9176 ++ struct btrfs_trans_handle *trans = NULL;
9177 + struct btrfs_root *root = BTRFS_I(dir)->root;
9178 + struct inode *inode = d_inode(old_dentry);
9179 + u64 index;
9180 +@@ -6519,6 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9181 + trans = btrfs_start_transaction(root, 5);
9182 + if (IS_ERR(trans)) {
9183 + err = PTR_ERR(trans);
9184 ++ trans = NULL;
9185 + goto fail;
9186 + }
9187 +
9188 +@@ -6552,9 +6553,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9189 + btrfs_log_new_name(trans, inode, NULL, parent);
9190 + }
9191 +
9192 +- btrfs_end_transaction(trans, root);
9193 + btrfs_balance_delayed_items(root);
9194 + fail:
9195 ++ if (trans)
9196 ++ btrfs_end_transaction(trans, root);
9197 + if (drop_inode) {
9198 + inode_dec_link_count(inode);
9199 + iput(inode);
9200 +@@ -8548,15 +8550,28 @@ int btrfs_readpage(struct file *file, struct page *page)
9201 + static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
9202 + {
9203 + struct extent_io_tree *tree;
9204 +-
9205 ++ struct inode *inode = page->mapping->host;
9206 ++ int ret;
9207 +
9208 + if (current->flags & PF_MEMALLOC) {
9209 + redirty_page_for_writepage(wbc, page);
9210 + unlock_page(page);
9211 + return 0;
9212 + }
9213 ++
9214 ++ /*
9215 ++ * If we are under memory pressure we will call this directly from the
9216 ++ * VM, so we need to make sure we have the inode referenced for the
9217 ++ * ordered extent. If not, just return as if we didn't do anything.
9218 ++ */
9219 ++ if (!igrab(inode)) {
9220 ++ redirty_page_for_writepage(wbc, page);
9221 ++ return AOP_WRITEPAGE_ACTIVATE;
9222 ++ }
9223 + tree = &BTRFS_I(page->mapping->host)->io_tree;
9224 +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9225 ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9226 ++ btrfs_add_delayed_iput(inode);
9227 ++ return ret;
9228 + }
9229 +
9230 + static int btrfs_writepages(struct address_space *mapping,
9231 +@@ -9650,9 +9665,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9232 + /*
9233 + * 2 items for inode item and ref
9234 + * 2 items for dir items
9235 ++ * 1 item for updating parent inode item
9236 ++ * 1 item for the inline extent item
9237 + * 1 item for xattr if selinux is on
9238 + */
9239 +- trans = btrfs_start_transaction(root, 5);
9240 ++ trans = btrfs_start_transaction(root, 7);
9241 + if (IS_ERR(trans))
9242 + return PTR_ERR(trans);
9243 +
9244 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
9245 +index 08fd3f0..f07d01b 100644
9246 +--- a/fs/btrfs/ioctl.c
9247 ++++ b/fs/btrfs/ioctl.c
9248 +@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
9249 + goto fail;
9250 + }
9251 +
9252 ++ mutex_lock(&new_root->objectid_mutex);
9253 ++ new_root->highest_objectid = new_dirid;
9254 ++ mutex_unlock(&new_root->objectid_mutex);
9255 ++
9256 + /*
9257 + * insert the directory item
9258 + */
9259 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
9260 +index 355a458..63a6152 100644
9261 +--- a/fs/btrfs/send.c
9262 ++++ b/fs/btrfs/send.c
9263 +@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
9264 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9265 + if (ret < 0)
9266 + goto out;
9267 +- BUG_ON(ret);
9268 ++ if (ret) {
9269 ++ /*
9270 ++ * An empty symlink inode. Can happen in rare error paths when
9271 ++ * creating a symlink (transaction committed before the inode
9272 ++ * eviction handler removed the symlink inode items and a crash
9273 ++ * happened in between or the subvol was snapshoted in between).
9274 ++ * Print an informative message to dmesg/syslog so that the user
9275 ++ * can delete the symlink.
9276 ++ */
9277 ++ btrfs_err(root->fs_info,
9278 ++ "Found empty symlink inode %llu at root %llu",
9279 ++ ino, root->root_key.objectid);
9280 ++ ret = -EIO;
9281 ++ goto out;
9282 ++ }
9283 +
9284 + ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
9285 + struct btrfs_file_extent_item);
9286 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
9287 +index 24154e4..fe609b8 100644
9288 +--- a/fs/btrfs/super.c
9289 ++++ b/fs/btrfs/super.c
9290 +@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
9291 + * there are other factors that may change the result (like a new metadata
9292 + * chunk).
9293 + *
9294 ++ * If metadata is exhausted, f_bavail will be 0.
9295 ++ *
9296 + * FIXME: not accurate for mixed block groups, total and free/used are ok,
9297 + * available appears slightly larger.
9298 + */
9299 +@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9300 + struct btrfs_space_info *found;
9301 + u64 total_used = 0;
9302 + u64 total_free_data = 0;
9303 ++ u64 total_free_meta = 0;
9304 + int bits = dentry->d_sb->s_blocksize_bits;
9305 + __be32 *fsid = (__be32 *)fs_info->fsid;
9306 + unsigned factor = 1;
9307 + struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
9308 + int ret;
9309 ++ u64 thresh = 0;
9310 +
9311 + /*
9312 + * holding chunk_muext to avoid allocating new chunks, holding
9313 +@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9314 + }
9315 + }
9316 + }
9317 ++ if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
9318 ++ total_free_meta += found->disk_total - found->disk_used;
9319 +
9320 + total_used += found->disk_used;
9321 + }
9322 +@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9323 + buf->f_bavail += div_u64(total_free_data, factor);
9324 + buf->f_bavail = buf->f_bavail >> bits;
9325 +
9326 ++ /*
9327 ++ * We calculate the remaining metadata space minus global reserve. If
9328 ++ * this is (supposedly) smaller than zero, there's no space. But this
9329 ++ * does not hold in practice; the exhausted state happens while there is
9330 ++ * still some positive delta. So we apply some guesswork and compare the
9331 ++ * delta to a 4M threshold. (Practically observed delta was ~2M.)
9332 ++ *
9333 ++ * We probably cannot calculate the exact threshold value because this
9334 ++ * depends on the internal reservations requested by various
9335 ++ * operations, so some operations that consume a little metadata will
9336 ++ * succeed even if the Avail is zero. But this is better than the other
9337 ++ * way around.
9338 ++ */
9339 ++ thresh = 4 * 1024 * 1024;
9340 ++
9341 ++ if (total_free_meta - thresh < block_rsv->size)
9342 ++ buf->f_bavail = 0;
9343 ++
9344 + buf->f_type = BTRFS_SUPER_MAGIC;
9345 + buf->f_bsize = dentry->d_sb->s_blocksize;
9346 + buf->f_namelen = BTRFS_NAME_LEN;
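Reading the new check as an inequality: f_bavail is forced to zero whenever total_free_meta < block_rsv->size + thresh. With the 4 MiB threshold, an (illustrative) filesystem with total_free_meta = 6 MiB and a 4 MiB global reserve gives 6 MiB - 4 MiB = 2 MiB, which is below the threshold, so df reports Avail as 0 even though a little metadata space is nominally free.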
9347 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
9348 +index 9e08447..9c62a6f 100644
9349 +--- a/fs/btrfs/volumes.c
9350 ++++ b/fs/btrfs/volumes.c
9351 +@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
9352 + spin_lock_init(&dev->reada_lock);
9353 + atomic_set(&dev->reada_in_flight, 0);
9354 + atomic_set(&dev->dev_stats_ccnt, 0);
9355 ++ btrfs_device_data_ordered_init(dev);
9356 + INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
9357 + INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
9358 +
9359 +diff --git a/fs/direct-io.c b/fs/direct-io.c
9360 +index 602e844..01171d8 100644
9361 +--- a/fs/direct-io.c
9362 ++++ b/fs/direct-io.c
9363 +@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
9364 + dio->io_error = -EIO;
9365 +
9366 + if (dio->is_async && dio->rw == READ && dio->should_dirty) {
9367 +- bio_check_pages_dirty(bio); /* transfers ownership */
9368 + err = bio->bi_error;
9369 ++ bio_check_pages_dirty(bio); /* transfers ownership */
9370 + } else {
9371 + bio_for_each_segment_all(bvec, bio, i) {
9372 + struct page *page = bvec->bv_page;
9373 +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
9374 +index 90001da..66842e5 100644
9375 +--- a/fs/efivarfs/file.c
9376 ++++ b/fs/efivarfs/file.c
9377 +@@ -10,6 +10,7 @@
9378 + #include <linux/efi.h>
9379 + #include <linux/fs.h>
9380 + #include <linux/slab.h>
9381 ++#include <linux/mount.h>
9382 +
9383 + #include "internal.h"
9384 +
9385 +@@ -103,9 +104,78 @@ out_free:
9386 + return size;
9387 + }
9388 +
9389 ++static int
9390 ++efivarfs_ioc_getxflags(struct file *file, void __user *arg)
9391 ++{
9392 ++ struct inode *inode = file->f_mapping->host;
9393 ++ unsigned int i_flags;
9394 ++ unsigned int flags = 0;
9395 ++
9396 ++ i_flags = inode->i_flags;
9397 ++ if (i_flags & S_IMMUTABLE)
9398 ++ flags |= FS_IMMUTABLE_FL;
9399 ++
9400 ++ if (copy_to_user(arg, &flags, sizeof(flags)))
9401 ++ return -EFAULT;
9402 ++ return 0;
9403 ++}
9404 ++
9405 ++static int
9406 ++efivarfs_ioc_setxflags(struct file *file, void __user *arg)
9407 ++{
9408 ++ struct inode *inode = file->f_mapping->host;
9409 ++ unsigned int flags;
9410 ++ unsigned int i_flags = 0;
9411 ++ int error;
9412 ++
9413 ++ if (!inode_owner_or_capable(inode))
9414 ++ return -EACCES;
9415 ++
9416 ++ if (copy_from_user(&flags, arg, sizeof(flags)))
9417 ++ return -EFAULT;
9418 ++
9419 ++ if (flags & ~FS_IMMUTABLE_FL)
9420 ++ return -EOPNOTSUPP;
9421 ++
9422 ++ if (!capable(CAP_LINUX_IMMUTABLE))
9423 ++ return -EPERM;
9424 ++
9425 ++ if (flags & FS_IMMUTABLE_FL)
9426 ++ i_flags |= S_IMMUTABLE;
9427 ++
9428 ++
9429 ++ error = mnt_want_write_file(file);
9430 ++ if (error)
9431 ++ return error;
9432 ++
9433 ++ mutex_lock(&inode->i_mutex);
9434 ++ inode_set_flags(inode, i_flags, S_IMMUTABLE);
9435 ++ mutex_unlock(&inode->i_mutex);
9436 ++
9437 ++ mnt_drop_write_file(file);
9438 ++
9439 ++ return 0;
9440 ++}
9441 ++
9442 ++long
9443 ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
9444 ++{
9445 ++ void __user *arg = (void __user *)p;
9446 ++
9447 ++ switch (cmd) {
9448 ++ case FS_IOC_GETFLAGS:
9449 ++ return efivarfs_ioc_getxflags(file, arg);
9450 ++ case FS_IOC_SETFLAGS:
9451 ++ return efivarfs_ioc_setxflags(file, arg);
9452 ++ }
9453 ++
9454 ++ return -ENOTTY;
9455 ++}
9456 ++
9457 + const struct file_operations efivarfs_file_operations = {
9458 + .open = simple_open,
9459 + .read = efivarfs_file_read,
9460 + .write = efivarfs_file_write,
9461 + .llseek = no_llseek,
9462 ++ .unlocked_ioctl = efivarfs_file_ioctl,
9463 + };
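These are the same FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls that chattr uses, which is how "chattr -i" clears the immutable bit on an efivarfs file before deletion. A userspace sketch of the equivalent call sequence (the variable path is illustrative, and CAP_LINUX_IMMUTABLE is required):

#include <fcntl.h>
#include <linux/fs.h>	/* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IMMUTABLE_FL */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path; substitute a real variable's name-GUID file. */
	const char *path = "/sys/firmware/efi/efivars/SomeVar-"
			   "8be4df61-93ca-11d2-aa0d-00e098032b8c";
	unsigned int flags;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags &= ~FS_IMMUTABLE_FL;	/* what "chattr -i" does */
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags))
			perror("FS_IOC_SETFLAGS");
	}
	close(fd);
	return 0;
}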
9464 +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
9465 +index 3381b9d..e2ab6d0 100644
9466 +--- a/fs/efivarfs/inode.c
9467 ++++ b/fs/efivarfs/inode.c
9468 +@@ -15,7 +15,8 @@
9469 + #include "internal.h"
9470 +
9471 + struct inode *efivarfs_get_inode(struct super_block *sb,
9472 +- const struct inode *dir, int mode, dev_t dev)
9473 ++ const struct inode *dir, int mode,
9474 ++ dev_t dev, bool is_removable)
9475 + {
9476 + struct inode *inode = new_inode(sb);
9477 +
9478 +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
9479 + inode->i_ino = get_next_ino();
9480 + inode->i_mode = mode;
9481 + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
9482 ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
9483 + switch (mode & S_IFMT) {
9484 + case S_IFREG:
9485 + inode->i_fop = &efivarfs_file_operations;
9486 +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
9487 + static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9488 + umode_t mode, bool excl)
9489 + {
9490 +- struct inode *inode;
9491 ++ struct inode *inode = NULL;
9492 + struct efivar_entry *var;
9493 + int namelen, i = 0, err = 0;
9494 ++ bool is_removable = false;
9495 +
9496 + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
9497 + return -EINVAL;
9498 +
9499 +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
9500 +- if (!inode)
9501 +- return -ENOMEM;
9502 +-
9503 + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
9504 +- if (!var) {
9505 +- err = -ENOMEM;
9506 +- goto out;
9507 +- }
9508 ++ if (!var)
9509 ++ return -ENOMEM;
9510 +
9511 + /* length of the variable name itself: remove GUID and separator */
9512 + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
9513 +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9514 + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
9515 + &var->var.VendorGuid);
9516 +
9517 ++ if (efivar_variable_is_removable(var->var.VendorGuid,
9518 ++ dentry->d_name.name, namelen))
9519 ++ is_removable = true;
9520 ++
9521 ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
9522 ++ if (!inode) {
9523 ++ err = -ENOMEM;
9524 ++ goto out;
9525 ++ }
9526 ++
9527 + for (i = 0; i < namelen; i++)
9528 + var->var.VariableName[i] = dentry->d_name.name[i];
9529 +
9530 +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9531 + out:
9532 + if (err) {
9533 + kfree(var);
9534 +- iput(inode);
9535 ++ if (inode)
9536 ++ iput(inode);
9537 + }
9538 + return err;
9539 + }
9540 +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
9541 +index b5ff16a..b450518 100644
9542 +--- a/fs/efivarfs/internal.h
9543 ++++ b/fs/efivarfs/internal.h
9544 +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
9545 + extern const struct inode_operations efivarfs_dir_inode_operations;
9546 + extern bool efivarfs_valid_name(const char *str, int len);
9547 + extern struct inode *efivarfs_get_inode(struct super_block *sb,
9548 +- const struct inode *dir, int mode, dev_t dev);
9549 ++ const struct inode *dir, int mode, dev_t dev,
9550 ++ bool is_removable);
9551 +
9552 + extern struct list_head efivarfs_list;
9553 +
9554 +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
9555 +index 86a2121..abb244b 100644
9556 +--- a/fs/efivarfs/super.c
9557 ++++ b/fs/efivarfs/super.c
9558 +@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9559 + struct dentry *dentry, *root = sb->s_root;
9560 + unsigned long size = 0;
9561 + char *name;
9562 +- int len, i;
9563 ++ int len;
9564 + int err = -ENOMEM;
9565 ++ bool is_removable = false;
9566 +
9567 + entry = kzalloc(sizeof(*entry), GFP_KERNEL);
9568 + if (!entry)
9569 +@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9570 + memcpy(entry->var.VariableName, name16, name_size);
9571 + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
9572 +
9573 +- len = ucs2_strlen(entry->var.VariableName);
9574 ++ len = ucs2_utf8size(entry->var.VariableName);
9575 +
9576 + /* name, plus '-', plus GUID, plus NUL*/
9577 + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
9578 + if (!name)
9579 + goto fail;
9580 +
9581 +- for (i = 0; i < len; i++)
9582 +- name[i] = entry->var.VariableName[i] & 0xFF;
9583 ++ ucs2_as_utf8(name, entry->var.VariableName, len);
9584 ++
9585 ++ if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
9586 ++ is_removable = true;
9587 +
9588 + name[len] = '-';
9589 +
9590 +@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9591 +
9592 + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
9593 +
9594 +- inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
9595 ++ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
9596 ++ is_removable);
9597 + if (!inode)
9598 + goto fail_name;
9599 +
9600 +@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
9601 + sb->s_d_op = &efivarfs_d_ops;
9602 + sb->s_time_gran = 1;
9603 +
9604 +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
9605 ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
9606 + if (!inode)
9607 + return -ENOMEM;
9608 + inode->i_op = &efivarfs_dir_inode_operations;
9609 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
9610 +index ea433a7..06bda03 100644
9611 +--- a/fs/ext4/inode.c
9612 ++++ b/fs/ext4/inode.c
9613 +@@ -657,6 +657,34 @@ has_zeroout:
9614 + return retval;
9615 + }
9616 +
9617 ++/*
9618 ++ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
9619 ++ * we have to be careful as someone else may be manipulating b_state as well.
9620 ++ */
9621 ++static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
9622 ++{
9623 ++ unsigned long old_state;
9624 ++ unsigned long new_state;
9625 ++
9626 ++ flags &= EXT4_MAP_FLAGS;
9627 ++
9628 ++ /* Dummy buffer_head? Set non-atomically. */
9629 ++ if (!bh->b_page) {
9630 ++ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
9631 ++ return;
9632 ++ }
9633 ++ /*
9634 ++ * Someone else may be modifying b_state. Be careful! This is ugly but
9635 ++ * once we get rid of using bh as a container for mapping information
9636 ++ * to pass to / from get_block functions, this can go away.
9637 ++ */
9638 ++ do {
9639 ++ old_state = READ_ONCE(bh->b_state);
9640 ++ new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
9641 ++ } while (unlikely(
9642 ++ cmpxchg(&bh->b_state, old_state, new_state) != old_state));
9643 ++}
9644 ++
9645 + /* Maximum number of blocks we map for direct IO at once. */
9646 + #define DIO_MAX_BLOCKS 4096
9647 +
9648 +@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
9649 + ext4_io_end_t *io_end = ext4_inode_aio(inode);
9650 +
9651 + map_bh(bh, inode->i_sb, map.m_pblk);
9652 +- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
9653 ++ ext4_update_bh_state(bh, map.m_flags);
9654 + if (IS_DAX(inode) && buffer_unwritten(bh)) {
9655 + /*
9656 + * dgc: I suspect unwritten conversion on ext4+DAX is
9657 +@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
9658 + return ret;
9659 +
9660 + map_bh(bh, inode->i_sb, map.m_pblk);
9661 +- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
9662 ++ ext4_update_bh_state(bh, map.m_flags);
9663 +
9664 + if (buffer_unwritten(bh)) {
9665 + /* A delayed write to unwritten bh should be marked
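ext4_update_bh_state() above is the classic compare-and-swap update loop: read the current word, compute a new value that changes only the bits you own, and retry if another CPU raced in between. The same shape in self-contained userspace C (the flag mask and values are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define MAP_FLAGS 0x0ffu	/* bits we own, analogous to EXT4_MAP_FLAGS */

static _Atomic unsigned long state;

/* Replace only the MAP_FLAGS bits, leaving concurrent updates to the
 * other bits intact. */
static void update_flags(unsigned long flags)
{
	unsigned long old_state, new_state;

	flags &= MAP_FLAGS;
	old_state = atomic_load(&state);
	do {
		new_state = (old_state & ~MAP_FLAGS) | flags;
	} while (!atomic_compare_exchange_weak(&state, &old_state, new_state));
}

int main(void)
{
	atomic_store(&state, 0xf00u | 0x00fu);
	update_flags(0x0aau);
	/* prints 0xfaa: high bits kept, owned bits replaced */
	printf("state = 0x%lx\n", (unsigned long)atomic_load(&state));
	return 0;
}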
9666 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
9667 +index 023f6a1..e5232bb 100644
9668 +--- a/fs/fs-writeback.c
9669 ++++ b/fs/fs-writeback.c
9670 +@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
9671 + struct inode_switch_wbs_context *isw =
9672 + container_of(work, struct inode_switch_wbs_context, work);
9673 + struct inode *inode = isw->inode;
9674 ++ struct super_block *sb = inode->i_sb;
9675 + struct address_space *mapping = inode->i_mapping;
9676 + struct bdi_writeback *old_wb = inode->i_wb;
9677 + struct bdi_writeback *new_wb = isw->new_wb;
9678 +@@ -423,6 +424,7 @@ skip_switch:
9679 + wb_put(new_wb);
9680 +
9681 + iput(inode);
9682 ++ deactivate_super(sb);
9683 + kfree(isw);
9684 + }
9685 +
9686 +@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
9687 +
9688 + /* while holding I_WB_SWITCH, no one else can update the association */
9689 + spin_lock(&inode->i_lock);
9690 ++
9691 + if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
9692 +- inode_to_wb(inode) == isw->new_wb) {
9693 +- spin_unlock(&inode->i_lock);
9694 +- goto out_free;
9695 +- }
9696 ++ inode_to_wb(inode) == isw->new_wb)
9697 ++ goto out_unlock;
9698 ++
9699 ++ if (!atomic_inc_not_zero(&inode->i_sb->s_active))
9700 ++ goto out_unlock;
9701 ++
9702 + inode->i_state |= I_WB_SWITCH;
9703 + spin_unlock(&inode->i_lock);
9704 +
9705 +@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
9706 + call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
9707 + return;
9708 +
9709 ++out_unlock:
9710 ++ spin_unlock(&inode->i_lock);
9711 + out_free:
9712 + if (isw->new_wb)
9713 + wb_put(isw->new_wb);
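The writeback change above pairs atomic_inc_not_zero(&sb->s_active) with deactivate_super(): the inode-to-wb switch completes in an RCU-deferred work item, so without pinning the superblock the filesystem could be torn down while inode_switch_wbs_work_fn() still dereferences it. The inc_not_zero form makes the pin conditional, refusing to resurrect a superblock that is already going away.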
9714 +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
9715 +index 2ac99db..5a7b322 100644
9716 +--- a/fs/hostfs/hostfs_kern.c
9717 ++++ b/fs/hostfs/hostfs_kern.c
9718 +@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
9719 +
9720 + init_special_inode(inode, mode, dev);
9721 + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
9722 +- if (!err)
9723 ++ if (err)
9724 + goto out_free;
9725 +
9726 + err = read_name(inode, name);
9727 + __putname(name);
9728 + if (err)
9729 + goto out_put;
9730 +- if (err)
9731 +- goto out_put;
9732 +
9733 + d_instantiate(dentry, inode);
9734 + return 0;
9735 +diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
9736 +index ae4d5a1..bffb908 100644
9737 +--- a/fs/hpfs/namei.c
9738 ++++ b/fs/hpfs/namei.c
9739 +@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
9740 + struct inode *inode = d_inode(dentry);
9741 + dnode_secno dno;
9742 + int r;
9743 +- int rep = 0;
9744 + int err;
9745 +
9746 + hpfs_lock(dir->i_sb);
9747 + hpfs_adjust_length(name, &len);
9748 +-again:
9749 ++
9750 + err = -ENOENT;
9751 + de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
9752 + if (!de)
9753 +@@ -400,33 +399,9 @@ again:
9754 + hpfs_error(dir->i_sb, "there was error when removing dirent");
9755 + err = -EFSERROR;
9756 + break;
9757 +- case 2: /* no space for deleting, try to truncate file */
9758 +-
9759 ++ case 2: /* no space for deleting */
9760 + err = -ENOSPC;
9761 +- if (rep++)
9762 +- break;
9763 +-
9764 +- dentry_unhash(dentry);
9765 +- if (!d_unhashed(dentry)) {
9766 +- hpfs_unlock(dir->i_sb);
9767 +- return -ENOSPC;
9768 +- }
9769 +- if (generic_permission(inode, MAY_WRITE) ||
9770 +- !S_ISREG(inode->i_mode) ||
9771 +- get_write_access(inode)) {
9772 +- d_rehash(dentry);
9773 +- } else {
9774 +- struct iattr newattrs;
9775 +- /*pr_info("truncating file before delete.\n");*/
9776 +- newattrs.ia_size = 0;
9777 +- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
9778 +- err = notify_change(dentry, &newattrs, NULL);
9779 +- put_write_access(inode);
9780 +- if (!err)
9781 +- goto again;
9782 +- }
9783 +- hpfs_unlock(dir->i_sb);
9784 +- return -ENOSPC;
9785 ++ break;
9786 + default:
9787 + drop_nlink(inode);
9788 + err = 0;
9789 +diff --git a/fs/locks.c b/fs/locks.c
9790 +index 0d2b326..6333263 100644
9791 +--- a/fs/locks.c
9792 ++++ b/fs/locks.c
9793 +@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
9794 + goto out;
9795 + }
9796 +
9797 +-again:
9798 + error = flock_to_posix_lock(filp, file_lock, &flock);
9799 + if (error)
9800 + goto out;
9801 +@@ -2224,19 +2223,22 @@ again:
9802 + * Attempt to detect a close/fcntl race and recover by
9803 + * releasing the lock that was just acquired.
9804 + */
9805 +- /*
9806 +- * we need that spin_lock here - it prevents reordering between
9807 +- * update of i_flctx->flc_posix and check for it done in close().
9808 +- * rcu_read_lock() wouldn't do.
9809 +- */
9810 +- spin_lock(&current->files->file_lock);
9811 +- f = fcheck(fd);
9812 +- spin_unlock(&current->files->file_lock);
9813 +- if (!error && f != filp && flock.l_type != F_UNLCK) {
9814 +- flock.l_type = F_UNLCK;
9815 +- goto again;
9816 ++ if (!error && file_lock->fl_type != F_UNLCK) {
9817 ++ /*
9818 ++ * We need that spin_lock here - it prevents reordering between
9819 ++ * update of i_flctx->flc_posix and check for it done in
9820 ++ * close(). rcu_read_lock() wouldn't do.
9821 ++ */
9822 ++ spin_lock(&current->files->file_lock);
9823 ++ f = fcheck(fd);
9824 ++ spin_unlock(&current->files->file_lock);
9825 ++ if (f != filp) {
9826 ++ file_lock->fl_type = F_UNLCK;
9827 ++ error = do_lock_file_wait(filp, cmd, file_lock);
9828 ++ WARN_ON_ONCE(error);
9829 ++ error = -EBADF;
9830 ++ }
9831 + }
9832 +-
9833 + out:
9834 + locks_free_lock(file_lock);
9835 + return error;
9836 +@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
9837 + goto out;
9838 + }
9839 +
9840 +-again:
9841 + error = flock64_to_posix_lock(filp, file_lock, &flock);
9842 + if (error)
9843 + goto out;
9844 +@@ -2364,14 +2365,22 @@ again:
9845 + * Attempt to detect a close/fcntl race and recover by
9846 + * releasing the lock that was just acquired.
9847 + */
9848 +- spin_lock(&current->files->file_lock);
9849 +- f = fcheck(fd);
9850 +- spin_unlock(&current->files->file_lock);
9851 +- if (!error && f != filp && flock.l_type != F_UNLCK) {
9852 +- flock.l_type = F_UNLCK;
9853 +- goto again;
9854 ++ if (!error && file_lock->fl_type != F_UNLCK) {
9855 ++ /*
9856 ++ * We need that spin_lock here - it prevents reordering between
9857 ++ * update of i_flctx->flc_posix and check for it done in
9858 ++ * close(). rcu_read_lock() wouldn't do.
9859 ++ */
9860 ++ spin_lock(&current->files->file_lock);
9861 ++ f = fcheck(fd);
9862 ++ spin_unlock(&current->files->file_lock);
9863 ++ if (f != filp) {
9864 ++ file_lock->fl_type = F_UNLCK;
9865 ++ error = do_lock_file_wait(filp, cmd, file_lock);
9866 ++ WARN_ON_ONCE(error);
9867 ++ error = -EBADF;
9868 ++ }
9869 + }
9870 +-
9871 + out:
9872 + locks_free_lock(file_lock);
9873 + return error;
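Behaviorally, the fcntl_setlk()/fcntl_setlk64() rework above changes what a caller racing with close() sees: the old code looped back and retried the conversion with F_UNLCK and then returned success, while the new code releases the just-acquired lock directly through do_lock_file_wait() and returns -EBADF, matching what the caller would have seen had the descriptor been closed before the call.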
9874 +diff --git a/fs/namei.c b/fs/namei.c
9875 +index 0c3974c..d8ee4da 100644
9876 +--- a/fs/namei.c
9877 ++++ b/fs/namei.c
9878 +@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
9879 + return 0;
9880 + if (!follow)
9881 + return 0;
9882 ++ /* make sure that d_is_symlink above matches inode */
9883 ++ if (nd->flags & LOOKUP_RCU) {
9884 ++ if (read_seqcount_retry(&link->dentry->d_seq, seq))
9885 ++ return -ECHILD;
9886 ++ }
9887 + return pick_link(nd, link, inode, seq);
9888 + }
9889 +
9890 +@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
9891 + if (err < 0)
9892 + return err;
9893 +
9894 +- inode = d_backing_inode(path.dentry);
9895 + seq = 0; /* we are already out of RCU mode */
9896 + err = -ENOENT;
9897 + if (d_is_negative(path.dentry))
9898 + goto out_path_put;
9899 ++ inode = d_backing_inode(path.dentry);
9900 + }
9901 +
9902 + if (flags & WALK_PUT)
9903 +@@ -3130,12 +3135,12 @@ retry_lookup:
9904 + return error;
9905 +
9906 + BUG_ON(nd->flags & LOOKUP_RCU);
9907 +- inode = d_backing_inode(path.dentry);
9908 + seq = 0; /* out of RCU mode, so the value doesn't matter */
9909 + if (unlikely(d_is_negative(path.dentry))) {
9910 + path_to_nameidata(&path, nd);
9911 + return -ENOENT;
9912 + }
9913 ++ inode = d_backing_inode(path.dentry);
9914 + finish_lookup:
9915 + if (nd->depth)
9916 + put_link(nd);
9917 +@@ -3144,11 +3149,6 @@ finish_lookup:
9918 + if (unlikely(error))
9919 + return error;
9920 +
9921 +- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
9922 +- path_to_nameidata(&path, nd);
9923 +- return -ELOOP;
9924 +- }
9925 +-
9926 + if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
9927 + path_to_nameidata(&path, nd);
9928 + } else {
9929 +@@ -3167,6 +3167,10 @@ finish_open:
9930 + return error;
9931 + }
9932 + audit_inode(nd->name, nd->path.dentry, 0);
9933 ++ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
9934 ++ error = -ELOOP;
9935 ++ goto out;
9936 ++ }
9937 + error = -EISDIR;
9938 + if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
9939 + goto out;
9940 +@@ -3210,6 +3214,10 @@ opened:
9941 + goto exit_fput;
9942 + }
9943 + out:
9944 ++ if (unlikely(error > 0)) {
9945 ++ WARN_ON(1);
9946 ++ error = -EINVAL;
9947 ++ }
9948 + if (got_write)
9949 + mnt_drop_write(nd->path.mnt);
9950 + path_put(&save_parent);
9951 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
9952 +index f496ed7..98a4415 100644
9953 +--- a/fs/nfs/nfs4proc.c
9954 ++++ b/fs/nfs/nfs4proc.c
9955 +@@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
9956 + dentry = d_add_unique(dentry, igrab(state->inode));
9957 + if (dentry == NULL) {
9958 + dentry = opendata->dentry;
9959 +- } else if (dentry != ctx->dentry) {
9960 ++ } else {
9961 + dput(ctx->dentry);
9962 +- ctx->dentry = dget(dentry);
9963 ++ ctx->dentry = dentry;
9964 + }
9965 + nfs_set_verifier(dentry,
9966 + nfs_save_change_attribute(d_inode(opendata->dir)));
9967 +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
9968 +index 7f60472..e6795c7 100644
9969 +--- a/fs/ocfs2/aops.c
9970 ++++ b/fs/ocfs2/aops.c
9971 +@@ -956,6 +956,7 @@ clean_orphan:
9972 + tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
9973 + update_isize, end);
9974 + if (tmp_ret < 0) {
9975 ++ ocfs2_inode_unlock(inode, 1);
9976 + ret = tmp_ret;
9977 + mlog_errno(ret);
9978 + brelse(di_bh);
9979 +diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
9980 +index 0419485..0f1c6f3 100644
9981 +--- a/include/asm-generic/cputime_nsecs.h
9982 ++++ b/include/asm-generic/cputime_nsecs.h
9983 +@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
9984 + */
9985 + static inline cputime_t timespec_to_cputime(const struct timespec *val)
9986 + {
9987 +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
9988 ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
9989 + return (__force cputime_t) ret;
9990 + }
9991 + static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
9992 +@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
9993 + */
9994 + static inline cputime_t timeval_to_cputime(const struct timeval *val)
9995 + {
9996 +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
9997 ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
9998 ++ val->tv_usec * NSEC_PER_USEC;
9999 + return (__force cputime_t) ret;
10000 + }
10001 + static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
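The (u64) casts above fix a 32-bit overflow: on 32-bit architectures both tv_sec and NSEC_PER_SEC are 32-bit longs, so the multiplication is performed in 32 bits and wraps for tv_sec values as small as 3 before the result is widened. Forcing one operand to u64 makes the whole multiply 64-bit. A small userspace illustration (unsigned types are used so the wraparound itself is well-defined):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	uint32_t tv_sec = 5;	/* stands in for a 32-bit time_t */

	/* Multiply in 32 bits, widen afterwards: wraps modulo 2^32. */
	uint64_t wrong = (uint32_t)(tv_sec * (uint32_t)NSEC_PER_SEC);
	/* Widen first, as the patched code does: exact. */
	uint64_t right = (uint64_t)tv_sec * NSEC_PER_SEC;

	printf("wrapped: %llu\n", (unsigned long long)wrong);	/* 705032704 */
	printf("exact:   %llu\n", (unsigned long long)right);	/* 5000000000 */
	return 0;
}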
10002 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
10003 +index 7bfb063..461a055 100644
10004 +--- a/include/drm/drm_cache.h
10005 ++++ b/include/drm/drm_cache.h
10006 +@@ -35,4 +35,13 @@
10007 +
10008 + void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
10009 +
10010 ++static inline bool drm_arch_can_wc_memory(void)
10011 ++{
10012 ++#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
10013 ++ return false;
10014 ++#else
10015 ++ return true;
10016 ++#endif
10017 ++}
10018 ++
10019 + #endif
10020 +diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
10021 +index 5340099..f356f97 100644
10022 +--- a/include/drm/drm_dp_mst_helper.h
10023 ++++ b/include/drm/drm_dp_mst_helper.h
10024 +@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
10025 + /**
10026 + * struct drm_dp_mst_port - MST port
10027 + * @kref: reference count for this port.
10028 +- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
10029 +- * @guid: guid for DP 1.2 device on this port.
10030 + * @port_num: port number
10031 + * @input: if this port is an input port.
10032 + * @mcs: message capability status - DP 1.2 spec.
10033 +@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
10034 + struct drm_dp_mst_port {
10035 + struct kref kref;
10036 +
10037 +- /* if dpcd 1.2 device is on this port - its GUID info */
10038 +- bool guid_valid;
10039 +- u8 guid[16];
10040 +-
10041 + u8 port_num;
10042 + bool input;
10043 + bool mcs;
10044 +@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
10045 + * @tx_slots: transmission slots for this device.
10046 + * @last_seqno: last sequence number used to talk to this.
10047 + * @link_address_sent: if a link address message has been sent to this device yet.
10048 ++ * @guid: GUID for a DP 1.2 branch device. Ports under this branch can
10049 ++ * be identified by port number.
10050 + *
10051 + * This structure represents an MST branch device, there is one
10052 +- * primary branch device at the root, along with any others connected
10053 +- * to downstream ports
10054 ++ * primary branch device at the root, along with any other branches connected
10055 ++ * to downstream ports of parent branches.
10056 + */
10057 + struct drm_dp_mst_branch {
10058 + struct kref kref;
10059 +@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
10060 + struct drm_dp_sideband_msg_tx *tx_slots[2];
10061 + int last_seqno;
10062 + bool link_address_sent;
10063 ++
10064 ++ /* global unique identifier to identify branch devices */
10065 ++ u8 guid[16];
10066 + };
10067 +
10068 +
10069 +@@ -405,11 +404,9 @@ struct drm_dp_payload {
10070 + * @conn_base_id: DRM connector ID this mgr is connected to.
10071 + * @down_rep_recv: msg receiver state for down replies.
10072 + * @up_req_recv: msg receiver state for up requests.
10073 +- * @lock: protects mst state, primary, guid, dpcd.
10074 ++ * @lock: protects mst state, primary, dpcd.
10075 + * @mst_state: if this manager is enabled for an MST capable port.
10076 + * @mst_primary: pointer to the primary branch device.
10077 +- * @guid_valid: GUID valid for the primary branch device.
10078 +- * @guid: GUID for primary port.
10079 + * @dpcd: cache of DPCD for primary port.
10080 + * @pbn_div: PBN to slots divisor.
10081 + *
10082 +@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
10083 + struct drm_dp_sideband_msg_rx up_req_recv;
10084 +
10085 + /* pointer to info about the initial MST device */
10086 +- struct mutex lock; /* protects mst_state + primary + guid + dpcd */
10087 ++ struct mutex lock; /* protects mst_state + primary + dpcd */
10088 +
10089 + bool mst_state;
10090 + struct drm_dp_mst_branch *mst_primary;
10091 +- /* primary MST device GUID */
10092 +- bool guid_valid;
10093 +- u8 guid[16];
10094 ++
10095 + u8 dpcd[DP_RECEIVER_CAP_SIZE];
10096 + u8 sink_count;
10097 + int pbn_div;
10098 +@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
10099 + the mstb tx_slots and txmsg->state once they are queued */
10100 + struct mutex qlock;
10101 + struct list_head tx_msg_downq;
10102 +- struct list_head tx_msg_upq;
10103 + bool tx_down_in_progress;
10104 +- bool tx_up_in_progress;
10105 +
10106 + /* payload info + lock for it */
10107 + struct mutex payload_lock;
10108 +diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
10109 +index d639049..553210c 100644
10110 +--- a/include/drm/drm_fixed.h
10111 ++++ b/include/drm/drm_fixed.h
10112 +@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
10113 + #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
10114 + #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
10115 + #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
10116 ++#define DRM_FIXED_EPSILON 1LL
10117 ++#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
10118 +
10119 + static inline s64 drm_int2fixp(int a)
10120 + {
10121 + return ((s64)a) << DRM_FIXED_POINT;
10122 + }
10123 +
10124 +-static inline int drm_fixp2int(int64_t a)
10125 ++static inline int drm_fixp2int(s64 a)
10126 + {
10127 + return ((s64)a) >> DRM_FIXED_POINT;
10128 + }
10129 +
10130 +-static inline unsigned drm_fixp_msbset(int64_t a)
10131 ++static inline int drm_fixp2int_ceil(s64 a)
10132 ++{
10133 ++ if (a > 0)
10134 ++ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
10135 ++ else
10136 ++ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
10137 ++}
10138 ++
10139 ++static inline unsigned drm_fixp_msbset(s64 a)
10140 + {
10141 + unsigned shift, sign = (a >> 63) & 1;
10142 +
10143 +@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
10144 + return result;
10145 + }
10146 +
10147 ++static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
10148 ++{
10149 ++ s64 res;
10150 ++ bool a_neg = a < 0;
10151 ++ bool b_neg = b < 0;
10152 ++ u64 a_abs = a_neg ? -a : a;
10153 ++ u64 b_abs = b_neg ? -b : b;
10154 ++ u64 rem;
10155 ++
10156 ++ /* determine integer part */
10157 ++ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
10158 ++
10159 ++ /* determine fractional part */
10160 ++ {
10161 ++ u32 i = DRM_FIXED_POINT;
10162 ++
10163 ++ do {
10164 ++ rem <<= 1;
10165 ++ res_abs <<= 1;
10166 ++ if (rem >= b_abs) {
10167 ++ res_abs |= 1;
10168 ++ rem -= b_abs;
10169 ++ }
10170 ++ } while (--i != 0);
10171 ++ }
10172 ++
10173 ++ /* round up LSB */
10174 ++ {
10175 ++ u64 summand = (rem << 1) >= b_abs;
10176 ++
10177 ++ res_abs += summand;
10178 ++ }
10179 ++
10180 ++ res = (s64) res_abs;
10181 ++ if (a_neg ^ b_neg)
10182 ++ res = -res;
10183 ++ return res;
10184 ++}
10185 ++
10186 + static inline s64 drm_fixp_exp(s64 x)
10187 + {
10188 + s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
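A worked pass through the new helpers, taking DRM_FIXED_POINT at its upstream value of 32 (the define sits earlier in this header), so values carry 32 fractional bits: drm_fixp_from_fraction(1, 3) first divides 1 by 3 to get integer part 0 and remainder 1, then the 32-iteration loop performs binary long division on the remainder, producing the fraction bits 0x55555555 with a final remainder of 1; because 2 * 1 < 3, the last bit is not rounded up, and the result 0x55555555 encodes roughly 0.33333333325. Passing that value to drm_fixp2int_ceil() adds DRM_FIXED_ALMOST_ONE (0xFFFFFFFF) before the shift, so (0x55555555 + 0xFFFFFFFF) >> 32 = 1, the expected ceiling of 1/3.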
10189 +diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
10190 +index 71b1d6c..8dbd787 100644
10191 +--- a/include/linux/ceph/messenger.h
10192 ++++ b/include/linux/ceph/messenger.h
10193 +@@ -220,6 +220,7 @@ struct ceph_connection {
10194 + struct ceph_entity_addr actual_peer_addr;
10195 +
10196 + /* message out temps */
10197 ++ struct ceph_msg_header out_hdr;
10198 + struct ceph_msg *out_msg; /* sending message (== tail of
10199 + out_sent) */
10200 + bool out_msg_done;
10201 +@@ -229,7 +230,6 @@ struct ceph_connection {
10202 + int out_kvec_left; /* kvec's left in out_kvec */
10203 + int out_skip; /* skip this many bytes */
10204 + int out_kvec_bytes; /* total bytes left */
10205 +- bool out_kvec_is_msg; /* kvec refers to out_msg */
10206 + int out_more; /* there is more data after the kvecs */
10207 + __le64 out_temp_ack; /* for writing an ack */
10208 + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
10209 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
10210 +index 06b77f9d..8e30fae 100644
10211 +--- a/include/linux/cgroup-defs.h
10212 ++++ b/include/linux/cgroup-defs.h
10213 +@@ -133,6 +133,12 @@ struct cgroup_subsys_state {
10214 + */
10215 + u64 serial_nr;
10216 +
10217 ++ /*
10218 ++ * Incremented by online self and children. Used to guarantee that
10219 ++ * parents are not offlined before their children.
10220 ++ */
10221 ++ atomic_t online_cnt;
10222 ++
10223 + /* percpu_ref killing and RCU release */
10224 + struct rcu_head rcu_head;
10225 + struct work_struct destroy_work;
10226 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
10227 +index 85a868c..fea160e 100644
10228 +--- a/include/linux/cpuset.h
10229 ++++ b/include/linux/cpuset.h
10230 +@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
10231 + task_unlock(current);
10232 + }
10233 +
10234 ++extern void cpuset_post_attach_flush(void);
10235 ++
10236 + #else /* !CONFIG_CPUSETS */
10237 +
10238 + static inline bool cpusets_enabled(void) { return false; }
10239 +@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
10240 + return false;
10241 + }
10242 +
10243 ++static inline void cpuset_post_attach_flush(void)
10244 ++{
10245 ++}
10246 ++
10247 + #endif /* !CONFIG_CPUSETS */
10248 +
10249 + #endif /* _LINUX_CPUSET_H */
10250 +diff --git a/include/linux/efi.h b/include/linux/efi.h
10251 +index 569b5a8..47be3ad 100644
10252 +--- a/include/linux/efi.h
10253 ++++ b/include/linux/efi.h
10254 +@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
10255 + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
10256 + struct list_head *head, bool remove);
10257 +
10258 +-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
10259 ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
10260 ++ unsigned long data_size);
10261 ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
10262 ++ size_t len);
10263 +
10264 + extern struct work_struct efivar_work;
10265 + void efivar_run_worker(void);
10266 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
10267 +index 8fdc17b..ae6a711 100644
10268 +--- a/include/linux/hyperv.h
10269 ++++ b/include/linux/hyperv.h
10270 +@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
10271 + struct hv_input_signal_event event;
10272 + };
10273 +
10274 ++enum hv_signal_policy {
10275 ++ HV_SIGNAL_POLICY_DEFAULT = 0,
10276 ++ HV_SIGNAL_POLICY_EXPLICIT,
10277 ++};
10278 ++
10279 + struct vmbus_channel {
10280 + /* Unique channel id */
10281 + int id;
10282 +@@ -757,8 +762,21 @@ struct vmbus_channel {
10283 + * link up channels based on their CPU affinity.
10284 + */
10285 + struct list_head percpu_list;
10286 ++ /*
10287 ++ * Host signaling policy: The default policy will be
10288 ++ * based on the ring buffer state. We will also support
10289 ++ * a policy where the client driver can have explicit
10290 ++ * signaling control.
10291 ++ */
10292 ++ enum hv_signal_policy signal_policy;
10293 + };
10294 +
10295 ++static inline void set_channel_signal_state(struct vmbus_channel *c,
10296 ++ enum hv_signal_policy policy)
10297 ++{
10298 ++ c->signal_policy = policy;
10299 ++}
10300 ++
10301 + static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
10302 + {
10303 + c->batched_reading = state;
10304 +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
10305 +index c0e9614..5455b66 100644
10306 +--- a/include/linux/nfs_fs.h
10307 ++++ b/include/linux/nfs_fs.h
10308 +@@ -544,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
10309 +
10310 + static inline loff_t nfs_size_to_loff_t(__u64 size)
10311 + {
10312 +- if (size > (__u64) OFFSET_MAX - 1)
10313 +- return OFFSET_MAX - 1;
10314 +- return (loff_t) size;
10315 ++ return min_t(u64, size, OFFSET_MAX);
10316 + }
10317 +
10318 + static inline ino_t
10319 +diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
10320 +index 50777b5..92d112a 100644
10321 +--- a/include/linux/shmem_fs.h
10322 ++++ b/include/linux/shmem_fs.h
10323 +@@ -15,10 +15,7 @@ struct shmem_inode_info {
10324 + unsigned int seals; /* shmem seals */
10325 + unsigned long flags;
10326 + unsigned long alloced; /* data pages alloced to file */
10327 +- union {
10328 +- unsigned long swapped; /* subtotal assigned to swap */
10329 +- char *symlink; /* unswappable short symlink */
10330 +- };
10331 ++ unsigned long swapped; /* subtotal assigned to swap */
10332 + struct shared_policy policy; /* NUMA memory alloc policy */
10333 + struct list_head swaplist; /* chain of maybes on swap */
10334 + struct simple_xattrs xattrs; /* list of xattrs */
10335 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
10336 +index 9147f9f..75f136a 100644
10337 +--- a/include/linux/skbuff.h
10338 ++++ b/include/linux/skbuff.h
10339 +@@ -219,6 +219,7 @@ struct sk_buff;
10340 + #else
10341 + #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
10342 + #endif
10343 ++extern int sysctl_max_skb_frags;
10344 +
10345 + typedef struct skb_frag_struct skb_frag_t;
10346 +
10347 +diff --git a/include/linux/thermal.h b/include/linux/thermal.h
10348 +index 613c29b..e13a1ac 100644
10349 +--- a/include/linux/thermal.h
10350 ++++ b/include/linux/thermal.h
10351 +@@ -43,6 +43,9 @@
10352 + /* Default weight of a bound cooling device */
10353 + #define THERMAL_WEIGHT_DEFAULT 0
10354 +
10355 ++/* Use a value below 0 K to indicate an invalid/uninitialized temperature */
10356 ++#define THERMAL_TEMP_INVALID -274000
10357 ++
10358 + /* Unit conversion macros */
10359 + #define DECI_KELVIN_TO_CELSIUS(t) ({ \
10360 + long _t = (t); \
10361 +@@ -167,6 +170,7 @@ struct thermal_attr {
10362 + * @forced_passive: If > 0, temperature at which to switch on all ACPI
10363 + * processor cooling devices. Currently only used by the
10364 + * step-wise governor.
10365 ++ * @need_update: if set to 1, thermal_zone_device_update() needs to be invoked.
10366 + * @ops: operations this &thermal_zone_device supports
10367 + * @tzp: thermal zone parameters
10368 + * @governor: pointer to the governor for this thermal zone
10369 +@@ -194,6 +198,7 @@ struct thermal_zone_device {
10370 + int emul_temperature;
10371 + int passive;
10372 + unsigned int forced_passive;
10373 ++ atomic_t need_update;
10374 + struct thermal_zone_device_ops *ops;
10375 + struct thermal_zone_params *tzp;
10376 + struct thermal_governor *governor;
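The new THERMAL_TEMP_INVALID constant relies on the thermal core's unit
convention: temperatures are carried in millidegrees Celsius, and 0 K is
-273150 in that unit, so -274000 can never be a real reading. A minimal
sketch of the check, assuming a hypothetical helper name:

    /* Illustrative only, not part of the patch above. */
    static inline bool thermal_temp_is_valid(int temp_mdegc)
    {
            return temp_mdegc > -273150;    /* i.e. above 0 K */
    }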
10377 +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
10378 +index cbb20af..bb679b4 100644
10379 +--- a/include/linux/ucs2_string.h
10380 ++++ b/include/linux/ucs2_string.h
10381 +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
10382 + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
10383 + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
10384 +
10385 ++unsigned long ucs2_utf8size(const ucs2_char_t *src);
10386 ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
10387 ++ unsigned long maxlength);
10388 ++
10389 + #endif /* _LINUX_UCS2_STRING_H_ */
10390 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
10391 +index 2a91a05..9b4c418 100644
10392 +--- a/include/net/af_unix.h
10393 ++++ b/include/net/af_unix.h
10394 +@@ -6,8 +6,8 @@
10395 + #include <linux/mutex.h>
10396 + #include <net/sock.h>
10397 +
10398 +-void unix_inflight(struct file *fp);
10399 +-void unix_notinflight(struct file *fp);
10400 ++void unix_inflight(struct user_struct *user, struct file *fp);
10401 ++void unix_notinflight(struct user_struct *user, struct file *fp);
10402 + void unix_gc(void);
10403 + void wait_for_unix_gc(void);
10404 + struct sock *unix_get_socket(struct file *filp);
10405 +diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
10406 +index 6816f0f..30a56ab 100644
10407 +--- a/include/net/dst_metadata.h
10408 ++++ b/include/net/dst_metadata.h
10409 +@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
10410 + return dst && !(dst->flags & DST_METADATA);
10411 + }
10412 +
10413 ++static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
10414 ++ const struct sk_buff *skb_b)
10415 ++{
10416 ++ const struct metadata_dst *a, *b;
10417 ++
10418 ++ if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
10419 ++ return 0;
10420 ++
10421 ++ a = (const struct metadata_dst *) skb_dst(skb_a);
10422 ++ b = (const struct metadata_dst *) skb_dst(skb_b);
10423 ++
10424 ++ if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
10425 ++ return 1;
10426 ++
10427 ++ return memcmp(&a->u.tun_info, &b->u.tun_info,
10428 ++ sizeof(a->u.tun_info) + a->u.tun_info.options_len);
10429 ++}
10430 ++
10431 + struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
10432 + struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
10433 +
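The short-circuiting in skb_metadata_dst_cmp() above is easy to misread,
so a case-by-case sketch (an editorial reading, not text from the patch):

    /*
     * - neither skb carries a dst: both _skb_refdst are 0, their OR is
     *   0, and the skbs compare equal (return 0);
     * - exactly one has a dst: !a != !b, so return 1 (different);
     * - both have one but the tunnel options lengths differ: return 1;
     * - otherwise memcmp() the tun_info including its trailing options.
     */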
10434 +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
10435 +index 481fe1c..49dcad4 100644
10436 +--- a/include/net/inet_connection_sock.h
10437 ++++ b/include/net/inet_connection_sock.h
10438 +@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
10439 + struct sock *newsk,
10440 + const struct request_sock *req);
10441 +
10442 +-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
10443 +- struct sock *child);
10444 ++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
10445 ++ struct request_sock *req,
10446 ++ struct sock *child);
10447 + void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
10448 + unsigned long timeout);
10449 + struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
10450 +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
10451 +index 877f682..295d291 100644
10452 +--- a/include/net/ip6_route.h
10453 ++++ b/include/net/ip6_route.h
10454 +@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
10455 +
10456 + void ip6_route_input(struct sk_buff *skb);
10457 +
10458 +-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
10459 +- struct flowi6 *fl6);
10460 ++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
10461 ++ struct flowi6 *fl6, int flags);
10462 ++
10463 ++static inline struct dst_entry *ip6_route_output(struct net *net,
10464 ++ const struct sock *sk,
10465 ++ struct flowi6 *fl6)
10466 ++{
10467 ++ return ip6_route_output_flags(net, sk, fl6, 0);
10468 ++}
10469 ++
10470 + struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
10471 + int flags);
10472 +
10473 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
10474 +index 9f4df68..3f98233 100644
10475 +--- a/include/net/ip_fib.h
10476 ++++ b/include/net/ip_fib.h
10477 +@@ -61,6 +61,7 @@ struct fib_nh_exception {
10478 + struct rtable __rcu *fnhe_rth_input;
10479 + struct rtable __rcu *fnhe_rth_output;
10480 + unsigned long fnhe_stamp;
10481 ++ struct rcu_head rcu;
10482 + };
10483 +
10484 + struct fnhe_hash_bucket {
10485 +diff --git a/include/net/scm.h b/include/net/scm.h
10486 +index 262532d..59fa93c 100644
10487 +--- a/include/net/scm.h
10488 ++++ b/include/net/scm.h
10489 +@@ -21,6 +21,7 @@ struct scm_creds {
10490 + struct scm_fp_list {
10491 + short count;
10492 + short max;
10493 ++ struct user_struct *user;
10494 + struct file *fp[SCM_MAX_FD];
10495 + };
10496 +
10497 +diff --git a/include/net/tcp.h b/include/net/tcp.h
10498 +index f80e74c..414d822 100644
10499 +--- a/include/net/tcp.h
10500 ++++ b/include/net/tcp.h
10501 +@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
10502 +
10503 + void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
10504 + void tcp_v4_mtu_reduced(struct sock *sk);
10505 +-void tcp_req_err(struct sock *sk, u32 seq);
10506 ++void tcp_req_err(struct sock *sk, u32 seq, bool abort);
10507 + int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
10508 + struct sock *tcp_create_openreq_child(const struct sock *sk,
10509 + struct request_sock *req,
10510 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
10511 +index aabf0ac..689f4d2 100644
10512 +--- a/include/target/target_core_base.h
10513 ++++ b/include/target/target_core_base.h
10514 +@@ -138,6 +138,7 @@ enum se_cmd_flags_table {
10515 + SCF_COMPARE_AND_WRITE = 0x00080000,
10516 + SCF_COMPARE_AND_WRITE_POST = 0x00100000,
10517 + SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
10518 ++ SCF_ACK_KREF = 0x00400000,
10519 + };
10520 +
10521 + /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
10522 +@@ -490,6 +491,8 @@ struct se_cmd {
10523 + #define CMD_T_DEV_ACTIVE (1 << 7)
10524 + #define CMD_T_REQUEST_STOP (1 << 8)
10525 + #define CMD_T_BUSY (1 << 9)
10526 ++#define CMD_T_TAS (1 << 10)
10527 ++#define CMD_T_FABRIC_STOP (1 << 11)
10528 + spinlock_t t_state_lock;
10529 + struct kref cmd_kref;
10530 + struct completion t_transport_stop_comp;
10531 +diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
10532 +index c2e5d6c..ebd10e6 100644
10533 +--- a/include/uapi/linux/Kbuild
10534 ++++ b/include/uapi/linux/Kbuild
10535 +@@ -307,7 +307,7 @@ header-y += nfs_mount.h
10536 + header-y += nl80211.h
10537 + header-y += n_r3964.h
10538 + header-y += nubus.h
10539 +-header-y += nvme.h
10540 ++header-y += nvme_ioctl.h
10541 + header-y += nvram.h
10542 + header-y += omap3isp.h
10543 + header-y += omapfb.h
10544 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
10545 +index d1d3e8f..2e7f7ab 100644
10546 +--- a/kernel/bpf/verifier.c
10547 ++++ b/kernel/bpf/verifier.c
10548 +@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
10549 + /* adjust offset of jmps if necessary */
10550 + if (i < pos && i + insn->off + 1 > pos)
10551 + insn->off += delta;
10552 +- else if (i > pos && i + insn->off + 1 < pos)
10553 ++ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
10554 + insn->off -= delta;
10555 + }
10556 + }
10557 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
10558 +index 470f653..fb1ecfd 100644
10559 +--- a/kernel/cgroup.c
10560 ++++ b/kernel/cgroup.c
10561 +@@ -57,7 +57,7 @@
10562 + #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
10563 + #include <linux/kthread.h>
10564 + #include <linux/delay.h>
10565 +-
10566 ++#include <linux/cpuset.h>
10567 + #include <linux/atomic.h>
10568 +
10569 + /*
10570 +@@ -2764,6 +2764,7 @@ out_unlock_rcu:
10571 + out_unlock_threadgroup:
10572 + percpu_up_write(&cgroup_threadgroup_rwsem);
10573 + cgroup_kn_unlock(of->kn);
10574 ++ cpuset_post_attach_flush();
10575 + return ret ?: nbytes;
10576 + }
10577 +
10578 +@@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
10579 + INIT_LIST_HEAD(&css->sibling);
10580 + INIT_LIST_HEAD(&css->children);
10581 + css->serial_nr = css_serial_nr_next++;
10582 ++ atomic_set(&css->online_cnt, 0);
10583 +
10584 + if (cgroup_parent(cgrp)) {
10585 + css->parent = cgroup_css(cgroup_parent(cgrp), ss);
10586 +@@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css)
10587 + if (!ret) {
10588 + css->flags |= CSS_ONLINE;
10589 + rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
10590 ++
10591 ++ atomic_inc(&css->online_cnt);
10592 ++ if (css->parent)
10593 ++ atomic_inc(&css->parent->online_cnt);
10594 + }
10595 + return ret;
10596 + }
10597 +@@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work)
10598 + container_of(work, struct cgroup_subsys_state, destroy_work);
10599 +
10600 + mutex_lock(&cgroup_mutex);
10601 +- offline_css(css);
10602 +- mutex_unlock(&cgroup_mutex);
10603 +
10604 +- css_put(css);
10605 ++ do {
10606 ++ offline_css(css);
10607 ++ css_put(css);
10608 ++ /* @css can't go away while we're holding cgroup_mutex */
10609 ++ css = css->parent;
10610 ++ } while (css && atomic_dec_and_test(&css->online_cnt));
10611 ++
10612 ++ mutex_unlock(&cgroup_mutex);
10613 + }
10614 +
10615 + /* css kill confirmation processing requires process context, bounce */
10616 +@@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
10617 + struct cgroup_subsys_state *css =
10618 + container_of(ref, struct cgroup_subsys_state, refcnt);
10619 +
10620 +- INIT_WORK(&css->destroy_work, css_killed_work_fn);
10621 +- queue_work(cgroup_destroy_wq, &css->destroy_work);
10622 ++ if (atomic_dec_and_test(&css->online_cnt)) {
10623 ++ INIT_WORK(&css->destroy_work, css_killed_work_fn);
10624 ++ queue_work(cgroup_destroy_wq, &css->destroy_work);
10625 ++ }
10626 + }
10627 +
10628 + /**
10629 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
10630 +index 02a8ea5..2ade632 100644
10631 +--- a/kernel/cpuset.c
10632 ++++ b/kernel/cpuset.c
10633 +@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
10634 + static DEFINE_MUTEX(cpuset_mutex);
10635 + static DEFINE_SPINLOCK(callback_lock);
10636 +
10637 ++static struct workqueue_struct *cpuset_migrate_mm_wq;
10638 ++
10639 + /*
10640 + * CPU / memory hotplug is handled asynchronously.
10641 + */
10642 +@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
10643 + }
10644 +
10645 + /*
10646 +- * cpuset_migrate_mm
10647 +- *
10648 +- * Migrate memory region from one set of nodes to another.
10649 +- *
10650 +- * Temporarilly set tasks mems_allowed to target nodes of migration,
10651 +- * so that the migration code can allocate pages on these nodes.
10652 +- *
10653 +- * While the mm_struct we are migrating is typically from some
10654 +- * other task, the task_struct mems_allowed that we are hacking
10655 +- * is for our current task, which must allocate new pages for that
10656 +- * migrating memory region.
10657 ++ * Migrate memory region from one set of nodes to another. This is
10658 ++ * performed asynchronously, as it can be called from the process
10659 ++ * migration path while holding locks involved in process management.
10660 ++ * All mm migrations are performed in queued order and can be waited
10661 ++ * for by flushing cpuset_migrate_mm_wq.
10662 + */
10663 +
10664 ++struct cpuset_migrate_mm_work {
10665 ++ struct work_struct work;
10666 ++ struct mm_struct *mm;
10667 ++ nodemask_t from;
10668 ++ nodemask_t to;
10669 ++};
10670 ++
10671 ++static void cpuset_migrate_mm_workfn(struct work_struct *work)
10672 ++{
10673 ++ struct cpuset_migrate_mm_work *mwork =
10674 ++ container_of(work, struct cpuset_migrate_mm_work, work);
10675 ++
10676 ++ /* on a wq worker, no need to worry about %current's mems_allowed */
10677 ++ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
10678 ++ mmput(mwork->mm);
10679 ++ kfree(mwork);
10680 ++}
10681 ++
10682 + static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
10683 + const nodemask_t *to)
10684 + {
10685 +- struct task_struct *tsk = current;
10686 +-
10687 +- tsk->mems_allowed = *to;
10688 ++ struct cpuset_migrate_mm_work *mwork;
10689 +
10690 +- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
10691 ++ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
10692 ++ if (mwork) {
10693 ++ mwork->mm = mm;
10694 ++ mwork->from = *from;
10695 ++ mwork->to = *to;
10696 ++ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
10697 ++ queue_work(cpuset_migrate_mm_wq, &mwork->work);
10698 ++ } else {
10699 ++ mmput(mm);
10700 ++ }
10701 ++}
10702 +
10703 +- rcu_read_lock();
10704 +- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
10705 +- rcu_read_unlock();
10706 ++void cpuset_post_attach_flush(void)
10707 ++{
10708 ++ flush_workqueue(cpuset_migrate_mm_wq);
10709 + }
10710 +
10711 + /*
10712 +@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
10713 + mpol_rebind_mm(mm, &cs->mems_allowed);
10714 + if (migrate)
10715 + cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
10716 +- mmput(mm);
10717 ++ else
10718 ++ mmput(mm);
10719 + }
10720 + css_task_iter_end(&it);
10721 +
10722 +@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
10723 + * @old_mems_allowed is the right nodesets that we
10724 + * migrate mm from.
10725 + */
10726 +- if (is_memory_migrate(cs)) {
10727 ++ if (is_memory_migrate(cs))
10728 + cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
10729 + &cpuset_attach_nodemask_to);
10730 +- }
10731 +- mmput(mm);
10732 ++ else
10733 ++ mmput(mm);
10734 + }
10735 + }
10736 +
10737 +@@ -1710,6 +1733,7 @@ out_unlock:
10738 + mutex_unlock(&cpuset_mutex);
10739 + kernfs_unbreak_active_protection(of->kn);
10740 + css_put(&cs->css);
10741 ++ flush_workqueue(cpuset_migrate_mm_wq);
10742 + return retval ?: nbytes;
10743 + }
10744 +
10745 +@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
10746 + top_cpuset.effective_mems = node_states[N_MEMORY];
10747 +
10748 + register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
10749 ++
10750 ++ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
10751 ++ BUG_ON(!cpuset_migrate_mm_wq);
10752 + }
10753 +
10754 + /**
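The pattern here is queue-under-locks, flush-after-unlock: because
cpuset_migrate_mm_wq is an ordered workqueue, migrations run strictly in
submission order, and both write paths flush only once cpuset_mutex and
the cgroup locks have been released, so the syscall still returns only
after the migration it triggered has finished. A condensed sketch of
that shape (illustrative, using the same workqueue API):

    wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
    queue_work(wq, &mwork->work);   /* while holding heavy locks  */
    flush_workqueue(wq);            /* after the locks are dropped */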
10755 +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
10756 +index a302cf9..57bff78 100644
10757 +--- a/kernel/irq/handle.c
10758 ++++ b/kernel/irq/handle.c
10759 +@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
10760 + unsigned int flags = 0, irq = desc->irq_data.irq;
10761 + struct irqaction *action = desc->action;
10762 +
10763 +- do {
10764 ++ /* action might have become NULL since we dropped the lock */
10765 ++ while (action) {
10766 + irqreturn_t res;
10767 +
10768 + trace_irq_handler_entry(irq, action);
10769 +@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
10770 +
10771 + retval |= res;
10772 + action = action->next;
10773 +- } while (action);
10774 ++ }
10775 +
10776 + add_interrupt_randomness(irq, flags);
10777 +
10778 +diff --git a/kernel/memremap.c b/kernel/memremap.c
10779 +index 7a4e473..25ced16 100644
10780 +--- a/kernel/memremap.c
10781 ++++ b/kernel/memremap.c
10782 +@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
10783 + if (addr) {
10784 + *ptr = addr;
10785 + devres_add(dev, ptr);
10786 +- } else
10787 ++ } else {
10788 + devres_free(ptr);
10789 ++ return ERR_PTR(-ENXIO);
10790 ++ }
10791 +
10792 + return addr;
10793 + }
10794 +diff --git a/kernel/resource.c b/kernel/resource.c
10795 +index f150dbb..249b1eb 100644
10796 +--- a/kernel/resource.c
10797 ++++ b/kernel/resource.c
10798 +@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
10799 + if (!conflict)
10800 + break;
10801 + if (conflict != parent) {
10802 +- parent = conflict;
10803 +- if (!(conflict->flags & IORESOURCE_BUSY))
10804 ++ if (!(conflict->flags & IORESOURCE_BUSY)) {
10805 ++ parent = conflict;
10806 + continue;
10807 ++ }
10808 + }
10809 + if (conflict->flags & flags & IORESOURCE_MUXED) {
10810 + add_wait_queue(&muxed_resource_wait, &wait);
10811 +diff --git a/kernel/seccomp.c b/kernel/seccomp.c
10812 +index 580ac2d..15a1795 100644
10813 +--- a/kernel/seccomp.c
10814 ++++ b/kernel/seccomp.c
10815 +@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
10816 + put_seccomp_filter(thread);
10817 + smp_store_release(&thread->seccomp.filter,
10818 + caller->seccomp.filter);
10819 ++
10820 ++ /*
10821 ++ * Don't let an unprivileged task work around
10822 ++ * the no_new_privs restriction by creating
10823 ++ * a thread that sets it up, enters seccomp,
10824 ++ * then dies.
10825 ++ */
10826 ++ if (task_no_new_privs(caller))
10827 ++ task_set_no_new_privs(thread);
10828 ++
10829 + /*
10830 + * Opt the other thread into seccomp if needed.
10831 + * As threads are considered to be trust-realm
10832 + * equivalent (see ptrace_may_access), it is safe to
10833 + * allow one thread to transition the other.
10834 + */
10835 +- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
10836 +- /*
10837 +- * Don't let an unprivileged task work around
10838 +- * the no_new_privs restriction by creating
10839 +- * a thread that sets it up, enters seccomp,
10840 +- * then dies.
10841 +- */
10842 +- if (task_no_new_privs(caller))
10843 +- task_set_no_new_privs(thread);
10844 +-
10845 ++ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
10846 + seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
10847 +- }
10848 + }
10849 + }
10850 +
10851 +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
10852 +index ce033c7..9cff0ab 100644
10853 +--- a/kernel/time/posix-clock.c
10854 ++++ b/kernel/time/posix-clock.c
10855 +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
10856 + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
10857 + {
10858 + struct posix_clock *clk = get_posix_clock(fp);
10859 +- int result = 0;
10860 ++ unsigned int result = 0;
10861 +
10862 + if (!clk)
10863 +- return -ENODEV;
10864 ++ return POLLERR;
10865 +
10866 + if (clk->ops.poll)
10867 + result = clk->ops.poll(clk, fp, wait);
10868 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
10869 +index 7c7ec45..22c57e1 100644
10870 +--- a/kernel/time/tick-sched.c
10871 ++++ b/kernel/time/tick-sched.c
10872 +@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
10873 + /* Get the next period */
10874 + next = tick_init_jiffy_update();
10875 +
10876 +- hrtimer_forward_now(&ts->sched_timer, tick_period);
10877 + hrtimer_set_expires(&ts->sched_timer, next);
10878 +- tick_program_event(next, 1);
10879 ++ hrtimer_forward_now(&ts->sched_timer, tick_period);
10880 ++ tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
10881 + tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
10882 + }
10883 +
10884 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
10885 +index d563c19..99188ee 100644
10886 +--- a/kernel/time/timekeeping.c
10887 ++++ b/kernel/time/timekeeping.c
10888 +@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
10889 +
10890 + delta = timekeeping_get_delta(tkr);
10891 +
10892 +- nsec = delta * tkr->mult + tkr->xtime_nsec;
10893 +- nsec >>= tkr->shift;
10894 ++ nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
10895 +
10896 + /* If arch requires, add in get_arch_timeoffset() */
10897 + return nsec + arch_gettimeoffset();
10898 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
10899 +index 4f6ef69..debf6e8 100644
10900 +--- a/kernel/trace/trace_events.c
10901 ++++ b/kernel/trace/trace_events.c
10902 +@@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
10903 + * The ftrace subsystem is for showing formats only.
10904 + * They can not be enabled or disabled via the event files.
10905 + */
10906 +- if (call->class && call->class->reg)
10907 ++ if (call->class && call->class->reg &&
10908 ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
10909 + return file;
10910 + }
10911 +
10912 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
10913 +index c579dba..450c21f 100644
10914 +--- a/kernel/workqueue.c
10915 ++++ b/kernel/workqueue.c
10916 +@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
10917 + int node)
10918 + {
10919 + assert_rcu_or_wq_mutex_or_pool_mutex(wq);
10920 ++
10921 ++ /*
10922 ++ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
10923 ++ * delayed item is pending. The plan is to keep CPU -> NODE
10924 ++ * mapping valid and stable across CPU on/offlines. Once that
10925 ++ * happens, this workaround can be removed.
10926 ++ */
10927 ++ if (unlikely(node == NUMA_NO_NODE))
10928 ++ return wq->dfl_pwq;
10929 ++
10930 + return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
10931 + }
10932 +
10933 +@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
10934 + timer_stats_timer_set_start_info(&dwork->timer);
10935 +
10936 + dwork->wq = wq;
10937 +- /* timer isn't guaranteed to run in this cpu, record earlier */
10938 +- if (cpu == WORK_CPU_UNBOUND)
10939 +- cpu = raw_smp_processor_id();
10940 + dwork->cpu = cpu;
10941 + timer->expires = jiffies + delay;
10942 +
10943 +- add_timer_on(timer, cpu);
10944 ++ if (unlikely(cpu != WORK_CPU_UNBOUND))
10945 ++ add_timer_on(timer, cpu);
10946 ++ else
10947 ++ add_timer(timer);
10948 + }
10949 +
10950 + /**
10951 +diff --git a/lib/Kconfig b/lib/Kconfig
10952 +index f0df318..1a48744 100644
10953 +--- a/lib/Kconfig
10954 ++++ b/lib/Kconfig
10955 +@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
10956 + # compression support is select'ed if needed
10957 + #
10958 + config 842_COMPRESS
10959 ++ select CRC32
10960 + tristate
10961 +
10962 + config 842_DECOMPRESS
10963 ++ select CRC32
10964 + tristate
10965 +
10966 + config ZLIB_INFLATE
10967 +diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
10968 +index 6f500ef..f0b323a 100644
10969 +--- a/lib/ucs2_string.c
10970 ++++ b/lib/ucs2_string.c
10971 +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
10972 + }
10973 + }
10974 + EXPORT_SYMBOL(ucs2_strncmp);
10975 ++
10976 ++unsigned long
10977 ++ucs2_utf8size(const ucs2_char_t *src)
10978 ++{
10979 ++ unsigned long i;
10980 ++ unsigned long j = 0;
10981 ++
10982 ++ for (i = 0; i < ucs2_strlen(src); i++) {
10983 ++ u16 c = src[i];
10984 ++
10985 ++ if (c >= 0x800)
10986 ++ j += 3;
10987 ++ else if (c >= 0x80)
10988 ++ j += 2;
10989 ++ else
10990 ++ j += 1;
10991 ++ }
10992 ++
10993 ++ return j;
10994 ++}
10995 ++EXPORT_SYMBOL(ucs2_utf8size);
10996 ++
10997 ++/*
10998 ++ * Copy at most maxlength bytes of whole UTF-8 characters to dest from
10999 ++ * the UCS-2 string src.
11000 ++ *
11001 ++ * The return value is the number of bytes written to dest, not
11002 ++ * including the final NUL byte.
11003 ++ */
11004 ++unsigned long
11005 ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
11006 ++{
11007 ++ unsigned int i;
11008 ++ unsigned long j = 0;
11009 ++ unsigned long limit = ucs2_strnlen(src, maxlength);
11010 ++
11011 ++ for (i = 0; maxlength && i < limit; i++) {
11012 ++ u16 c = src[i];
11013 ++
11014 ++ if (c >= 0x800) {
11015 ++ if (maxlength < 3)
11016 ++ break;
11017 ++ maxlength -= 3;
11018 ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12;
11019 ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
11020 ++ dest[j++] = 0x80 | (c & 0x003f);
11021 ++ } else if (c >= 0x80) {
11022 ++ if (maxlength < 2)
11023 ++ break;
11024 ++ maxlength -= 2;
11025 ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
11026 ++ dest[j++] = 0x80 | (c & 0x03f);
11027 ++ } else {
11028 ++ maxlength -= 1;
11029 ++ dest[j++] = c & 0x7f;
11030 ++ }
11031 ++ }
11032 ++ if (maxlength)
11033 ++ dest[j] = '\0';
11034 ++ return j;
11035 ++}
11036 ++EXPORT_SYMBOL(ucs2_as_utf8);
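The two multi-byte branches in ucs2_as_utf8() are the standard UTF-8
encoding of BMP code points. Working the 2-byte branch by hand for
U+00E9 (illustrative values, not from the patch):

    u16 c = 0x00e9;                     /* 'e' with acute accent  */
    u8 b0 = 0xc0 | (c & 0x7c0) >> 6;    /* 0xc0 | 0x03 = 0xc3     */
    u8 b1 = 0x80 | (c & 0x03f);         /* 0x80 | 0x29 = 0xa9     */
    /* 0xc3 0xa9 is exactly the UTF-8 encoding of U+00E9. */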
11037 +diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
11038 +index d3116be..300117f 100644
11039 +--- a/mm/balloon_compaction.c
11040 ++++ b/mm/balloon_compaction.c
11041 +@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
11042 + bool dequeued_page;
11043 +
11044 + dequeued_page = false;
11045 ++ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
11046 + list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
11047 + /*
11048 + * Block others from accessing the 'page' while we get around
11049 +@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
11050 + continue;
11051 + }
11052 + #endif
11053 +- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
11054 + balloon_page_delete(page);
11055 + __count_vm_event(BALLOON_DEFLATE);
11056 +- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
11057 + unlock_page(page);
11058 + dequeued_page = true;
11059 + break;
11060 + }
11061 + }
11062 ++ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
11063 +
11064 + if (!dequeued_page) {
11065 + /*
11066 +diff --git a/mm/memory.c b/mm/memory.c
11067 +index c387430..b80bf47 100644
11068 +--- a/mm/memory.c
11069 ++++ b/mm/memory.c
11070 +@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
11071 + if (unlikely(pmd_none(*pmd)) &&
11072 + unlikely(__pte_alloc(mm, vma, pmd, address)))
11073 + return VM_FAULT_OOM;
11074 +- /* if an huge pmd materialized from under us just retry later */
11075 +- if (unlikely(pmd_trans_huge(*pmd)))
11076 ++ /*
11077 ++ * If a huge pmd materialized under us just retry later. Use
11078 ++ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
11079 ++ * didn't become pmd_trans_huge under us and then back to pmd_none, as
11080 ++ * a result of MADV_DONTNEED running immediately after a huge pmd fault
11081 ++ * in a different thread of this mm, in turn leading to a misleading
11082 ++ * pmd_trans_huge() retval. All we have to ensure is that it is a
11083 ++ * regular pmd that we can walk with pte_offset_map() and we can do that
11084 ++ * through an atomic read in C, which is what pmd_trans_unstable()
11085 ++ * provides.
11086 ++ */
11087 ++ if (unlikely(pmd_trans_unstable(pmd)))
11088 + return 0;
11089 + /*
11090 + * A regular pmd is established and it can't morph into a huge pmd
11091 +diff --git a/mm/migrate.c b/mm/migrate.c
11092 +index 7890d0b..6d17e0a 100644
11093 +--- a/mm/migrate.c
11094 ++++ b/mm/migrate.c
11095 +@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
11096 + (GFP_HIGHUSER_MOVABLE |
11097 + __GFP_THISNODE | __GFP_NOMEMALLOC |
11098 + __GFP_NORETRY | __GFP_NOWARN) &
11099 +- ~(__GFP_IO | __GFP_FS), 0);
11100 ++ ~__GFP_RECLAIM, 0);
11101 +
11102 + return newpage;
11103 + }
11104 +diff --git a/mm/shmem.c b/mm/shmem.c
11105 +index 2afcdbb..ea5a70c 100644
11106 +--- a/mm/shmem.c
11107 ++++ b/mm/shmem.c
11108 +@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
11109 + list_del_init(&info->swaplist);
11110 + mutex_unlock(&shmem_swaplist_mutex);
11111 + }
11112 +- } else
11113 +- kfree(info->symlink);
11114 ++ }
11115 +
11116 + simple_xattrs_free(&info->xattrs);
11117 + WARN_ON(inode->i_blocks);
11118 +@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
11119 + info = SHMEM_I(inode);
11120 + inode->i_size = len-1;
11121 + if (len <= SHORT_SYMLINK_LEN) {
11122 +- info->symlink = kmemdup(symname, len, GFP_KERNEL);
11123 +- if (!info->symlink) {
11124 ++ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
11125 ++ if (!inode->i_link) {
11126 + iput(inode);
11127 + return -ENOMEM;
11128 + }
11129 + inode->i_op = &shmem_short_symlink_operations;
11130 +- inode->i_link = info->symlink;
11131 + } else {
11132 + error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
11133 + if (error) {
11134 +@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
11135 + static void shmem_destroy_callback(struct rcu_head *head)
11136 + {
11137 + struct inode *inode = container_of(head, struct inode, i_rcu);
11138 ++ kfree(inode->i_link);
11139 + kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
11140 + }
11141 +
11142 +diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
11143 +index 9e9cca3..795ddd8 100644
11144 +--- a/net/bluetooth/6lowpan.c
11145 ++++ b/net/bluetooth/6lowpan.c
11146 +@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11147 +
11148 + /* check that it's our buffer */
11149 + if (lowpan_is_ipv6(*skb_network_header(skb))) {
11150 ++ /* Pull off the 1-byte 6lowpan header. */
11151 ++ skb_pull(skb, 1);
11152 ++
11153 + /* Copy the packet so that the IPv6 header is
11154 + * properly aligned.
11155 + */
11156 +@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11157 +
11158 + local_skb->protocol = htons(ETH_P_IPV6);
11159 + local_skb->pkt_type = PACKET_HOST;
11160 ++ local_skb->dev = dev;
11161 +
11162 + skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
11163 +
11164 +@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11165 + if (!local_skb)
11166 + goto drop;
11167 +
11168 ++ local_skb->dev = dev;
11169 ++
11170 + ret = iphc_decompress(local_skb, dev, chan);
11171 + if (ret < 0) {
11172 + kfree_skb(local_skb);
11173 +@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11174 +
11175 + local_skb->protocol = htons(ETH_P_IPV6);
11176 + local_skb->pkt_type = PACKET_HOST;
11177 +- local_skb->dev = dev;
11178 +
11179 + if (give_skb_to_upper(local_skb, dev)
11180 + != NET_RX_SUCCESS) {
11181 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
11182 +index 85b82f7..24e9410 100644
11183 +--- a/net/bluetooth/hci_conn.c
11184 ++++ b/net/bluetooth/hci_conn.c
11185 +@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
11186 + if (hci_update_random_address(req, false, &own_addr_type))
11187 + return;
11188 +
11189 ++ /* Set window to be the same value as the interval to enable
11190 ++ * continuous scanning.
11191 ++ */
11192 + cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
11193 +- cp.scan_window = cpu_to_le16(hdev->le_scan_window);
11194 ++ cp.scan_window = cp.scan_interval;
11195 ++
11196 + bacpy(&cp.peer_addr, &conn->dst);
11197 + cp.peer_addr_type = conn->dst_type;
11198 + cp.own_address_type = own_addr_type;
11199 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
11200 +index 981f8a2..02778c5 100644
11201 +--- a/net/bluetooth/hci_request.c
11202 ++++ b/net/bluetooth/hci_request.c
11203 +@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
11204 + * command to remove it from the controller.
11205 + */
11206 + list_for_each_entry(b, &hdev->le_white_list, list) {
11207 +- struct hci_cp_le_del_from_white_list cp;
11208 ++ /* If the device is neither in pend_le_conns nor
11209 ++ * pend_le_reports then remove it from the whitelist.
11210 ++ */
11211 ++ if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
11212 ++ &b->bdaddr, b->bdaddr_type) &&
11213 ++ !hci_pend_le_action_lookup(&hdev->pend_le_reports,
11214 ++ &b->bdaddr, b->bdaddr_type)) {
11215 ++ struct hci_cp_le_del_from_white_list cp;
11216 ++
11217 ++ cp.bdaddr_type = b->bdaddr_type;
11218 ++ bacpy(&cp.bdaddr, &b->bdaddr);
11219 +
11220 +- if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
11221 +- &b->bdaddr, b->bdaddr_type) ||
11222 +- hci_pend_le_action_lookup(&hdev->pend_le_reports,
11223 +- &b->bdaddr, b->bdaddr_type)) {
11224 +- white_list_entries++;
11225 ++ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
11226 ++ sizeof(cp), &cp);
11227 + continue;
11228 + }
11229 +
11230 +- cp.bdaddr_type = b->bdaddr_type;
11231 +- bacpy(&cp.bdaddr, &b->bdaddr);
11232 ++ if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
11233 ++ /* White list can not be used with RPAs */
11234 ++ return 0x00;
11235 ++ }
11236 +
11237 +- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
11238 +- sizeof(cp), &cp);
11239 ++ white_list_entries++;
11240 + }
11241 +
11242 + /* Since all no longer valid white list entries have been
11243 +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
11244 +index ffed8a1..4b175df 100644
11245 +--- a/net/bluetooth/smp.c
11246 ++++ b/net/bluetooth/smp.c
11247 +@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
11248 + hcon->dst_type = smp->remote_irk->addr_type;
11249 + queue_work(hdev->workqueue, &conn->id_addr_update_work);
11250 + }
11251 +-
11252 +- /* When receiving an indentity resolving key for
11253 +- * a remote device that does not use a resolvable
11254 +- * private address, just remove the key so that
11255 +- * it is possible to use the controller white
11256 +- * list for scanning.
11257 +- *
11258 +- * Userspace will have been told to not store
11259 +- * this key at this point. So it is safe to
11260 +- * just remove it.
11261 +- */
11262 +- if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
11263 +- list_del_rcu(&smp->remote_irk->list);
11264 +- kfree_rcu(smp->remote_irk, rcu);
11265 +- smp->remote_irk = NULL;
11266 +- }
11267 + }
11268 +
11269 + if (smp->csrk) {
11270 +diff --git a/net/bridge/br.c b/net/bridge/br.c
11271 +index a1abe49..3addc05 100644
11272 +--- a/net/bridge/br.c
11273 ++++ b/net/bridge/br.c
11274 +@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
11275 + .notifier_call = br_device_event
11276 + };
11277 +
11278 ++/* called with RTNL */
11279 + static int br_switchdev_event(struct notifier_block *unused,
11280 + unsigned long event, void *ptr)
11281 + {
11282 +@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
11283 + struct switchdev_notifier_fdb_info *fdb_info;
11284 + int err = NOTIFY_DONE;
11285 +
11286 +- rtnl_lock();
11287 + p = br_port_get_rtnl(dev);
11288 + if (!p)
11289 + goto out;
11290 +@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
11291 + }
11292 +
11293 + out:
11294 +- rtnl_unlock();
11295 + return err;
11296 + }
11297 +
11298 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
11299 +index 9981039..63ae5dd 100644
11300 +--- a/net/ceph/messenger.c
11301 ++++ b/net/ceph/messenger.c
11302 +@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
11303 + }
11304 + con->in_seq = 0;
11305 + con->in_seq_acked = 0;
11306 ++
11307 ++ con->out_skip = 0;
11308 + }
11309 +
11310 + /*
11311 +@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
11312 +
11313 + static void con_out_kvec_reset(struct ceph_connection *con)
11314 + {
11315 ++ BUG_ON(con->out_skip);
11316 ++
11317 + con->out_kvec_left = 0;
11318 + con->out_kvec_bytes = 0;
11319 + con->out_kvec_cur = &con->out_kvec[0];
11320 +@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
11321 + static void con_out_kvec_add(struct ceph_connection *con,
11322 + size_t size, void *data)
11323 + {
11324 +- int index;
11325 ++ int index = con->out_kvec_left;
11326 +
11327 +- index = con->out_kvec_left;
11328 ++ BUG_ON(con->out_skip);
11329 + BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
11330 +
11331 + con->out_kvec[index].iov_len = size;
11332 +@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
11333 + con->out_kvec_bytes += size;
11334 + }
11335 +
11336 ++/*
11337 ++ * Chop off a kvec from the end. Return residual number of bytes for
11338 ++ * that kvec, i.e. how many bytes would have been written if the kvec
11339 ++ * hadn't been nuked.
11340 ++ */
11341 ++static int con_out_kvec_skip(struct ceph_connection *con)
11342 ++{
11343 ++ int off = con->out_kvec_cur - con->out_kvec;
11344 ++ int skip = 0;
11345 ++
11346 ++ if (con->out_kvec_bytes > 0) {
11347 ++ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
11348 ++ BUG_ON(con->out_kvec_bytes < skip);
11349 ++ BUG_ON(!con->out_kvec_left);
11350 ++ con->out_kvec_bytes -= skip;
11351 ++ con->out_kvec_left--;
11352 ++ }
11353 ++
11354 ++ return skip;
11355 ++}
11356 ++
11357 + #ifdef CONFIG_BLOCK
11358 +
11359 + /*
11360 +@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
11361 + return new_piece;
11362 + }
11363 +
11364 ++static size_t sizeof_footer(struct ceph_connection *con)
11365 ++{
11366 ++ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
11367 ++ sizeof(struct ceph_msg_footer) :
11368 ++ sizeof(struct ceph_msg_footer_old);
11369 ++}
11370 ++
11371 + static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
11372 + {
11373 + BUG_ON(!msg);
11374 +@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
11375 + m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
11376 +
11377 + dout("prepare_write_message_footer %p\n", con);
11378 +- con->out_kvec_is_msg = true;
11379 + con->out_kvec[v].iov_base = &m->footer;
11380 + if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
11381 + if (con->ops->sign_message)
11382 +@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
11383 + u32 crc;
11384 +
11385 + con_out_kvec_reset(con);
11386 +- con->out_kvec_is_msg = true;
11387 + con->out_msg_done = false;
11388 +
11389 + /* Sneak an ack in there first? If we can get it into the same
11390 +@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
11391 +
11392 + /* tag + hdr + front + middle */
11393 + con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
11394 +- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
11395 ++ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
11396 + con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
11397 +
11398 + if (m->middle)
11399 + con_out_kvec_add(con, m->middle->vec.iov_len,
11400 + m->middle->vec.iov_base);
11401 +
11402 +- /* fill in crc (except data pages), footer */
11403 ++ /* fill in hdr crc and finalize hdr */
11404 + crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
11405 + con->out_msg->hdr.crc = cpu_to_le32(crc);
11406 +- con->out_msg->footer.flags = 0;
11407 ++ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
11408 +
11409 ++ /* fill in front and middle crc, footer */
11410 + crc = crc32c(0, m->front.iov_base, m->front.iov_len);
11411 + con->out_msg->footer.front_crc = cpu_to_le32(crc);
11412 + if (m->middle) {
11413 +@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
11414 + dout("%s front_crc %u middle_crc %u\n", __func__,
11415 + le32_to_cpu(con->out_msg->footer.front_crc),
11416 + le32_to_cpu(con->out_msg->footer.middle_crc));
11417 ++ con->out_msg->footer.flags = 0;
11418 +
11419 + /* is there a data payload? */
11420 + con->out_msg->footer.data_crc = 0;
11421 +@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
11422 + }
11423 + }
11424 + con->out_kvec_left = 0;
11425 +- con->out_kvec_is_msg = false;
11426 + ret = 1;
11427 + out:
11428 + dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
11429 +@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
11430 + {
11431 + int ret;
11432 +
11433 ++ dout("%s %p %d left\n", __func__, con, con->out_skip);
11434 + while (con->out_skip > 0) {
11435 + size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
11436 +
11437 +@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
11438 + ceph_pr_addr(&con->peer_addr.in_addr),
11439 + seq, con->in_seq + 1);
11440 + con->in_base_pos = -front_len - middle_len - data_len -
11441 +- sizeof(m->footer);
11442 ++ sizeof_footer(con);
11443 + con->in_tag = CEPH_MSGR_TAG_READY;
11444 +- return 0;
11445 ++ return 1;
11446 + } else if ((s64)seq - (s64)con->in_seq > 1) {
11447 + pr_err("read_partial_message bad seq %lld expected %lld\n",
11448 + seq, con->in_seq + 1);
11449 +@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
11450 + /* skip this message */
11451 + dout("alloc_msg said skip message\n");
11452 + con->in_base_pos = -front_len - middle_len - data_len -
11453 +- sizeof(m->footer);
11454 ++ sizeof_footer(con);
11455 + con->in_tag = CEPH_MSGR_TAG_READY;
11456 + con->in_seq++;
11457 +- return 0;
11458 ++ return 1;
11459 + }
11460 +
11461 + BUG_ON(!con->in_msg);
11462 +@@ -2506,13 +2538,13 @@ more:
11463 +
11464 + more_kvec:
11465 + /* kvec data queued? */
11466 +- if (con->out_skip) {
11467 +- ret = write_partial_skip(con);
11468 ++ if (con->out_kvec_left) {
11469 ++ ret = write_partial_kvec(con);
11470 + if (ret <= 0)
11471 + goto out;
11472 + }
11473 +- if (con->out_kvec_left) {
11474 +- ret = write_partial_kvec(con);
11475 ++ if (con->out_skip) {
11476 ++ ret = write_partial_skip(con);
11477 + if (ret <= 0)
11478 + goto out;
11479 + }
11480 +@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
11481 + ceph_msg_put(msg);
11482 + }
11483 + if (con->out_msg == msg) {
11484 +- dout("%s %p msg %p - was sending\n", __func__, con, msg);
11485 +- con->out_msg = NULL;
11486 +- if (con->out_kvec_is_msg) {
11487 +- con->out_skip = con->out_kvec_bytes;
11488 +- con->out_kvec_is_msg = false;
11489 ++ BUG_ON(con->out_skip);
11490 ++ /* footer */
11491 ++ if (con->out_msg_done) {
11492 ++ con->out_skip += con_out_kvec_skip(con);
11493 ++ } else {
11494 ++ BUG_ON(!msg->data_length);
11495 ++ if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
11496 ++ con->out_skip += sizeof(msg->footer);
11497 ++ else
11498 ++ con->out_skip += sizeof(msg->old_footer);
11499 + }
11500 ++ /* data, middle, front */
11501 ++ if (msg->data_length)
11502 ++ con->out_skip += msg->cursor.total_resid;
11503 ++ if (msg->middle)
11504 ++ con->out_skip += con_out_kvec_skip(con);
11505 ++ con->out_skip += con_out_kvec_skip(con);
11506 ++
11507 ++ dout("%s %p msg %p - was sending, will write %d skip %d\n",
11508 ++ __func__, con, msg, con->out_kvec_bytes, con->out_skip);
11509 + msg->hdr.seq = 0;
11510 +-
11511 ++ con->out_msg = NULL;
11512 + ceph_msg_put(msg);
11513 + }
11514 ++
11515 + mutex_unlock(&con->mutex);
11516 + }
11517 +
11518 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
11519 +index f8f2359..a28e47f 100644
11520 +--- a/net/ceph/osd_client.c
11521 ++++ b/net/ceph/osd_client.c
11522 +@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
11523 + mutex_lock(&osdc->request_mutex);
11524 + req = __lookup_request(osdc, tid);
11525 + if (!req) {
11526 +- pr_warn("%s osd%d tid %llu unknown, skipping\n",
11527 +- __func__, osd->o_osd, tid);
11528 ++ dout("%s osd%d tid %llu unknown, skipping\n", __func__,
11529 ++ osd->o_osd, tid);
11530 + m = NULL;
11531 + *skip = 1;
11532 + goto out;
11533 +diff --git a/net/core/dev.c b/net/core/dev.c
11534 +index 7f00f24..9efbdb3 100644
11535 +--- a/net/core/dev.c
11536 ++++ b/net/core/dev.c
11537 +@@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
11538 +
11539 + diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
11540 + diffs |= p->vlan_tci ^ skb->vlan_tci;
11541 ++ diffs |= skb_metadata_dst_cmp(p, skb);
11542 + if (maclen == ETH_HLEN)
11543 + diffs |= compare_ether_header(skb_mac_header(p),
11544 + skb_mac_header(skb));
11545 +@@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
11546 + break;
11547 +
11548 + case GRO_MERGED_FREE:
11549 +- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
11550 ++ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
11551 ++ skb_dst_drop(skb);
11552 + kmem_cache_free(skbuff_head_cache, skb);
11553 +- else
11554 ++ } else {
11555 + __kfree_skb(skb);
11556 ++ }
11557 + break;
11558 +
11559 + case GRO_HELD:
11560 +@@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
11561 + dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
11562 + setup(dev);
11563 +
11564 +- if (!dev->tx_queue_len)
11565 ++ if (!dev->tx_queue_len) {
11566 + dev->priv_flags |= IFF_NO_QUEUE;
11567 ++ dev->tx_queue_len = 1;
11568 ++ }
11569 +
11570 + dev->num_tx_queues = txqs;
11571 + dev->real_num_tx_queues = txqs;
11572 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
11573 +index d79699c..12e7003 100644
11574 +--- a/net/core/flow_dissector.c
11575 ++++ b/net/core/flow_dissector.c
11576 +@@ -208,7 +208,6 @@ ip:
11577 + case htons(ETH_P_IPV6): {
11578 + const struct ipv6hdr *iph;
11579 + struct ipv6hdr _iph;
11580 +- __be32 flow_label;
11581 +
11582 + ipv6:
11583 + iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
11584 +@@ -230,8 +229,12 @@ ipv6:
11585 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
11586 + }
11587 +
11588 +- flow_label = ip6_flowlabel(iph);
11589 +- if (flow_label) {
11590 ++ if ((dissector_uses_key(flow_dissector,
11591 ++ FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
11592 ++ (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
11593 ++ ip6_flowlabel(iph)) {
11594 ++ __be32 flow_label = ip6_flowlabel(iph);
11595 ++
11596 + if (dissector_uses_key(flow_dissector,
11597 + FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
11598 + key_tags = skb_flow_dissector_target(flow_dissector,
11599 +@@ -396,6 +399,13 @@ ip_proto_again:
11600 + goto out_bad;
11601 + proto = eth->h_proto;
11602 + nhoff += sizeof(*eth);
11603 ++
11604 ++ /* Cap headers that we access via pointers at the
11605 ++ * end of the Ethernet header as our maximum alignment
11606 ++ * at that point is only 2 bytes.
11607 ++ */
11608 ++ if (NET_IP_ALIGN)
11609 ++ hlen = nhoff;
11610 + }
11611 +
11612 + key_control->flags |= FLOW_DIS_ENCAPSULATION;
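Why capping hlen fixes the alignment problem:

    /*
     * Reading note (editorial, not from the patch): the hlen = nhoff
     * cap works because __skb_header_pointer() only returns a direct
     * pointer into the packet when the requested range fits inside
     * hlen.  Once hlen is shrunk to the end of the inner Ethernet
     * header, every later header fetch falls back to copying into the
     * caller's aligned local buffer, so nothing past that 2-byte
     * aligned boundary is dereferenced in place.
     */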
11613 +diff --git a/net/core/scm.c b/net/core/scm.c
11614 +index 8a1741b..dce0acb 100644
11615 +--- a/net/core/scm.c
11616 ++++ b/net/core/scm.c
11617 +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
11618 + *fplp = fpl;
11619 + fpl->count = 0;
11620 + fpl->max = SCM_MAX_FD;
11621 ++ fpl->user = NULL;
11622 + }
11623 + fpp = &fpl->fp[fpl->count];
11624 +
11625 +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
11626 + *fpp++ = file;
11627 + fpl->count++;
11628 + }
11629 ++
11630 ++ if (!fpl->user)
11631 ++ fpl->user = get_uid(current_user());
11632 ++
11633 + return num;
11634 + }
11635 +
11636 +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
11637 + scm->fp = NULL;
11638 + for (i=fpl->count-1; i>=0; i--)
11639 + fput(fpl->fp[i]);
11640 ++ free_uid(fpl->user);
11641 + kfree(fpl);
11642 + }
11643 + }
11644 +@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
11645 + for (i = 0; i < fpl->count; i++)
11646 + get_file(fpl->fp[i]);
11647 + new_fpl->max = new_fpl->count;
11648 ++ new_fpl->user = get_uid(fpl->user);
11649 + }
11650 + return new_fpl;
11651 + }
11652 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
11653 +index b2df375..5bf88f5 100644
11654 +--- a/net/core/skbuff.c
11655 ++++ b/net/core/skbuff.c
11656 +@@ -79,6 +79,8 @@
11657 +
11658 + struct kmem_cache *skbuff_head_cache __read_mostly;
11659 + static struct kmem_cache *skbuff_fclone_cache __read_mostly;
11660 ++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
11661 ++EXPORT_SYMBOL(sysctl_max_skb_frags);
11662 +
11663 + /**
11664 + * skb_panic - private function for out-of-line support
11665 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
11666 +index 95b6139..a6beb7b 100644
11667 +--- a/net/core/sysctl_net_core.c
11668 ++++ b/net/core/sysctl_net_core.c
11669 +@@ -26,6 +26,7 @@ static int zero = 0;
11670 + static int one = 1;
11671 + static int min_sndbuf = SOCK_MIN_SNDBUF;
11672 + static int min_rcvbuf = SOCK_MIN_RCVBUF;
11673 ++static int max_skb_frags = MAX_SKB_FRAGS;
11674 +
11675 + static int net_msg_warn; /* Unused, but still a sysctl */
11676 +
11677 +@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
11678 + .mode = 0644,
11679 + .proc_handler = proc_dointvec
11680 + },
11681 ++ {
11682 ++ .procname = "max_skb_frags",
11683 ++ .data = &sysctl_max_skb_frags,
11684 ++ .maxlen = sizeof(int),
11685 ++ .mode = 0644,
11686 ++ .proc_handler = proc_dointvec_minmax,
11687 ++ .extra1 = &one,
11688 ++ .extra2 = &max_skb_frags,
11689 ++ },
11690 + { }
11691 + };
11692 +
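With the table entry above, the limit becomes tunable at runtime, and
proc_dointvec_minmax() bounds writes to [1, MAX_SKB_FRAGS] through
.extra1/.extra2. A usage sketch (illustrative):

    /*
     *   sysctl -w net.core.max_skb_frags=8
     * or equivalently
     *   echo 8 > /proc/sys/net/core/max_skb_frags
     * Out-of-range values make proc_dointvec_minmax() return -EINVAL.
     */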
11693 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
11694 +index 5684e14..902d606 100644
11695 +--- a/net/dccp/ipv4.c
11696 ++++ b/net/dccp/ipv4.c
11697 +@@ -824,26 +824,26 @@ lookup:
11698 +
11699 + if (sk->sk_state == DCCP_NEW_SYN_RECV) {
11700 + struct request_sock *req = inet_reqsk(sk);
11701 +- struct sock *nsk = NULL;
11702 ++ struct sock *nsk;
11703 +
11704 + sk = req->rsk_listener;
11705 +- if (likely(sk->sk_state == DCCP_LISTEN)) {
11706 +- nsk = dccp_check_req(sk, skb, req);
11707 +- } else {
11708 ++ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
11709 + inet_csk_reqsk_queue_drop_and_put(sk, req);
11710 + goto lookup;
11711 + }
11712 ++ sock_hold(sk);
11713 ++ nsk = dccp_check_req(sk, skb, req);
11714 + if (!nsk) {
11715 + reqsk_put(req);
11716 +- goto discard_it;
11717 ++ goto discard_and_relse;
11718 + }
11719 + if (nsk == sk) {
11720 +- sock_hold(sk);
11721 + reqsk_put(req);
11722 + } else if (dccp_child_process(sk, nsk, skb)) {
11723 + dccp_v4_ctl_send_reset(sk, skb);
11724 +- goto discard_it;
11725 ++ goto discard_and_relse;
11726 + } else {
11727 ++ sock_put(sk);
11728 + return 0;
11729 + }
11730 + }
11731 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
11732 +index 9c6d050..b8608b7 100644
11733 +--- a/net/dccp/ipv6.c
11734 ++++ b/net/dccp/ipv6.c
11735 +@@ -691,26 +691,26 @@ lookup:
11736 +
11737 + if (sk->sk_state == DCCP_NEW_SYN_RECV) {
11738 + struct request_sock *req = inet_reqsk(sk);
11739 +- struct sock *nsk = NULL;
11740 ++ struct sock *nsk;
11741 +
11742 + sk = req->rsk_listener;
11743 +- if (likely(sk->sk_state == DCCP_LISTEN)) {
11744 +- nsk = dccp_check_req(sk, skb, req);
11745 +- } else {
11746 ++ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
11747 + inet_csk_reqsk_queue_drop_and_put(sk, req);
11748 + goto lookup;
11749 + }
11750 ++ sock_hold(sk);
11751 ++ nsk = dccp_check_req(sk, skb, req);
11752 + if (!nsk) {
11753 + reqsk_put(req);
11754 +- goto discard_it;
11755 ++ goto discard_and_relse;
11756 + }
11757 + if (nsk == sk) {
11758 +- sock_hold(sk);
11759 + reqsk_put(req);
11760 + } else if (dccp_child_process(sk, nsk, skb)) {
11761 + dccp_v6_ctl_send_reset(sk, skb);
11762 +- goto discard_it;
11763 ++ goto discard_and_relse;
11764 + } else {
11765 ++ sock_put(sk);
11766 + return 0;
11767 + }
11768 + }
11769 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
11770 +index cebd9d3..f6303b1 100644
11771 +--- a/net/ipv4/devinet.c
11772 ++++ b/net/ipv4/devinet.c
11773 +@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
11774 + if (err < 0)
11775 + goto errout;
11776 +
11777 +- err = EINVAL;
11778 ++ err = -EINVAL;
11779 + if (!tb[NETCONFA_IFINDEX])
11780 + goto errout;
11781 +
11782 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
11783 +index 46b9c88..6414891 100644
11784 +--- a/net/ipv4/inet_connection_sock.c
11785 ++++ b/net/ipv4/inet_connection_sock.c
11786 +@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
11787 + reqsk_put(req);
11788 + }
11789 +
11790 +-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
11791 +- struct sock *child)
11792 ++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
11793 ++ struct request_sock *req,
11794 ++ struct sock *child)
11795 + {
11796 + struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
11797 +
11798 + spin_lock(&queue->rskq_lock);
11799 + if (unlikely(sk->sk_state != TCP_LISTEN)) {
11800 + inet_child_forget(sk, req, child);
11801 ++ child = NULL;
11802 + } else {
11803 + req->sk = child;
11804 + req->dl_next = NULL;
11805 +@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
11806 + sk_acceptq_added(sk);
11807 + }
11808 + spin_unlock(&queue->rskq_lock);
11809 ++ return child;
11810 + }
11811 + EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
11812 +
11813 +@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
11814 + if (own_req) {
11815 + inet_csk_reqsk_queue_drop(sk, req);
11816 + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
11817 +- inet_csk_reqsk_queue_add(sk, req, child);
11818 +- /* Warning: caller must not call reqsk_put(req);
11819 +- * child stole last reference on it.
11820 +- */
11821 +- return child;
11822 ++ if (inet_csk_reqsk_queue_add(sk, req, child))
11823 ++ return child;
11824 + }
11825 + /* Too bad, another child took ownership of the request, undo. */
11826 + bh_unlock_sock(child);
11827 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
11828 +index 1fe55ae..b8a0607d 100644
11829 +--- a/net/ipv4/ip_fragment.c
11830 ++++ b/net/ipv4/ip_fragment.c
11831 +@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
11832 + struct ipq *qp;
11833 +
11834 + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
11835 ++ skb_orphan(skb);
11836 +
11837 + /* Lookup (or create) queue header */
11838 + qp = ip_find(net, ip_hdr(skb), user, vif);
11839 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
11840 +index 5f73a7c..a501242 100644
11841 +--- a/net/ipv4/ip_sockglue.c
11842 ++++ b/net/ipv4/ip_sockglue.c
11843 +@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
11844 + switch (cmsg->cmsg_type) {
11845 + case IP_RETOPTS:
11846 + err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
11847 ++
11848 ++ /* Our caller is responsible for freeing ipc->opt */
11849 + err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
11850 + err < 40 ? err : 40);
11851 + if (err)
11852 +diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
11853 +index 6fb869f6..a04dee5 100644
11854 +--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
11855 ++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
11856 +@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
11857 + {
11858 + int err;
11859 +
11860 +- skb_orphan(skb);
11861 +-
11862 + local_bh_disable();
11863 + err = ip_defrag(net, skb, user);
11864 + local_bh_enable();
11865 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
11866 +index e89094a..aa67e0e 100644
11867 +--- a/net/ipv4/ping.c
11868 ++++ b/net/ipv4/ping.c
11869 +@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11870 +
11871 + if (msg->msg_controllen) {
11872 + err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
11873 +- if (err)
11874 ++ if (unlikely(err)) {
11875 ++ kfree(ipc.opt);
11876 + return err;
11877 ++ }
11878 + if (ipc.opt)
11879 + free = 1;
11880 + }
11881 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
11882 +index bc35f18..7113bae 100644
11883 +--- a/net/ipv4/raw.c
11884 ++++ b/net/ipv4/raw.c
11885 +@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11886 +
11887 + if (msg->msg_controllen) {
11888 + err = ip_cmsg_send(net, msg, &ipc, false);
11889 +- if (err)
11890 ++ if (unlikely(err)) {
11891 ++ kfree(ipc.opt);
11892 + goto out;
11893 ++ }
11894 + if (ipc.opt)
11895 + free = 1;
11896 + }
11897 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11898 +index 85f184e..02c6229 100644
11899 +--- a/net/ipv4/route.c
11900 ++++ b/net/ipv4/route.c
11901 +@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
11902 + static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
11903 + static int ip_rt_min_advmss __read_mostly = 256;
11904 +
11905 ++static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
11906 + /*
11907 + * Interface to generic destination cache.
11908 + */
11909 +@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
11910 + struct fib_nh *nh = &FIB_RES_NH(res);
11911 +
11912 + update_or_create_fnhe(nh, fl4->daddr, new_gw,
11913 +- 0, 0);
11914 ++ 0, jiffies + ip_rt_gc_timeout);
11915 + }
11916 + if (kill_route)
11917 + rt->dst.obsolete = DST_OBSOLETE_KILL;
11918 +@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
11919 + #endif
11920 + }
11921 +
11922 ++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
11923 ++{
11924 ++ struct fnhe_hash_bucket *hash;
11925 ++ struct fib_nh_exception *fnhe, __rcu **fnhe_p;
11926 ++ u32 hval = fnhe_hashfun(daddr);
11927 ++
11928 ++ spin_lock_bh(&fnhe_lock);
11929 ++
11930 ++ hash = rcu_dereference_protected(nh->nh_exceptions,
11931 ++ lockdep_is_held(&fnhe_lock));
11932 ++ hash += hval;
11933 ++
11934 ++ fnhe_p = &hash->chain;
11935 ++ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
11936 ++ while (fnhe) {
11937 ++ if (fnhe->fnhe_daddr == daddr) {
11938 ++ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
11939 ++ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
11940 ++ fnhe_flush_routes(fnhe);
11941 ++ kfree_rcu(fnhe, rcu);
11942 ++ break;
11943 ++ }
11944 ++ fnhe_p = &fnhe->fnhe_next;
11945 ++ fnhe = rcu_dereference_protected(fnhe->fnhe_next,
11946 ++ lockdep_is_held(&fnhe_lock));
11947 ++ }
11948 ++
11949 ++ spin_unlock_bh(&fnhe_lock);
11950 ++}
11951 ++
11952 + /* called in rcu_read_lock() section */
11953 + static int __mkroute_input(struct sk_buff *skb,
11954 + const struct fib_result *res,
11955 +@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
11956 +
11957 + fnhe = find_exception(&FIB_RES_NH(*res), daddr);
11958 + if (do_cache) {
11959 +- if (fnhe)
11960 ++ if (fnhe) {
11961 + rth = rcu_dereference(fnhe->fnhe_rth_input);
11962 +- else
11963 +- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
11964 ++ if (rth && rth->dst.expires &&
11965 ++ time_after(jiffies, rth->dst.expires)) {
11966 ++ ip_del_fnhe(&FIB_RES_NH(*res), daddr);
11967 ++ fnhe = NULL;
11968 ++ } else {
11969 ++ goto rt_cache;
11970 ++ }
11971 ++ }
11972 ++
11973 ++ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
11974 +
11975 ++rt_cache:
11976 + if (rt_cache_valid(rth)) {
11977 + skb_dst_set_noref(skb, &rth->dst);
11978 + goto out;
11979 +@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
11980 + struct fib_nh *nh = &FIB_RES_NH(*res);
11981 +
11982 + fnhe = find_exception(nh, fl4->daddr);
11983 +- if (fnhe)
11984 ++ if (fnhe) {
11985 + prth = &fnhe->fnhe_rth_output;
11986 +- else {
11987 +- if (unlikely(fl4->flowi4_flags &
11988 +- FLOWI_FLAG_KNOWN_NH &&
11989 +- !(nh->nh_gw &&
11990 +- nh->nh_scope == RT_SCOPE_LINK))) {
11991 +- do_cache = false;
11992 +- goto add;
11993 ++ rth = rcu_dereference(*prth);
11994 ++ if (rth && rth->dst.expires &&
11995 ++ time_after(jiffies, rth->dst.expires)) {
11996 ++ ip_del_fnhe(nh, fl4->daddr);
11997 ++ fnhe = NULL;
11998 ++ } else {
11999 ++ goto rt_cache;
12000 + }
12001 +- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
12002 + }
12003 ++
12004 ++ if (unlikely(fl4->flowi4_flags &
12005 ++ FLOWI_FLAG_KNOWN_NH &&
12006 ++ !(nh->nh_gw &&
12007 ++ nh->nh_scope == RT_SCOPE_LINK))) {
12008 ++ do_cache = false;
12009 ++ goto add;
12010 ++ }
12011 ++ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
12012 + rth = rcu_dereference(*prth);
12013 ++
12014 ++rt_cache:
12015 + if (rt_cache_valid(rth)) {
12016 + dst_hold(&rth->dst);
12017 + return rth;
12018 +@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
12019 + }
12020 +
12021 + #ifdef CONFIG_SYSCTL
12022 +-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
12023 + static int ip_rt_gc_interval __read_mostly = 60 * HZ;
12024 + static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
12025 + static int ip_rt_gc_elasticity __read_mostly = 8;
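The route.c hunks above make two coordinated changes: ip_rt_gc_timeout moves out of the CONFIG_SYSCTL-only block so __ip_do_redirect() can stamp redirect exceptions with a real expiry (jiffies + ip_rt_gc_timeout), and the new ip_del_fnhe() helper lets the input and output lookup paths drop an exception whose cached dst has expired instead of serving it forever. The expiry test relies on the kernel's wraparound-safe time_after(); a minimal runnable sketch of that comparison (plain C, not kernel code):

#include <stdio.h>

/* Same form as the kernel's time_after(a, b): true when a is after b,
 * correct even when the tick counter has wrapped around. */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long jiffies = 1000, expires = 900;

        if (time_after(jiffies, expires))
                puts("exception expired, ip_del_fnhe()-style removal");

        /* still correct across wraparound */
        jiffies = 10;
        expires = (unsigned long)-20;   /* stamped just before the wrap */
        printf("wrapped case: %d\n", time_after(jiffies, expires)); /* 1 */
        return 0;
}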
12026 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
12027 +index c82cca1..036a76b 100644
12028 +--- a/net/ipv4/tcp.c
12029 ++++ b/net/ipv4/tcp.c
12030 +@@ -279,6 +279,7 @@
12031 +
12032 + #include <asm/uaccess.h>
12033 + #include <asm/ioctls.h>
12034 ++#include <asm/unaligned.h>
12035 + #include <net/busy_poll.h>
12036 +
12037 + int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
12038 +@@ -938,7 +939,7 @@ new_segment:
12039 +
12040 + i = skb_shinfo(skb)->nr_frags;
12041 + can_coalesce = skb_can_coalesce(skb, i, page, offset);
12042 +- if (!can_coalesce && i >= MAX_SKB_FRAGS) {
12043 ++ if (!can_coalesce && i >= sysctl_max_skb_frags) {
12044 + tcp_mark_push(tp, skb);
12045 + goto new_segment;
12046 + }
12047 +@@ -1211,7 +1212,7 @@ new_segment:
12048 +
12049 + if (!skb_can_coalesce(skb, i, pfrag->page,
12050 + pfrag->offset)) {
12051 +- if (i == MAX_SKB_FRAGS || !sg) {
12052 ++ if (i == sysctl_max_skb_frags || !sg) {
12053 + tcp_mark_push(tp, skb);
12054 + goto new_segment;
12055 + }
12056 +@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
12057 + const struct inet_connection_sock *icsk = inet_csk(sk);
12058 + u32 now = tcp_time_stamp;
12059 + unsigned int start;
12060 ++ u64 rate64;
12061 + u32 rate;
12062 +
12063 + memset(info, 0, sizeof(*info));
12064 +@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
12065 + info->tcpi_total_retrans = tp->total_retrans;
12066 +
12067 + rate = READ_ONCE(sk->sk_pacing_rate);
12068 +- info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
12069 ++ rate64 = rate != ~0U ? rate : ~0ULL;
12070 ++ put_unaligned(rate64, &info->tcpi_pacing_rate);
12071 +
12072 + rate = READ_ONCE(sk->sk_max_pacing_rate);
12073 +- info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
12074 ++ rate64 = rate != ~0U ? rate : ~0ULL;
12075 ++ put_unaligned(rate64, &info->tcpi_max_pacing_rate);
12076 +
12077 + do {
12078 + start = u64_stats_fetch_begin_irq(&tp->syncp);
12079 +- info->tcpi_bytes_acked = tp->bytes_acked;
12080 +- info->tcpi_bytes_received = tp->bytes_received;
12081 ++ put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
12082 ++ put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
12083 + } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
12084 + info->tcpi_segs_out = tp->segs_out;
12085 + info->tcpi_segs_in = tp->segs_in;
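The tcp_get_info() hunk replaces direct assignments to the 64-bit tcpi_pacing_rate, tcpi_max_pacing_rate, tcpi_bytes_acked and tcpi_bytes_received fields with put_unaligned(), since struct tcp_info offers no alignment guarantee for those members and a plain 64-bit store can fault on strict-alignment architectures. A userspace analogue of what put_unaligned() provides (memcpy carries no alignment assumption):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the kernel's put_unaligned(): memcpy lets the compiler
 * emit byte-safe stores instead of a potentially trapping aligned one. */
static void put_unaligned_u64(uint64_t val, void *p)
{
        memcpy(p, &val, sizeof(val));
}

int main(void)
{
        unsigned char buf[16] = { 0 };
        uint64_t rate64 = ~0ULL;

        put_unaligned_u64(rate64, buf + 1);     /* deliberately misaligned */
        printf("first byte after offset: 0x%02x\n", buf[1]);    /* 0xff */
        return 0;
}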
12086 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
12087 +index d8841a2..8c7e631 100644
12088 +--- a/net/ipv4/tcp_ipv4.c
12089 ++++ b/net/ipv4/tcp_ipv4.c
12090 +@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
12091 +
12092 +
12093 + /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
12094 +-void tcp_req_err(struct sock *sk, u32 seq)
12095 ++void tcp_req_err(struct sock *sk, u32 seq, bool abort)
12096 + {
12097 + struct request_sock *req = inet_reqsk(sk);
12098 + struct net *net = sock_net(sk);
12099 +@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
12100 +
12101 + if (seq != tcp_rsk(req)->snt_isn) {
12102 + NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
12103 +- } else {
12104 ++ } else if (abort) {
12105 + /*
12106 + * Still in SYN_RECV, just remove it silently.
12107 + * There is no good way to pass the error to the newly
12108 +@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
12109 + }
12110 + seq = ntohl(th->seq);
12111 + if (sk->sk_state == TCP_NEW_SYN_RECV)
12112 +- return tcp_req_err(sk, seq);
12113 ++ return tcp_req_err(sk, seq,
12114 ++ type == ICMP_PARAMETERPROB ||
12115 ++ type == ICMP_TIME_EXCEEDED ||
12116 ++ (type == ICMP_DEST_UNREACH &&
12117 ++ (code == ICMP_NET_UNREACH ||
12118 ++ code == ICMP_HOST_UNREACH)));
12119 +
12120 + bh_lock_sock(sk);
12121 + /* If too many ICMPs get dropped on busy
12122 +@@ -705,7 +710,8 @@ release_sk1:
12123 + outside socket context is ugly, certainly. What can I do?
12124 + */
12125 +
12126 +-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
12127 ++static void tcp_v4_send_ack(struct net *net,
12128 ++ struct sk_buff *skb, u32 seq, u32 ack,
12129 + u32 win, u32 tsval, u32 tsecr, int oif,
12130 + struct tcp_md5sig_key *key,
12131 + int reply_flags, u8 tos)
12132 +@@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
12133 + ];
12134 + } rep;
12135 + struct ip_reply_arg arg;
12136 +- struct net *net = dev_net(skb_dst(skb)->dev);
12137 +
12138 + memset(&rep.th, 0, sizeof(struct tcphdr));
12139 + memset(&arg, 0, sizeof(arg));
12140 +@@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
12141 + struct inet_timewait_sock *tw = inet_twsk(sk);
12142 + struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
12143 +
12144 +- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
12145 ++ tcp_v4_send_ack(sock_net(sk), skb,
12146 ++ tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
12147 + tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
12148 + tcp_time_stamp + tcptw->tw_ts_offset,
12149 + tcptw->tw_ts_recent,
12150 +@@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
12151 + /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
12152 + * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
12153 + */
12154 +- tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
12155 +- tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
12156 ++ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
12157 ++ tcp_sk(sk)->snd_nxt;
12158 ++
12159 ++ tcp_v4_send_ack(sock_net(sk), skb, seq,
12160 + tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
12161 + tcp_time_stamp,
12162 + req->ts_recent,
12163 +@@ -1586,28 +1594,30 @@ process:
12164 +
12165 + if (sk->sk_state == TCP_NEW_SYN_RECV) {
12166 + struct request_sock *req = inet_reqsk(sk);
12167 +- struct sock *nsk = NULL;
12168 ++ struct sock *nsk;
12169 +
12170 + sk = req->rsk_listener;
12171 +- if (tcp_v4_inbound_md5_hash(sk, skb))
12172 +- goto discard_and_relse;
12173 +- if (likely(sk->sk_state == TCP_LISTEN)) {
12174 +- nsk = tcp_check_req(sk, skb, req, false);
12175 +- } else {
12176 ++ if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
12177 ++ reqsk_put(req);
12178 ++ goto discard_it;
12179 ++ }
12180 ++ if (unlikely(sk->sk_state != TCP_LISTEN)) {
12181 + inet_csk_reqsk_queue_drop_and_put(sk, req);
12182 + goto lookup;
12183 + }
12184 ++ sock_hold(sk);
12185 ++ nsk = tcp_check_req(sk, skb, req, false);
12186 + if (!nsk) {
12187 + reqsk_put(req);
12188 +- goto discard_it;
12189 ++ goto discard_and_relse;
12190 + }
12191 + if (nsk == sk) {
12192 +- sock_hold(sk);
12193 + reqsk_put(req);
12194 + } else if (tcp_child_process(sk, nsk, skb)) {
12195 + tcp_v4_send_reset(nsk, skb);
12196 +- goto discard_it;
12197 ++ goto discard_and_relse;
12198 + } else {
12199 ++ sock_put(sk);
12200 + return 0;
12201 + }
12202 + }
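Two related fixes land in this tcp_ipv4.c hunk: tcp_req_err() gains an abort flag so only genuinely fatal ICMP errors (parameter problem, time exceeded, or net/host unreachable) tear down a TCP_NEW_SYN_RECV request socket, and the receive path takes its sock_hold() before tcp_check_req() so the error paths can uniformly sock_put(). The abort condition reads naturally as a small classifier; a runnable restatement using the userspace ICMP constants:

#include <stdio.h>
#include <stdbool.h>
#include <netinet/ip_icmp.h>

/* Mirrors the condition the hunk passes to tcp_req_err(): only these
 * ICMP errors may abort a SYN_RECV request socket. */
static bool icmp_aborts_req(int type, int code)
{
        return type == ICMP_PARAMETERPROB ||
               type == ICMP_TIME_EXCEEDED ||
               (type == ICMP_DEST_UNREACH &&
                (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}

int main(void)
{
        printf("%d\n", icmp_aborts_req(ICMP_DEST_UNREACH, ICMP_PORT_UNREACH)); /* 0 */
        printf("%d\n", icmp_aborts_req(ICMP_TIME_EXCEEDED, 0));                /* 1 */
        return 0;
}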
12203 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
12204 +index c438908..7f8ab46 100644
12205 +--- a/net/ipv4/udp.c
12206 ++++ b/net/ipv4/udp.c
12207 +@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
12208 + if (msg->msg_controllen) {
12209 + err = ip_cmsg_send(sock_net(sk), msg, &ipc,
12210 + sk->sk_family == AF_INET6);
12211 +- if (err)
12212 ++ if (unlikely(err)) {
12213 ++ kfree(ipc.opt);
12214 + return err;
12215 ++ }
12216 + if (ipc.opt)
12217 + free = 1;
12218 + connected = 0;
12219 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
12220 +index 1f21087..e8d3da0 100644
12221 +--- a/net/ipv6/addrconf.c
12222 ++++ b/net/ipv6/addrconf.c
12223 +@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
12224 + if (err < 0)
12225 + goto errout;
12226 +
12227 +- err = EINVAL;
12228 ++ err = -EINVAL;
12229 + if (!tb[NETCONFA_IFINDEX])
12230 + goto errout;
12231 +
12232 +@@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12233 + {
12234 + struct inet6_dev *idev = ifp->idev;
12235 + struct net_device *dev = idev->dev;
12236 ++ bool notify = false;
12237 +
12238 + addrconf_join_solict(dev, &ifp->addr);
12239 +
12240 +@@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12241 + /* Because optimistic nodes can use this address,
12242 + * notify listeners. If DAD fails, RTM_DELADDR is sent.
12243 + */
12244 +- ipv6_ifa_notify(RTM_NEWADDR, ifp);
12245 ++ notify = true;
12246 + }
12247 + }
12248 +
12249 +@@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12250 + out:
12251 + spin_unlock(&ifp->lock);
12252 + read_unlock_bh(&idev->lock);
12253 ++ if (notify)
12254 ++ ipv6_ifa_notify(RTM_NEWADDR, ifp);
12255 + }
12256 +
12257 + static void addrconf_dad_start(struct inet6_ifaddr *ifp)
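The addrconf hunk defers ipv6_ifa_notify() until ifp->lock and idev->lock have been released: the notifier can itself take locks, so calling it under the address locks risks deadlock. This is the classic decide-under-the-lock, act-after-it pattern; a minimal sketch with a pthread mutex standing in for the kernel locks:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void notify(void)
{
        puts("RTM_NEWADDR-style notification");  /* may take other locks */
}

int main(void)
{
        bool do_notify = false;

        pthread_mutex_lock(&lock);
        /* ... decide under the lock, as addrconf_dad_begin() now does ... */
        do_notify = true;
        pthread_mutex_unlock(&lock);

        if (do_notify)          /* call out only after dropping the lock */
                notify();
        return 0;
}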
12258 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
12259 +index 517c55b..4281621 100644
12260 +--- a/net/ipv6/datagram.c
12261 ++++ b/net/ipv6/datagram.c
12262 +@@ -162,6 +162,9 @@ ipv4_connected:
12263 + fl6.fl6_dport = inet->inet_dport;
12264 + fl6.fl6_sport = inet->inet_sport;
12265 +
12266 ++ if (!fl6.flowi6_oif)
12267 ++ fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
12268 ++
12269 + if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
12270 + fl6.flowi6_oif = np->mcast_oif;
12271 +
12272 +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
12273 +index 1f9ebe3..dc2db4f 100644
12274 +--- a/net/ipv6/ip6_flowlabel.c
12275 ++++ b/net/ipv6/ip6_flowlabel.c
12276 +@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
12277 + }
12278 + spin_lock_bh(&ip6_sk_fl_lock);
12279 + for (sflp = &np->ipv6_fl_list;
12280 +- (sfl = rcu_dereference(*sflp)) != NULL;
12281 ++ (sfl = rcu_dereference_protected(*sflp,
12282 ++ lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
12283 + sflp = &sfl->next) {
12284 + if (sfl->fl->label == freq.flr_label) {
12285 + if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
12286 + np->flow_label &= ~IPV6_FLOWLABEL_MASK;
12287 +- *sflp = rcu_dereference(sfl->next);
12288 ++ *sflp = sfl->next;
12289 + spin_unlock_bh(&ip6_sk_fl_lock);
12290 + fl_release(sfl->fl);
12291 + kfree_rcu(sfl, rcu);
12292 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
12293 +index 6473889..31144c4 100644
12294 +--- a/net/ipv6/ip6_output.c
12295 ++++ b/net/ipv6/ip6_output.c
12296 +@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
12297 + struct rt6_info *rt;
12298 + #endif
12299 + int err;
12300 ++ int flags = 0;
12301 +
12302 + /* The correct way to handle this would be to do
12303 + * ip6_route_get_saddr, and then ip6_route_output; however,
12304 +@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
12305 + dst_release(*dst);
12306 + *dst = NULL;
12307 + }
12308 ++
12309 ++ if (fl6->flowi6_oif)
12310 ++ flags |= RT6_LOOKUP_F_IFACE;
12311 + }
12312 +
12313 + if (!*dst)
12314 +- *dst = ip6_route_output(net, sk, fl6);
12315 ++ *dst = ip6_route_output_flags(net, sk, fl6, flags);
12316 +
12317 + err = (*dst)->error;
12318 + if (err)
12319 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
12320 +index 826e6aa..3f164d3 100644
12321 +--- a/net/ipv6/route.c
12322 ++++ b/net/ipv6/route.c
12323 +@@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
12324 + return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
12325 + }
12326 +
12327 +-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
12328 +- struct flowi6 *fl6)
12329 ++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
12330 ++ struct flowi6 *fl6, int flags)
12331 + {
12332 + struct dst_entry *dst;
12333 +- int flags = 0;
12334 + bool any_src;
12335 +
12336 + dst = l3mdev_rt6_dst_by_oif(net, fl6);
12337 +@@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
12338 +
12339 + return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
12340 + }
12341 +-EXPORT_SYMBOL(ip6_route_output);
12342 ++EXPORT_SYMBOL_GPL(ip6_route_output_flags);
12343 +
12344 + struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
12345 + {
12346 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
12347 +index bd100b4..b8d4056 100644
12348 +--- a/net/ipv6/tcp_ipv6.c
12349 ++++ b/net/ipv6/tcp_ipv6.c
12350 +@@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12351 + struct tcp_sock *tp;
12352 + __u32 seq, snd_una;
12353 + struct sock *sk;
12354 ++ bool fatal;
12355 + int err;
12356 +
12357 + sk = __inet6_lookup_established(net, &tcp_hashinfo,
12358 +@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12359 + return;
12360 + }
12361 + seq = ntohl(th->seq);
12362 ++ fatal = icmpv6_err_convert(type, code, &err);
12363 + if (sk->sk_state == TCP_NEW_SYN_RECV)
12364 +- return tcp_req_err(sk, seq);
12365 ++ return tcp_req_err(sk, seq, fatal);
12366 +
12367 + bh_lock_sock(sk);
12368 + if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
12369 +@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12370 + goto out;
12371 + }
12372 +
12373 +- icmpv6_err_convert(type, code, &err);
12374 +
12375 + /* Might be for a request_sock */
12376 + switch (sk->sk_state) {
12377 +@@ -1387,7 +1388,7 @@ process:
12378 +
12379 + if (sk->sk_state == TCP_NEW_SYN_RECV) {
12380 + struct request_sock *req = inet_reqsk(sk);
12381 +- struct sock *nsk = NULL;
12382 ++ struct sock *nsk;
12383 +
12384 + sk = req->rsk_listener;
12385 + tcp_v6_fill_cb(skb, hdr, th);
12386 +@@ -1395,24 +1396,24 @@ process:
12387 + reqsk_put(req);
12388 + goto discard_it;
12389 + }
12390 +- if (likely(sk->sk_state == TCP_LISTEN)) {
12391 +- nsk = tcp_check_req(sk, skb, req, false);
12392 +- } else {
12393 ++ if (unlikely(sk->sk_state != TCP_LISTEN)) {
12394 + inet_csk_reqsk_queue_drop_and_put(sk, req);
12395 + goto lookup;
12396 + }
12397 ++ sock_hold(sk);
12398 ++ nsk = tcp_check_req(sk, skb, req, false);
12399 + if (!nsk) {
12400 + reqsk_put(req);
12401 +- goto discard_it;
12402 ++ goto discard_and_relse;
12403 + }
12404 + if (nsk == sk) {
12405 +- sock_hold(sk);
12406 + reqsk_put(req);
12407 + tcp_v6_restore_cb(skb);
12408 + } else if (tcp_child_process(sk, nsk, skb)) {
12409 + tcp_v6_send_reset(nsk, skb);
12410 +- goto discard_it;
12411 ++ goto discard_and_relse;
12412 + } else {
12413 ++ sock_put(sk);
12414 + return 0;
12415 + }
12416 + }
12417 +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
12418 +index 435608c..20ab7b2 100644
12419 +--- a/net/iucv/af_iucv.c
12420 ++++ b/net/iucv/af_iucv.c
12421 +@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
12422 + if (!addr || addr->sa_family != AF_IUCV)
12423 + return -EINVAL;
12424 +
12425 ++ if (addr_len < sizeof(struct sockaddr_iucv))
12426 ++ return -EINVAL;
12427 ++
12428 + lock_sock(sk);
12429 + if (sk->sk_state != IUCV_OPEN) {
12430 + err = -EBADFD;
12431 +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
12432 +index f93c5be..2caaa84 100644
12433 +--- a/net/l2tp/l2tp_netlink.c
12434 ++++ b/net/l2tp/l2tp_netlink.c
12435 +@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
12436 + ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
12437 + NLM_F_ACK, tunnel, cmd);
12438 +
12439 +- if (ret >= 0)
12440 +- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12441 ++ if (ret >= 0) {
12442 ++ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12443 ++ /* We don't care if no one is listening */
12444 ++ if (ret == -ESRCH)
12445 ++ ret = 0;
12446 ++ return ret;
12447 ++ }
12448 +
12449 + nlmsg_free(msg);
12450 +
12451 +@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
12452 + ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
12453 + NLM_F_ACK, session, cmd);
12454 +
12455 +- if (ret >= 0)
12456 +- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12457 ++ if (ret >= 0) {
12458 ++ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12459 ++ /* We don't care if no one is listening */
12460 ++ if (ret == -ESRCH)
12461 ++ ret = 0;
12462 ++ return ret;
12463 ++ }
12464 +
12465 + nlmsg_free(msg);
12466 +
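Both l2tp notify paths now treat -ESRCH from genlmsg_multicast_allns() as success, since it merely means nobody subscribed to the multicast group. The pattern in isolation (multicast_send() is an invented stand-in):

#include <stdio.h>
#include <errno.h>

/* Invented stand-in for genlmsg_multicast_allns(): returns -ESRCH
 * when no one is subscribed to the multicast group. */
static int multicast_send(int have_listeners)
{
        return have_listeners ? 0 : -ESRCH;
}

int main(void)
{
        int ret = multicast_send(0);

        if (ret == -ESRCH)      /* we don't care if no one is listening */
                ret = 0;
        printf("ret=%d\n", ret);        /* 0 */
        return 0;
}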
12467 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
12468 +index 337bb5d..6a12b0f 100644
12469 +--- a/net/mac80211/ibss.c
12470 ++++ b/net/mac80211/ibss.c
12471 +@@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
12472 + if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
12473 + continue;
12474 + sdata->u.ibss.last_scan_completed = jiffies;
12475 +- ieee80211_queue_work(&local->hw, &sdata->work);
12476 + }
12477 + mutex_unlock(&local->iflist_mtx);
12478 + }
12479 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
12480 +index fa28500..6f85b6a 100644
12481 +--- a/net/mac80211/mesh.c
12482 ++++ b/net/mac80211/mesh.c
12483 +@@ -1370,17 +1370,6 @@ out:
12484 + sdata_unlock(sdata);
12485 + }
12486 +
12487 +-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
12488 +-{
12489 +- struct ieee80211_sub_if_data *sdata;
12490 +-
12491 +- rcu_read_lock();
12492 +- list_for_each_entry_rcu(sdata, &local->interfaces, list)
12493 +- if (ieee80211_vif_is_mesh(&sdata->vif) &&
12494 +- ieee80211_sdata_running(sdata))
12495 +- ieee80211_queue_work(&local->hw, &sdata->work);
12496 +- rcu_read_unlock();
12497 +-}
12498 +
12499 + void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
12500 + {
12501 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
12502 +index a159634..4a8019f 100644
12503 +--- a/net/mac80211/mesh.h
12504 ++++ b/net/mac80211/mesh.h
12505 +@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
12506 + return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
12507 + }
12508 +
12509 +-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
12510 +-
12511 + void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
12512 + void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
12513 + void ieee80211s_stop(void);
12514 + #else
12515 +-static inline void
12516 +-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
12517 + static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
12518 + { return false; }
12519 + static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
12520 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
12521 +index 3aa0434..83097c3 100644
12522 +--- a/net/mac80211/mlme.c
12523 ++++ b/net/mac80211/mlme.c
12524 +@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
12525 + if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
12526 + ieee80211_queue_work(&sdata->local->hw,
12527 + &sdata->u.mgd.monitor_work);
12528 +- /* and do all the other regular work too */
12529 +- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
12530 + }
12531 + }
12532 +
12533 +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
12534 +index a413e52..acbe182 100644
12535 +--- a/net/mac80211/scan.c
12536 ++++ b/net/mac80211/scan.c
12537 +@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
12538 + bool was_scanning = local->scanning;
12539 + struct cfg80211_scan_request *scan_req;
12540 + struct ieee80211_sub_if_data *scan_sdata;
12541 ++ struct ieee80211_sub_if_data *sdata;
12542 +
12543 + lockdep_assert_held(&local->mtx);
12544 +
12545 +@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
12546 +
12547 + ieee80211_mlme_notify_scan_completed(local);
12548 + ieee80211_ibss_notify_scan_completed(local);
12549 +- ieee80211_mesh_notify_scan_completed(local);
12550 ++
12551 ++ /* Requeue all the work that might have been ignored while
12552 ++ * the scan was in progress; if there was none, this will
12553 ++ * just be a no-op for the particular interface.
12554 ++ */
12555 ++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
12556 ++ if (ieee80211_sdata_running(sdata))
12557 ++ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
12558 ++ }
12559 ++
12560 + if (was_scanning)
12561 + ieee80211_start_next_roc(local);
12562 + }
12563 +diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
12564 +index 1605691..d933cb8 100644
12565 +--- a/net/openvswitch/vport-vxlan.c
12566 ++++ b/net/openvswitch/vport-vxlan.c
12567 +@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
12568 + int err;
12569 + struct vxlan_config conf = {
12570 + .no_share = true,
12571 +- .flags = VXLAN_F_COLLECT_METADATA,
12572 ++ .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
12573 + };
12574 +
12575 + if (!options) {
12576 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
12577 +index f53bf3b6..cf5b69a 100644
12578 +--- a/net/rfkill/core.c
12579 ++++ b/net/rfkill/core.c
12580 +@@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
12581 + return res;
12582 + }
12583 +
12584 +-static bool rfkill_readable(struct rfkill_data *data)
12585 +-{
12586 +- bool r;
12587 +-
12588 +- mutex_lock(&data->mtx);
12589 +- r = !list_empty(&data->events);
12590 +- mutex_unlock(&data->mtx);
12591 +-
12592 +- return r;
12593 +-}
12594 +-
12595 + static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
12596 + size_t count, loff_t *pos)
12597 + {
12598 +@@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
12599 + goto out;
12600 + }
12601 + mutex_unlock(&data->mtx);
12602 ++ /* since we re-check and it just compares pointers,
12603 ++ * using !list_empty() without locking isn't a problem
12604 ++ */
12605 + ret = wait_event_interruptible(data->read_wait,
12606 +- rfkill_readable(data));
12607 ++ !list_empty(&data->events));
12608 + mutex_lock(&data->mtx);
12609 +
12610 + if (ret)
12611 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
12612 +index b5c2cf2..af1acf0 100644
12613 +--- a/net/sched/sch_api.c
12614 ++++ b/net/sched/sch_api.c
12615 +@@ -1852,6 +1852,7 @@ reset:
12616 + }
12617 +
12618 + tp = old_tp;
12619 ++ protocol = tc_skb_protocol(skb);
12620 + goto reclassify;
12621 + #endif
12622 + }
12623 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
12624 +index 3d9ea9a..8b4ff31 100644
12625 +--- a/net/sctp/protocol.c
12626 ++++ b/net/sctp/protocol.c
12627 +@@ -60,6 +60,8 @@
12628 + #include <net/inet_common.h>
12629 + #include <net/inet_ecn.h>
12630 +
12631 ++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
12632 ++
12633 + /* Global data structures. */
12634 + struct sctp_globals sctp_globals __read_mostly;
12635 +
12636 +@@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
12637 + unsigned long limit;
12638 + int max_share;
12639 + int order;
12640 ++ int num_entries;
12641 ++ int max_entry_order;
12642 +
12643 + sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
12644 +
12645 +@@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
12646 +
12647 + /* Size and allocate the association hash table.
12648 + * The methodology is similar to that of the tcp hash tables.
12649 ++ * Though not identical: start by getting a goal size
12650 + */
12651 + if (totalram_pages >= (128 * 1024))
12652 + goal = totalram_pages >> (22 - PAGE_SHIFT);
12653 + else
12654 + goal = totalram_pages >> (24 - PAGE_SHIFT);
12655 +
12656 +- for (order = 0; (1UL << order) < goal; order++)
12657 +- ;
12658 ++ /* Then compute the page order for said goal */
12659 ++ order = get_order(goal);
12660 ++
12661 ++ /* Now compute the required page order for the maximum sized table we
12662 ++ * want to create
12663 ++ */
12664 ++ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
12665 ++ sizeof(struct sctp_bind_hashbucket));
12666 ++
12667 ++ /* Limit the page order by that maximum hash table size */
12668 ++ order = min(order, max_entry_order);
12669 +
12670 + do {
12671 + sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
12672 +@@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
12673 + INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
12674 + }
12675 +
12676 +- /* Allocate and initialize the SCTP port hash table. */
12677 ++ /* Allocate and initialize the SCTP port hash table.
12678 ++ * Note that order is initalized to start at the max sized
12679 ++ * table we want to support. If we can't get that many pages
12680 ++ * reduce the order and try again
12681 ++ */
12682 + do {
12683 +- sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
12684 +- sizeof(struct sctp_bind_hashbucket);
12685 +- if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
12686 +- continue;
12687 + sctp_port_hashtable = (struct sctp_bind_hashbucket *)
12688 + __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
12689 + } while (!sctp_port_hashtable && --order > 0);
12690 ++
12691 + if (!sctp_port_hashtable) {
12692 + pr_err("Failed bind hash alloc\n");
12693 + status = -ENOMEM;
12694 + goto err_bhash_alloc;
12695 + }
12696 ++
12697 ++ /* Now compute the number of entries that will fit in the
12698 ++ * port hash space we allocated
12699 ++ */
12700 ++ num_entries = (1UL << order) * PAGE_SIZE /
12701 ++ sizeof(struct sctp_bind_hashbucket);
12702 ++
12703 ++ /* And finish by rounding it down to the nearest power of two;
12704 ++ * this wastes some memory, of course, but it's needed because
12705 ++ * the hash function operates on the assumption that the
12706 ++ * number of entries is a power of two
12707 ++ */
12708 ++ sctp_port_hashsize = rounddown_pow_of_two(num_entries);
12709 ++
12710 + for (i = 0; i < sctp_port_hashsize; i++) {
12711 + spin_lock_init(&sctp_port_hashtable[i].lock);
12712 + INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
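The rewritten sctp_init() allocation starts at the page order of a MAX_SCTP_PORT_HASH_ENTRIES table, backs off one order at a time until __get_free_pages() succeeds, then derives the entry count from whatever order it ended up with and rounds that down to a power of two so the hash function's masking assumption holds. The final step in isolation (the 24-byte bucket size is an example value; sizeof(struct sctp_bind_hashbucket) need not be a power of two, which is exactly why the rounding is needed):

#include <stdio.h>

/* Userspace stand-in for the kernel's rounddown_pow_of_two(). */
static unsigned long rounddown_pow2(unsigned long n)
{
        unsigned long p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned long page_size = 4096, bucket_size = 24;
        int order = 2;  /* suppose the allocator gave us 4 pages */
        unsigned long num_entries = (1UL << order) * page_size / bucket_size;

        /* 682 entries fit; the table is sized to 512 so masking works */
        printf("hash size: %lu\n", rounddown_pow2(num_entries));
        return 0;
}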
12713 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
12714 +index ef1d90f..be1489f 100644
12715 +--- a/net/sctp/socket.c
12716 ++++ b/net/sctp/socket.c
12717 +@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
12718 + struct sctp_hmac_algo_param *hmacs;
12719 + __u16 data_len = 0;
12720 + u32 num_idents;
12721 ++ int i;
12722 +
12723 + if (!ep->auth_enable)
12724 + return -EACCES;
12725 +@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
12726 + return -EFAULT;
12727 + if (put_user(num_idents, &p->shmac_num_idents))
12728 + return -EFAULT;
12729 +- if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
12730 +- return -EFAULT;
12731 ++ for (i = 0; i < num_idents; i++) {
12732 ++ __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
12733 ++
12734 ++ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
12735 ++ return -EFAULT;
12736 ++ }
12737 + return 0;
12738 + }
12739 +
12740 +@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
12741 +
12742 + if (cmsgs->srinfo->sinfo_flags &
12743 + ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
12744 ++ SCTP_SACK_IMMEDIATELY |
12745 + SCTP_ABORT | SCTP_EOF))
12746 + return -EINVAL;
12747 + break;
12748 +@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
12749 +
12750 + if (cmsgs->sinfo->snd_flags &
12751 + ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
12752 ++ SCTP_SACK_IMMEDIATELY |
12753 + SCTP_ABORT | SCTP_EOF))
12754 + return -EINVAL;
12755 + break;
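sctp_getsockopt_hmac_ident() used to copy the identifier array to userspace verbatim, leaking the big-endian in-kernel representation; the fix converts each id with ntohs() as it is copied out. The conversion in isolation:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        /* identifiers as stored in the association: network byte order */
        uint16_t wire_ids[3] = { htons(1), htons(2), htons(3) };
        uint16_t out[3];
        int i;

        /* the fix: convert per element instead of one raw byte copy */
        for (i = 0; i < 3; i++)
                out[i] = ntohs(wire_ids[i]);

        printf("%u %u %u\n", out[0], out[1], out[2]);   /* 1 2 3 */
        return 0;
}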
12756 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
12757 +index 5e4f815..21e2035 100644
12758 +--- a/net/sunrpc/cache.c
12759 ++++ b/net/sunrpc/cache.c
12760 +@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
12761 + if (bp[0] == '\\' && bp[1] == 'x') {
12762 + /* HEX STRING */
12763 + bp += 2;
12764 +- while (len < bufsize) {
12765 ++ while (len < bufsize - 1) {
12766 + int h, l;
12767 +
12768 + h = hex_to_bin(bp[0]);
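The qword_get() change tightens the hex-decoding loop bound from bufsize to bufsize - 1, reserving the byte the terminating NUL will occupy; with the old bound a long enough hex string filled dest completely and the terminator landed one past the end. A self-contained sketch of the corrected bound (the decoding is simplified relative to the kernel's hex_to_bin()):

#include <stdio.h>

static int hexval(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        return -1;
}

/* Decode pairs of hex digits, leaving room for the NUL appended at
 * the end -- the "len < bufsize - 1" bound is the actual fix. */
static int decode_hex(const char *src, char *dest, int bufsize)
{
        int len = 0;

        while (len < bufsize - 1 && src[0] && src[1]) {
                int h = hexval(src[0]), l = hexval(src[1]);

                if (h < 0 || l < 0)
                        break;
                dest[len++] = (char)(h << 4 | l);
                src += 2;
        }
        dest[len] = '\0';
        return len;
}

int main(void)
{
        char buf[4];

        /* decodes "abc"; the 4th byte is kept for the NUL */
        printf("%d bytes: %s\n", decode_hex("61626364", buf, sizeof buf), buf);
        return 0;
}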
12769 +diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
12770 +index f34e535..d5d7132 100644
12771 +--- a/net/switchdev/switchdev.c
12772 ++++ b/net/switchdev/switchdev.c
12773 +@@ -20,6 +20,7 @@
12774 + #include <linux/list.h>
12775 + #include <linux/workqueue.h>
12776 + #include <linux/if_vlan.h>
12777 ++#include <linux/rtnetlink.h>
12778 + #include <net/ip_fib.h>
12779 + #include <net/switchdev.h>
12780 +
12781 +@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
12782 + }
12783 + EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
12784 +
12785 +-static DEFINE_MUTEX(switchdev_mutex);
12786 + static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
12787 +
12788 + /**
12789 +@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
12790 + {
12791 + int err;
12792 +
12793 +- mutex_lock(&switchdev_mutex);
12794 ++ rtnl_lock();
12795 + err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
12796 +- mutex_unlock(&switchdev_mutex);
12797 ++ rtnl_unlock();
12798 + return err;
12799 + }
12800 + EXPORT_SYMBOL_GPL(register_switchdev_notifier);
12801 +@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
12802 + {
12803 + int err;
12804 +
12805 +- mutex_lock(&switchdev_mutex);
12806 ++ rtnl_lock();
12807 + err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
12808 +- mutex_unlock(&switchdev_mutex);
12809 ++ rtnl_unlock();
12810 + return err;
12811 + }
12812 + EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
12813 +@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
12814 + * Call all network notifier blocks. This should be called by driver
12815 + * when it needs to propagate hardware event.
12816 + * Return values are same as for atomic_notifier_call_chain().
12817 ++ * rtnl_lock must be held.
12818 + */
12819 + int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
12820 + struct switchdev_notifier_info *info)
12821 + {
12822 + int err;
12823 +
12824 ++ ASSERT_RTNL();
12825 ++
12826 + info->dev = dev;
12827 +- mutex_lock(&switchdev_mutex);
12828 + err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
12829 +- mutex_unlock(&switchdev_mutex);
12830 + return err;
12831 + }
12832 + EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
12833 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
12834 +index 9dc239d..92e367a 100644
12835 +--- a/net/tipc/bcast.c
12836 ++++ b/net/tipc/bcast.c
12837 +@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
12838 +
12839 + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
12840 + NLM_F_MULTI, TIPC_NL_LINK_GET);
12841 +- if (!hdr)
12842 ++ if (!hdr) {
12843 ++ tipc_bcast_unlock(net);
12844 + return -EMSGSIZE;
12845 ++ }
12846 +
12847 + attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
12848 + if (!attrs)
12849 +diff --git a/net/tipc/node.c b/net/tipc/node.c
12850 +index 20cddec..3926b56 100644
12851 +--- a/net/tipc/node.c
12852 ++++ b/net/tipc/node.c
12853 +@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
12854 + skb_queue_head_init(&n_ptr->bc_entry.inputq1);
12855 + __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
12856 + skb_queue_head_init(&n_ptr->bc_entry.inputq2);
12857 +- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
12858 +- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
12859 +- if (n_ptr->addr < temp_node->addr)
12860 +- break;
12861 +- }
12862 +- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
12863 + n_ptr->state = SELF_DOWN_PEER_LEAVING;
12864 + n_ptr->signature = INVALID_NODE_SIG;
12865 + n_ptr->active_links[0] = INVALID_BEARER_ID;
12866 +@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
12867 + tipc_node_get(n_ptr);
12868 + setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
12869 + n_ptr->keepalive_intv = U32_MAX;
12870 ++ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
12871 ++ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
12872 ++ if (n_ptr->addr < temp_node->addr)
12873 ++ break;
12874 ++ }
12875 ++ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
12876 + exit:
12877 + spin_unlock_bh(&tn->node_list_lock);
12878 + return n_ptr;
12879 +diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
12880 +index 350cca3..69ee2ee 100644
12881 +--- a/net/tipc/subscr.c
12882 ++++ b/net/tipc/subscr.c
12883 +@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
12884 + struct sockaddr_tipc *addr, void *usr_data,
12885 + void *buf, size_t len)
12886 + {
12887 +- struct tipc_subscriber *subscriber = usr_data;
12888 ++ struct tipc_subscriber *subscrb = usr_data;
12889 + struct tipc_subscription *sub = NULL;
12890 + struct tipc_net *tn = net_generic(net, tipc_net_id);
12891 +
12892 +- tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
12893 +- if (sub)
12894 +- tipc_nametbl_subscribe(sub);
12895 +- else
12896 +- tipc_conn_terminate(tn->topsrv, subscriber->conid);
12897 ++ if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
12898 ++ return tipc_conn_terminate(tn->topsrv, subscrb->conid);
12899 ++
12900 ++ tipc_nametbl_subscribe(sub);
12901 + }
12902 +
12903 + /* Handle one request to establish a new subscriber */
12904 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
12905 +index e3f85bc..898a53a 100644
12906 +--- a/net/unix/af_unix.c
12907 ++++ b/net/unix/af_unix.c
12908 +@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
12909 + UNIXCB(skb).fp = NULL;
12910 +
12911 + for (i = scm->fp->count-1; i >= 0; i--)
12912 +- unix_notinflight(scm->fp->fp[i]);
12913 ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
12914 + }
12915 +
12916 + static void unix_destruct_scm(struct sk_buff *skb)
12917 +@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
12918 + return -ENOMEM;
12919 +
12920 + for (i = scm->fp->count - 1; i >= 0; i--)
12921 +- unix_inflight(scm->fp->fp[i]);
12922 ++ unix_inflight(scm->fp->user, scm->fp->fp[i]);
12923 + return max_level;
12924 + }
12925 +
12926 +@@ -1781,7 +1781,12 @@ restart_locked:
12927 + goto out_unlock;
12928 + }
12929 +
12930 +- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
12931 ++ /* other == sk && unix_peer(other) != sk can happen if
12932 ++ * - unix_peer(sk) == NULL, destination address bound to sk
12933 ++ * - unix_peer(sk) == sk by time of get but disconnected before lock
12934 ++ */
12935 ++ if (other != sk &&
12936 ++ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
12937 + if (timeo) {
12938 + timeo = unix_wait_for_peer(other, timeo);
12939 +
12940 +@@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
12941 + size_t size = state->size;
12942 + unsigned int last_len;
12943 +
12944 +- err = -EINVAL;
12945 +- if (sk->sk_state != TCP_ESTABLISHED)
12946 ++ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
12947 ++ err = -EINVAL;
12948 + goto out;
12949 ++ }
12950 +
12951 +- err = -EOPNOTSUPP;
12952 +- if (flags & MSG_OOB)
12953 ++ if (unlikely(flags & MSG_OOB)) {
12954 ++ err = -EOPNOTSUPP;
12955 + goto out;
12956 ++ }
12957 +
12958 + target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
12959 + timeo = sock_rcvtimeo(sk, noblock);
12960 +@@ -2322,9 +2329,11 @@ again:
12961 + goto unlock;
12962 +
12963 + unix_state_unlock(sk);
12964 +- err = -EAGAIN;
12965 +- if (!timeo)
12966 ++ if (!timeo) {
12967 ++ err = -EAGAIN;
12968 + break;
12969 ++ }
12970 ++
12971 + mutex_unlock(&u->readlock);
12972 +
12973 + timeo = unix_stream_data_wait(sk, timeo, last,
12974 +@@ -2332,6 +2341,7 @@ again:
12975 +
12976 + if (signal_pending(current)) {
12977 + err = sock_intr_errno(timeo);
12978 ++ scm_destroy(&scm);
12979 + goto out;
12980 + }
12981 +
12982 +diff --git a/net/unix/diag.c b/net/unix/diag.c
12983 +index c512f64..4d96797 100644
12984 +--- a/net/unix/diag.c
12985 ++++ b/net/unix/diag.c
12986 +@@ -220,7 +220,7 @@ done:
12987 + return skb->len;
12988 + }
12989 +
12990 +-static struct sock *unix_lookup_by_ino(int ino)
12991 ++static struct sock *unix_lookup_by_ino(unsigned int ino)
12992 + {
12993 + int i;
12994 + struct sock *sk;
12995 +diff --git a/net/unix/garbage.c b/net/unix/garbage.c
12996 +index 8fcdc22..6a0d485 100644
12997 +--- a/net/unix/garbage.c
12998 ++++ b/net/unix/garbage.c
12999 +@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
13000 + * descriptor if it is for an AF_UNIX socket.
13001 + */
13002 +
13003 +-void unix_inflight(struct file *fp)
13004 ++void unix_inflight(struct user_struct *user, struct file *fp)
13005 + {
13006 + struct sock *s = unix_get_socket(fp);
13007 +
13008 +@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
13009 + }
13010 + unix_tot_inflight++;
13011 + }
13012 +- fp->f_cred->user->unix_inflight++;
13013 ++ user->unix_inflight++;
13014 + spin_unlock(&unix_gc_lock);
13015 + }
13016 +
13017 +-void unix_notinflight(struct file *fp)
13018 ++void unix_notinflight(struct user_struct *user, struct file *fp)
13019 + {
13020 + struct sock *s = unix_get_socket(fp);
13021 +
13022 +@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
13023 + list_del_init(&u->link);
13024 + unix_tot_inflight--;
13025 + }
13026 +- fp->f_cred->user->unix_inflight--;
13027 ++ user->unix_inflight--;
13028 + spin_unlock(&unix_gc_lock);
13029 + }
13030 +
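unix_inflight() and unix_notinflight() now take the user_struct recorded in the SCM data (scm->fp->user) rather than re-reading fp->f_cred on every call, guaranteeing that the increment and decrement land on the same user's unix_inflight counter even if the file's credentials would resolve differently at the two points. The invariant, stripped to its essentials:

#include <stdio.h>

struct user { int unix_inflight; };

/* Charge the user recorded when the fds were attached... */
static void inflight(struct user *charged, struct user **record)
{
        charged->unix_inflight++;
        *record = charged;      /* plays the role of scm->fp->user */
}

/* ...and credit exactly that same user on the way out. */
static void notinflight(struct user *record)
{
        record->unix_inflight--;
}

int main(void)
{
        struct user alice = { 0 };
        struct user *rec;

        inflight(&alice, &rec);
        notinflight(rec);
        printf("alice inflight=%d\n", alice.unix_inflight);     /* 0 */
        return 0;
}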
13031 +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
13032 +index dacf71a..ba6c34e 100755
13033 +--- a/scripts/link-vmlinux.sh
13034 ++++ b/scripts/link-vmlinux.sh
13035 +@@ -62,7 +62,7 @@ vmlinux_link()
13036 + -Wl,--start-group \
13037 + ${KBUILD_VMLINUX_MAIN} \
13038 + -Wl,--end-group \
13039 +- -lutil -lrt ${1}
13040 ++ -lutil -lrt -lpthread ${1}
13041 + rm -f linux
13042 + fi
13043 + }
13044 +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
13045 +index ff81026..7c57c7f 100644
13046 +--- a/security/smack/smack_lsm.c
13047 ++++ b/security/smack/smack_lsm.c
13048 +@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
13049 + */
13050 + static inline unsigned int smk_ptrace_mode(unsigned int mode)
13051 + {
13052 +- switch (mode) {
13053 +- case PTRACE_MODE_READ:
13054 +- return MAY_READ;
13055 +- case PTRACE_MODE_ATTACH:
13056 ++ if (mode & PTRACE_MODE_ATTACH)
13057 + return MAY_READWRITE;
13058 +- }
13059 ++ if (mode & PTRACE_MODE_READ)
13060 ++ return MAY_READ;
13061 +
13062 + return 0;
13063 + }
13064 +diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
13065 +index d3c19c9..cb6ed10 100644
13066 +--- a/security/yama/yama_lsm.c
13067 ++++ b/security/yama/yama_lsm.c
13068 +@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
13069 + int rc = 0;
13070 +
13071 + /* require ptrace target be a child of ptracer on attach */
13072 +- if (mode == PTRACE_MODE_ATTACH) {
13073 ++ if (mode & PTRACE_MODE_ATTACH) {
13074 + switch (ptrace_scope) {
13075 + case YAMA_SCOPE_DISABLED:
13076 + /* No additional restrictions. */
13077 +@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
13078 + }
13079 + }
13080 +
13081 +- if (rc) {
13082 ++ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
13083 + printk_ratelimited(KERN_NOTICE
13084 + "ptrace of pid %d was attempted by: %s (pid %d)\n",
13085 + child->pid, current->comm, current->pid);
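The Smack and Yama hunks stop comparing the whole ptrace mode word and test the access bit instead: callers now OR modifier flags such as PTRACE_MODE_NOAUDIT into the mode, so an == comparison silently stops matching. A runnable restatement (the flag values are copied here purely for the demo; in the kernel they live in include/linux/ptrace.h):

#include <stdio.h>

/* Kernel-internal flags, reproduced only for this demo. */
#define PTRACE_MODE_READ        0x01
#define PTRACE_MODE_ATTACH      0x02
#define PTRACE_MODE_NOAUDIT     0x04

static int wants_attach(unsigned int mode)
{
        return (mode & PTRACE_MODE_ATTACH) != 0;        /* the fixed test */
}

int main(void)
{
        unsigned int mode = PTRACE_MODE_ATTACH | PTRACE_MODE_NOAUDIT;

        printf("equality test: %d, bitmask test: %d\n",
               mode == PTRACE_MODE_ATTACH, wants_attach(mode)); /* 0, 1 */
        return 0;
}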
13086 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
13087 +index 2c13298..2ff692d 100644
13088 +--- a/sound/pci/hda/hda_intel.c
13089 ++++ b/sound/pci/hda/hda_intel.c
13090 +@@ -357,7 +357,10 @@ enum {
13091 + ((pci)->device == 0x0d0c) || \
13092 + ((pci)->device == 0x160c))
13093 +
13094 +-#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
13095 ++#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
13096 ++#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
13097 ++#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
13098 ++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
13099 +
13100 + static char *driver_short_names[] = {
13101 + [AZX_DRIVER_ICH] = "HDA Intel",
13102 +@@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
13103 +
13104 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
13105 + snd_hdac_set_codec_wakeup(bus, true);
13106 +- if (IS_BROXTON(pci)) {
13107 ++ if (IS_SKL_PLUS(pci)) {
13108 + pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
13109 + val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
13110 + pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
13111 + }
13112 + azx_init_chip(chip, full_reset);
13113 +- if (IS_BROXTON(pci)) {
13114 ++ if (IS_SKL_PLUS(pci)) {
13115 + pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
13116 + val = val | INTEL_HDA_CGCTL_MISCBDCGE;
13117 + pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
13118 +@@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
13119 + snd_hdac_set_codec_wakeup(bus, false);
13120 +
13121 + /* reduce dma latency to avoid noise */
13122 +- if (IS_BROXTON(pci))
13123 ++ if (IS_BXT(pci))
13124 + bxt_reduce_dma_latency(chip);
13125 + }
13126 +
13127 +@@ -971,11 +974,6 @@ static int azx_resume(struct device *dev)
13128 + /* put codec down to D3 at hibernation for Intel SKL+;
13129 + * otherwise BIOS may still access the codec and screw up the driver
13130 + */
13131 +-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
13132 +-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
13133 +-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
13134 +-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
13135 +-
13136 + static int azx_freeze_noirq(struct device *dev)
13137 + {
13138 + struct pci_dev *pci = to_pci_dev(dev);
13139 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
13140 +index efd4980..72fa58d 100644
13141 +--- a/sound/pci/hda/patch_realtek.c
13142 ++++ b/sound/pci/hda/patch_realtek.c
13143 +@@ -4749,6 +4749,7 @@ enum {
13144 + ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
13145 + ALC293_FIXUP_LENOVO_SPK_NOISE,
13146 + ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
13147 ++ ALC255_FIXUP_DELL_SPK_NOISE,
13148 + };
13149 +
13150 + static const struct hda_fixup alc269_fixups[] = {
13151 +@@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = {
13152 + .type = HDA_FIXUP_FUNC,
13153 + .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
13154 + },
13155 ++ [ALC255_FIXUP_DELL_SPK_NOISE] = {
13156 ++ .type = HDA_FIXUP_FUNC,
13157 ++ .v.func = alc_fixup_disable_aamix,
13158 ++ .chained = true,
13159 ++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
13160 ++ },
13161 + };
13162 +
13163 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
13164 +@@ -5410,6 +5417,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
13165 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
13166 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
13167 + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
13168 ++ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
13169 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
13170 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
13171 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
13172 +diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
13173 +index 96234b6..5d51d6f 100644
13174 +--- a/tools/hv/hv_vss_daemon.c
13175 ++++ b/tools/hv/hv_vss_daemon.c
13176 +@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
13177 + syslog(LOG_ERR, "Illegal op:%d\n", op);
13178 + }
13179 + vss_msg->error = error;
13180 +- len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
13181 ++ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
13182 + if (len != sizeof(struct hv_vss_msg)) {
13183 + syslog(LOG_ERR, "write failed; error: %d %s", errno,
13184 + strerror(errno));
13185 +diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
13186 +index 2d9d830..4a3a72c 100644
13187 +--- a/tools/perf/util/stat.c
13188 ++++ b/tools/perf/util/stat.c
13189 +@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
13190 + int i, ret;
13191 +
13192 + aggr->val = aggr->ena = aggr->run = 0;
13193 +- init_stats(ps->res_stats);
13194 +
13195 + if (counter->per_pkg)
13196 + zero_per_pkg(counter);
13197 +diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
13198 +index 77edcdc..0572784 100755
13199 +--- a/tools/testing/selftests/efivarfs/efivarfs.sh
13200 ++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
13201 +@@ -88,7 +88,11 @@ test_delete()
13202 + exit 1
13203 + fi
13204 +
13205 +- rm $file
13206 ++ rm $file 2>/dev/null
13207 ++ if [ $? -ne 0 ]; then
13208 ++ chattr -i $file
13209 ++ rm $file
13210 ++ fi
13211 +
13212 + if [ -e $file ]; then
13213 + echo "$file couldn't be deleted" >&2
13214 +@@ -111,6 +115,7 @@ test_zero_size_delete()
13215 + exit 1
13216 + fi
13217 +
13218 ++ chattr -i $file
13219 + printf "$attrs" > $file
13220 +
13221 + if [ -e $file ]; then
13222 +@@ -141,7 +146,11 @@ test_valid_filenames()
13223 + echo "$file could not be created" >&2
13224 + ret=1
13225 + else
13226 +- rm $file
13227 ++ rm $file 2>/dev/null
13228 ++ if [ $? -ne 0 ]; then
13229 ++ chattr -i $file
13230 ++ rm $file
13231 ++ fi
13232 + fi
13233 + done
13234 +
13235 +@@ -174,7 +183,11 @@ test_invalid_filenames()
13236 +
13237 + if [ -e $file ]; then
13238 + echo "Creating $file should have failed" >&2
13239 +- rm $file
13240 ++ rm $file 2>/dev/null
13241 ++ if [ $? -ne 0 ]; then
13242 ++ chattr -i $file
13243 ++ rm $file
13244 ++ fi
13245 + ret=1
13246 + fi
13247 + done
13248 +diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
13249 +index 8c07644..4af74f7 100644
13250 +--- a/tools/testing/selftests/efivarfs/open-unlink.c
13251 ++++ b/tools/testing/selftests/efivarfs/open-unlink.c
13252 +@@ -1,10 +1,68 @@
13253 ++#include <errno.h>
13254 + #include <stdio.h>
13255 + #include <stdint.h>
13256 + #include <stdlib.h>
13257 + #include <unistd.h>
13258 ++#include <sys/ioctl.h>
13259 + #include <sys/types.h>
13260 + #include <sys/stat.h>
13261 + #include <fcntl.h>
13262 ++#include <linux/fs.h>
13263 ++
13264 ++static int set_immutable(const char *path, int immutable)
13265 ++{
13266 ++ unsigned int flags;
13267 ++ int fd;
13268 ++ int rc;
13269 ++ int error;
13270 ++
13271 ++ fd = open(path, O_RDONLY);
13272 ++ if (fd < 0)
13273 ++ return fd;
13274 ++
13275 ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
13276 ++ if (rc < 0) {
13277 ++ error = errno;
13278 ++ close(fd);
13279 ++ errno = error;
13280 ++ return rc;
13281 ++ }
13282 ++
13283 ++ if (immutable)
13284 ++ flags |= FS_IMMUTABLE_FL;
13285 ++ else
13286 ++ flags &= ~FS_IMMUTABLE_FL;
13287 ++
13288 ++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
13289 ++ error = errno;
13290 ++ close(fd);
13291 ++ errno = error;
13292 ++ return rc;
13293 ++}
13294 ++
13295 ++static int get_immutable(const char *path)
13296 ++{
13297 ++ unsigned int flags;
13298 ++ int fd;
13299 ++ int rc;
13300 ++ int error;
13301 ++
13302 ++ fd = open(path, O_RDONLY);
13303 ++ if (fd < 0)
13304 ++ return fd;
13305 ++
13306 ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
13307 ++ if (rc < 0) {
13308 ++ error = errno;
13309 ++ close(fd);
13310 ++ errno = error;
13311 ++ return rc;
13312 ++ }
13313 ++ close(fd);
13314 ++ if (flags & FS_IMMUTABLE_FL)
13315 ++ return 1;
13316 ++ return 0;
13317 ++}
13318 +
13319 + int main(int argc, char **argv)
13320 + {
13321 +@@ -27,7 +85,7 @@ int main(int argc, char **argv)
13322 + buf[4] = 0;
13323 +
13324 + /* create a test variable */
13325 +- fd = open(path, O_WRONLY | O_CREAT);
13326 ++ fd = open(path, O_WRONLY | O_CREAT, 0600);
13327 + if (fd < 0) {
13328 + perror("open(O_WRONLY)");
13329 + return EXIT_FAILURE;
13330 +@@ -41,6 +99,18 @@ int main(int argc, char **argv)
13331 +
13332 + close(fd);
13333 +
13334 ++ rc = get_immutable(path);
13335 ++ if (rc < 0) {
13336 ++ perror("ioctl(FS_IOC_GETFLAGS)");
13337 ++ return EXIT_FAILURE;
13338 ++ } else if (rc) {
13339 ++ rc = set_immutable(path, 0);
13340 ++ if (rc < 0) {
13341 ++ perror("ioctl(FS_IOC_SETFLAGS)");
13342 ++ return EXIT_FAILURE;
13343 ++ }
13344 ++ }
13345 ++
13346 + fd = open(path, O_RDONLY);
13347 + if (fd < 0) {
13348 + perror("open");
13349 +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
13350 +index 7a2f449..5d10f10 100644
13351 +--- a/virt/kvm/arm/vgic.c
13352 ++++ b/virt/kvm/arm/vgic.c
13353 +@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
13354 + static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
13355 + {
13356 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
13357 +-
13358 +- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
13359 ++ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
13360 ++ int sz = nr_longs * sizeof(unsigned long);
13361 + vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
13362 + vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
13363 + vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
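The vgic hunk fixes a bitmap sizing bug: nr_irqs/8 yields a byte count that is not rounded up to whole unsigned longs, while the kernel's bitmap helpers read and write in long-sized chunks, so the old allocation could be short by up to seven bytes on 64-bit. A runnable comparison (988 is an arbitrary IRQ count, not a value from the patch):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned long nr_bits = 988;    /* hypothetical shared-IRQ count */
        unsigned long old_sz = nr_bits / 8;
        unsigned long new_sz =
                (unsigned long)(BITS_TO_LONGS(nr_bits) * sizeof(unsigned long));

        /* 123 vs 128 bytes on LP64: bitmap ops would overrun the old one */
        printf("old=%lu bytes, new=%lu bytes\n", old_sz, new_sz);
        return 0;
}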
13364 +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
13365 +index 77d42be..4f70d12 100644
13366 +--- a/virt/kvm/async_pf.c
13367 ++++ b/virt/kvm/async_pf.c
13368 +@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
13369 + * do alloc nowait since if we are going to sleep anyway we
13370 + * may as well sleep faulting in page
13371 + */
13372 +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
13373 ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
13374 + if (!work)
13375 + return 0;
13376 +
13377
13378 diff --git a/4.4.3/4420_grsecurity-3.1-4.4.3-201602282149.patch b/4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch
13379 similarity index 99%
13380 rename from 4.4.3/4420_grsecurity-3.1-4.4.3-201602282149.patch
13381 rename to 4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch
13382 index fcd8074..88b7093 100644
13383 --- a/4.4.3/4420_grsecurity-3.1-4.4.3-201602282149.patch
13384 +++ b/4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch
13385 @@ -449,7 +449,7 @@ index af70d15..ccd3786 100644
13386
13387 A toggle value indicating if modules are allowed to be loaded
13388 diff --git a/Makefile b/Makefile
13389 -index 802be10..383fd5d 100644
13390 +index 344bc6f..4753efd 100644
13391 --- a/Makefile
13392 +++ b/Makefile
13393 @@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
13394 @@ -998,7 +998,7 @@ index 34e1569..b48ad87 100644
13395 kexec is a system call that implements the ability to shutdown your
13396 current kernel, and to start another kernel. It is like a reboot
13397 diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
13398 -index 259c0ca..48eaaa1 100644
13399 +index ddbb361..caf403d 100644
13400 --- a/arch/arm/Kconfig.debug
13401 +++ b/arch/arm/Kconfig.debug
13402 @@ -7,6 +7,7 @@ config ARM_PTDUMP
13403 @@ -6375,7 +6375,7 @@ index 8feaed6..1bd8a64 100644
13404
13405 /**
13406 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
13407 -index 2046c02..8183239 100644
13408 +index 21ed715..774a251 100644
13409 --- a/arch/mips/include/asm/page.h
13410 +++ b/arch/mips/include/asm/page.h
13411 @@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
13412 @@ -6404,7 +6404,7 @@ index b336037..5b874cc 100644
13413
13414 /*
13415 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
13416 -index 8957f15..c5b802e 100644
13417 +index 18826aa..f5a6216 100644
13418 --- a/arch/mips/include/asm/pgtable.h
13419 +++ b/arch/mips/include/asm/pgtable.h
13420 @@ -20,6 +20,9 @@
13421 @@ -10399,7 +10399,7 @@ index 646988d..b88905f 100644
13422 info.flags = 0;
13423 info.length = len;
13424 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
13425 -index 30e7ddb..266a3b0 100644
13426 +index c690c8e..1d5798e 100644
13427 --- a/arch/sparc/kernel/sys_sparc_64.c
13428 +++ b/arch/sparc/kernel/sys_sparc_64.c
13429 @@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
13430 @@ -16278,7 +16278,7 @@ index a55697d..66473ae 100644
13431 -END(ignore_sysret)
13432 +ENDPROC(ignore_sysret)
13433 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
13434 -index 6a1ae37..f1c3bfd 100644
13435 +index 15cfeba..da22a57 100644
13436 --- a/arch/x86/entry/entry_64_compat.S
13437 +++ b/arch/x86/entry/entry_64_compat.S
13438 @@ -13,8 +13,10 @@
13439 @@ -16437,7 +16437,7 @@ index 6a1ae37..f1c3bfd 100644
13440
13441 /*
13442 * Emulated IA32 system calls via int 0x80.
13443 -@@ -285,11 +340,11 @@ ENTRY(entry_INT80_compat)
13444 +@@ -286,11 +341,11 @@ ENTRY(entry_INT80_compat)
13445 pushq %rdx /* pt_regs->dx */
13446 pushq %rcx /* pt_regs->cx */
13447 pushq $-ENOSYS /* pt_regs->ax */
13448 @@ -16454,7 +16454,7 @@ index 6a1ae37..f1c3bfd 100644
13449 pushq %rbx /* pt_regs->rbx */
13450 pushq %rbp /* pt_regs->rbp */
13451 pushq %r12 /* pt_regs->r12 */
13452 -@@ -298,6 +353,12 @@ ENTRY(entry_INT80_compat)
13453 +@@ -299,6 +354,12 @@ ENTRY(entry_INT80_compat)
13454 pushq %r15 /* pt_regs->r15 */
13455 cld
13456
13457 @@ -16467,7 +16467,7 @@ index 6a1ae37..f1c3bfd 100644
13458 /*
13459 * User mode is traced as though IRQs are on, and the interrupt
13460 * gate turned them off.
13461 -@@ -309,10 +370,12 @@ ENTRY(entry_INT80_compat)
13462 +@@ -310,10 +371,12 @@ ENTRY(entry_INT80_compat)
13463 .Lsyscall_32_done:
13464
13465 /* Go back to user mode. */
13466 @@ -18868,6 +18868,19 @@ index 1e3408e..67c5ba1 100644
13467
13468 extern void elcr_set_level_irq(unsigned int irq);
13469
13470 +diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
13471 +index 055ea99..7dabb68 100644
13472 +--- a/arch/x86/include/asm/hypervisor.h
13473 ++++ b/arch/x86/include/asm/hypervisor.h
13474 +@@ -43,7 +43,7 @@ struct hypervisor_x86 {
13475 +
13476 + /* X2APIC detection (run once per boot) */
13477 + bool (*x2apic_available)(void);
13478 +-};
13479 ++} __do_const;
13480 +
13481 + extern const struct hypervisor_x86 *x86_hyper;
13482 +
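__do_const is a grsecurity/PaX annotation consumed by the constify gcc plugin: structures that hold only function pointers and are never modified after initialization are forced into read-only memory, and the companion hunks below drop the __refconst marker from the already-const instances to match. A plain-C analogy of the underlying idea (illustration only, outside grsecurity; the names are made up):

    #include <stdio.h>

    /* An ops table fully initialized at compile time can be declared
     * const so it lands in .rodata and cannot be overwritten at run
     * time — the plugin automates this for qualifying kernel structs. */
    struct hyper_ops {
            const char *name;
            int (*detect)(void);
    };

    static int detect_none(void) { return 0; }

    static const struct hyper_ops ops = {
            .name   = "example",
            .detect = detect_none,
    };

    int main(void)
    {
            printf("%s -> %d\n", ops.name, ops.detect());
            return 0;
    }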
13483 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13484 index 39bcefc..272d904 100644
13485 --- a/arch/x86/include/asm/i8259.h
13486 @@ -22829,7 +22842,7 @@ index 971cf88..a8e01ae 100644
13487 .name = "bigsmp",
13488 .probe = probe_bigsmp,
13489 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13490 -index f253218..b71d723 100644
13491 +index fdb0fbf..1426add 100644
13492 --- a/arch/x86/kernel/apic/io_apic.c
13493 +++ b/arch/x86/kernel/apic/io_apic.c
13494 @@ -1682,7 +1682,7 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
13495 @@ -22904,7 +22917,7 @@ index 7694ae6..5abb08e 100644
13496
13497 static int cmdline_apic __initdata;
13498 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
13499 -index 861bc59..a721835 100644
13500 +index a35f6b5..cced8817 100644
13501 --- a/arch/x86/kernel/apic/vector.c
13502 +++ b/arch/x86/kernel/apic/vector.c
13503 @@ -36,6 +36,7 @@ static struct irq_chip lapic_controller;
13504 @@ -23700,6 +23713,19 @@ index ce47402..4a6bdf8 100644
13505 }
13506
13507 static void microcode_fini_cpu(int cpu)
13508 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
13509 +index 20e242e..14b1629 100644
13510 +--- a/arch/x86/kernel/cpu/mshyperv.c
13511 ++++ b/arch/x86/kernel/cpu/mshyperv.c
13512 +@@ -193,7 +193,7 @@ static void __init ms_hyperv_init_platform(void)
13513 + mark_tsc_unstable("running on Hyper-V");
13514 + }
13515 +
13516 +-const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
13517 ++const struct hypervisor_x86 x86_hyper_ms_hyperv = {
13518 + .name = "Microsoft HyperV",
13519 + .detect = ms_hyperv_platform,
13520 + .init_platform = ms_hyperv_init_platform,
13521 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
13522 index 3b533cf..b40d426 100644
13523 --- a/arch/x86/kernel/cpu/mtrr/generic.c
13524 @@ -24161,6 +24187,19 @@ index 2f0a4a9..8f4b802 100644
13525
13526 struct pci2phy_map {
13527 struct list_head list;
13528 +diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
13529 +index 628a059..83bced6 100644
13530 +--- a/arch/x86/kernel/cpu/vmware.c
13531 ++++ b/arch/x86/kernel/cpu/vmware.c
13532 +@@ -137,7 +137,7 @@ static bool __init vmware_legacy_x2apic_available(void)
13533 + (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
13534 + }
13535 +
13536 +-const __refconst struct hypervisor_x86 x86_hyper_vmware = {
13537 ++const struct hypervisor_x86 x86_hyper_vmware = {
13538 + .name = "VMware",
13539 + .detect = vmware_platform,
13540 + .set_cpu_features = vmware_set_cpu_features,
13541 diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
13542 index afa64ad..dce67dd 100644
13543 --- a/arch/x86/kernel/crash_dump_64.c
13544 @@ -26051,7 +26090,7 @@ index 37dae79..620dd84 100644
13545 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
13546 t->iopl = level << 12;
13547 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
13548 -index f8062aa..c37b60f 100644
13549 +index 61521dc..5ce5a37 100644
13550 --- a/arch/x86/kernel/irq.c
13551 +++ b/arch/x86/kernel/irq.c
13552 @@ -28,7 +28,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
13553 @@ -26552,6 +26591,19 @@ index c2bedae..25e7ab60 100644
13554 .attr = {
13555 .name = "data",
13556 .mode = S_IRUGO,
13557 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
13558 +index 47190bd..0165c4d 100644
13559 +--- a/arch/x86/kernel/kvm.c
13560 ++++ b/arch/x86/kernel/kvm.c
13561 +@@ -553,7 +553,7 @@ static uint32_t __init kvm_detect(void)
13562 + return kvm_cpuid_base();
13563 + }
13564 +
13565 +-const struct hypervisor_x86 x86_hyper_kvm __refconst = {
13566 ++const struct hypervisor_x86 x86_hyper_kvm = {
13567 + .name = "KVM",
13568 + .detect = kvm_detect,
13569 + .x2apic_available = kvm_para_available,
13570 diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
13571 index 2bd81e3..2d5e042 100644
13572 --- a/arch/x86/kernel/kvmclock.c
13573 @@ -29408,7 +29460,7 @@ index 6525e92..28559d2 100644
13574
13575 out:
13576 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
13577 -index 1505587..0f0516c 100644
13578 +index b9b09fe..138addd 100644
13579 --- a/arch/x86/kvm/emulate.c
13580 +++ b/arch/x86/kvm/emulate.c
13581 @@ -1881,7 +1881,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
13582 @@ -29534,7 +29586,7 @@ index 4d30b86..94115f0 100644
13583 #define APIC_LVT_NUM 6
13584 /* 14 is the version for Xeon and Pentium 8.4.8*/
13585 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
13586 -index 3058a22..cb2670f 100644
13587 +index 7be8a25..7d71250 100644
13588 --- a/arch/x86/kvm/paging_tmpl.h
13589 +++ b/arch/x86/kvm/paging_tmpl.h
13590 @@ -335,7 +335,7 @@ retry_walk:
13591 @@ -29767,7 +29819,7 @@ index 10e7693..aa4d471 100644
13592 .disabled_by_bios = vmx_disabled_by_bios,
13593 .hardware_setup = hardware_setup,
13594 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
13595 -index 9a2ed89..f2f4bc5 100644
13596 +index 6ef3856..12e4701 100644
13597 --- a/arch/x86/kvm/x86.c
13598 +++ b/arch/x86/kvm/x86.c
13599 @@ -1937,8 +1937,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
13600 @@ -29790,7 +29842,7 @@ index 9a2ed89..f2f4bc5 100644
13601 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
13602 num_msrs_to_save * sizeof(u32)))
13603 goto out;
13604 -@@ -3028,7 +3030,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
13605 +@@ -3029,7 +3031,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
13606
13607 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
13608 {
13609 @@ -29799,7 +29851,7 @@ index 9a2ed89..f2f4bc5 100644
13610 u64 xstate_bv = xsave->header.xfeatures;
13611 u64 valid;
13612
13613 -@@ -3064,7 +3066,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
13614 +@@ -3065,7 +3067,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
13615
13616 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
13617 {
13618 @@ -29808,7 +29860,7 @@ index 9a2ed89..f2f4bc5 100644
13619 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
13620 u64 valid;
13621
13622 -@@ -3108,7 +3110,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
13623 +@@ -3109,7 +3111,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
13624 fill_xsave((u8 *) guest_xsave->region, vcpu);
13625 } else {
13626 memcpy(guest_xsave->region,
13627 @@ -29817,7 +29869,7 @@ index 9a2ed89..f2f4bc5 100644
13628 sizeof(struct fxregs_state));
13629 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
13630 XFEATURE_MASK_FPSSE;
13631 -@@ -3133,7 +3135,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
13632 +@@ -3134,7 +3136,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
13633 } else {
13634 if (xstate_bv & ~XFEATURE_MASK_FPSSE)
13635 return -EINVAL;
13636 @@ -29826,7 +29878,7 @@ index 9a2ed89..f2f4bc5 100644
13637 guest_xsave->region, sizeof(struct fxregs_state));
13638 }
13639 return 0;
13640 -@@ -6363,6 +6365,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
13641 +@@ -6364,6 +6366,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
13642 * exiting to the userspace. Otherwise, the value will be returned to the
13643 * userspace.
13644 */
13645 @@ -29834,7 +29886,7 @@ index 9a2ed89..f2f4bc5 100644
13646 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
13647 {
13648 int r;
13649 -@@ -6611,6 +6614,7 @@ out:
13650 +@@ -6612,6 +6615,7 @@ out:
13651 return r;
13652 }
13653
13654 @@ -29842,7 +29894,7 @@ index 9a2ed89..f2f4bc5 100644
13655 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
13656 {
13657 if (!kvm_arch_vcpu_runnable(vcpu) &&
13658 -@@ -7158,7 +7162,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
13659 +@@ -7159,7 +7163,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
13660 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13661 {
13662 struct fxregs_state *fxsave =
13663 @@ -29851,7 +29903,7 @@ index 9a2ed89..f2f4bc5 100644
13664
13665 memcpy(fpu->fpr, fxsave->st_space, 128);
13666 fpu->fcw = fxsave->cwd;
13667 -@@ -7175,7 +7179,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13668 +@@ -7176,7 +7180,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13669 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13670 {
13671 struct fxregs_state *fxsave =
13672 @@ -29860,7 +29912,7 @@ index 9a2ed89..f2f4bc5 100644
13673
13674 memcpy(fxsave->st_space, fpu->fpr, 128);
13675 fxsave->cwd = fpu->fcw;
13676 -@@ -7191,9 +7195,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13677 +@@ -7192,9 +7196,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13678
13679 static void fx_init(struct kvm_vcpu *vcpu)
13680 {
13681 @@ -29872,7 +29924,7 @@ index 9a2ed89..f2f4bc5 100644
13682 host_xcr0 | XSTATE_COMPACTION_ENABLED;
13683
13684 /*
13685 -@@ -7217,7 +7221,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
13686 +@@ -7218,7 +7222,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
13687 kvm_put_guest_xcr0(vcpu);
13688 vcpu->guest_fpu_loaded = 1;
13689 __kernel_fpu_begin();
13690 @@ -29881,7 +29933,7 @@ index 9a2ed89..f2f4bc5 100644
13691 trace_kvm_fpu(1);
13692 }
13693
13694 -@@ -7520,6 +7524,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
13695 +@@ -7521,6 +7525,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
13696
13697 struct static_key kvm_no_apic_vcpu __read_mostly;
13698
13699 @@ -29890,7 +29942,7 @@ index 9a2ed89..f2f4bc5 100644
13700 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
13701 {
13702 struct page *page;
13703 -@@ -7536,11 +7542,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
13704 +@@ -7537,11 +7543,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
13705 else
13706 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
13707
13708 @@ -29909,7 +29961,7 @@ index 9a2ed89..f2f4bc5 100644
13709 vcpu->arch.pio_data = page_address(page);
13710
13711 kvm_set_tsc_khz(vcpu, max_tsc_khz);
13712 -@@ -7596,6 +7605,9 @@ fail_mmu_destroy:
13713 +@@ -7597,6 +7606,9 @@ fail_mmu_destroy:
13714 kvm_mmu_destroy(vcpu);
13715 fail_free_pio_data:
13716 free_page((unsigned long)vcpu->arch.pio_data);
13717 @@ -29919,7 +29971,7 @@ index 9a2ed89..f2f4bc5 100644
13718 fail:
13719 return r;
13720 }
13721 -@@ -7613,6 +7625,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
13722 +@@ -7614,6 +7626,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
13723 free_page((unsigned long)vcpu->arch.pio_data);
13724 if (!lapic_in_kernel(vcpu))
13725 static_key_slow_dec(&kvm_no_apic_vcpu);
13726 @@ -34346,7 +34398,7 @@ index 0057a7acc..95c7edd 100644
13727 might_sleep();
13728 if (is_enabled()) /* recheck and proper locking in *_core() */
13729 diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
13730 -index b2fd67d..086bcb9 100644
13731 +index ef05755..7125725 100644
13732 --- a/arch/x86/mm/mpx.c
13733 +++ b/arch/x86/mm/mpx.c
13734 @@ -193,7 +193,7 @@ static int mpx_insn_decode(struct insn *insn,
13735 @@ -36844,10 +36896,10 @@ index 2f33760..835e50a 100644
13736 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
13737 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
13738 diff --git a/block/bio.c b/block/bio.c
13739 -index 4f184d9..eb9fa89 100644
13740 +index d4d1443..bb167da 100644
13741 --- a/block/bio.c
13742 +++ b/block/bio.c
13743 -@@ -1140,7 +1140,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
13744 +@@ -1143,7 +1143,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
13745 /*
13746 * Overflow, abort
13747 */
13748 @@ -36856,7 +36908,7 @@ index 4f184d9..eb9fa89 100644
13749 return ERR_PTR(-EINVAL);
13750
13751 nr_pages += end - start;
13752 -@@ -1265,7 +1265,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
13753 +@@ -1268,7 +1268,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
13754 /*
13755 * Overflow, abort
13756 */
13757 @@ -37268,7 +37320,7 @@ index d51a30a..b6891a3 100644
13758
13759 stream->workspace = vzalloc(zlib_inflate_workspacesize());
13760 diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
13761 -index 3405f7a..1155093 100644
13762 +index 5fdac39..ce3c90e 100644
13763 --- a/drivers/acpi/acpi_video.c
13764 +++ b/drivers/acpi/acpi_video.c
13765 @@ -412,7 +412,7 @@ static int video_enable_only_lcd(const struct dmi_system_id *d)
13766 @@ -37566,7 +37618,7 @@ index 82707f9..a6b19f5 100644
13767 * Award BIOS on this AOpen makes thermal control almost worthless.
13768 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
13769 diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
13770 -index daaf1c4..4b583a2 100644
13771 +index 80e55cb..f660caf 100644
13772 --- a/drivers/acpi/video_detect.c
13773 +++ b/drivers/acpi/video_detect.c
13774 @@ -41,7 +41,6 @@ ACPI_MODULE_NAME("video");
13775 @@ -37577,7 +37629,7 @@ index daaf1c4..4b583a2 100644
13776 static struct work_struct backlight_notify_work;
13777
13778 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
13779 -@@ -302,6 +301,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
13780 +@@ -294,6 +293,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
13781 return NOTIFY_OK;
13782 }
13783
13784 @@ -37588,7 +37640,7 @@ index daaf1c4..4b583a2 100644
13785 /*
13786 * Determine which type of backlight interface to use on this system,
13787 * First check cmdline, then dmi quirks, then do autodetect.
13788 -@@ -332,8 +335,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
13789 +@@ -324,8 +327,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
13790 &video_caps, NULL);
13791 INIT_WORK(&backlight_notify_work,
13792 acpi_video_backlight_notify_work);
13793 @@ -40723,10 +40775,10 @@ index 8412ce5..3a40e15 100644
13794 ret = create_boost_sysfs_file();
13795 if (ret)
13796 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
13797 -index b260576..21a0a29 100644
13798 +index d994b0f..b0b9d15 100644
13799 --- a/drivers/cpufreq/cpufreq_governor.c
13800 +++ b/drivers/cpufreq/cpufreq_governor.c
13801 -@@ -460,7 +460,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
13802 +@@ -465,7 +465,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
13803 cs_dbs_info->down_skip = 0;
13804 cs_dbs_info->requested_freq = policy->cur;
13805 } else {
13806 @@ -41156,10 +41208,10 @@ index 11707df..2ea96f7 100644
13807
13808 /* Run before NMI debug handler and KGDB */
13809 diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
13810 -index 592af5f..bb1d583 100644
13811 +index 5358737..2064670 100644
13812 --- a/drivers/edac/edac_device.c
13813 +++ b/drivers/edac/edac_device.c
13814 -@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
13815 +@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
13816 */
13817 int edac_device_alloc_index(void)
13818 {
13819 @@ -41172,7 +41224,7 @@ index 592af5f..bb1d583 100644
13820 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
13821
13822 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
13823 -index a75acea..589dd01 100644
13824 +index 58aed67..e6817a1 100644
13825 --- a/drivers/edac/edac_mc_sysfs.c
13826 +++ b/drivers/edac/edac_mc_sysfs.c
13827 @@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
13828 @@ -41185,7 +41237,7 @@ index a75acea..589dd01 100644
13829 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
13830 static struct dev_ch_attribute dev_attr_legacy_##_name = \
13831 diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
13832 -index 2cf44b4d..6dd2dc7 100644
13833 +index b4b3860..08d7faa 100644
13834 --- a/drivers/edac/edac_pci.c
13835 +++ b/drivers/edac/edac_pci.c
13836 @@ -29,7 +29,7 @@
13837 @@ -41197,7 +41249,7 @@ index 2cf44b4d..6dd2dc7 100644
13838
13839 /*
13840 * edac_pci_alloc_ctl_info
13841 -@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
13842 +@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
13843 */
13844 int edac_pci_alloc_index(void)
13845 {
13846 @@ -41493,10 +41545,10 @@ index 027ca212..65689be 100644
13847 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
13848 }
13849 diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
13850 -index 756eca8..2336d08 100644
13851 +index 10e6774..c2d96de 100644
13852 --- a/drivers/firmware/efi/efivars.c
13853 +++ b/drivers/firmware/efi/efivars.c
13854 -@@ -590,7 +590,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
13855 +@@ -583,7 +583,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
13856 static int
13857 create_efivars_bin_attributes(void)
13858 {
13859 @@ -41712,7 +41764,7 @@ index 4e4c308..d041d75 100644
13860
13861 /*
13862 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
13863 -index 048cfe0..4ed6d8f 100644
13864 +index bb1099c..8eff7b2 100644
13865 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
13866 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
13867 @@ -2338,7 +2338,7 @@ static inline void amdgpu_unregister_atpx_handler(void) {}
13868 @@ -41809,7 +41861,7 @@ index 8e99514..3d68786 100644
13869
13870 void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
13871 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
13872 -index d5b4213..111bf04 100644
13873 +index c961fe0..acde4f5 100644
13874 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
13875 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
13876 @@ -1075,7 +1075,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
13877 @@ -41822,7 +41874,7 @@ index d5b4213..111bf04 100644
13878
13879 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
13880 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
13881 -index 0508c5c..cce2be3 100644
13882 +index 8d6668c..ea61792 100644
13883 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
13884 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
13885 @@ -481,7 +481,7 @@ static struct drm_driver kms_driver = {
13886 @@ -42773,7 +42825,7 @@ index 93ec5dc..204ec92 100644
13887 #define I810_BASE(reg) ((unsigned long) \
13888 dev_priv->mmio_map->handle)
13889 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
13890 -index b4741d1..a0dc9fc 100644
13891 +index 61fcb3b..bad2d5f 100644
13892 --- a/drivers/gpu/drm/i915/i915_dma.c
13893 +++ b/drivers/gpu/drm/i915/i915_dma.c
13894 @@ -354,7 +354,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
13895 @@ -42978,10 +43030,10 @@ index 97f3a56..32c712e 100644
13896 ret = drm_ioctl(filp, cmd, arg);
13897
13898 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
13899 -index 0d228f9..a00f50a 100644
13900 +index 0f42a27..8d376ee 100644
13901 --- a/drivers/gpu/drm/i915/i915_irq.c
13902 +++ b/drivers/gpu/drm/i915/i915_irq.c
13903 -@@ -4395,14 +4395,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
13904 +@@ -4399,14 +4399,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
13905
13906 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
13907
13908 @@ -43000,7 +43052,7 @@ index 0d228f9..a00f50a 100644
13909 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
13910 }
13911
13912 -@@ -4414,32 +4415,32 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
13913 +@@ -4418,32 +4419,32 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
13914 if (!IS_GEN2(dev_priv))
13915 dev->vblank_disable_immediate = true;
13916
13917 @@ -43053,7 +43105,7 @@ index 0d228f9..a00f50a 100644
13918 if (IS_BROXTON(dev))
13919 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
13920 else if (HAS_PCH_SPT(dev))
13921 -@@ -4447,35 +4448,36 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
13922 +@@ -4451,35 +4452,36 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
13923 else
13924 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
13925 } else if (HAS_PCH_SPLIT(dev)) {
13926 @@ -43111,10 +43163,10 @@ index 0d228f9..a00f50a 100644
13927
13928 /**
13929 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
13930 -index 32cf973..62e0cfa 100644
13931 +index f859a5b..c6ef76b 100644
13932 --- a/drivers/gpu/drm/i915/intel_display.c
13933 +++ b/drivers/gpu/drm/i915/intel_display.c
13934 -@@ -14720,13 +14720,13 @@ struct intel_quirk {
13935 +@@ -14731,13 +14731,13 @@ struct intel_quirk {
13936 int subsystem_vendor;
13937 int subsystem_device;
13938 void (*hook)(struct drm_device *dev);
13939 @@ -43130,7 +43182,7 @@ index 32cf973..62e0cfa 100644
13940
13941 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
13942 {
13943 -@@ -14734,18 +14734,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
13944 +@@ -14745,18 +14745,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
13945 return 1;
13946 }
13947
13948 @@ -43565,10 +43617,10 @@ index 01a8694..584fb48 100644
13949 wait_queue_head_t display_event;
13950 wait_queue_head_t cursor_event;
13951 diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
13952 -index 2ae8577..0554f54 100644
13953 +index 7c2e782..d3ca7da 100644
13954 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
13955 +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
13956 -@@ -183,7 +183,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
13957 +@@ -184,7 +184,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
13958
13959 /* TODO copy slow path code from i915 */
13960 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
13961 @@ -43577,7 +43629,7 @@ index 2ae8577..0554f54 100644
13962
13963 {
13964 struct qxl_drawable *draw = fb_cmd;
13965 -@@ -203,7 +203,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
13966 +@@ -204,7 +204,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
13967 struct drm_qxl_reloc reloc;
13968
13969 if (copy_from_user(&reloc,
13970 @@ -43586,7 +43638,7 @@ index 2ae8577..0554f54 100644
13971 sizeof(reloc))) {
13972 ret = -EFAULT;
13973 goto out_free_bos;
13974 -@@ -282,10 +282,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
13975 +@@ -283,10 +283,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
13976
13977 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
13978
13979 @@ -43600,7 +43652,7 @@ index 2ae8577..0554f54 100644
13980 sizeof(user_cmd)))
13981 return -EFAULT;
13982
13983 -@@ -439,4 +439,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
13984 +@@ -440,4 +440,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
13985 DRM_AUTH),
13986 };
13987
13988 @@ -43872,7 +43924,7 @@ index b928c17..e5d9400 100644
13989 if (regcomp
13990 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
13991 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
13992 -index c566993..0bf8fae 100644
13993 +index d690df5..4aaaead 100644
13994 --- a/drivers/gpu/drm/radeon/radeon_device.c
13995 +++ b/drivers/gpu/drm/radeon/radeon_device.c
13996 @@ -1253,7 +1253,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
13997 @@ -44060,7 +44112,7 @@ index 15aee72..c6df119 100644
13998 -int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
13999 +const int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
14000 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
14001 -index e343074..2042c8b 100644
14002 +index e06ac54..46eabfd 100644
14003 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
14004 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
14005 @@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
14006 @@ -44771,7 +44823,7 @@ index c13fb5b..55a3802 100644
14007
14008 *off += size;
14009 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
14010 -index c4dcab0..a505f18 100644
14011 +index 9098f13..19a4855 100644
14012 --- a/drivers/hv/channel.c
14013 +++ b/drivers/hv/channel.c
14014 @@ -382,7 +382,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
14015 @@ -44783,7 +44835,7 @@ index c4dcab0..a505f18 100644
14016
14017 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
14018 if (ret)
14019 -@@ -696,9 +696,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
14020 +@@ -705,9 +705,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
14021 * Adjust the size down since vmbus_channel_packet_page_buffer is the
14022 * largest size we support
14023 */
14024 @@ -45000,7 +45052,7 @@ index 6a27eb2..349ed23 100644
14025 };
14026
14027 diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
14028 -index c848789..e9e9217 100644
14029 +index c43318d..72f7656 100644
14030 --- a/drivers/hwmon/dell-smm-hwmon.c
14031 +++ b/drivers/hwmon/dell-smm-hwmon.c
14032 @@ -819,7 +819,7 @@ static const struct i8k_config_data i8k_config_data[] = {
14033 @@ -45294,7 +45346,7 @@ index b13936d..65322b2 100644
14034
14035 if (chipset >= AK_MAX_TYPE) {
14036 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
14037 -index 0a26dd6..54c83de 100644
14038 +index d6d2b35..a97866a 100644
14039 --- a/drivers/infiniband/core/cm.c
14040 +++ b/drivers/infiniband/core/cm.c
14041 @@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
14042 @@ -45539,28 +45591,6 @@ index 1c02dea..5f1efa6 100644
14043 INIT_UDATA(&udata, buf + sizeof cmd,
14044 (unsigned long) cmd.response + sizeof resp,
14045 in_len - sizeof cmd, out_len - sizeof resp);
14046 -diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
14047 -index cb78b1e..f504ba7 100644
14048 ---- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
14049 -+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
14050 -@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
14051 - error = l2t_send(tdev, skb, l2e);
14052 - if (error < 0)
14053 - kfree_skb(skb);
14054 -- return error;
14055 -+ return error < 0 ? error : 0;
14056 - }
14057 -
14058 - int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
14059 -@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
14060 - error = cxgb3_ofld_send(tdev, skb);
14061 - if (error < 0)
14062 - kfree_skb(skb);
14063 -- return error;
14064 -+ return error < 0 ? error : 0;
14065 - }
14066 -
14067 - static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
14068 diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
14069 index 58fce174..f6de2c2 100644
14070 --- a/drivers/infiniband/hw/cxgb4/device.c
14071 @@ -47579,6 +47609,41 @@ index 40634b0..4f5855e 100644
14072
14073 // Every interrupt can come to us here
14074 // But we must truly tell each apart.
14075 +diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
14076 +index 134e4fa..243bffe 100644
14077 +--- a/drivers/lightnvm/rrpc.c
14078 ++++ b/drivers/lightnvm/rrpc.c
14079 +@@ -218,7 +218,7 @@ static void rrpc_put_blks(struct rrpc *rrpc)
14080 +
14081 + static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
14082 + {
14083 +- int next = atomic_inc_return(&rrpc->next_lun);
14084 ++ int next = atomic_inc_return_unchecked(&rrpc->next_lun);
14085 +
14086 + return &rrpc->luns[next % rrpc->nr_luns];
14087 + }
14088 +@@ -1286,7 +1286,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
14089 + rrpc->nr_luns = lun_end - lun_begin + 1;
14090 +
14091 + /* simple round-robin strategy */
14092 +- atomic_set(&rrpc->next_lun, -1);
14093 ++ atomic_set_unchecked(&rrpc->next_lun, -1);
14094 +
14095 + ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
14096 + if (ret) {
14097 +diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
14098 +index a9696a0..75d0008 100644
14099 +--- a/drivers/lightnvm/rrpc.h
14100 ++++ b/drivers/lightnvm/rrpc.h
14101 +@@ -99,7 +99,7 @@ struct rrpc {
14102 + /* Write strategy variables. Move these into each for structure for each
14103 + * strategy
14104 + */
14105 +- atomic_t next_lun; /* Whenever a page is written, this is updated
14106 ++ atomic_unchecked_t next_lun; /* Whenever a page is written, this is updated
14107 + * to point to the next write lun
14108 + */
14109 +
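atomic_unchecked_t is grsecurity's opt-out from PaX REFCOUNT, which instruments ordinary atomic_t arithmetic to trap on overflow; counters whose wraparound is harmless — like this round-robin cursor, which is only ever used modulo nr_luns — are converted to the unchecked variant. A user-space sketch of why the wraparound is benign here (illustration only; unsigned arithmetic keeps the overflow well-defined in portable C):

    #include <stdio.h>

    int main(void)
    {
            unsigned int next = 0xFFFFFFFE;   /* about to wrap */
            unsigned int nr_luns = 4;

            for (int i = 0; i < 4; i++) {
                    next++;                   /* wraps from 0xFFFFFFFF to 0 */
                    printf("lun %u\n", next % nr_luns);
            }
            /* prints 3, 0, 1, 2: the selection keeps cycling correctly */
            return 0;
    }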
14110 diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
14111 index 4d20088..de60cb2 100644
14112 --- a/drivers/md/bcache/Kconfig
14113 @@ -47627,7 +47692,7 @@ index 6b420a5..d5acb8f 100644
14114
14115 struct gc_stat {
14116 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
14117 -index 83392f8..fc8f340 100644
14118 +index 22b9e34..ac456ec 100644
14119 --- a/drivers/md/bcache/btree.c
14120 +++ b/drivers/md/bcache/btree.c
14121 @@ -468,7 +468,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
14122 @@ -47821,7 +47886,7 @@ index adbff14..018c2d2 100644
14123 struct cache_stat_collector collector;
14124
14125 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
14126 -index 679a093..b4dd03d 100644
14127 +index 8d0ead9..2b81525 100644
14128 --- a/drivers/md/bcache/super.c
14129 +++ b/drivers/md/bcache/super.c
14130 @@ -530,7 +530,7 @@ void bch_prio_write(struct cache *ca)
14131 @@ -48331,7 +48396,7 @@ index c219a05..15a27ca 100644
14132 pmd->bl_info.value_type.inc = data_block_inc;
14133 pmd->bl_info.value_type.dec = data_block_dec;
14134 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
14135 -index 5df4048..1344a0d 100644
14136 +index dd83492..d111dcf 100644
14137 --- a/drivers/md/dm.c
14138 +++ b/drivers/md/dm.c
14139 @@ -194,9 +194,9 @@ struct mapped_device {
14140 @@ -48383,7 +48448,7 @@ index 5df4048..1344a0d 100644
14141 {
14142 rcu_read_unlock();
14143 }
14144 -@@ -2315,8 +2319,8 @@ static struct mapped_device *alloc_dev(int minor)
14145 +@@ -2317,8 +2321,8 @@ static struct mapped_device *alloc_dev(int minor)
14146 spin_lock_init(&md->deferred_lock);
14147 atomic_set(&md->holders, 1);
14148 atomic_set(&md->open_count, 0);
14149 @@ -48394,7 +48459,7 @@ index 5df4048..1344a0d 100644
14150 INIT_LIST_HEAD(&md->uevent_list);
14151 INIT_LIST_HEAD(&md->table_devices);
14152 spin_lock_init(&md->uevent_lock);
14153 -@@ -2457,7 +2461,7 @@ static void event_callback(void *context)
14154 +@@ -2459,7 +2463,7 @@ static void event_callback(void *context)
14155
14156 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
14157
14158 @@ -48403,7 +48468,7 @@ index 5df4048..1344a0d 100644
14159 wake_up(&md->eventq);
14160 }
14161
14162 -@@ -3400,18 +3404,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
14163 +@@ -3402,18 +3406,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
14164
14165 uint32_t dm_next_uevent_seq(struct mapped_device *md)
14166 {
14167 @@ -48613,10 +48678,10 @@ index dfa57b4..7af9cda 100644
14168
14169 struct md_personality
14170 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
14171 -index fca6dbc..74ec612 100644
14172 +index 7e44005..20e035a 100644
14173 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
14174 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
14175 -@@ -703,7 +703,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
14176 +@@ -700,7 +700,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
14177 * Flick into a mode where all blocks get allocated in the new area.
14178 */
14179 smm->begin = old_len;
14180 @@ -48625,7 +48690,7 @@ index fca6dbc..74ec612 100644
14181
14182 /*
14183 * Extend.
14184 -@@ -741,7 +741,7 @@ out:
14185 +@@ -738,7 +738,7 @@ out:
14186 /*
14187 * Switch back to normal behaviour.
14188 */
14189 @@ -50863,7 +50928,7 @@ index f695b58..7b7d017 100644
14190 +} __do_const;
14191 #endif /* _DW_MMC_H_ */
14192 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
14193 -index fb26674..3172c2b 100644
14194 +index acece32..a872279 100644
14195 --- a/drivers/mmc/host/mmci.c
14196 +++ b/drivers/mmc/host/mmci.c
14197 @@ -1633,7 +1633,9 @@ static int mmci_probe(struct amba_device *dev,
14198 @@ -51045,6 +51110,19 @@ index 141c2a4..ca734ed 100644
14199 .kind = "can",
14200 .maxtype = IFLA_CAN_MAX,
14201 .policy = can_policy,
14202 +diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
14203 +index c1b6676..50a8a51 100644
14204 +--- a/drivers/net/can/led.c
14205 ++++ b/drivers/net/can/led.c
14206 +@@ -128,7 +128,7 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
14207 + }
14208 +
14209 + /* notifier block for netdevice event */
14210 +-static struct notifier_block can_netdev_notifier __read_mostly = {
14211 ++static struct notifier_block can_netdev_notifier = {
14212 + .notifier_call = can_led_notifier,
14213 + };
14214 +
14215 diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
14216 index 674f367..ec3a31f 100644
14217 --- a/drivers/net/can/vcan.c
14218 @@ -52146,6 +52224,19 @@ index 1203d89..7895359 100644
14219
14220 netdev->netdev_ops = &mlx5e_netdev_ops;
14221 netdev->watchdog_timeo = 15 * HZ;
14222 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
14223 +index 3be4a23..e89602b 100644
14224 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
14225 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
14226 +@@ -1915,7 +1915,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
14227 + return NOTIFY_DONE;
14228 + }
14229 +
14230 +-static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
14231 ++static struct notifier_block mlxsw_sp_netdevice_nb = {
14232 + .notifier_call = mlxsw_sp_netdevice_event,
14233 + };
14234 +
14235 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
14236 index 6223930..975033d 100644
14237 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
14238 @@ -52276,6 +52367,28 @@ index 79ef799..59bbd1f 100644
14239
14240 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
14241 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
14242 +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
14243 +index 52ec3d6..ed19f16 100644
14244 +--- a/drivers/net/ethernet/rocker/rocker.c
14245 ++++ b/drivers/net/ethernet/rocker/rocker.c
14246 +@@ -5410,7 +5410,7 @@ out:
14247 + return NOTIFY_DONE;
14248 + }
14249 +
14250 +-static struct notifier_block rocker_netdevice_nb __read_mostly = {
14251 ++static struct notifier_block rocker_netdevice_nb = {
14252 + .notifier_call = rocker_netdevice_event,
14253 + };
14254 +
14255 +@@ -5453,7 +5453,7 @@ static int rocker_netevent_event(struct notifier_block *unused,
14256 + return NOTIFY_DONE;
14257 + }
14258 +
14259 +-static struct notifier_block rocker_netevent_nb __read_mostly = {
14260 ++static struct notifier_block rocker_netevent_nb = {
14261 + .notifier_call = rocker_netevent_event,
14262 + };
14263 +
14264 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
14265 index c771e0a..bbb368d 100644
14266 --- a/drivers/net/ethernet/sfc/ptp.c
14267 @@ -52471,6 +52584,29 @@ index 8c48bb2..0a03401 100644
14268 kfree_skb(skb);
14269 }
14270 }
14271 +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
14272 +index a9268db..19d067f 100644
14273 +--- a/drivers/net/ipvlan/ipvlan_main.c
14274 ++++ b/drivers/net/ipvlan/ipvlan_main.c
14275 +@@ -751,15 +751,15 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
14276 + return NOTIFY_OK;
14277 + }
14278 +
14279 +-static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
14280 ++static struct notifier_block ipvlan_addr4_notifier_block = {
14281 + .notifier_call = ipvlan_addr4_event,
14282 + };
14283 +
14284 +-static struct notifier_block ipvlan_notifier_block __read_mostly = {
14285 ++static struct notifier_block ipvlan_notifier_block = {
14286 + .notifier_call = ipvlan_device_event,
14287 + };
14288 +
14289 +-static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
14290 ++static struct notifier_block ipvlan_addr6_notifier_block = {
14291 + .notifier_call = ipvlan_addr6_event,
14292 + };
14293 +
14294 diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
14295 index a0849f4..147a4a6 100644
14296 --- a/drivers/net/irda/vlsi_ir.c
14297 @@ -52721,86 +52857,6 @@ index 9a863c6..8e2d8c9 100644
14298 break;
14299 err = 0;
14300 break;
14301 -diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
14302 -index 597c53e..f7e8c79 100644
14303 ---- a/drivers/net/ppp/pptp.c
14304 -+++ b/drivers/net/ppp/pptp.c
14305 -@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
14306 - return i < MAX_CALLID;
14307 - }
14308 -
14309 --static int add_chan(struct pppox_sock *sock)
14310 -+static int add_chan(struct pppox_sock *sock,
14311 -+ struct pptp_addr *sa)
14312 - {
14313 - static int call_id;
14314 -
14315 - spin_lock(&chan_lock);
14316 -- if (!sock->proto.pptp.src_addr.call_id) {
14317 -+ if (!sa->call_id) {
14318 - call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
14319 - if (call_id == MAX_CALLID) {
14320 - call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
14321 - if (call_id == MAX_CALLID)
14322 - goto out_err;
14323 - }
14324 -- sock->proto.pptp.src_addr.call_id = call_id;
14325 -- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
14326 -+ sa->call_id = call_id;
14327 -+ } else if (test_bit(sa->call_id, callid_bitmap)) {
14328 - goto out_err;
14329 -+ }
14330 -
14331 -- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
14332 -- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
14333 -+ sock->proto.pptp.src_addr = *sa;
14334 -+ set_bit(sa->call_id, callid_bitmap);
14335 -+ rcu_assign_pointer(callid_sock[sa->call_id], sock);
14336 - spin_unlock(&chan_lock);
14337 -
14338 - return 0;
14339 -@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
14340 - struct sock *sk = sock->sk;
14341 - struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
14342 - struct pppox_sock *po = pppox_sk(sk);
14343 -- struct pptp_opt *opt = &po->proto.pptp;
14344 - int error = 0;
14345 -
14346 - if (sockaddr_len < sizeof(struct sockaddr_pppox))
14347 -@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
14348 -
14349 - lock_sock(sk);
14350 -
14351 -- opt->src_addr = sp->sa_addr.pptp;
14352 -- if (add_chan(po))
14353 -+ if (sk->sk_state & PPPOX_DEAD) {
14354 -+ error = -EALREADY;
14355 -+ goto out;
14356 -+ }
14357 -+
14358 -+ if (sk->sk_state & PPPOX_BOUND) {
14359 -+ error = -EBUSY;
14360 -+ goto out;
14361 -+ }
14362 -+
14363 -+ if (add_chan(po, &sp->sa_addr.pptp))
14364 - error = -EBUSY;
14365 -+ else
14366 -+ sk->sk_state |= PPPOX_BOUND;
14367 -
14368 -+out:
14369 - release_sock(sk);
14370 - return error;
14371 - }
14372 -@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
14373 - }
14374 -
14375 - opt->dst_addr = sp->sa_addr.pptp;
14376 -- sk->sk_state = PPPOX_CONNECTED;
14377 -+ sk->sk_state |= PPPOX_CONNECTED;
14378 -
14379 - end:
14380 - release_sock(sk);
14381 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
14382 index 27ed252..80cffde 100644
14383 --- a/drivers/net/slip/slhc.c
14384 @@ -53008,7 +53064,7 @@ index f94ab78..675a3a4 100644
14385 #define VIRTNET_DRIVER_VERSION "1.0.0"
14386
14387 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
14388 -index 0a242b2..91f07ed 100644
14389 +index 0a242b2..eaf24af 100644
14390 --- a/drivers/net/vrf.c
14391 +++ b/drivers/net/vrf.c
14392 @@ -939,7 +939,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
14393 @@ -53020,11 +53076,20 @@ index 0a242b2..91f07ed 100644
14394 .kind = DRV_NAME,
14395 .priv_size = sizeof(struct net_vrf),
14396
14397 +@@ -973,7 +973,7 @@ out:
14398 + return NOTIFY_DONE;
14399 + }
14400 +
14401 +-static struct notifier_block vrf_notifier_block __read_mostly = {
14402 ++static struct notifier_block vrf_notifier_block = {
14403 + .notifier_call = vrf_device_event,
14404 + };
14405 +
14406 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
14407 -index 405a7b6..3248ac3 100644
14408 +index e0fcda4..b548741 100644
14409 --- a/drivers/net/vxlan.c
14410 +++ b/drivers/net/vxlan.c
14411 -@@ -3135,7 +3135,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
14412 +@@ -3144,7 +3144,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
14413 return vxlan->net;
14414 }
14415
14416 @@ -53033,7 +53098,7 @@ index 405a7b6..3248ac3 100644
14417 .kind = "vxlan",
14418 .maxtype = IFLA_VXLAN_MAX,
14419 .policy = vxlan_policy,
14420 -@@ -3183,7 +3183,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
14421 +@@ -3192,7 +3192,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
14422 return NOTIFY_DONE;
14423 }
14424
14425 @@ -53946,10 +54011,10 @@ index c652a66..1f75da8 100644
14426 crypto_hdr[2] = 0;
14427 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
14428 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
14429 -index 9028345..5b66ca3 100644
14430 +index 8c72047..e54deaa 100644
14431 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
14432 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
14433 -@@ -2049,7 +2049,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
14434 +@@ -2058,7 +2058,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
14435 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
14436
14437 char buf[8];
14438 @@ -53958,7 +54023,7 @@ index 9028345..5b66ca3 100644
14439 u32 reset_flag;
14440
14441 memset(buf, 0, sizeof(buf));
14442 -@@ -2070,7 +2070,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
14443 +@@ -2079,7 +2079,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
14444 {
14445 struct iwl_trans *trans = file->private_data;
14446 char buf[8];
14447 @@ -56546,10 +56611,10 @@ index 7686bfe..4710893 100644
14448 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
14449 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
14450 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
14451 -index bfa9a64..d3b3ec2 100644
14452 +index fc6674d..8f3aa03 100644
14453 --- a/drivers/scsi/qla2xxx/qla_os.c
14454 +++ b/drivers/scsi/qla2xxx/qla_os.c
14455 -@@ -1423,8 +1423,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
14456 +@@ -1429,8 +1429,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
14457 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
14458 /* Ok, a 64bit DMA mask is applicable. */
14459 ha->flags.enable_64bit_addressing = 1;
14460 @@ -56825,7 +56890,7 @@ index e3cd3ec..00560ec 100644
14461
14462 transport_setup_device(&rport->dev);
14463 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
14464 -index 84fa4c4..8333258 100644
14465 +index bb669d3..2074023 100644
14466 --- a/drivers/scsi/sd.c
14467 +++ b/drivers/scsi/sd.c
14468 @@ -112,7 +112,7 @@ static int sd_resume(struct device *);
14469 @@ -57326,6 +57391,64 @@ index e541a01..a41777d 100644
14470 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
14471 sdata, wqe->atomic_wr.swap);
14472 goto send_comp;
14473 +diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
14474 +index d900546..266edcf 100644
14475 +--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
14476 ++++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
14477 +@@ -3983,7 +3983,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
14478 + _12M_RATE_, _24M_RATE_, 0xff,
14479 + };
14480 +
14481 +- atomic_set(&pmlmeext->event_seq, 0);
14482 ++ atomic_set_unchecked(&pmlmeext->event_seq, 0);
14483 + pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
14484 +
14485 + pmlmeext->cur_channel = padapter->registrypriv.channel;
14486 +@@ -4310,7 +4310,7 @@ void report_survey_event(struct adapter *padapter,
14487 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14488 + pc2h_evt_hdr->len = sizeof(struct survey_event);
14489 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
14490 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14491 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14492 +
14493 + psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
14494 +
14495 +@@ -4362,7 +4362,7 @@ void report_surveydone_event(struct adapter *padapter)
14496 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14497 + pc2h_evt_hdr->len = sizeof(struct surveydone_event);
14498 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
14499 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14500 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14501 +
14502 + psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
14503 + psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
14504 +@@ -4408,7 +4408,7 @@ void report_join_res(struct adapter *padapter, int res)
14505 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14506 + pc2h_evt_hdr->len = sizeof(struct joinbss_event);
14507 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
14508 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14509 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14510 +
14511 + pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
14512 + memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
14513 +@@ -4461,7 +4461,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
14514 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14515 + pc2h_evt_hdr->len = sizeof(struct stadel_event);
14516 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
14517 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14518 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14519 +
14520 + pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
14521 + memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
14522 +@@ -4516,7 +4516,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
14523 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14524 + pc2h_evt_hdr->len = sizeof(struct stassoc_event);
14525 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
14526 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14527 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14528 +
14529 + padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
14530 + memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
14531 diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
14532 index 1b1c102..375e471 100644
14533 --- a/drivers/staging/rtl8188eu/include/hal_intf.h
14534 @@ -57339,6 +57462,19 @@ index 1b1c102..375e471 100644
14535
14536 enum rt_eeprom_type {
14537 EEPROM_93C46,
14538 +diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
14539 +index 9093a5f..5863f9b 100644
14540 +--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
14541 ++++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
14542 +@@ -409,7 +409,7 @@ struct p2p_oper_class_map {
14543 + struct mlme_ext_priv {
14544 + struct adapter *padapter;
14545 + u8 mlmeext_init;
14546 +- atomic_t event_seq;
14547 ++ atomic_unchecked_t event_seq;
14548 + u16 mgnt_seq;
14549 +
14550 + unsigned char cur_channel;
14551 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
14552 index 26dd24c..2eb37c9 100644
14553 --- a/drivers/staging/rtl8712/rtl871x_io.h
14554 @@ -57352,6 +57488,112 @@ index 26dd24c..2eb37c9 100644
14555
14556 struct io_req {
14557 struct list_head list;
14558 +diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
14559 +index d28f29a..e3d2f19 100644
14560 +--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
14561 ++++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
14562 +@@ -368,7 +368,7 @@ static void init_mlme_ext_priv23a_value(struct rtw_adapter *padapter)
14563 + _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
14564 + _12M_RATE_, _24M_RATE_, 0xff,};
14565 +
14566 +- atomic_set(&pmlmeext->event_seq, 0);
14567 ++ atomic_set_unchecked(&pmlmeext->event_seq, 0);
14568 + /* reset to zero when disconnect at client mode */
14569 + pmlmeext->mgnt_seq = 0;
14570 +
14571 +@@ -4743,7 +4743,7 @@ void report_survey_event23a(struct rtw_adapter *padapter,
14572 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14573 + pc2h_evt_hdr->len = sizeof(struct survey_event);
14574 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
14575 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14576 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14577 +
14578 + psurvey_evt = (struct survey_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
14579 +
14580 +@@ -4794,7 +4794,7 @@ void report_surveydone_event23a(struct rtw_adapter *padapter)
14581 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14582 + pc2h_evt_hdr->len = sizeof(struct surveydone_event);
14583 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
14584 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14585 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14586 +
14587 + psurveydone_evt = (struct surveydone_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
14588 + psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
14589 +@@ -4838,7 +4838,7 @@ void report_join_res23a(struct rtw_adapter *padapter, int res)
14590 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14591 + pc2h_evt_hdr->len = sizeof(struct joinbss_event);
14592 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
14593 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14594 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14595 +
14596 + pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
14597 + memcpy((unsigned char *)&pjoinbss_evt->network.network,
14598 +@@ -4888,7 +4888,7 @@ void report_del_sta_event23a(struct rtw_adapter *padapter,
14599 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14600 + pc2h_evt_hdr->len = sizeof(struct stadel_event);
14601 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
14602 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14603 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14604 +
14605 + pdel_sta_evt = (struct stadel_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
14606 + ether_addr_copy((unsigned char *)&pdel_sta_evt->macaddr, MacAddr);
14607 +@@ -4942,7 +4942,7 @@ void report_add_sta_event23a(struct rtw_adapter *padapter,
14608 + pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
14609 + pc2h_evt_hdr->len = sizeof(struct stassoc_event);
14610 + pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
14611 +- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
14612 ++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
14613 +
14614 + padd_sta_evt = (struct stassoc_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
14615 + ether_addr_copy((unsigned char *)&padd_sta_evt->macaddr, MacAddr);
14616 +diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
14617 +index e83463a..84230f3 100644
14618 +--- a/drivers/staging/rtl8723au/include/drv_types.h
14619 ++++ b/drivers/staging/rtl8723au/include/drv_types.h
14620 +@@ -185,7 +185,7 @@ struct dvobj_priv {
14621 +
14622 + struct usb_interface *pusbintf;
14623 + struct usb_device *pusbdev;
14624 +- atomic_t continual_urb_error;
14625 ++ atomic_unchecked_t continual_urb_error;
14626 +
14627 + /*-------- below is for PCIE INTERFACE --------*/
14628 +
14629 +diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
14630 +index ea2a6c9..91d10ea 100644
14631 +--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
14632 ++++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
14633 +@@ -406,7 +406,7 @@ struct p2p_oper_class_map {
14634 + struct mlme_ext_priv {
14635 + struct rtw_adapter *padapter;
14636 + u8 mlmeext_init;
14637 +- atomic_t event_seq;
14638 ++ atomic_unchecked_t event_seq;
14639 + u16 mgnt_seq;
14640 +
14641 + /* struct fw_priv fwpriv; */
14642 +diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
14643 +index ff11e13..69680d1 100644
14644 +--- a/drivers/staging/rtl8723au/include/usb_ops.h
14645 ++++ b/drivers/staging/rtl8723au/include/usb_ops.h
14646 +@@ -48,7 +48,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
14647 + int ret = false;
14648 + int value;
14649 +
14650 +- value = atomic_inc_return(&dvobj->continual_urb_error);
14651 ++ value = atomic_inc_return_unchecked(&dvobj->continual_urb_error);
14652 + if (value > MAX_CONTINUAL_URB_ERR) {
14653 + DBG_8723A("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
14654 + dvobj, value, MAX_CONTINUAL_URB_ERR);
14655 +@@ -60,7 +60,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
14656 + /* Set the continual_urb_error of this @param dvobjprive to 0 */
14657 + static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
14658 + {
14659 +- atomic_set(&dvobj->continual_urb_error, 0);
14660 ++ atomic_set_unchecked(&dvobj->continual_urb_error, 0);
14661 + }
14662 +
14663 + bool rtl8723au_chip_configure(struct rtw_adapter *padapter);
14664 diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
14665 index 860e1c2..609ee2a 100644
14666 --- a/drivers/staging/sm750fb/sm750.c
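[The rtl8723au hunks above are typical of the PaX REFCOUNT side of this patchset: counters that are allowed to wrap by design (the MLME event sequence number, the continual-URB error count) are retyped as atomic_unchecked_t so that the overflow-trapping atomic_t primitives do not flag them. A minimal userspace sketch of the distinction follows — illustrative only; the real primitives are per-arch assembly, and the checked variant raises a trap rather than printing.]

    #include <stdio.h>
    #include <limits.h>

    typedef struct { int counter; } atomic_t;           /* overflow-checked under PaX */
    typedef struct { int counter; } atomic_unchecked_t; /* opted out: free to wrap */

    /* Checked increment: refuse to wrap (PaX traps; here we just saturate). */
    static int atomic_inc_return(atomic_t *v)
    {
        if (v->counter == INT_MAX) {
            fprintf(stderr, "refcount saturated instead of overflowing\n");
            return v->counter;
        }
        return ++v->counter;
    }

    /* Unchecked increment: modular arithmetic, fine for sequence numbers. */
    static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
        /* wraparound via unsigned math; wraps to INT_MIN on mainstream ABIs */
        v->counter = (int)((unsigned int)v->counter + 1u);
        return v->counter;
    }

    int main(void)
    {
        atomic_unchecked_t seq = { INT_MAX };
        atomic_t ref = { INT_MAX };
        printf("event_seq wraps to %d\n", atomic_inc_return_unchecked(&seq));
        printf("refcount stays at %d\n", atomic_inc_return(&ref));
        return 0;
    }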
14667 @@ -57676,7 +57918,7 @@ index be4eedc..96aaf2f 100644
14668 tz->ops = NULL;
14669 tz->sensor_data = NULL;
14670 diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
14671 -index 7fc919f..19f23a5 100644
14672 +index 7fc919f..5521ec1 100644
14673 --- a/drivers/thermal/x86_pkg_temp_thermal.c
14674 +++ b/drivers/thermal/x86_pkg_temp_thermal.c
14675 @@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
14676 @@ -57684,7 +57926,7 @@ index 7fc919f..19f23a5 100644
14677 }
14678
14679 -static struct notifier_block pkg_temp_thermal_notifier __refdata = {
14680 -+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
14681 ++static struct notifier_block pkg_temp_thermal_notifier = {
14682 .notifier_call = pkg_temp_thermal_cpu_callback,
14683 };
14684
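[Note the hunk just above: earlier grsecurity releases annotated this notifier_block __refconst, while the 4.4.4 rebase leaves it unannotated, presumably leaving placement to the constification plugin. The hardening goal either way is to keep structures full of function pointers out of writable memory. A userspace sketch of the idea, with an assumed simplified stand-in for the kernel's struct notifier_block:]

    #include <stdio.h>

    /* Assumed simplified stand-in for struct notifier_block. */
    struct notifier_block_like {
        int (*notifier_call)(void *data);
    };

    static int thermal_callback(void *data)
    {
        (void)data;
        return 0;
    }

    /* const places the struct in .rodata: with a stray write primitive but
     * no way to change page permissions, the pointer can't be redirected. */
    static const struct notifier_block_like nb = {
        .notifier_call = thermal_callback,
    };

    int main(void)
    {
        printf("callback returned %d\n", nb.notifier_call(NULL));
        return 0;
    }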
14685 @@ -77319,7 +77561,7 @@ index e06dd75a..22221aa 100644
14686 /* first set the basic ref node struct up */
14687 atomic_set(&ref->refs, 1);
14688 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
14689 -index 0ddca67..ddd9880 100644
14690 +index 4958360..70b753e 100644
14691 --- a/fs/btrfs/disk-io.c
14692 +++ b/fs/btrfs/disk-io.c
14693 @@ -1263,7 +1263,7 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
14694 @@ -77331,7 +77573,7 @@ index 0ddca67..ddd9880 100644
14695 atomic_set(&root->orphan_inodes, 0);
14696 atomic_set(&root->refs, 1);
14697 atomic_set(&root->will_be_snapshoted, 0);
14698 -@@ -2564,7 +2564,7 @@ int open_ctree(struct super_block *sb,
14699 +@@ -2579,7 +2579,7 @@ int open_ctree(struct super_block *sb,
14700 atomic_set(&fs_info->nr_async_bios, 0);
14701 atomic_set(&fs_info->defrag_running, 0);
14702 atomic_set(&fs_info->qgroup_op_seq, 0);
14703 @@ -77533,7 +77775,7 @@ index 1a33d3e..4830234 100644
14704 * build a list of bios to read all the missing parts of this
14705 * stripe
14706 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
14707 -index 24154e4..ac07531 100644
14708 +index fe609b8..6475fee 100644
14709 --- a/fs/btrfs/super.c
14710 +++ b/fs/btrfs/super.c
14711 @@ -248,7 +248,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
14712 @@ -77664,7 +77906,7 @@ index 6916a78..4598936 100644
14713
14714 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
14715 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
14716 -index 9e08447..e21fee0 100644
14717 +index 9c62a6f..3c7df72 100644
14718 --- a/fs/btrfs/volumes.c
14719 +++ b/fs/btrfs/volumes.c
14720 @@ -231,7 +231,7 @@ static struct btrfs_device *__alloc_device(void)
14721 @@ -77673,10 +77915,10 @@ index 9e08447..e21fee0 100644
14722 atomic_set(&dev->reada_in_flight, 0);
14723 - atomic_set(&dev->dev_stats_ccnt, 0);
14724 + atomic_set_unchecked(&dev->dev_stats_ccnt, 0);
14725 + btrfs_device_data_ordered_init(dev);
14726 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
14727 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
14728 -
14729 -@@ -5184,7 +5184,7 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
14730 +@@ -5185,7 +5185,7 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
14731 sizeof(u64) * (total_stripes),
14732 GFP_NOFS|__GFP_NOFAIL);
14733
14734 @@ -77685,7 +77927,7 @@ index 9e08447..e21fee0 100644
14735 atomic_set(&bbio->refs, 1);
14736
14737 return bbio;
14738 -@@ -5865,7 +5865,7 @@ static void btrfs_end_bio(struct bio *bio)
14739 +@@ -5866,7 +5866,7 @@ static void btrfs_end_bio(struct bio *bio)
14740 int is_orig_bio = 0;
14741
14742 if (bio->bi_error) {
14743 @@ -77694,7 +77936,7 @@ index 9e08447..e21fee0 100644
14744 if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
14745 unsigned int stripe_index =
14746 btrfs_io_bio(bio)->stripe_index;
14747 -@@ -5903,7 +5903,7 @@ static void btrfs_end_bio(struct bio *bio)
14748 +@@ -5904,7 +5904,7 @@ static void btrfs_end_bio(struct bio *bio)
14749 /* only send an error to the higher layers if it is
14750 * beyond the tolerance of the btrfs bio
14751 */
14752 @@ -77703,7 +77945,7 @@ index 9e08447..e21fee0 100644
14753 bio->bi_error = -EIO;
14754 } else {
14755 /*
14756 -@@ -6014,7 +6014,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
14757 +@@ -6015,7 +6015,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
14758
14759 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
14760 {
14761 @@ -77712,7 +77954,7 @@ index 9e08447..e21fee0 100644
14762 if (atomic_dec_and_test(&bbio->stripes_pending)) {
14763 /* Shoud be the original bio. */
14764 WARN_ON(bio != bbio->orig_bio);
14765 -@@ -6776,10 +6776,10 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
14766 +@@ -6777,10 +6777,10 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
14767 if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
14768 continue;
14769
14770 @@ -80972,10 +81214,10 @@ index 5797d45..7d7d79a 100644
14771
14772 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
14773 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
14774 -index 023f6a1..9132167 100644
14775 +index e5232bb..d7b20d1 100644
14776 --- a/fs/fs-writeback.c
14777 +++ b/fs/fs-writeback.c
14778 -@@ -845,9 +845,9 @@ restart:
14779 +@@ -852,9 +852,9 @@ restart:
14780 #else /* CONFIG_CGROUP_WRITEBACK */
14781
14782 static struct bdi_writeback *
14783 @@ -80987,7 +81229,7 @@ index 023f6a1..9132167 100644
14784 {
14785 struct bdi_writeback *wb = inode_to_wb(inode);
14786
14787 -@@ -856,8 +856,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
14788 +@@ -863,8 +863,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
14789 return wb;
14790 }
14791
14792 @@ -80997,7 +81239,7 @@ index 023f6a1..9132167 100644
14793 {
14794 struct bdi_writeback *wb = inode_to_wb(inode);
14795
14796 -@@ -1101,9 +1101,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
14797 +@@ -1108,9 +1108,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
14798 * Wait for writeback on an inode to complete. Called with i_lock held.
14799 * Caller must make sure inode cannot go away when we drop i_lock.
14800 */
14801 @@ -81008,7 +81250,7 @@ index 023f6a1..9132167 100644
14802 {
14803 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
14804 wait_queue_head_t *wqh;
14805 -@@ -1132,8 +1131,8 @@ void inode_wait_for_writeback(struct inode *inode)
14806 +@@ -1139,8 +1138,8 @@ void inode_wait_for_writeback(struct inode *inode)
14807 * held and drops it. It is aimed for callers not holding any inode reference
14808 * so once i_lock is dropped, inode can go away.
14809 */
14810 @@ -83115,7 +83357,7 @@ index 14db05d..687f6d8 100644
14811 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
14812
14813 diff --git a/fs/namei.c b/fs/namei.c
14814 -index 0c3974c..a52e0f8 100644
14815 +index d8ee4da..47a7c9c 100644
14816 --- a/fs/namei.c
14817 +++ b/fs/namei.c
14818 @@ -336,17 +336,32 @@ int generic_permission(struct inode *inode, int mask)
14819 @@ -83310,7 +83552,7 @@ index 0c3974c..a52e0f8 100644
14820 last = nd->stack + nd->depth++;
14821 last->link = *link;
14822 last->cookie = NULL;
14823 -@@ -1828,7 +1917,7 @@ EXPORT_SYMBOL(full_name_hash);
14824 +@@ -1833,7 +1922,7 @@ EXPORT_SYMBOL(full_name_hash);
14825 static inline u64 hash_name(const char *name)
14826 {
14827 unsigned long a, b, adata, bdata, mask, hash, len;
14828 @@ -83319,7 +83561,7 @@ index 0c3974c..a52e0f8 100644
14829
14830 hash = a = 0;
14831 len = -sizeof(unsigned long);
14832 -@@ -1996,6 +2085,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
14833 +@@ -2001,6 +2090,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
14834 nd->last_type = LAST_ROOT; /* if there are only slashes... */
14835 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
14836 nd->depth = 0;
14837 @@ -83330,7 +83572,7 @@ index 0c3974c..a52e0f8 100644
14838 if (flags & LOOKUP_ROOT) {
14839 struct dentry *root = nd->root.dentry;
14840 struct inode *inode = root->d_inode;
14841 -@@ -2133,6 +2226,11 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
14842 +@@ -2138,6 +2231,11 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
14843 if (!err)
14844 err = complete_walk(nd);
14845
14846 @@ -83342,7 +83584,7 @@ index 0c3974c..a52e0f8 100644
14847 if (!err && nd->flags & LOOKUP_DIRECTORY)
14848 if (!d_can_lookup(nd->path.dentry))
14849 err = -ENOTDIR;
14850 -@@ -2181,6 +2279,10 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
14851 +@@ -2186,6 +2284,10 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
14852 err = link_path_walk(s, nd);
14853 if (!err)
14854 err = complete_walk(nd);
14855 @@ -83353,7 +83595,7 @@ index 0c3974c..a52e0f8 100644
14856 if (!err) {
14857 *parent = nd->path;
14858 nd->path.mnt = NULL;
14859 -@@ -2712,6 +2814,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
14860 +@@ -2717,6 +2819,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
14861 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
14862 return -EPERM;
14863
14864 @@ -83367,7 +83609,7 @@ index 0c3974c..a52e0f8 100644
14865 return 0;
14866 }
14867
14868 -@@ -2978,6 +3087,18 @@ static int lookup_open(struct nameidata *nd, struct path *path,
14869 +@@ -2983,6 +3092,18 @@ static int lookup_open(struct nameidata *nd, struct path *path,
14870 /* Negative dentry, just create the file */
14871 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
14872 umode_t mode = op->mode;
14873 @@ -83386,7 +83628,7 @@ index 0c3974c..a52e0f8 100644
14874 if (!IS_POSIXACL(dir->d_inode))
14875 mode &= ~current_umask();
14876 /*
14877 -@@ -2999,6 +3120,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
14878 +@@ -3004,6 +3125,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
14879 nd->flags & LOOKUP_EXCL);
14880 if (error)
14881 goto out_dput;
14882 @@ -83395,7 +83637,7 @@ index 0c3974c..a52e0f8 100644
14883 }
14884 out_no_open:
14885 path->dentry = dentry;
14886 -@@ -3104,11 +3227,24 @@ retry_lookup:
14887 +@@ -3109,11 +3232,24 @@ retry_lookup:
14888 goto finish_open_created;
14889 }
14890
14891 @@ -83421,19 +83663,7 @@ index 0c3974c..a52e0f8 100644
14892
14893 /*
14894 * If atomic_open() acquired write access it is dropped now due to
14895 -@@ -3144,6 +3280,11 @@ finish_lookup:
14896 - if (unlikely(error))
14897 - return error;
14898 -
14899 -+ if (gr_handle_nameidata_symlinkowner(nd, inode)) {
14900 -+ path_to_nameidata(&path, nd);
14901 -+ return -EACCES;
14902 -+ }
14903 -+
14904 - if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
14905 - path_to_nameidata(&path, nd);
14906 - return -ELOOP;
14907 -@@ -3166,6 +3307,12 @@ finish_open:
14908 +@@ -3166,6 +3302,17 @@ finish_open:
14909 path_put(&save_parent);
14910 return error;
14911 }
14912 @@ -83443,10 +83673,15 @@ index 0c3974c..a52e0f8 100644
14913 + goto out;
14914 + }
14915 +
14916 ++ if (gr_handle_nameidata_symlinkowner(nd, inode)) {
14917 ++ error = -EACCES;
14918 ++ goto out;
14919 ++ }
14920 ++
14921 audit_inode(nd->name, nd->path.dentry, 0);
14922 - error = -EISDIR;
14923 - if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
14924 -@@ -3432,9 +3579,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
14925 + if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
14926 + error = -ELOOP;
14927 +@@ -3440,9 +3587,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
14928 goto unlock;
14929
14930 error = -EEXIST;
14931 @@ -83460,7 +83695,7 @@ index 0c3974c..a52e0f8 100644
14932 /*
14933 * Special case - lookup gave negative, but... we had foo/bar/
14934 * From the vfs_mknod() POV we just have a negative dentry -
14935 -@@ -3488,6 +3637,20 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
14936 +@@ -3496,6 +3645,20 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
14937 }
14938 EXPORT_SYMBOL(user_path_create);
14939
14940 @@ -83481,7 +83716,7 @@ index 0c3974c..a52e0f8 100644
14941 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
14942 {
14943 int error = may_create(dir, dentry);
14944 -@@ -3551,6 +3714,17 @@ retry:
14945 +@@ -3559,6 +3722,17 @@ retry:
14946
14947 if (!IS_POSIXACL(path.dentry->d_inode))
14948 mode &= ~current_umask();
14949 @@ -83499,7 +83734,7 @@ index 0c3974c..a52e0f8 100644
14950 error = security_path_mknod(&path, dentry, mode, dev);
14951 if (error)
14952 goto out;
14953 -@@ -3566,6 +3740,8 @@ retry:
14954 +@@ -3574,6 +3748,8 @@ retry:
14955 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
14956 break;
14957 }
14958 @@ -83508,7 +83743,7 @@ index 0c3974c..a52e0f8 100644
14959 out:
14960 done_path_create(&path, dentry);
14961 if (retry_estale(error, lookup_flags)) {
14962 -@@ -3620,9 +3796,16 @@ retry:
14963 +@@ -3628,9 +3804,16 @@ retry:
14964
14965 if (!IS_POSIXACL(path.dentry->d_inode))
14966 mode &= ~current_umask();
14967 @@ -83525,7 +83760,7 @@ index 0c3974c..a52e0f8 100644
14968 done_path_create(&path, dentry);
14969 if (retry_estale(error, lookup_flags)) {
14970 lookup_flags |= LOOKUP_REVAL;
14971 -@@ -3655,7 +3838,7 @@ void dentry_unhash(struct dentry *dentry)
14972 +@@ -3663,7 +3846,7 @@ void dentry_unhash(struct dentry *dentry)
14973 {
14974 shrink_dcache_parent(dentry);
14975 spin_lock(&dentry->d_lock);
14976 @@ -83534,7 +83769,7 @@ index 0c3974c..a52e0f8 100644
14977 __d_drop(dentry);
14978 spin_unlock(&dentry->d_lock);
14979 }
14980 -@@ -3708,6 +3891,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
14981 +@@ -3716,6 +3899,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
14982 struct path path;
14983 struct qstr last;
14984 int type;
14985 @@ -83543,7 +83778,7 @@ index 0c3974c..a52e0f8 100644
14986 unsigned int lookup_flags = 0;
14987 retry:
14988 name = user_path_parent(dfd, pathname,
14989 -@@ -3740,10 +3925,20 @@ retry:
14990 +@@ -3748,10 +3933,20 @@ retry:
14991 error = -ENOENT;
14992 goto exit3;
14993 }
14994 @@ -83564,7 +83799,7 @@ index 0c3974c..a52e0f8 100644
14995 exit3:
14996 dput(dentry);
14997 exit2:
14998 -@@ -3838,6 +4033,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
14999 +@@ -3846,6 +4041,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
15000 int type;
15001 struct inode *inode = NULL;
15002 struct inode *delegated_inode = NULL;
15003 @@ -83573,7 +83808,7 @@ index 0c3974c..a52e0f8 100644
15004 unsigned int lookup_flags = 0;
15005 retry:
15006 name = user_path_parent(dfd, pathname,
15007 -@@ -3864,10 +4061,21 @@ retry_deleg:
15008 +@@ -3872,10 +4069,21 @@ retry_deleg:
15009 if (d_is_negative(dentry))
15010 goto slashes;
15011 ihold(inode);
15012 @@ -83595,7 +83830,7 @@ index 0c3974c..a52e0f8 100644
15013 exit2:
15014 dput(dentry);
15015 }
15016 -@@ -3956,9 +4164,17 @@ retry:
15017 +@@ -3964,9 +4172,17 @@ retry:
15018 if (IS_ERR(dentry))
15019 goto out_putname;
15020
15021 @@ -83613,7 +83848,7 @@ index 0c3974c..a52e0f8 100644
15022 done_path_create(&path, dentry);
15023 if (retry_estale(error, lookup_flags)) {
15024 lookup_flags |= LOOKUP_REVAL;
15025 -@@ -4062,6 +4278,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
15026 +@@ -4070,6 +4286,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
15027 struct dentry *new_dentry;
15028 struct path old_path, new_path;
15029 struct inode *delegated_inode = NULL;
15030 @@ -83621,7 +83856,7 @@ index 0c3974c..a52e0f8 100644
15031 int how = 0;
15032 int error;
15033
15034 -@@ -4085,7 +4302,7 @@ retry:
15035 +@@ -4093,7 +4310,7 @@ retry:
15036 if (error)
15037 return error;
15038
15039 @@ -83630,7 +83865,7 @@ index 0c3974c..a52e0f8 100644
15040 (how & LOOKUP_REVAL));
15041 error = PTR_ERR(new_dentry);
15042 if (IS_ERR(new_dentry))
15043 -@@ -4097,11 +4314,26 @@ retry:
15044 +@@ -4105,11 +4322,26 @@ retry:
15045 error = may_linkat(&old_path);
15046 if (unlikely(error))
15047 goto out_dput;
15048 @@ -83657,7 +83892,7 @@ index 0c3974c..a52e0f8 100644
15049 done_path_create(&new_path, new_dentry);
15050 if (delegated_inode) {
15051 error = break_deleg_wait(&delegated_inode);
15052 -@@ -4416,6 +4648,20 @@ retry_deleg:
15053 +@@ -4424,6 +4656,20 @@ retry_deleg:
15054 if (new_dentry == trap)
15055 goto exit5;
15056
15057 @@ -83678,7 +83913,7 @@ index 0c3974c..a52e0f8 100644
15058 error = security_path_rename(&old_path, old_dentry,
15059 &new_path, new_dentry, flags);
15060 if (error)
15061 -@@ -4423,6 +4669,9 @@ retry_deleg:
15062 +@@ -4431,6 +4677,9 @@ retry_deleg:
15063 error = vfs_rename(old_path.dentry->d_inode, old_dentry,
15064 new_path.dentry->d_inode, new_dentry,
15065 &delegated_inode, flags);
15066 @@ -83688,7 +83923,7 @@ index 0c3974c..a52e0f8 100644
15067 exit5:
15068 dput(new_dentry);
15069 exit4:
15070 -@@ -4479,14 +4728,24 @@ EXPORT_SYMBOL(vfs_whiteout);
15071 +@@ -4487,14 +4736,24 @@ EXPORT_SYMBOL(vfs_whiteout);
15072
15073 int readlink_copy(char __user *buffer, int buflen, const char *link)
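[The namei.c section above is where most of grsecurity's filesystem policy lands: gr_handle_* hooks are inserted at each VFS decision point (open, create, rmdir, unlink, link, rename) and can veto the operation with -EACCES before the generic code proceeds. The symlink-owner check is representative; below is a rough userspace rendering of that kind of rule — it mirrors the similar upstream fs.protected_symlinks logic, not grsecurity's exact in-kernel test.]

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <libgen.h>

    /* Deny following a symlink found in a sticky, world-writable directory
     * unless the link is owned by the follower or by the directory owner. */
    static int symlink_is_safe(const char *path)
    {
        struct stat link_st, dir_st;
        char buf[4096];

        if (lstat(path, &link_st) || !S_ISLNK(link_st.st_mode))
            return 1;                   /* not a symlink: nothing to check */

        snprintf(buf, sizeof(buf), "%s", path);
        if (stat(dirname(buf), &dir_st))
            return 0;

        if ((dir_st.st_mode & (S_ISVTX | S_IWOTH)) != (S_ISVTX | S_IWOTH))
            return 1;                   /* not a /tmp-style directory */

        return link_st.st_uid == getuid() || link_st.st_uid == dir_st.st_uid;
    }

    int main(int argc, char **argv)
    {
        const char *p = argc > 1 ? argv[1] : "/tmp/example-link";
        printf("%s: %s\n", p, symlink_is_safe(p) ? "follow" : "deny (-EACCES)");
        return 0;
    }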
15074 {
15075 @@ -85895,7 +86130,7 @@ index a352d57..cb94a5c 100644
15076 }
15077 fs_initcall(proc_interrupts_init);
15078 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
15079 -index 92e6726..a600d4fa 100644
15080 +index 92e6726..93a72d0 100644
15081 --- a/fs/proc/kcore.c
15082 +++ b/fs/proc/kcore.c
15083 @@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
15084 @@ -85959,7 +86194,7 @@ index 92e6726..a600d4fa 100644
15085 }
15086
15087 -static struct notifier_block kcore_callback_nb __meminitdata = {
15088 -+static struct notifier_block kcore_callback_nb __meminitconst = {
15089 ++static struct notifier_block kcore_callback_nb = {
15090 .notifier_call = kcore_callback,
15091 .priority = 0,
15092 };
15093 @@ -99346,29 +99581,6 @@ index 1bfcfe5..e04c5c9 100644
15094 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
15095
15096 #endif /* __ASM_GENERIC_CACHE_H */
15097 -diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
15098 -index 0419485..0f1c6f3 100644
15099 ---- a/include/asm-generic/cputime_nsecs.h
15100 -+++ b/include/asm-generic/cputime_nsecs.h
15101 -@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
15102 - */
15103 - static inline cputime_t timespec_to_cputime(const struct timespec *val)
15104 - {
15105 -- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
15106 -+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
15107 - return (__force cputime_t) ret;
15108 - }
15109 - static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
15110 -@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
15111 - */
15112 - static inline cputime_t timeval_to_cputime(const struct timeval *val)
15113 - {
15114 -- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
15115 -+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
15116 -+ val->tv_usec * NSEC_PER_USEC;
15117 - return (__force cputime_t) ret;
15118 - }
15119 - static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
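[The hunk above is pure deletion: grsecurity had been carrying this 32-bit cputime overflow fix privately, and the 4.4.4 base now ships the same change, so the rebase drops the duplicate. The bug it addressed: on 32-bit targets tv_sec * NSEC_PER_SEC is evaluated in 32-bit arithmetic and wraps before being stored into the 64-bit result, which the (u64) cast on the first operand prevents. A small demonstration:]

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000

    int main(void)
    {
        uint32_t tv_sec = 5;    /* stands in for a 32-bit time_t */

        /* multiply first in 32 bits, widen afterwards: wraps mod 2^32 */
        uint64_t narrow = (uint32_t)(tv_sec * (uint32_t)NSEC_PER_SEC);

        /* widen first, multiply in 64 bits: correct */
        uint64_t wide = (uint64_t)tv_sec * NSEC_PER_SEC;

        printf("narrow: %llu\n", (unsigned long long)narrow); /* 705032704 */
        printf("wide:   %llu\n", (unsigned long long)wide);   /* 5000000000 */
        return 0;
    }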
15120 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
15121 index 0d68a1e..b74a761 100644
15122 --- a/include/asm-generic/emergency-restart.h
15123 @@ -100151,10 +100363,10 @@ index 8609d57..86e4d79 100644
15124 int (*generic_packet) (struct cdrom_device_info *,
15125 struct packet_command *);
15126 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
15127 -index 06b77f9d..d08b456 100644
15128 +index 8e30fae..38632f8 100644
15129 --- a/include/linux/cgroup-defs.h
15130 +++ b/include/linux/cgroup-defs.h
15131 -@@ -407,7 +407,7 @@ struct cftype {
15132 +@@ -413,7 +413,7 @@ struct cftype {
15133 #ifdef CONFIG_DEBUG_LOCK_ALLOC
15134 struct lock_class_key lockdep_key;
15135 #endif
15136 @@ -100888,7 +101100,7 @@ index 2e551e2..8ea30b5 100644
15137 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
15138
15139 diff --git a/include/linux/efi.h b/include/linux/efi.h
15140 -index 569b5a8..55dbf24 100644
15141 +index 47be3ad..b2b1b58 100644
15142 --- a/include/linux/efi.h
15143 +++ b/include/linux/efi.h
15144 @@ -1094,6 +1094,7 @@ struct efivar_operations {
15145 @@ -105499,22 +105711,6 @@ index 6fb8016..2cf60e7 100644
15146
15147 /* shm_mode upper byte flags */
15148 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
15149 -diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
15150 -index 50777b5..92d112a 100644
15151 ---- a/include/linux/shmem_fs.h
15152 -+++ b/include/linux/shmem_fs.h
15153 -@@ -15,10 +15,7 @@ struct shmem_inode_info {
15154 - unsigned int seals; /* shmem seals */
15155 - unsigned long flags;
15156 - unsigned long alloced; /* data pages alloced to file */
15157 -- union {
15158 -- unsigned long swapped; /* subtotal assigned to swap */
15159 -- char *symlink; /* unswappable short symlink */
15160 -- };
15161 -+ unsigned long swapped; /* subtotal assigned to swap */
15162 - struct shared_policy policy; /* NUMA memory alloc policy */
15163 - struct list_head swaplist; /* chain of maybes on swap */
15164 - struct simple_xattrs xattrs; /* list of xattrs */
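[Another upstreamed fix being dropped: grsecurity's copy of the shmem change that stopped overlaying the swap counter and the short-symlink pointer in one union (the 4.4.4 base moves the symlink into inode->i_link itself, as the mm/shmem.c hunks further down show). The hazard with the original union is classic type confusion between two code paths; sketched below with an assumed simplified struct:]

    #include <stdio.h>

    struct shmem_info_like {
        union {
            unsigned long swapped;  /* meaningful for regular files */
            char *symlink;          /* meaningful for short symlinks */
        };
    };

    int main(void)
    {
        struct shmem_info_like info = { .symlink = "link target" };

        /* A path that wrongly assumes "regular file" now reinterprets
         * pointer bits as a swap-page count - garbage, no diagnostic. */
        printf("bogus swapped count: %lu\n", info.swapped);
        return 0;
    }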
15165 diff --git a/include/linux/signal.h b/include/linux/signal.h
15166 index 92557bb..53fa513 100644
15167 --- a/include/linux/signal.h
15168 @@ -105529,10 +105725,10 @@ index 92557bb..53fa513 100644
15169
15170 static inline void disallow_signal(int sig)
15171 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
15172 -index 9147f9f..ad74b50 100644
15173 +index 75f136a..fa3b724 100644
15174 --- a/include/linux/skbuff.h
15175 +++ b/include/linux/skbuff.h
15176 -@@ -808,7 +808,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
15177 +@@ -809,7 +809,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
15178 int node);
15179 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
15180 struct sk_buff *build_skb(void *data, unsigned int frag_size);
15181 @@ -105541,7 +105737,7 @@ index 9147f9f..ad74b50 100644
15182 gfp_t priority)
15183 {
15184 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
15185 -@@ -2079,7 +2079,7 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
15186 +@@ -2080,7 +2080,7 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
15187 return skb->csum_start - skb_headroom(skb);
15188 }
15189
15190 @@ -105550,7 +105746,7 @@ index 9147f9f..ad74b50 100644
15191 {
15192 return skb_transport_header(skb) - skb->data;
15193 }
15194 -@@ -2094,7 +2094,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
15195 +@@ -2095,7 +2095,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
15196 return skb->inner_transport_header - skb->inner_network_header;
15197 }
15198
15199 @@ -105559,7 +105755,7 @@ index 9147f9f..ad74b50 100644
15200 {
15201 return skb_network_header(skb) - skb->data;
15202 }
15203 -@@ -2154,7 +2154,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
15204 +@@ -2155,7 +2155,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
15205 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
15206 */
15207 #ifndef NET_SKB_PAD
15208 @@ -105568,7 +105764,7 @@ index 9147f9f..ad74b50 100644
15209 #endif
15210
15211 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
15212 -@@ -2794,9 +2794,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
15213 +@@ -2795,9 +2795,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
15214 int *err);
15215 unsigned int datagram_poll(struct file *file, struct socket *sock,
15216 struct poll_table_struct *wait);
15217 @@ -105580,7 +105776,7 @@ index 9147f9f..ad74b50 100644
15218 struct msghdr *msg, int size)
15219 {
15220 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
15221 -@@ -3325,6 +3325,9 @@ static inline void nf_reset(struct sk_buff *skb)
15222 +@@ -3326,6 +3326,9 @@ static inline void nf_reset(struct sk_buff *skb)
15223 nf_bridge_put(skb->nf_bridge);
15224 skb->nf_bridge = NULL;
15225 #endif
15226 @@ -106836,20 +107032,9 @@ index 5122b5e..598b440 100644
15227 void v9fs_register_trans(struct p9_trans_module *m);
15228 void v9fs_unregister_trans(struct p9_trans_module *m);
15229 diff --git a/include/net/af_unix.h b/include/net/af_unix.h
15230 -index 2a91a05..f3ff431 100644
15231 +index 9b4c418..f3ff431 100644
15232 --- a/include/net/af_unix.h
15233 +++ b/include/net/af_unix.h
15234 -@@ -6,8 +6,8 @@
15235 - #include <linux/mutex.h>
15236 - #include <net/sock.h>
15237 -
15238 --void unix_inflight(struct file *fp);
15239 --void unix_notinflight(struct file *fp);
15240 -+void unix_inflight(struct user_struct *user, struct file *fp);
15241 -+void unix_notinflight(struct user_struct *user, struct file *fp);
15242 - void unix_gc(void);
15243 - void wait_for_unix_gc(void);
15244 - struct sock *unix_get_socket(struct file *filp);
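[Same story here: the user_struct parameter on unix_inflight()/unix_notinflight() is the upstream fix, now in the 4.4.4 base, that charges descriptors in flight over AF_UNIX sockets to the sending user's bookkeeping so one user cannot exhaust the global file table; grsecurity drops its private copy. For reference, this is the operation being accounted — a descriptor parked inside a socket buffer via SCM_RIGHTS:]

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    static int send_fd(int sock, int fd)
    {
        char dummy = 'x';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } u;
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = u.buf,
                              .msg_controllen = sizeof(u.buf) };
        struct cmsghdr *cm;

        memset(&u, 0, sizeof(u));
        cm = CMSG_FIRSTHDR(&msg);
        cm->cmsg_level = SOL_SOCKET;
        cm->cmsg_type  = SCM_RIGHTS;
        cm->cmsg_len   = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cm), &fd, sizeof(int));
        return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
    }

    int main(void)
    {
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) || send_fd(sv[0], 1)) {
            perror("send_fd");
            return 1;
        }
        /* Until the peer receives it, the duplicate of fd 1 is "in flight"
         * and, after the fix, counted against the sender's user_struct. */
        puts("descriptor in flight");
        return 0;
    }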
15245 @@ -36,7 +36,7 @@ struct unix_skb_parms {
15246 u32 secid; /* Security ID */
15247 #endif
15248 @@ -106962,7 +107147,7 @@ index cf6c745..8a0cf00 100644
15249 return;
15250 }
15251 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
15252 -index 481fe1c..3adeb9d 100644
15253 +index 49dcad4..6d2c708 100644
15254 --- a/include/net/inet_connection_sock.h
15255 +++ b/include/net/inet_connection_sock.h
15256 @@ -65,7 +65,7 @@ struct inet_connection_sock_af_ops {
15257 @@ -107014,10 +107199,10 @@ index 1a98f1c..2a44de6 100644
15258
15259 static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
15260 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
15261 -index 9f4df68..f9705be 100644
15262 +index 3f98233..3d46645 100644
15263 --- a/include/net/ip_fib.h
15264 +++ b/include/net/ip_fib.h
15265 -@@ -173,7 +173,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
15266 +@@ -174,7 +174,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
15267
15268 #define FIB_RES_SADDR(net, res) \
15269 ((FIB_RES_NH(res).nh_saddr_genid == \
15270 @@ -107460,18 +107645,6 @@ index 2f87c1b..5a03287 100644
15271
15272 int __rtnl_link_register(struct rtnl_link_ops *ops);
15273 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
15274 -diff --git a/include/net/scm.h b/include/net/scm.h
15275 -index 262532d..59fa93c 100644
15276 ---- a/include/net/scm.h
15277 -+++ b/include/net/scm.h
15278 -@@ -21,6 +21,7 @@ struct scm_creds {
15279 - struct scm_fp_list {
15280 - short count;
15281 - short max;
15282 -+ struct user_struct *user;
15283 - struct file *fp[SCM_MAX_FD];
15284 - };
15285 -
15286 diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
15287 index 4a5b9a3..ca27d73 100644
15288 --- a/include/net/sctp/checksum.h
15289 @@ -107633,7 +107806,7 @@ index 14d3c07..c273ad8 100644
15290
15291 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
15292 diff --git a/include/net/tcp.h b/include/net/tcp.h
15293 -index f80e74c..1e64f3c 100644
15294 +index 414d822..f99ea64 100644
15295 --- a/include/net/tcp.h
15296 +++ b/include/net/tcp.h
15297 @@ -550,7 +550,7 @@ void tcp_retransmit_timer(struct sock *sk);
15298 @@ -109194,19 +109367,6 @@ index 3b39550..e470527 100644
15299
15300 if (!access_ok(VERIFY_READ, uattr, 1))
15301 return -EFAULT;
15302 -diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
15303 -index d1d3e8f..2e7f7ab 100644
15304 ---- a/kernel/bpf/verifier.c
15305 -+++ b/kernel/bpf/verifier.c
15306 -@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
15307 - /* adjust offset of jmps if necessary */
15308 - if (i < pos && i + insn->off + 1 > pos)
15309 - insn->off += delta;
15310 -- else if (i > pos && i + insn->off + 1 < pos)
15311 -+ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
15312 - insn->off -= delta;
15313 - }
15314 - }
15315 diff --git a/kernel/capability.c b/kernel/capability.c
15316 index 45432b5..988f1e4 100644
15317 --- a/kernel/capability.c
15318 @@ -109306,10 +109466,10 @@ index 45432b5..988f1e4 100644
15319 +}
15320 +EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
15321 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
15322 -index 470f653..5ea1e67 100644
15323 +index fb1ecfd..f6add73 100644
15324 --- a/kernel/cgroup.c
15325 +++ b/kernel/cgroup.c
15326 -@@ -3345,7 +3345,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
15327 +@@ -3346,7 +3346,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
15328 key = &cft->lockdep_key;
15329 #endif
15330 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
15331 @@ -109318,7 +109478,7 @@ index 470f653..5ea1e67 100644
15332 NULL, key);
15333 if (IS_ERR(kn))
15334 return PTR_ERR(kn);
15335 -@@ -3449,11 +3449,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
15336 +@@ -3450,11 +3450,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
15337 /* free copy for custom atomic_write_len, see init_cftypes() */
15338 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
15339 kfree(cft->kf_ops);
15340 @@ -109336,7 +109496,7 @@ index 470f653..5ea1e67 100644
15341 }
15342 }
15343
15344 -@@ -3484,8 +3487,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15345 +@@ -3485,8 +3488,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15346 kf_ops->atomic_write_len = cft->max_write_len;
15347 }
15348
15349 @@ -109349,7 +109509,7 @@ index 470f653..5ea1e67 100644
15350 }
15351
15352 return 0;
15353 -@@ -3498,7 +3503,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
15354 +@@ -3499,7 +3504,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
15355 if (!cfts || !cfts[0].ss)
15356 return -ENOENT;
15357
15358 @@ -109358,7 +109518,7 @@ index 470f653..5ea1e67 100644
15359 cgroup_apply_cftypes(cfts, false);
15360 cgroup_exit_cftypes(cfts);
15361 return 0;
15362 -@@ -3555,7 +3560,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15363 +@@ -3556,7 +3561,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15364
15365 mutex_lock(&cgroup_mutex);
15366
15367 @@ -109367,7 +109527,7 @@ index 470f653..5ea1e67 100644
15368 ret = cgroup_apply_cftypes(cfts, true);
15369 if (ret)
15370 cgroup_rm_cftypes_locked(cfts);
15371 -@@ -3576,8 +3581,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15372 +@@ -3577,8 +3582,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15373 {
15374 struct cftype *cft;
15375
15376 @@ -109379,7 +109539,7 @@ index 470f653..5ea1e67 100644
15377 return cgroup_add_cftypes(ss, cfts);
15378 }
15379
15380 -@@ -3593,8 +3600,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15381 +@@ -3594,8 +3601,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
15382 {
15383 struct cftype *cft;
15384
15385 @@ -109391,7 +109551,7 @@ index 470f653..5ea1e67 100644
15386 return cgroup_add_cftypes(ss, cfts);
15387 }
15388
15389 -@@ -5725,6 +5734,9 @@ static void cgroup_release_agent(struct work_struct *work)
15390 +@@ -5738,6 +5747,9 @@ static void cgroup_release_agent(struct work_struct *work)
15391 if (!pathbuf || !agentbuf)
15392 goto out;
15393
15394 @@ -109401,7 +109561,7 @@ index 470f653..5ea1e67 100644
15395 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
15396 if (!path)
15397 goto out;
15398 -@@ -5900,7 +5912,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
15399 +@@ -5913,7 +5925,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
15400 struct task_struct *task;
15401 int count = 0;
15402
15403 @@ -113425,7 +113585,7 @@ index ef7093c..1cc3d0f 100644
15404 return 0;
15405 }
15406 diff --git a/kernel/resource.c b/kernel/resource.c
15407 -index f150dbb..33735c2 100644
15408 +index 249b1eb..b3451db 100644
15409 --- a/kernel/resource.c
15410 +++ b/kernel/resource.c
15411 @@ -84,8 +84,8 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
15412 @@ -113689,46 +113849,6 @@ index b242775..b497b69 100644
15413
15414 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
15415 {
15416 -diff --git a/kernel/seccomp.c b/kernel/seccomp.c
15417 -index 580ac2d..15a1795 100644
15418 ---- a/kernel/seccomp.c
15419 -+++ b/kernel/seccomp.c
15420 -@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
15421 - put_seccomp_filter(thread);
15422 - smp_store_release(&thread->seccomp.filter,
15423 - caller->seccomp.filter);
15424 -+
15425 -+ /*
15426 -+ * Don't let an unprivileged task work around
15427 -+ * the no_new_privs restriction by creating
15428 -+ * a thread that sets it up, enters seccomp,
15429 -+ * then dies.
15430 -+ */
15431 -+ if (task_no_new_privs(caller))
15432 -+ task_set_no_new_privs(thread);
15433 -+
15434 - /*
15435 - * Opt the other thread into seccomp if needed.
15436 - * As threads are considered to be trust-realm
15437 - * equivalent (see ptrace_may_access), it is safe to
15438 - * allow one thread to transition the other.
15439 - */
15440 -- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
15441 -- /*
15442 -- * Don't let an unprivileged task work around
15443 -- * the no_new_privs restriction by creating
15444 -- * a thread that sets it up, enters seccomp,
15445 -- * then dies.
15446 -- */
15447 -- if (task_no_new_privs(caller))
15448 -- task_set_no_new_privs(thread);
15449 --
15450 -+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
15451 - seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
15452 -- }
15453 - }
15454 - }
15455 -
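[The deleted seccomp.c hunk is another private carry made redundant by the 4.4.4 base: the fix reorders seccomp_sync_threads() so that no_new_privs propagates to sibling threads even when they already have a filter, closing the workaround where a thread could set everything up, sync, and die. The sketch below exercises that path from userspace; the flag and syscall are the real 4.x API, with error handling trimmed:]

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    int main(void)
    {
        /* The flag whose thread-propagation the dropped hunk corrected. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return perror("prctl"), 1;

        struct sock_filter allow = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
        struct sock_fprog prog = { .len = 1, .filter = &allow };

        /* TSYNC applies the filter - and no_new_privs - to every thread. */
        if (syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
                    SECCOMP_FILTER_FLAG_TSYNC, &prog))
            return perror("seccomp"), 1;

        puts("filter installed and synced across threads");
        return 0;
    }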
15456 diff --git a/kernel/signal.c b/kernel/signal.c
15457 index f3f1f7a..d2e7863 100644
15458 --- a/kernel/signal.c
15459 @@ -114791,7 +114911,7 @@ index 86751c6..7875536 100644
15460 update_vsyscall_tz();
15461 if (firsttime) {
15462 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
15463 -index d563c19..5108cb0 100644
15464 +index 99188ee..29f96f9 100644
15465 --- a/kernel/time/timekeeping.c
15466 +++ b/kernel/time/timekeeping.c
15467 @@ -15,6 +15,7 @@
15468 @@ -114802,7 +114922,7 @@ index d563c19..5108cb0 100644
15469 #include <linux/syscore_ops.h>
15470 #include <linux/clocksource.h>
15471 #include <linux/jiffies.h>
15472 -@@ -916,6 +917,8 @@ int do_settimeofday64(const struct timespec64 *ts)
15473 +@@ -915,6 +916,8 @@ int do_settimeofday64(const struct timespec64 *ts)
15474 if (!timespec64_valid_strict(ts))
15475 return -EINVAL;
15476
15477 @@ -115439,10 +115559,10 @@ index 0f06532..247c8e7 100644
15478 + return atomic64_inc_return_unchecked(&trace_counter);
15479 }
15480 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
15481 -index 4f6ef69..90c3b0f 100644
15482 +index debf6e8..60fa064 100644
15483 --- a/kernel/trace/trace_events.c
15484 +++ b/kernel/trace/trace_events.c
15485 -@@ -2366,7 +2366,6 @@ __trace_early_add_new_event(struct trace_event_call *call,
15486 +@@ -2367,7 +2367,6 @@ __trace_early_add_new_event(struct trace_event_call *call,
15487 return 0;
15488 }
15489
15490 @@ -115679,10 +115799,10 @@ index 18f34cf..e7513f2 100644
15491 .thread_should_run = watchdog_should_run,
15492 .thread_fn = watchdog,
15493 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
15494 -index c579dba..8c0345d 100644
15495 +index 450c21f..16482d9 100644
15496 --- a/kernel/workqueue.c
15497 +++ b/kernel/workqueue.c
15498 -@@ -1856,9 +1856,8 @@ static void pool_mayday_timeout(unsigned long __pool)
15499 +@@ -1866,9 +1866,8 @@ static void pool_mayday_timeout(unsigned long __pool)
15500 * multiple times. Does GFP_KERNEL allocations. Called only from
15501 * manager.
15502 */
15503 @@ -115693,7 +115813,7 @@ index c579dba..8c0345d 100644
15504 {
15505 restart:
15506 spin_unlock_irq(&pool->lock);
15507 -@@ -1948,9 +1947,8 @@ static bool manage_workers(struct worker *worker)
15508 +@@ -1958,9 +1957,8 @@ static bool manage_workers(struct worker *worker)
15509 * CONTEXT:
15510 * spin_lock_irq(pool->lock) which is released and regrabbed.
15511 */
15512 @@ -115704,7 +115824,7 @@ index c579dba..8c0345d 100644
15513 {
15514 struct pool_workqueue *pwq = get_work_pwq(work);
15515 struct worker_pool *pool = worker->pool;
15516 -@@ -4452,7 +4450,7 @@ static void rebind_workers(struct worker_pool *pool)
15517 +@@ -4462,7 +4460,7 @@ static void rebind_workers(struct worker_pool *pool)
15518 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
15519 worker_flags |= WORKER_REBOUND;
15520 worker_flags &= ~WORKER_UNBOUND;
15521 @@ -117101,7 +117221,7 @@ index ef6963b..09c45dc 100644
15522 idx = vma_hugecache_offset(h, vma, address);
15523
15524 diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
15525 -index d8fb10d..8606223 100644
15526 +index d8fb10d..1c74822 100644
15527 --- a/mm/hugetlb_cgroup.c
15528 +++ b/mm/hugetlb_cgroup.c
15529 @@ -27,7 +27,6 @@ struct hugetlb_cgroup {
15530 @@ -117194,10 +117314,10 @@ index d8fb10d..8606223 100644
15531 + snprintf(names[3], MAX_CFTYPE_NAME, "%s.failcnt", buf);
15532 +
15533 + pax_open_kernel();
15534 -+ strncpy((void *)h->cgroup_files[0]->name, names[0], MAX_CFTYPE_NAME);
15535 -+ strncpy((void *)h->cgroup_files[1]->name, names[1], MAX_CFTYPE_NAME);
15536 -+ strncpy((void *)h->cgroup_files[2]->name, names[2], MAX_CFTYPE_NAME);
15537 -+ strncpy((void *)h->cgroup_files[3]->name, names[3], MAX_CFTYPE_NAME);
15538 ++ strncpy((void *)(*h->cgroup_files)[0].name, names[0], MAX_CFTYPE_NAME);
15539 ++ strncpy((void *)(*h->cgroup_files)[1].name, names[1], MAX_CFTYPE_NAME);
15540 ++ strncpy((void *)(*h->cgroup_files)[2].name, names[2], MAX_CFTYPE_NAME);
15541 ++ strncpy((void *)(*h->cgroup_files)[3].name, names[3], MAX_CFTYPE_NAME);
15542 + pax_close_kernel();
15543
15544 WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
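[The pax_open_kernel()/pax_close_kernel() bracket above is the standard PaX idiom for the rare legitimate write to data the constify plugin has made read-only: briefly lift write protection, perform the one sanctioned store (here, renaming the per-hstate cgroup files at init), and re-protect. A userspace analogy using page permissions — the kernel mechanism is different (toggling CR0.WP on x86, or kernel page-table entries), but the shape is the same:]

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return perror("mmap"), 1;
        strcpy(buf, "hugetlb.2MB.usage_in_bytes");

        mprotect(buf, page, PROT_READ);              /* steady state: RO */

        mprotect(buf, page, PROT_READ | PROT_WRITE); /* "pax_open_kernel()" */
        strcpy(buf, "hugetlb.1GB.usage_in_bytes");   /* the sanctioned write */
        mprotect(buf, page, PROT_READ);              /* "pax_close_kernel()" */

        printf("%s\n", buf);
        return 0;
    }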
15545 @@ -117412,7 +117532,7 @@ index 750b789..b1b1b59 100644
15546 /*
15547 * free pages are specially detected outside this table:
15548 diff --git a/mm/memory.c b/mm/memory.c
15549 -index c387430..119fd96 100644
15550 +index b80bf47..d3fd553 100644
15551 --- a/mm/memory.c
15552 +++ b/mm/memory.c
15553 @@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
15554 @@ -117970,7 +118090,7 @@ index c387430..119fd96 100644
15555 pgd = pgd_offset(mm, address);
15556 pud = pud_alloc(mm, pgd, address);
15557 if (!pud)
15558 -@@ -3478,6 +3749,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
15559 +@@ -3488,6 +3759,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
15560 spin_unlock(&mm->page_table_lock);
15561 return 0;
15562 }
15563 @@ -117994,7 +118114,7 @@ index c387430..119fd96 100644
15564 #endif /* __PAGETABLE_PUD_FOLDED */
15565
15566 #ifndef __PAGETABLE_PMD_FOLDED
15567 -@@ -3510,6 +3798,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
15568 +@@ -3520,6 +3808,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
15569 spin_unlock(&mm->page_table_lock);
15570 return 0;
15571 }
15572 @@ -118027,7 +118147,7 @@ index c387430..119fd96 100644
15573 #endif /* __PAGETABLE_PMD_FOLDED */
15574
15575 static int __follow_pte(struct mm_struct *mm, unsigned long address,
15576 -@@ -3619,8 +3933,8 @@ out:
15577 +@@ -3629,8 +3943,8 @@ out:
15578 return ret;
15579 }
15580
15581 @@ -118038,7 +118158,7 @@ index c387430..119fd96 100644
15582 {
15583 resource_size_t phys_addr;
15584 unsigned long prot = 0;
15585 -@@ -3646,8 +3960,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
15586 +@@ -3656,8 +3970,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
15587 * Access another process' address space as given in mm. If non-NULL, use the
15588 * given task for page fault accounting.
15589 */
15590 @@ -118049,7 +118169,7 @@ index c387430..119fd96 100644
15591 {
15592 struct vm_area_struct *vma;
15593 void *old_buf = buf;
15594 -@@ -3655,7 +3969,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
15595 +@@ -3665,7 +3979,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
15596 down_read(&mm->mmap_sem);
15597 /* ignore errors, just check how much was successfully transferred */
15598 while (len) {
15599 @@ -118058,7 +118178,7 @@ index c387430..119fd96 100644
15600 void *maddr;
15601 struct page *page = NULL;
15602
15603 -@@ -3716,8 +4030,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
15604 +@@ -3726,8 +4040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
15605 *
15606 * The caller must hold a reference on @mm.
15607 */
15608 @@ -118069,7 +118189,7 @@ index c387430..119fd96 100644
15609 {
15610 return __access_remote_vm(NULL, mm, addr, buf, len, write);
15611 }
15612 -@@ -3727,11 +4041,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
15613 +@@ -3737,11 +4051,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
15614 * Source/target buffer must be kernel space,
15615 * Do not walk the page table directly, use get_user_pages
15616 */
15617 @@ -118161,7 +118281,7 @@ index 87a1779..ebf95d4 100644
15618 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15619
15620 diff --git a/mm/migrate.c b/mm/migrate.c
15621 -index 7890d0b..00200c6 100644
15622 +index 6d17e0a..64ef47b 100644
15623 --- a/mm/migrate.c
15624 +++ b/mm/migrate.c
15625 @@ -1505,8 +1505,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
15626 @@ -118259,7 +118379,7 @@ index d6006b1..a72cbda 100644
15627 capable(CAP_IPC_LOCK))
15628 ret = apply_mlockall_flags(flags);
15629 diff --git a/mm/mm_init.c b/mm/mm_init.c
15630 -index fdadf91..5f527d1 100644
15631 +index fdadf91..90c6bcc 100644
15632 --- a/mm/mm_init.c
15633 +++ b/mm/mm_init.c
15634 @@ -170,7 +170,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
15635 @@ -118267,7 +118387,7 @@ index fdadf91..5f527d1 100644
15636 }
15637
15638 -static struct notifier_block compute_batch_nb __meminitdata = {
15639 -+static struct notifier_block compute_batch_nb __meminitconst = {
15640 ++static struct notifier_block compute_batch_nb = {
15641 .notifier_call = mm_compute_batch_notifier,
15642 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
15643 };
15644 @@ -120279,7 +120399,7 @@ index b577fbb..ccd4d4e 100644
15645
15646 /*
15647 diff --git a/mm/shmem.c b/mm/shmem.c
15648 -index 2afcdbb..4b38523 100644
15649 +index ea5a70c..4b38523 100644
15650 --- a/mm/shmem.c
15651 +++ b/mm/shmem.c
15652 @@ -33,7 +33,7 @@
15653 @@ -120300,33 +120420,7 @@ index 2afcdbb..4b38523 100644
15654
15655 /*
15656 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
15657 -@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
15658 - list_del_init(&info->swaplist);
15659 - mutex_unlock(&shmem_swaplist_mutex);
15660 - }
15661 -- } else
15662 -- kfree(info->symlink);
15663 -+ }
15664 -
15665 - simple_xattrs_free(&info->xattrs);
15666 - WARN_ON(inode->i_blocks);
15667 -@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
15668 - info = SHMEM_I(inode);
15669 - inode->i_size = len-1;
15670 - if (len <= SHORT_SYMLINK_LEN) {
15671 -- info->symlink = kmemdup(symname, len, GFP_KERNEL);
15672 -- if (!info->symlink) {
15673 -+ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
15674 -+ if (!inode->i_link) {
15675 - iput(inode);
15676 - return -ENOMEM;
15677 - }
15678 - inode->i_op = &shmem_short_symlink_operations;
15679 -- inode->i_link = info->symlink;
15680 - } else {
15681 - error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
15682 - if (error) {
15683 -@@ -2566,6 +2564,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
15684 +@@ -2564,6 +2564,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
15685 static int shmem_xattr_validate(const char *name)
15686 {
15687 struct { const char *prefix; size_t len; } arr[] = {
15688 @@ -120338,7 +120432,7 @@ index 2afcdbb..4b38523 100644
15689 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
15690 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
15691 };
15692 -@@ -2621,6 +2624,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
15693 +@@ -2619,6 +2624,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
15694 if (err)
15695 return err;
15696
15697 @@ -120354,7 +120448,7 @@ index 2afcdbb..4b38523 100644
15698 return simple_xattr_set(&info->xattrs, name, value, size, flags);
15699 }
15700
15701 -@@ -3004,8 +3016,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
15702 +@@ -3002,8 +3016,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
15703 int err = -ENOMEM;
15704
15705 /* Round up to L1_CACHE_BYTES to resist false sharing */
15706 @@ -120364,14 +120458,6 @@ index 2afcdbb..4b38523 100644
15707 if (!sbinfo)
15708 return -ENOMEM;
15709
15710 -@@ -3083,6 +3094,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
15711 - static void shmem_destroy_callback(struct rcu_head *head)
15712 - {
15713 - struct inode *inode = container_of(head, struct inode, i_rcu);
15714 -+ kfree(inode->i_link);
15715 - kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
15716 - }
15717 -
15718 diff --git a/mm/slab.c b/mm/slab.c
15719 index 4765c97..26f5c11 100644
15720 --- a/mm/slab.c
15721 @@ -121289,7 +121375,7 @@ index 17e8f8c..56d3370 100644
15722 EXPORT_SYMBOL(kmem_cache_free);
15723
15724 diff --git a/mm/slub.c b/mm/slub.c
15725 -index 4699751..ac3f662 100644
15726 +index 4699751..bb1f0cf 100644
15727 --- a/mm/slub.c
15728 +++ b/mm/slub.c
15729 @@ -34,6 +34,7 @@
15730 @@ -121561,6 +121647,15 @@ index 4699751..ac3f662 100644
15731 return -EINVAL;
15732
15733 s->flags &= ~SLAB_FAILSLAB;
15734 +@@ -5094,7 +5202,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
15735 + STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
15736 + #endif
15737 +
15738 +-static struct attribute *slab_attrs[] = {
15739 ++static struct attribute *slab_attrs[] __read_only = {
15740 + &slab_size_attr.attr,
15741 + &object_size_attr.attr,
15742 + &objs_per_slab_attr.attr,
15743 @@ -5129,6 +5237,12 @@ static struct attribute *slab_attrs[] = {
15744 #ifdef CONFIG_ZONE_DMA
15745 &cache_dma_attr.attr,
15746 @@ -122964,7 +123059,7 @@ index 1a19b98..df2b4ec 100644
15747 if (!can_dir) {
15748 printk(KERN_INFO "can: failed to create /proc/net/can . "
15749 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
15750 -index 9981039..d1f9f35 100644
15751 +index 63ae5dd..40e8dce 100644
15752 --- a/net/ceph/messenger.c
15753 +++ b/net/ceph/messenger.c
15754 @@ -190,7 +190,7 @@ static void con_fault(struct ceph_connection *con);
15755 @@ -123117,7 +123212,7 @@ index d62af69..2e07b22 100644
15756
15757 return err;
15758 diff --git a/net/core/dev.c b/net/core/dev.c
15759 -index 7f00f24..db000e2 100644
15760 +index 9efbdb3..16a834b 100644
15761 --- a/net/core/dev.c
15762 +++ b/net/core/dev.c
15763 @@ -1748,7 +1748,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
15764 @@ -123165,7 +123260,7 @@ index 7f00f24..db000e2 100644
15765 kfree_skb(skb);
15766 /* Jamal, now you will not able to escape explaining
15767 * me how you were going to use this. :-)
15768 -@@ -4841,7 +4841,7 @@ out_unlock:
15769 +@@ -4844,7 +4844,7 @@ out_unlock:
15770 return work;
15771 }
15772
15773 @@ -123174,7 +123269,7 @@ index 7f00f24..db000e2 100644
15774 {
15775 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
15776 unsigned long time_limit = jiffies + 2;
15777 -@@ -7001,8 +7001,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
15778 +@@ -7004,8 +7004,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
15779 } else {
15780 netdev_stats_to_stats64(storage, &dev->stats);
15781 }
15782 @@ -123552,37 +123647,10 @@ index 34ba7a0..5ebb8ef 100644
15783 .min_dump_alloc = min_dump_alloc,
15784 };
15785 diff --git a/net/core/scm.c b/net/core/scm.c
15786 -index 8a1741b..69f6cac 100644
15787 +index dce0acb..69f6cac 100644
15788 --- a/net/core/scm.c
15789 +++ b/net/core/scm.c
15790 -@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
15791 - *fplp = fpl;
15792 - fpl->count = 0;
15793 - fpl->max = SCM_MAX_FD;
15794 -+ fpl->user = NULL;
15795 - }
15796 - fpp = &fpl->fp[fpl->count];
15797 -
15798 -@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
15799 - *fpp++ = file;
15800 - fpl->count++;
15801 - }
15802 -+
15803 -+ if (!fpl->user)
15804 -+ fpl->user = get_uid(current_user());
15805 -+
15806 - return num;
15807 - }
15808 -
15809 -@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
15810 - scm->fp = NULL;
15811 - for (i=fpl->count-1; i>=0; i--)
15812 - fput(fpl->fp[i]);
15813 -+ free_uid(fpl->user);
15814 - kfree(fpl);
15815 - }
15816 - }
15817 -@@ -209,9 +215,9 @@ EXPORT_SYMBOL(__scm_send);
15818 +@@ -215,9 +215,9 @@ EXPORT_SYMBOL(__scm_send);
15819 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
15820 {
15821 struct cmsghdr __user *cm
15822 @@ -123594,7 +123662,7 @@ index 8a1741b..69f6cac 100644
15823 int err;
15824
15825 if (MSG_CMSG_COMPAT & msg->msg_flags)
15826 -@@ -232,7 +238,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
15827 +@@ -238,7 +238,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
15828 err = -EFAULT;
15829 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
15830 goto out;
15831 @@ -123603,7 +123671,7 @@ index 8a1741b..69f6cac 100644
15832 goto out;
15833 cmlen = CMSG_SPACE(len);
15834 if (msg->msg_controllen < cmlen)
15835 -@@ -248,7 +254,7 @@ EXPORT_SYMBOL(put_cmsg);
15836 +@@ -254,7 +254,7 @@ EXPORT_SYMBOL(put_cmsg);
15837 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
15838 {
15839 struct cmsghdr __user *cm
15840 @@ -123612,7 +123680,7 @@ index 8a1741b..69f6cac 100644
15841
15842 int fdmax = 0;
15843 int fdnum = scm->fp->count;
15844 -@@ -268,7 +274,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
15845 +@@ -274,7 +274,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
15846 if (fdnum < fdmax)
15847 fdmax = fdnum;
15848
15849 @@ -123621,7 +123689,7 @@ index 8a1741b..69f6cac 100644
15850 i++, cmfptr++)
15851 {
15852 struct socket *sock;
15853 -@@ -297,7 +303,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
15854 +@@ -303,7 +303,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
15855
15856 if (i > 0)
15857 {
15858 @@ -123630,19 +123698,11 @@ index 8a1741b..69f6cac 100644
15859 err = put_user(SOL_SOCKET, &cm->cmsg_level);
15860 if (!err)
15861 err = put_user(SCM_RIGHTS, &cm->cmsg_type);
15862 -@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
15863 - for (i = 0; i < fpl->count; i++)
15864 - get_file(fpl->fp[i]);
15865 - new_fpl->max = new_fpl->count;
15866 -+ new_fpl->user = get_uid(fpl->user);
15867 - }
15868 - return new_fpl;
15869 - }
15870 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
15871 -index b2df375..f54b133 100644
15872 +index 5bf88f5..008242b 100644
15873 --- a/net/core/skbuff.c
15874 +++ b/net/core/skbuff.c
15875 -@@ -969,7 +969,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
15876 +@@ -971,7 +971,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
15877 if (skb->ip_summed == CHECKSUM_PARTIAL)
15878 skb->csum_start += off;
15879 /* {transport,network,mac}_header and tail are relative to skb->head */
15880 @@ -123652,7 +123712,7 @@ index b2df375..f54b133 100644
15881 skb->network_header += off;
15882 if (skb_mac_header_was_set(skb))
15883 skb->mac_header += off;
15884 -@@ -2103,7 +2104,7 @@ EXPORT_SYMBOL(__skb_checksum);
15885 +@@ -2105,7 +2106,7 @@ EXPORT_SYMBOL(__skb_checksum);
15886 __wsum skb_checksum(const struct sk_buff *skb, int offset,
15887 int len, __wsum csum)
15888 {
15889 @@ -123661,7 +123721,7 @@ index b2df375..f54b133 100644
15890 .update = csum_partial_ext,
15891 .combine = csum_block_add_ext,
15892 };
15893 -@@ -3318,12 +3319,14 @@ void __init skb_init(void)
15894 +@@ -3320,12 +3321,14 @@ void __init skb_init(void)
15895 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
15896 sizeof(struct sk_buff),
15897 0,
15898 @@ -123863,10 +123923,10 @@ index 0c1d58d..e6ad04f 100644
15899 }
15900 EXPORT_SYMBOL_GPL(sock_diag_unregister);
15901 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
15902 -index 95b6139..3048623 100644
15903 +index a6beb7b..4d833b2 100644
15904 --- a/net/core/sysctl_net_core.c
15905 +++ b/net/core/sysctl_net_core.c
15906 -@@ -35,7 +35,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
15907 +@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
15908 {
15909 unsigned int orig_size, size;
15910 int ret, i;
15911 @@ -123875,7 +123935,7 @@ index 95b6139..3048623 100644
15912 .data = &size,
15913 .maxlen = sizeof(size),
15914 .mode = table->mode
15915 -@@ -203,7 +203,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
15916 +@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
15917 void __user *buffer, size_t *lenp, loff_t *ppos)
15918 {
15919 char id[IFNAMSIZ];
15920 @@ -123884,7 +123944,7 @@ index 95b6139..3048623 100644
15921 .data = id,
15922 .maxlen = IFNAMSIZ,
15923 };
15924 -@@ -221,7 +221,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
15925 +@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
15926 static int proc_do_rss_key(struct ctl_table *table, int write,
15927 void __user *buffer, size_t *lenp, loff_t *ppos)
15928 {
15929 @@ -123893,7 +123953,7 @@ index 95b6139..3048623 100644
15930 char buf[NETDEV_RSS_KEY_LEN * 3];
15931
15932 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
15933 -@@ -285,7 +285,7 @@ static struct ctl_table net_core_table[] = {
15934 +@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
15935 .mode = 0444,
15936 .proc_handler = proc_do_rss_key,
15937 },
15938 @@ -123902,7 +123962,7 @@ index 95b6139..3048623 100644
15939 {
15940 .procname = "bpf_jit_enable",
15941 .data = &bpf_jit_enable,
15942 -@@ -409,13 +409,12 @@ static struct ctl_table netns_core_table[] = {
15943 +@@ -419,13 +419,12 @@ static struct ctl_table netns_core_table[] = {
15944
15945 static __net_init int sysctl_core_net_init(struct net *net)
15946 {
15947 @@ -123918,7 +123978,7 @@ index 95b6139..3048623 100644
15948 if (tbl == NULL)
15949 goto err_dup;
15950
15951 -@@ -425,17 +424,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
15952 +@@ -435,17 +434,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
15953 if (net->user_ns != &init_user_ns) {
15954 tbl[0].procname = NULL;
15955 }
15956 @@ -123940,7 +124000,7 @@ index 95b6139..3048623 100644
15957 err_dup:
15958 return -ENOMEM;
15959 }
15960 -@@ -450,7 +448,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
15961 +@@ -460,7 +458,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
15962 kfree(tbl);
15963 }
15964
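[The sysctl rework above follows from constification as well: once the plugin makes the static ctl_table templates const, per-namespace registration can no longer edit them in place, so the patch retypes the working pointer as ctl_table_no_const, kmemdup()s the template, and tweaks the copy (note the tbl[0].procname = NULL to hide an entry from unprivileged user namespaces). The shape of that pattern, with assumed simplified types:]

    #include <stdio.h>
    #include <string.h>

    struct ctl_entry { const char *procname; int mode; };

    /* Constified by default under PaX: lives in .rodata. */
    static const struct ctl_entry net_core_table[] = {
        { "somaxconn", 0644 },
        { "rss_key",   0444 },
    };

    int main(void)
    {
        /* Registration works on a mutable duplicate, never the template. */
        struct ctl_entry tbl[2];
        memcpy(tbl, net_core_table, sizeof(tbl));
        tbl[0].procname = NULL;   /* hide entry in unprivileged namespaces */

        printf("visible entry: %s (mode %o)\n", tbl[1].procname, tbl[1].mode);
        return 0;
    }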
15965 @@ -124118,7 +124178,7 @@ index 59b3e0e..ff060b8 100644
15966 struct dst_entry *dst = NULL;
15967
15968 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
15969 -index cebd9d3..12e9abe 100644
15970 +index f6303b1..d524bab 100644
15971 --- a/net/ipv4/devinet.c
15972 +++ b/net/ipv4/devinet.c
15973 @@ -69,7 +69,8 @@
15974 @@ -124250,7 +124310,7 @@ index d97268e..6ee80d4 100644
15975 return nh->nh_saddr;
15976 }
15977 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
15978 -index 46b9c88..b52cf2f 100644
15979 +index 6414891..30ec9bf 100644
15980 --- a/net/ipv4/inet_connection_sock.c
15981 +++ b/net/ipv4/inet_connection_sock.c
15982 @@ -670,8 +670,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
15983 @@ -124342,7 +124402,7 @@ index 86fa458..5f601b9 100644
15984 p->rate_tokens = 0;
15985 /* 60*HZ is arbitrary, but chosen enough high so that the first
15986 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
15987 -index 1fe55ae..9be62bd 100644
15988 +index b8a0607d..0ef8880 100644
15989 --- a/net/ipv4/ip_fragment.c
15990 +++ b/net/ipv4/ip_fragment.c
15991 @@ -284,7 +284,7 @@ static int ip_frag_too_far(struct ipq *qp)
15992 @@ -124354,7 +124414,7 @@ index 1fe55ae..9be62bd 100644
15993 qp->rid = end;
15994
15995 rc = qp->q.fragments && (end - start) > max;
15996 -@@ -774,12 +774,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
15997 +@@ -775,12 +775,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
15998
15999 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
16000 {
16001 @@ -124369,7 +124429,7 @@ index 1fe55ae..9be62bd 100644
16002 if (!table)
16003 goto err_alloc;
16004
16005 -@@ -793,9 +792,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
16006 +@@ -794,9 +793,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
16007 /* Don't export sysctls to unprivileged users */
16008 if (net->user_ns != &init_user_ns)
16009 table[0].procname = NULL;
16010 @@ -124382,7 +124442,7 @@ index 1fe55ae..9be62bd 100644
16011 if (!hdr)
16012 goto err_reg;
16013
16014 -@@ -803,8 +803,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
16015 +@@ -804,8 +804,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
16016 return 0;
16017
16018 err_reg:
16019 @@ -124449,10 +124509,10 @@ index b1209b6..c2f63ba 100644
16020 ICMP_PROT_UNREACH, 0);
16021 }
16022 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
16023 -index 5f73a7c..bfd78f8 100644
16024 +index a501242..ea6b81d 100644
16025 --- a/net/ipv4/ip_sockglue.c
16026 +++ b/net/ipv4/ip_sockglue.c
16027 -@@ -1308,7 +1308,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
16028 +@@ -1310,7 +1310,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
16029 len = min_t(unsigned int, len, opt->optlen);
16030 if (put_user(len, optlen))
16031 return -EFAULT;
16032 @@ -124462,7 +124522,7 @@ index 5f73a7c..bfd78f8 100644
16033 return -EFAULT;
16034 return 0;
16035 }
16036 -@@ -1441,7 +1442,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
16037 +@@ -1443,7 +1444,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
16038 if (sk->sk_type != SOCK_STREAM)
16039 return -ENOPROTOOPT;
16040
16041 @@ -124658,7 +124718,7 @@ index 4a9e6db..06174e1 100644
16042 pr_err("Unable to proc dir entry\n");
16043 return -ENOMEM;
16044 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
16045 -index e89094a..bd431045 100644
16046 +index aa67e0e..3c65672 100644
16047 --- a/net/ipv4/ping.c
16048 +++ b/net/ipv4/ping.c
16049 @@ -59,7 +59,7 @@ struct ping_table {
16050 @@ -124697,7 +124757,7 @@ index e89094a..bd431045 100644
16051 info, (u8 *)icmph);
16052 #endif
16053 }
16054 -@@ -919,10 +919,10 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
16055 +@@ -921,10 +921,10 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
16056 }
16057
16058 if (inet6_sk(sk)->rxopt.all)
16059 @@ -124710,7 +124770,7 @@ index e89094a..bd431045 100644
16060 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
16061 ip_cmsg_recv(msg, skb);
16062 #endif
16063 -@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
16064 +@@ -1119,7 +1119,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
16065 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
16066 0, sock_i_ino(sp),
16067 atomic_read(&sp->sk_refcnt), sp,
16068 @@ -124758,7 +124818,7 @@ index 3abd9d7..c5e4052 100644
16069
16070 /*
16071 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
16072 -index bc35f18..f94a500 100644
16073 +index 7113bae..0e9e9a6 100644
16074 --- a/net/ipv4/raw.c
16075 +++ b/net/ipv4/raw.c
16076 @@ -323,7 +323,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
16077 @@ -124770,7 +124830,7 @@ index bc35f18..f94a500 100644
16078 kfree_skb(skb);
16079 return NET_RX_DROP;
16080 }
16081 -@@ -781,16 +781,20 @@ static int raw_init(struct sock *sk)
16082 +@@ -783,16 +783,20 @@ static int raw_init(struct sock *sk)
16083
16084 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
16085 {
16086 @@ -124792,7 +124852,7 @@ index bc35f18..f94a500 100644
16087
16088 if (get_user(len, optlen))
16089 goto out;
16090 -@@ -800,8 +804,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
16091 +@@ -802,8 +806,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
16092 if (len > sizeof(struct icmp_filter))
16093 len = sizeof(struct icmp_filter);
16094 ret = -EFAULT;
16095 @@ -124803,7 +124863,7 @@ index bc35f18..f94a500 100644
16096 goto out;
16097 ret = 0;
16098 out: return ret;
16099 -@@ -1030,7 +1034,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
16100 +@@ -1032,7 +1036,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
16101 0, 0L, 0,
16102 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
16103 0, sock_i_ino(sp),
16104 @@ -124813,10 +124873,10 @@ index bc35f18..f94a500 100644
16105
16106 static int raw_seq_show(struct seq_file *seq, void *v)
16107 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
16108 -index 85f184e..2cb2c0b 100644
16109 +index 02c6229..68cc2a6 100644
16110 --- a/net/ipv4/route.c
16111 +++ b/net/ipv4/route.c
16112 -@@ -231,7 +231,7 @@ static const struct seq_operations rt_cache_seq_ops = {
16113 +@@ -232,7 +232,7 @@ static const struct seq_operations rt_cache_seq_ops = {
16114
16115 static int rt_cache_seq_open(struct inode *inode, struct file *file)
16116 {
16117 @@ -124825,7 +124885,7 @@ index 85f184e..2cb2c0b 100644
16118 }
16119
16120 static const struct file_operations rt_cache_seq_fops = {
16121 -@@ -322,7 +322,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
16122 +@@ -323,7 +323,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
16123
16124 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
16125 {
16126 @@ -124834,7 +124894,7 @@ index 85f184e..2cb2c0b 100644
16127 }
16128
16129 static const struct file_operations rt_cpu_seq_fops = {
16130 -@@ -360,7 +360,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
16131 +@@ -361,7 +361,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
16132
16133 static int rt_acct_proc_open(struct inode *inode, struct file *file)
16134 {
16135 @@ -124843,7 +124903,7 @@ index 85f184e..2cb2c0b 100644
16136 }
16137
16138 static const struct file_operations rt_acct_proc_fops = {
16139 -@@ -462,7 +462,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
16140 +@@ -463,7 +463,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
16141
16142 #define IP_IDENTS_SZ 2048u
16143
16144 @@ -124852,7 +124912,7 @@ index 85f184e..2cb2c0b 100644
16145 static u32 *ip_tstamps __read_mostly;
16146
16147 /* In order to protect privacy, we add a perturbation to identifiers
16148 -@@ -472,7 +472,7 @@ static u32 *ip_tstamps __read_mostly;
16149 +@@ -473,7 +473,7 @@ static u32 *ip_tstamps __read_mostly;
16150 u32 ip_idents_reserve(u32 hash, int segs)
16151 {
16152 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
16153 @@ -124861,7 +124921,7 @@ index 85f184e..2cb2c0b 100644
16154 u32 old = ACCESS_ONCE(*p_tstamp);
16155 u32 now = (u32)jiffies;
16156 u32 delta = 0;
16157 -@@ -480,7 +480,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
16158 +@@ -481,7 +481,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
16159 if (old != now && cmpxchg(p_tstamp, old, now) == old)
16160 delta = prandom_u32_max(now - old);
16161
16162 @@ -124870,7 +124930,7 @@ index 85f184e..2cb2c0b 100644
16163 }
16164 EXPORT_SYMBOL(ip_idents_reserve);
16165
16166 -@@ -2706,34 +2706,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
16167 +@@ -2755,34 +2755,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
16168 .maxlen = sizeof(int),
16169 .mode = 0200,
16170 .proc_handler = ipv4_sysctl_rtcache_flush,
16171 @@ -124913,7 +124973,7 @@ index 85f184e..2cb2c0b 100644
16172 err_dup:
16173 return -ENOMEM;
16174 }
16175 -@@ -2756,8 +2756,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
16176 +@@ -2805,8 +2805,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
16177
16178 static __net_init int rt_genid_init(struct net *net)
16179 {
16180 @@ -124924,7 +124984,7 @@ index 85f184e..2cb2c0b 100644
16181 get_random_bytes(&net->ipv4.dev_addr_genid,
16182 sizeof(net->ipv4.dev_addr_genid));
16183 return 0;
16184 -@@ -2801,11 +2801,7 @@ int __init ip_rt_init(void)
16185 +@@ -2850,11 +2850,7 @@ int __init ip_rt_init(void)
16186 int rc = 0;
16187 int cpu;
16188
16189 @@ -125094,7 +125154,7 @@ index d4c5115..f949b08 100644
16190 write_pnet(&ireq->ireq_net, sock_net(sk_listener));
16191 ireq->ireq_family = sk_listener->sk_family;
16192 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
16193 -index d8841a2..d31baa8 100644
16194 +index 8c7e631..d1dfdaf 100644
16195 --- a/net/ipv4/tcp_ipv4.c
16196 +++ b/net/ipv4/tcp_ipv4.c
16197 @@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
16198 @@ -125108,7 +125168,7 @@ index d8841a2..d31baa8 100644
16199 #ifdef CONFIG_TCP_MD5SIG
16200 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
16201 __be32 daddr, __be32 saddr, const struct tcphdr *th);
16202 -@@ -1412,6 +1416,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
16203 +@@ -1420,6 +1424,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
16204 return 0;
16205
16206 reset:
16207 @@ -125118,7 +125178,7 @@ index d8841a2..d31baa8 100644
16208 tcp_v4_send_reset(rsk, skb);
16209 discard:
16210 kfree_skb(skb);
16211 -@@ -1577,12 +1584,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
16212 +@@ -1585,12 +1592,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
16213
16214 lookup:
16215 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
16216 @@ -125141,7 +125201,7 @@ index d8841a2..d31baa8 100644
16217
16218 if (sk->sk_state == TCP_NEW_SYN_RECV) {
16219 struct request_sock *req = inet_reqsk(sk);
16220 -@@ -1665,6 +1679,10 @@ csum_error:
16221 +@@ -1675,6 +1689,10 @@ csum_error:
16222 bad_packet:
16223 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
16224 } else {
16225 @@ -125222,7 +125282,7 @@ index 193ba1f..aeda727 100644
16226 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
16227 /* Has it gone just too far? */
16228 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
16229 -index c438908..5f28ba3 100644
16230 +index 7f8ab46..6d152f7 100644
16231 --- a/net/ipv4/udp.c
16232 +++ b/net/ipv4/udp.c
16233 @@ -87,6 +87,7 @@
16234 @@ -125273,7 +125333,7 @@ index c438908..5f28ba3 100644
16235 daddr = inet->inet_daddr;
16236 dport = inet->inet_dport;
16237 /* Open fast path for connected socket.
16238 -@@ -1204,7 +1221,7 @@ static unsigned int first_packet_length(struct sock *sk)
16239 +@@ -1206,7 +1223,7 @@ static unsigned int first_packet_length(struct sock *sk)
16240 IS_UDPLITE(sk));
16241 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
16242 IS_UDPLITE(sk));
16243 @@ -125282,7 +125342,7 @@ index c438908..5f28ba3 100644
16244 __skb_unlink(skb, rcvq);
16245 __skb_queue_tail(&list_kill, skb);
16246 }
16247 -@@ -1284,6 +1301,10 @@ try_again:
16248 +@@ -1286,6 +1303,10 @@ try_again:
16249 if (!skb)
16250 goto out;
16251
16252 @@ -125293,7 +125353,7 @@ index c438908..5f28ba3 100644
16253 ulen = skb->len - sizeof(struct udphdr);
16254 copied = len;
16255 if (copied > ulen)
16256 -@@ -1316,7 +1337,7 @@ try_again:
16257 +@@ -1318,7 +1339,7 @@ try_again:
16258 if (unlikely(err)) {
16259 trace_kfree_skb(skb, udp_recvmsg);
16260 if (!peeked) {
16261 @@ -125302,7 +125362,7 @@ index c438908..5f28ba3 100644
16262 UDP_INC_STATS_USER(sock_net(sk),
16263 UDP_MIB_INERRORS, is_udplite);
16264 }
16265 -@@ -1610,7 +1631,7 @@ csum_error:
16266 +@@ -1612,7 +1633,7 @@ csum_error:
16267 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
16268 drop:
16269 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
16270 @@ -125311,7 +125371,7 @@ index c438908..5f28ba3 100644
16271 kfree_skb(skb);
16272 return -1;
16273 }
16274 -@@ -1628,7 +1649,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
16275 +@@ -1630,7 +1651,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
16276 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
16277
16278 if (!skb1) {
16279 @@ -125320,7 +125380,7 @@ index c438908..5f28ba3 100644
16280 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
16281 IS_UDPLITE(sk));
16282 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
16283 -@@ -1834,6 +1855,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
16284 +@@ -1836,6 +1857,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
16285 goto csum_error;
16286
16287 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
16288 @@ -125330,7 +125390,7 @@ index c438908..5f28ba3 100644
16289 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
16290
16291 /*
16292 -@@ -2438,7 +2462,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
16293 +@@ -2440,7 +2464,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
16294 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
16295 0, sock_i_ino(sp),
16296 atomic_read(&sp->sk_refcnt), sp,
16297 @@ -125440,7 +125500,7 @@ index 983bb99..ebc39e1 100644
16298 Support for IPsec ESP.
16299
16300 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
16301 -index 1f21087..78bc7e1 100644
16302 +index e8d3da0..c1ab725 100644
16303 --- a/net/ipv6/addrconf.c
16304 +++ b/net/ipv6/addrconf.c
16305 @@ -179,7 +179,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
16306 @@ -125479,7 +125539,7 @@ index 1f21087..78bc7e1 100644
16307
16308 if (ops->ndo_do_ioctl) {
16309 mm_segment_t oldfs = get_fs();
16310 -@@ -3864,16 +3864,23 @@ static const struct file_operations if6_fops = {
16311 +@@ -3867,16 +3867,23 @@ static const struct file_operations if6_fops = {
16312 .release = seq_release_net,
16313 };
16314
16315 @@ -125504,7 +125564,7 @@ index 1f21087..78bc7e1 100644
16316 }
16317
16318 static struct pernet_operations if6_proc_net_ops = {
16319 -@@ -4492,7 +4499,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
16320 +@@ -4495,7 +4502,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
16321 s_ip_idx = ip_idx = cb->args[2];
16322
16323 rcu_read_lock();
16324 @@ -125513,7 +125573,7 @@ index 1f21087..78bc7e1 100644
16325 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
16326 idx = 0;
16327 head = &net->dev_index_head[h];
16328 -@@ -4702,7 +4709,7 @@ static inline size_t inet6_if_nlmsg_size(void)
16329 +@@ -4705,7 +4712,7 @@ static inline size_t inet6_if_nlmsg_size(void)
16330 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
16331 }
16332
16333 @@ -125522,7 +125582,7 @@ index 1f21087..78bc7e1 100644
16334 int items, int bytes)
16335 {
16336 int i;
16337 -@@ -4712,7 +4719,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
16338 +@@ -4715,7 +4722,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
16339 /* Use put_unaligned() because stats may not be aligned for u64. */
16340 put_unaligned(items, &stats[0]);
16341 for (i = 1; i < items; i++)
16342 @@ -125531,7 +125591,7 @@ index 1f21087..78bc7e1 100644
16343
16344 memset(&stats[items], 0, pad);
16345 }
16346 -@@ -5166,7 +5173,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
16347 +@@ -5169,7 +5176,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
16348 rt_genid_bump_ipv6(net);
16349 break;
16350 }
16351 @@ -125540,7 +125600,7 @@ index 1f21087..78bc7e1 100644
16352 }
16353
16354 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
16355 -@@ -5186,7 +5193,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
16356 +@@ -5189,7 +5196,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
16357 int *valp = ctl->data;
16358 int val = *valp;
16359 loff_t pos = *ppos;
16360 @@ -125549,7 +125609,7 @@ index 1f21087..78bc7e1 100644
16361 int ret;
16362
16363 /*
16364 -@@ -5211,7 +5218,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
16365 +@@ -5214,7 +5221,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
16366 {
16367 struct inet6_dev *idev = ctl->extra1;
16368 int min_mtu = IPV6_MIN_MTU;
16369 @@ -125558,7 +125618,7 @@ index 1f21087..78bc7e1 100644
16370
16371 lctl = *ctl;
16372 lctl.extra1 = &min_mtu;
16373 -@@ -5286,7 +5293,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
16374 +@@ -5289,7 +5296,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
16375 int *valp = ctl->data;
16376 int val = *valp;
16377 loff_t pos = *ppos;
16378 @@ -125567,7 +125627,7 @@ index 1f21087..78bc7e1 100644
16379 int ret;
16380
16381 /*
16382 -@@ -5351,7 +5358,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
16383 +@@ -5354,7 +5361,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
16384 int err;
16385 struct in6_addr addr;
16386 char str[IPV6_MAX_STRLEN];
16387 @@ -125576,7 +125636,7 @@ index 1f21087..78bc7e1 100644
16388 struct net *net = ctl->extra2;
16389 struct ipv6_stable_secret *secret = ctl->data;
16390
16391 -@@ -5420,7 +5427,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
16392 +@@ -5423,7 +5430,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
16393 int *valp = ctl->data;
16394 int val = *valp;
16395 loff_t pos = *ppos;
16396 @@ -125599,10 +125659,10 @@ index 9f5137c..a7eabd9 100644
16397 err = ipv6_init_mibs(net);
16398 if (err)
16399 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
16400 -index 517c55b..b6462b8 100644
16401 +index 4281621..b1e9d2d 100644
16402 --- a/net/ipv6/datagram.c
16403 +++ b/net/ipv6/datagram.c
16404 -@@ -979,5 +979,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
16405 +@@ -982,5 +982,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
16406 0,
16407 sock_i_ino(sp),
16408 atomic_read(&sp->sk_refcnt), sp,
16409 @@ -126076,10 +126136,10 @@ index 45f5ae5..1c57cbe 100644
16410 return -ENOMEM;
16411 }
16412 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
16413 -index 826e6aa..02a2386 100644
16414 +index 3f164d3..cd507d9 100644
16415 --- a/net/ipv6/route.c
16416 +++ b/net/ipv6/route.c
16417 -@@ -3504,7 +3504,7 @@ struct ctl_table ipv6_route_table_template[] = {
16418 +@@ -3503,7 +3503,7 @@ struct ctl_table ipv6_route_table_template[] = {
16419
16420 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
16421 {
16422 @@ -126124,7 +126184,7 @@ index 45243bb..cdb398e 100644
16423 struct ctl_table *ipv6_icmp_table;
16424 int err;
16425 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
16426 -index bd100b4..72c83d8 100644
16427 +index b8d4056..9509e20 100644
16428 --- a/net/ipv6/tcp_ipv6.c
16429 +++ b/net/ipv6/tcp_ipv6.c
16430 @@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
16431 @@ -126138,7 +126198,7 @@ index bd100b4..72c83d8 100644
16432 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
16433 {
16434 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
16435 -@@ -1268,6 +1272,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
16436 +@@ -1269,6 +1273,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
16437 return 0;
16438
16439 reset:
16440 @@ -126148,7 +126208,7 @@ index bd100b4..72c83d8 100644
16441 tcp_v6_send_reset(sk, skb);
16442 discard:
16443 if (opt_skb)
16444 -@@ -1378,12 +1385,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
16445 +@@ -1379,12 +1386,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
16446 lookup:
16447 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
16448 inet6_iif(skb));
16449 @@ -126171,7 +126231,7 @@ index bd100b4..72c83d8 100644
16450
16451 if (sk->sk_state == TCP_NEW_SYN_RECV) {
16452 struct request_sock *req = inet_reqsk(sk);
16453 -@@ -1471,6 +1486,10 @@ csum_error:
16454 +@@ -1472,6 +1487,10 @@ csum_error:
16455 bad_packet:
16456 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
16457 } else {
16458 @@ -126388,7 +126448,7 @@ index b9ac598..f88cc56 100644
16459 return;
16460
16461 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
16462 -index 435608c..1b7d8bb 100644
16463 +index 20ab7b2..1b7d8bb 100644
16464 --- a/net/iucv/af_iucv.c
16465 +++ b/net/iucv/af_iucv.c
16466 @@ -685,10 +685,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
16467 @@ -126404,16 +126464,6 @@ index 435608c..1b7d8bb 100644
16468 }
16469 memcpy(iucv->src_name, name, 8);
16470 }
16471 -@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
16472 - if (!addr || addr->sa_family != AF_IUCV)
16473 - return -EINVAL;
16474 -
16475 -+ if (addr_len < sizeof(struct sockaddr_iucv))
16476 -+ return -EINVAL;
16477 -+
16478 - lock_sock(sk);
16479 - if (sk->sk_state != IUCV_OPEN) {
16480 - err = -EBADFD;
16481 diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
16482 index 7eaa000..2fa7f35 100644
16483 --- a/net/iucv/iucv.c
16484 @@ -126917,7 +126967,7 @@ index d824c38..f3ded28 100644
16485 bip_ipn_set64(mmie->sequence_number, pn64);
16486
16487 diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
16488 -index 7079cd3..abf6a59 100644
16489 +index 7079cd3..c299f08 100644
16490 --- a/net/mac802154/iface.c
16491 +++ b/net/mac802154/iface.c
16492 @@ -386,7 +386,7 @@ static int ieee802154_header_create(struct sk_buff *skb,
16493 @@ -126929,6 +126979,15 @@ index 7079cd3..abf6a59 100644
16494
16495 if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
16496 return -EINVAL;
16497 +@@ -451,7 +451,7 @@ static int mac802154_header_create(struct sk_buff *skb,
16498 + memset(&hdr.fc, 0, sizeof(hdr.fc));
16499 + hdr.fc.type = IEEE802154_FC_TYPE_DATA;
16500 + hdr.fc.ack_request = wpan_dev->ackreq;
16501 +- hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
16502 ++ hdr.seq = atomic_inc_return_unchecked(&dev->ieee802154_ptr->dsn) & 0xFF;
16503 +
16504 + /* TODO currently a workaround to give zero cb block to set
16505 + * security parameters defaults according MIB.
16506 @@ -576,7 +576,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
16507 get_random_bytes(&tmp, sizeof(tmp));
16508 atomic_set(&wpan_dev->bsn, tmp);
16509 @@ -128667,10 +128726,10 @@ index ec52912..059504b 100644
16510
16511 /* Initialize IPv6 support and register with socket layer. */
16512 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
16513 -index 3d9ea9a..d3aee1a 100644
16514 +index 8b4ff31..92b21ee 100644
16515 --- a/net/sctp/protocol.c
16516 +++ b/net/sctp/protocol.c
16517 -@@ -856,8 +856,10 @@ int sctp_register_af(struct sctp_af *af)
16518 +@@ -858,8 +858,10 @@ int sctp_register_af(struct sctp_af *af)
16519 return 0;
16520 }
16521
16522 @@ -128682,7 +128741,7 @@ index 3d9ea9a..d3aee1a 100644
16523 return 1;
16524 }
16525
16526 -@@ -987,7 +989,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
16527 +@@ -989,7 +991,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
16528
16529 static struct sctp_af sctp_af_inet;
16530
16531 @@ -128691,7 +128750,7 @@ index 3d9ea9a..d3aee1a 100644
16532 .event_msgname = sctp_inet_event_msgname,
16533 .skb_msgname = sctp_inet_skb_msgname,
16534 .af_supported = sctp_inet_af_supported,
16535 -@@ -1059,7 +1061,7 @@ static const struct net_protocol sctp_protocol = {
16536 +@@ -1061,7 +1063,7 @@ static const struct net_protocol sctp_protocol = {
16537 };
16538
16539 /* IPv4 address related functions. */
16540 @@ -128700,7 +128759,7 @@ index 3d9ea9a..d3aee1a 100644
16541 .sa_family = AF_INET,
16542 .sctp_xmit = sctp_v4_xmit,
16543 .setsockopt = ip_setsockopt,
16544 -@@ -1143,7 +1145,7 @@ static void sctp_v4_pf_init(void)
16545 +@@ -1145,7 +1147,7 @@ static void sctp_v4_pf_init(void)
16546
16547 static void sctp_v4_pf_exit(void)
16548 {
16549 @@ -128854,7 +128913,7 @@ index 22c2bf3..f1f08c8 100644
16550
16551 /*
16552 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
16553 -index ef1d90f..23b38b3 100644
16554 +index be1489f..5364cd7 100644
16555 --- a/net/sctp/socket.c
16556 +++ b/net/sctp/socket.c
16557 @@ -2192,11 +2192,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
16558 @@ -129295,7 +129354,7 @@ index 1095be9..815d777 100644
16559 /* make a copy for the caller */
16560 *handle = ctxh;
16561 diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
16562 -index 5e4f815..15e403f 100644
16563 +index 21e2035..2e567b1 100644
16564 --- a/net/sunrpc/cache.c
16565 +++ b/net/sunrpc/cache.c
16566 @@ -1623,7 +1623,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
16567 @@ -129669,7 +129728,7 @@ index 1eadc95..f6ccd08 100644
16568
16569 kfree_skb(args);
16570 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
16571 -index 350cca3..a108fc5 100644
16572 +index 69ee2ee..3352a8d 100644
16573 --- a/net/tipc/subscr.c
16574 +++ b/net/tipc/subscr.c
16575 @@ -75,7 +75,7 @@ static void tipc_subscrp_send_event(struct tipc_subscription *sub,
16576 @@ -129682,7 +129741,7 @@ index 350cca3..a108fc5 100644
16577 sub->evt.event = htohl(event, sub->swap);
16578 sub->evt.found_lower = htohl(found_lower, sub->swap);
16579 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
16580 -index e3f85bc..a6c35a6 100644
16581 +index 898a53a..5a23004 100644
16582 --- a/net/unix/af_unix.c
16583 +++ b/net/unix/af_unix.c
16584 @@ -919,6 +919,12 @@ static struct sock *unix_find_other(struct net *net,
16585 @@ -129732,33 +129791,7 @@ index e3f85bc..a6c35a6 100644
16586 return err;
16587 }
16588
16589 -@@ -1496,7 +1515,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
16590 - UNIXCB(skb).fp = NULL;
16591 -
16592 - for (i = scm->fp->count-1; i >= 0; i--)
16593 -- unix_notinflight(scm->fp->fp[i]);
16594 -+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
16595 - }
16596 -
16597 - static void unix_destruct_scm(struct sk_buff *skb)
16598 -@@ -1561,7 +1580,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
16599 - return -ENOMEM;
16600 -
16601 - for (i = scm->fp->count - 1; i >= 0; i--)
16602 -- unix_inflight(scm->fp->fp[i]);
16603 -+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
16604 - return max_level;
16605 - }
16606 -
16607 -@@ -2332,6 +2351,7 @@ again:
16608 -
16609 - if (signal_pending(current)) {
16610 - err = sock_intr_errno(timeo);
16611 -+ scm_destroy(&scm);
16612 - goto out;
16613 - }
16614 -
16615 -@@ -2796,9 +2816,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
16616 +@@ -2806,9 +2825,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
16617 seq_puts(seq, "Num RefCount Protocol Flags Type St "
16618 "Inode Path\n");
16619 else {
16620 @@ -129773,7 +129806,7 @@ index e3f85bc..a6c35a6 100644
16621
16622 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
16623 s,
16624 -@@ -2823,10 +2847,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
16625 +@@ -2833,10 +2856,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
16626 seq_putc(seq, '@');
16627 i++;
16628 }
16629 @@ -129808,7 +129841,7 @@ index e3f85bc..a6c35a6 100644
16630 }
16631
16632 diff --git a/net/unix/diag.c b/net/unix/diag.c
16633 -index c512f64..284072f 100644
16634 +index 4d96797..ab6a813 100644
16635 --- a/net/unix/diag.c
16636 +++ b/net/unix/diag.c
16637 @@ -299,7 +299,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
16638 @@ -129820,42 +129853,6 @@ index c512f64..284072f 100644
16639 .dump = unix_diag_dump,
16640 };
16641 return netlink_dump_start(net->diag_nlsk, skb, h, &c);
16642 -diff --git a/net/unix/garbage.c b/net/unix/garbage.c
16643 -index 8fcdc22..6a0d485 100644
16644 ---- a/net/unix/garbage.c
16645 -+++ b/net/unix/garbage.c
16646 -@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
16647 - * descriptor if it is for an AF_UNIX socket.
16648 - */
16649 -
16650 --void unix_inflight(struct file *fp)
16651 -+void unix_inflight(struct user_struct *user, struct file *fp)
16652 - {
16653 - struct sock *s = unix_get_socket(fp);
16654 -
16655 -@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
16656 - }
16657 - unix_tot_inflight++;
16658 - }
16659 -- fp->f_cred->user->unix_inflight++;
16660 -+ user->unix_inflight++;
16661 - spin_unlock(&unix_gc_lock);
16662 - }
16663 -
16664 --void unix_notinflight(struct file *fp)
16665 -+void unix_notinflight(struct user_struct *user, struct file *fp)
16666 - {
16667 - struct sock *s = unix_get_socket(fp);
16668 -
16669 -@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
16670 - list_del_init(&u->link);
16671 - unix_tot_inflight--;
16672 - }
16673 -- fp->f_cred->user->unix_inflight--;
16674 -+ user->unix_inflight--;
16675 - spin_unlock(&unix_gc_lock);
16676 - }
16677 -
16678 diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
16679 index b3d5150..ff3a837 100644
16680 --- a/net/unix/sysctl_net_unix.c
16681 @@ -130835,7 +130832,7 @@ index 25cf0c2..eb178ce 100644
16682 return -1;
16683 if (!exact1 && exact2)
16684 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
16685 -index dacf71a..f67b2c4 100755
16686 +index ba6c34e..ea10bce 100755
16687 --- a/scripts/link-vmlinux.sh
16688 +++ b/scripts/link-vmlinux.sh
16689 @@ -179,7 +179,7 @@ else
16690 @@ -132596,26 +132593,6 @@ index 1450f85..a91e0bc 100644
16691 rt_genid_bump_all(net);
16692 }
16693 rtnl_unlock();
16694 -diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
16695 -index ff81026..7c57c7f 100644
16696 ---- a/security/smack/smack_lsm.c
16697 -+++ b/security/smack/smack_lsm.c
16698 -@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
16699 - */
16700 - static inline unsigned int smk_ptrace_mode(unsigned int mode)
16701 - {
16702 -- switch (mode) {
16703 -- case PTRACE_MODE_READ:
16704 -- return MAY_READ;
16705 -- case PTRACE_MODE_ATTACH:
16706 -+ if (mode & PTRACE_MODE_ATTACH)
16707 - return MAY_READWRITE;
16708 -- }
16709 -+ if (mode & PTRACE_MODE_READ)
16710 -+ return MAY_READ;
16711 -
16712 - return 0;
16713 - }
16714 diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
16715 index 2367b10..a0c3c51 100644
16716 --- a/security/tomoyo/file.c
16717 @@ -132775,27 +132752,9 @@ index 90c605e..bf3a29a 100644
16718 help
16719 This selects Yama, which extends DAC support with additional
16720 diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
16721 -index d3c19c9..fb00554 100644
16722 +index cb6ed10..fb00554 100644
16723 --- a/security/yama/yama_lsm.c
16724 +++ b/security/yama/yama_lsm.c
16725 -@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
16726 - int rc = 0;
16727 -
16728 - /* require ptrace target be a child of ptracer on attach */
16729 -- if (mode == PTRACE_MODE_ATTACH) {
16730 -+ if (mode & PTRACE_MODE_ATTACH) {
16731 - switch (ptrace_scope) {
16732 - case YAMA_SCOPE_DISABLED:
16733 - /* No additional restrictions. */
16734 -@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
16735 - }
16736 - }
16737 -
16738 -- if (rc) {
16739 -+ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
16740 - printk_ratelimited(KERN_NOTICE
16741 - "ptrace of pid %d was attempted by: %s (pid %d)\n",
16742 - child->pid, current->comm, current->pid);
16743 @@ -357,7 +357,7 @@ static struct security_hook_list yama_hooks[] = {
16744 static int yama_dointvec_minmax(struct ctl_table *table, int write,
16745 void __user *buffer, size_t *lenp, loff_t *ppos)
16746 @@ -153835,10 +153794,10 @@ index 0000000..fc58e16
16747 +}
16748 diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
16749 new file mode 100644
16750 -index 0000000..acc340b
16751 +index 0000000..9e45ae9
16752 --- /dev/null
16753 +++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
16754 -@@ -0,0 +1,21510 @@
16755 +@@ -0,0 +1,21511 @@
16756 +enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL
16757 +enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL
16758 +enable_so_v9fs_xattr_get_acl_fndecl_4 v9fs_xattr_get_acl fndecl 5 4 NULL
16759 @@ -173167,6 +173126,7 @@ index 0000000..acc340b
16760 +enable_so_virtqueue_add_fndecl_58841 virtqueue_add fndecl 3 58841 NULL
16761 +enable_so_nr_scratch_dwc3_58852 nr_scratch dwc3 0 58852 NULL
16762 +enable_so_ms_lib_read_extrablock_fndecl_58857 ms_lib_read_extrablock fndecl 4 58857 NULL
16763 ++enable_so_ucs2_utf8size_fndecl_58859 ucs2_utf8size fndecl 0 58859 NULL
16764 +enable_so_exofs_iget_fndecl_58862 exofs_iget fndecl 2 58862 NULL
16765 +enable_so_vid_batadv_softif_vlan_58864 vid batadv_softif_vlan 0 58864 NULL nohasharray
16766 +enable_so_pd_groupsize_ptlrpcd_58864 pd_groupsize ptlrpcd 0 58864 &enable_so_vid_batadv_softif_vlan_58864
16767
16768 diff --git a/4.4.3/4425_grsec_remove_EI_PAX.patch b/4.4.4/4425_grsec_remove_EI_PAX.patch
16769 similarity index 100%
16770 rename from 4.4.3/4425_grsec_remove_EI_PAX.patch
16771 rename to 4.4.4/4425_grsec_remove_EI_PAX.patch
16772
16773 diff --git a/4.4.3/4427_force_XATTR_PAX_tmpfs.patch b/4.4.4/4427_force_XATTR_PAX_tmpfs.patch
16774 similarity index 100%
16775 rename from 4.4.3/4427_force_XATTR_PAX_tmpfs.patch
16776 rename to 4.4.4/4427_force_XATTR_PAX_tmpfs.patch
16777
16778 diff --git a/4.4.3/4430_grsec-remove-localversion-grsec.patch b/4.4.4/4430_grsec-remove-localversion-grsec.patch
16779 similarity index 100%
16780 rename from 4.4.3/4430_grsec-remove-localversion-grsec.patch
16781 rename to 4.4.4/4430_grsec-remove-localversion-grsec.patch
16782
16783 diff --git a/4.4.3/4435_grsec-mute-warnings.patch b/4.4.4/4435_grsec-mute-warnings.patch
16784 similarity index 100%
16785 rename from 4.4.3/4435_grsec-mute-warnings.patch
16786 rename to 4.4.4/4435_grsec-mute-warnings.patch
16787
16788 diff --git a/4.4.3/4440_grsec-remove-protected-paths.patch b/4.4.4/4440_grsec-remove-protected-paths.patch
16789 similarity index 100%
16790 rename from 4.4.3/4440_grsec-remove-protected-paths.patch
16791 rename to 4.4.4/4440_grsec-remove-protected-paths.patch
16792
16793 diff --git a/4.4.3/4450_grsec-kconfig-default-gids.patch b/4.4.4/4450_grsec-kconfig-default-gids.patch
16794 similarity index 100%
16795 rename from 4.4.3/4450_grsec-kconfig-default-gids.patch
16796 rename to 4.4.4/4450_grsec-kconfig-default-gids.patch
16797
16798 diff --git a/4.4.3/4465_selinux-avc_audit-log-curr_ip.patch b/4.4.4/4465_selinux-avc_audit-log-curr_ip.patch
16799 similarity index 100%
16800 rename from 4.4.3/4465_selinux-avc_audit-log-curr_ip.patch
16801 rename to 4.4.4/4465_selinux-avc_audit-log-curr_ip.patch
16802
16803 diff --git a/4.4.3/4470_disable-compat_vdso.patch b/4.4.4/4470_disable-compat_vdso.patch
16804 similarity index 100%
16805 rename from 4.4.3/4470_disable-compat_vdso.patch
16806 rename to 4.4.4/4470_disable-compat_vdso.patch
16807
16808 diff --git a/4.4.3/4475_emutramp_default_on.patch b/4.4.4/4475_emutramp_default_on.patch
16809 similarity index 100%
16810 rename from 4.4.3/4475_emutramp_default_on.patch
16811 rename to 4.4.4/4475_emutramp_default_on.patch