From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2455 - genpatches-2.6/trunk/3.10
Date: Mon, 29 Jul 2013 00:39:25 +0000
Message-Id: <20130729003920.5E50F2171C@flycatcher.gentoo.org>
1 Author: mpagano
2 Date: 2013-07-29 00:39:20 +0000 (Mon, 29 Jul 2013)
3 New Revision: 2455
4
5 Added:
6 genpatches-2.6/trunk/3.10/1003_linux-3.10.4.patch
7 Modified:
8 genpatches-2.6/trunk/3.10/0000_README
9 Log:
10 Linux patch 3.10.4
11
12 Modified: genpatches-2.6/trunk/3.10/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/3.10/0000_README 2013-07-26 12:37:10 UTC (rev 2454)
15 +++ genpatches-2.6/trunk/3.10/0000_README 2013-07-29 00:39:20 UTC (rev 2455)
16 @@ -51,6 +51,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 3.10.3
19
20 +Patch: 1003_linux-3.10.4.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 3.10.4
23 +
24 Patch: 1500_XATTR_USER_PREFIX.patch
25 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
26 Desc: Support for namespace user.pax.* on tmpfs.
27
28 Added: genpatches-2.6/trunk/3.10/1003_linux-3.10.4.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/3.10/1003_linux-3.10.4.patch (rev 0)
31 +++ genpatches-2.6/trunk/3.10/1003_linux-3.10.4.patch 2013-07-29 00:39:20 UTC (rev 2455)
32 @@ -0,0 +1,3086 @@
33 +diff --git a/Makefile b/Makefile
34 +index b548552..b4df9b2 100644
35 +--- a/Makefile
36 ++++ b/Makefile
37 +@@ -1,6 +1,6 @@
38 + VERSION = 3
39 + PATCHLEVEL = 10
40 +-SUBLEVEL = 3
41 ++SUBLEVEL = 4
42 + EXTRAVERSION =
43 + NAME = Unicycling Gorilla
44 +
45 +diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
46 +index a7cd2cf..3490a24 100644
47 +--- a/arch/arm/mach-footbridge/dc21285.c
48 ++++ b/arch/arm/mach-footbridge/dc21285.c
49 +@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
50 +
51 + sys->mem_offset = DC21285_PCI_MEM;
52 +
53 +- pci_ioremap_io(0, DC21285_PCI_IO);
54 +-
55 + pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
56 + pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
57 +
58 +diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
59 +index 34fffdf..5645536 100644
60 +--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
61 ++++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
62 +@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
63 + }
64 + };
65 +
66 +-static struct clk init_clocks[] = {
67 +- {
68 +- .name = "lcd",
69 +- .parent = &clk_h,
70 +- .enable = s3c2410_clkcon_enable,
71 +- .ctrlbit = S3C2410_CLKCON_LCDC,
72 +- }, {
73 +- .name = "gpio",
74 +- .parent = &clk_p,
75 +- .enable = s3c2410_clkcon_enable,
76 +- .ctrlbit = S3C2410_CLKCON_GPIO,
77 +- }, {
78 +- .name = "usb-host",
79 +- .parent = &clk_h,
80 +- .enable = s3c2410_clkcon_enable,
81 +- .ctrlbit = S3C2410_CLKCON_USBH,
82 +- }, {
83 +- .name = "usb-device",
84 +- .parent = &clk_h,
85 +- .enable = s3c2410_clkcon_enable,
86 +- .ctrlbit = S3C2410_CLKCON_USBD,
87 +- }, {
88 +- .name = "timers",
89 +- .parent = &clk_p,
90 +- .enable = s3c2410_clkcon_enable,
91 +- .ctrlbit = S3C2410_CLKCON_PWMT,
92 +- }, {
93 +- .name = "uart",
94 +- .devname = "s3c2410-uart.0",
95 +- .parent = &clk_p,
96 +- .enable = s3c2410_clkcon_enable,
97 +- .ctrlbit = S3C2410_CLKCON_UART0,
98 +- }, {
99 +- .name = "uart",
100 +- .devname = "s3c2410-uart.1",
101 +- .parent = &clk_p,
102 +- .enable = s3c2410_clkcon_enable,
103 +- .ctrlbit = S3C2410_CLKCON_UART1,
104 +- }, {
105 +- .name = "uart",
106 +- .devname = "s3c2410-uart.2",
107 +- .parent = &clk_p,
108 +- .enable = s3c2410_clkcon_enable,
109 +- .ctrlbit = S3C2410_CLKCON_UART2,
110 +- }, {
111 +- .name = "rtc",
112 +- .parent = &clk_p,
113 +- .enable = s3c2410_clkcon_enable,
114 +- .ctrlbit = S3C2410_CLKCON_RTC,
115 +- }, {
116 +- .name = "watchdog",
117 +- .parent = &clk_p,
118 +- .ctrlbit = 0,
119 +- }, {
120 +- .name = "usb-bus-host",
121 +- .parent = &clk_usb_bus,
122 +- }, {
123 +- .name = "usb-bus-gadget",
124 +- .parent = &clk_usb_bus,
125 +- },
126 ++static struct clk clk_lcd = {
127 ++ .name = "lcd",
128 ++ .parent = &clk_h,
129 ++ .enable = s3c2410_clkcon_enable,
130 ++ .ctrlbit = S3C2410_CLKCON_LCDC,
131 ++};
132 ++
133 ++static struct clk clk_gpio = {
134 ++ .name = "gpio",
135 ++ .parent = &clk_p,
136 ++ .enable = s3c2410_clkcon_enable,
137 ++ .ctrlbit = S3C2410_CLKCON_GPIO,
138 ++};
139 ++
140 ++static struct clk clk_usb_host = {
141 ++ .name = "usb-host",
142 ++ .parent = &clk_h,
143 ++ .enable = s3c2410_clkcon_enable,
144 ++ .ctrlbit = S3C2410_CLKCON_USBH,
145 ++};
146 ++
147 ++static struct clk clk_usb_device = {
148 ++ .name = "usb-device",
149 ++ .parent = &clk_h,
150 ++ .enable = s3c2410_clkcon_enable,
151 ++ .ctrlbit = S3C2410_CLKCON_USBD,
152 ++};
153 ++
154 ++static struct clk clk_timers = {
155 ++ .name = "timers",
156 ++ .parent = &clk_p,
157 ++ .enable = s3c2410_clkcon_enable,
158 ++ .ctrlbit = S3C2410_CLKCON_PWMT,
159 ++};
160 ++
161 ++struct clk s3c24xx_clk_uart0 = {
162 ++ .name = "uart",
163 ++ .devname = "s3c2410-uart.0",
164 ++ .parent = &clk_p,
165 ++ .enable = s3c2410_clkcon_enable,
166 ++ .ctrlbit = S3C2410_CLKCON_UART0,
167 ++};
168 ++
169 ++struct clk s3c24xx_clk_uart1 = {
170 ++ .name = "uart",
171 ++ .devname = "s3c2410-uart.1",
172 ++ .parent = &clk_p,
173 ++ .enable = s3c2410_clkcon_enable,
174 ++ .ctrlbit = S3C2410_CLKCON_UART1,
175 ++};
176 ++
177 ++struct clk s3c24xx_clk_uart2 = {
178 ++ .name = "uart",
179 ++ .devname = "s3c2410-uart.2",
180 ++ .parent = &clk_p,
181 ++ .enable = s3c2410_clkcon_enable,
182 ++ .ctrlbit = S3C2410_CLKCON_UART2,
183 ++};
184 ++
185 ++static struct clk clk_rtc = {
186 ++ .name = "rtc",
187 ++ .parent = &clk_p,
188 ++ .enable = s3c2410_clkcon_enable,
189 ++ .ctrlbit = S3C2410_CLKCON_RTC,
190 ++};
191 ++
192 ++static struct clk clk_watchdog = {
193 ++ .name = "watchdog",
194 ++ .parent = &clk_p,
195 ++ .ctrlbit = 0,
196 ++};
197 ++
198 ++static struct clk clk_usb_bus_host = {
199 ++ .name = "usb-bus-host",
200 ++ .parent = &clk_usb_bus,
201 ++};
202 ++
203 ++static struct clk clk_usb_bus_gadget = {
204 ++ .name = "usb-bus-gadget",
205 ++ .parent = &clk_usb_bus,
206 ++};
207 ++
208 ++static struct clk *init_clocks[] = {
209 ++ &clk_lcd,
210 ++ &clk_gpio,
211 ++ &clk_usb_host,
212 ++ &clk_usb_device,
213 ++ &clk_timers,
214 ++ &s3c24xx_clk_uart0,
215 ++ &s3c24xx_clk_uart1,
216 ++ &s3c24xx_clk_uart2,
217 ++ &clk_rtc,
218 ++ &clk_watchdog,
219 ++ &clk_usb_bus_host,
220 ++ &clk_usb_bus_gadget,
221 + };
222 +
223 + /* s3c2410_baseclk_add()
224 +@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
225 + {
226 + unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
227 + unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
228 +- struct clk *clkp;
229 + struct clk *xtal;
230 + int ret;
231 + int ptr;
232 +@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
233 +
234 + /* register clocks from clock array */
235 +
236 +- clkp = init_clocks;
237 +- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
238 ++ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
239 ++ struct clk *clkp = init_clocks[ptr];
240 ++
241 + /* ensure that we note the clock state */
242 +
243 + clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
244 +diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
245 +index 1069b56..aaf006d 100644
246 +--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
247 ++++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
248 +@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
249 + CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
250 + CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
251 + CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
252 ++ CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
253 ++ CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
254 ++ CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
255 + CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
256 + };
257 +
258 +diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
259 +index a62753d..df45d6e 100644
260 +--- a/arch/arm/plat-samsung/include/plat/clock.h
261 ++++ b/arch/arm/plat-samsung/include/plat/clock.h
262 +@@ -83,6 +83,11 @@ extern struct clk clk_ext;
263 + extern struct clksrc_clk clk_epllref;
264 + extern struct clksrc_clk clk_esysclk;
265 +
266 ++/* S3C24XX UART clocks */
267 ++extern struct clk s3c24xx_clk_uart0;
268 ++extern struct clk s3c24xx_clk_uart1;
269 ++extern struct clk s3c24xx_clk_uart2;
270 ++
271 + /* S3C64XX specific clocks */
272 + extern struct clk clk_h2;
273 + extern struct clk clk_27m;
274 +diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
275 +index 1e1e18c..2a75ff2 100644
276 +--- a/arch/mips/cavium-octeon/setup.c
277 ++++ b/arch/mips/cavium-octeon/setup.c
278 +@@ -7,6 +7,7 @@
279 + * Copyright (C) 2008, 2009 Wind River Systems
280 + * written by Ralf Baechle <ralf@linux-mips.org>
281 + */
282 ++#include <linux/compiler.h>
283 + #include <linux/init.h>
284 + #include <linux/kernel.h>
285 + #include <linux/console.h>
286 +@@ -712,7 +713,7 @@ void __init prom_init(void)
287 + if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
288 + pr_info("Skipping L2 locking due to reduced L2 cache size\n");
289 + } else {
290 +- uint32_t ebase = read_c0_ebase() & 0x3ffff000;
291 ++ uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
292 + #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
293 + /* TLB refill */
294 + cvmx_l2c_lock_mem_region(ebase, 0x100);
295 +diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
296 +index 961b87f..f76389a 100644
297 +--- a/arch/sparc/kernel/asm-offsets.c
298 ++++ b/arch/sparc/kernel/asm-offsets.c
299 +@@ -49,6 +49,8 @@ int foo(void)
300 + DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
301 + BLANK();
302 + DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
303 ++ BLANK();
304 ++ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
305 +
306 + /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
307 + return 0;
308 +diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
309 +index 44aad32..969f964 100644
310 +--- a/arch/sparc/mm/hypersparc.S
311 ++++ b/arch/sparc/mm/hypersparc.S
312 +@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
313 +
314 + /* The things we do for performance... */
315 + hypersparc_flush_cache_range:
316 +- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
317 ++ ld [%o0 + VMA_VM_MM], %o0
318 + #ifndef CONFIG_SMP
319 + ld [%o0 + AOFF_mm_context], %g1
320 + cmp %g1, -1
321 +@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
322 + */
323 + /* Verified, my ass... */
324 + hypersparc_flush_cache_page:
325 +- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
326 ++ ld [%o0 + VMA_VM_MM], %o0
327 + ld [%o0 + AOFF_mm_context], %g2
328 + #ifndef CONFIG_SMP
329 + cmp %g2, -1
330 +@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
331 + sta %g5, [%g1] ASI_M_MMUREGS
332 +
333 + hypersparc_flush_tlb_range:
334 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
335 ++ ld [%o0 + VMA_VM_MM], %o0
336 + mov SRMMU_CTX_REG, %g1
337 + ld [%o0 + AOFF_mm_context], %o3
338 + lda [%g1] ASI_M_MMUREGS, %g5
339 +@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
340 + sta %g5, [%g1] ASI_M_MMUREGS
341 +
342 + hypersparc_flush_tlb_page:
343 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
344 ++ ld [%o0 + VMA_VM_MM], %o0
345 + mov SRMMU_CTX_REG, %g1
346 + ld [%o0 + AOFF_mm_context], %o3
347 + andn %o1, (PAGE_SIZE - 1), %o1
348 +diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
349 +index c801c39..5d2b88d 100644
350 +--- a/arch/sparc/mm/swift.S
351 ++++ b/arch/sparc/mm/swift.S
352 +@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
353 +
354 + .globl swift_flush_cache_range
355 + swift_flush_cache_range:
356 +- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
357 ++ ld [%o0 + VMA_VM_MM], %o0
358 + sub %o2, %o1, %o2
359 + sethi %hi(4096), %o3
360 + cmp %o2, %o3
361 +@@ -116,7 +116,7 @@ swift_flush_cache_range:
362 +
363 + .globl swift_flush_cache_page
364 + swift_flush_cache_page:
365 +- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
366 ++ ld [%o0 + VMA_VM_MM], %o0
367 + 70:
368 + ld [%o0 + AOFF_mm_context], %g2
369 + cmp %g2, -1
370 +@@ -219,7 +219,7 @@ swift_flush_sig_insns:
371 + .globl swift_flush_tlb_range
372 + .globl swift_flush_tlb_all
373 + swift_flush_tlb_range:
374 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
375 ++ ld [%o0 + VMA_VM_MM], %o0
376 + swift_flush_tlb_mm:
377 + ld [%o0 + AOFF_mm_context], %g2
378 + cmp %g2, -1
379 +@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
380 +
381 + .globl swift_flush_tlb_page
382 + swift_flush_tlb_page:
383 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
384 ++ ld [%o0 + VMA_VM_MM], %o0
385 + mov SRMMU_CTX_REG, %g1
386 + ld [%o0 + AOFF_mm_context], %o3
387 + andn %o1, (PAGE_SIZE - 1), %o1
388 +diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
389 +index 4e55e8f..bf10a34 100644
390 +--- a/arch/sparc/mm/tsunami.S
391 ++++ b/arch/sparc/mm/tsunami.S
392 +@@ -24,7 +24,7 @@
393 + /* Sliiick... */
394 + tsunami_flush_cache_page:
395 + tsunami_flush_cache_range:
396 +- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
397 ++ ld [%o0 + VMA_VM_MM], %o0
398 + tsunami_flush_cache_mm:
399 + ld [%o0 + AOFF_mm_context], %g2
400 + cmp %g2, -1
401 +@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
402 +
403 + /* More slick stuff... */
404 + tsunami_flush_tlb_range:
405 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
406 ++ ld [%o0 + VMA_VM_MM], %o0
407 + tsunami_flush_tlb_mm:
408 + ld [%o0 + AOFF_mm_context], %g2
409 + cmp %g2, -1
410 +@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
411 +
412 + /* This one can be done in a fine grained manner... */
413 + tsunami_flush_tlb_page:
414 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
415 ++ ld [%o0 + VMA_VM_MM], %o0
416 + mov SRMMU_CTX_REG, %g1
417 + ld [%o0 + AOFF_mm_context], %o3
418 + andn %o1, (PAGE_SIZE - 1), %o1
419 +diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
420 +index bf8ee06..852257f 100644
421 +--- a/arch/sparc/mm/viking.S
422 ++++ b/arch/sparc/mm/viking.S
423 +@@ -108,7 +108,7 @@ viking_mxcc_flush_page:
424 + viking_flush_cache_page:
425 + viking_flush_cache_range:
426 + #ifndef CONFIG_SMP
427 +- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
428 ++ ld [%o0 + VMA_VM_MM], %o0
429 + #endif
430 + viking_flush_cache_mm:
431 + #ifndef CONFIG_SMP
432 +@@ -148,7 +148,7 @@ viking_flush_tlb_mm:
433 + #endif
434 +
435 + viking_flush_tlb_range:
436 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
437 ++ ld [%o0 + VMA_VM_MM], %o0
438 + mov SRMMU_CTX_REG, %g1
439 + ld [%o0 + AOFF_mm_context], %o3
440 + lda [%g1] ASI_M_MMUREGS, %g5
441 +@@ -173,7 +173,7 @@ viking_flush_tlb_range:
442 + #endif
443 +
444 + viking_flush_tlb_page:
445 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
446 ++ ld [%o0 + VMA_VM_MM], %o0
447 + mov SRMMU_CTX_REG, %g1
448 + ld [%o0 + AOFF_mm_context], %o3
449 + lda [%g1] ASI_M_MMUREGS, %g5
450 +@@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range:
451 + tst %g5
452 + bne 3f
453 + mov SRMMU_CTX_REG, %g1
454 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
455 ++ ld [%o0 + VMA_VM_MM], %o0
456 + ld [%o0 + AOFF_mm_context], %o3
457 + lda [%g1] ASI_M_MMUREGS, %g5
458 + sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
459 +@@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page:
460 + tst %g5
461 + bne 2f
462 + mov SRMMU_CTX_REG, %g1
463 +- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
464 ++ ld [%o0 + VMA_VM_MM], %o0
465 + ld [%o0 + AOFF_mm_context], %o3
466 + lda [%g1] ASI_M_MMUREGS, %g5
467 + and %o1, PAGE_MASK, %o1
468 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
469 +index 27e86d9..89e1090 100644
470 +--- a/drivers/edac/edac_mc.c
471 ++++ b/drivers/edac/edac_mc.c
472 +@@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices);
473 + */
474 + static void const *edac_mc_owner;
475 +
476 ++static struct bus_type mc_bus[EDAC_MAX_MCS];
477 ++
478 + unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
479 + unsigned len)
480 + {
481 +@@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
482 + int ret = -EINVAL;
483 + edac_dbg(0, "\n");
484 +
485 ++ if (mci->mc_idx >= EDAC_MAX_MCS) {
486 ++ pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
487 ++ return -ENODEV;
488 ++ }
489 ++
490 + #ifdef CONFIG_EDAC_DEBUG
491 + if (edac_debug_level >= 3)
492 + edac_mc_dump_mci(mci);
493 +@@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
494 + /* set load time so that error rate can be tracked */
495 + mci->start_time = jiffies;
496 +
497 ++ mci->bus = &mc_bus[mci->mc_idx];
498 ++
499 + if (edac_create_sysfs_mci_device(mci)) {
500 + edac_mc_printk(mci, KERN_WARNING,
501 + "failed to create sysfs device\n");
502 +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
503 +index 67610a6..c4d700a 100644
504 +--- a/drivers/edac/edac_mc_sysfs.c
505 ++++ b/drivers/edac/edac_mc_sysfs.c
506 +@@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
507 + return -ENODEV;
508 +
509 + csrow->dev.type = &csrow_attr_type;
510 +- csrow->dev.bus = &mci->bus;
511 ++ csrow->dev.bus = mci->bus;
512 + device_initialize(&csrow->dev);
513 + csrow->dev.parent = &mci->dev;
514 + csrow->mci = mci;
515 +@@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
516 + dimm->mci = mci;
517 +
518 + dimm->dev.type = &dimm_attr_type;
519 +- dimm->dev.bus = &mci->bus;
520 ++ dimm->dev.bus = mci->bus;
521 + device_initialize(&dimm->dev);
522 +
523 + dimm->dev.parent = &mci->dev;
524 +@@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
525 + * The memory controller needs its own bus, in order to avoid
526 + * namespace conflicts at /sys/bus/edac.
527 + */
528 +- mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
529 +- if (!mci->bus.name)
530 ++ mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
531 ++ if (!mci->bus->name)
532 + return -ENOMEM;
533 +- edac_dbg(0, "creating bus %s\n", mci->bus.name);
534 +- err = bus_register(&mci->bus);
535 ++
536 ++ edac_dbg(0, "creating bus %s\n", mci->bus->name);
537 ++
538 ++ err = bus_register(mci->bus);
539 + if (err < 0)
540 + return err;
541 +
542 +@@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
543 + device_initialize(&mci->dev);
544 +
545 + mci->dev.parent = mci_pdev;
546 +- mci->dev.bus = &mci->bus;
547 ++ mci->dev.bus = mci->bus;
548 + dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
549 + dev_set_drvdata(&mci->dev, mci);
550 + pm_runtime_forbid(&mci->dev);
551 +@@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
552 + err = device_add(&mci->dev);
553 + if (err < 0) {
554 + edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
555 +- bus_unregister(&mci->bus);
556 +- kfree(mci->bus.name);
557 ++ bus_unregister(mci->bus);
558 ++ kfree(mci->bus->name);
559 + return err;
560 + }
561 +
562 +@@ -1064,8 +1066,8 @@ fail:
563 + }
564 + fail2:
565 + device_unregister(&mci->dev);
566 +- bus_unregister(&mci->bus);
567 +- kfree(mci->bus.name);
568 ++ bus_unregister(mci->bus);
569 ++ kfree(mci->bus->name);
570 + return err;
571 + }
572 +
573 +@@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci)
574 + {
575 + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
576 + device_unregister(&mci->dev);
577 +- bus_unregister(&mci->bus);
578 +- kfree(mci->bus.name);
579 ++ bus_unregister(mci->bus);
580 ++ kfree(mci->bus->name);
581 + }
582 +
583 + static void mc_attr_release(struct device *dev)
584 +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
585 +index 1b63517..157b934 100644
586 +--- a/drivers/edac/i5100_edac.c
587 ++++ b/drivers/edac/i5100_edac.c
588 +@@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
589 + if (!i5100_debugfs)
590 + return -ENODEV;
591 +
592 +- priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
593 ++ priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
594 +
595 + if (!priv->debugfs)
596 + return -ENOMEM;
597 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
598 +index d3e15b4..c42b14b 100644
599 +--- a/drivers/md/bcache/bcache.h
600 ++++ b/drivers/md/bcache/bcache.h
601 +@@ -437,6 +437,7 @@ struct bcache_device {
602 +
603 + /* If nonzero, we're detaching/unregistering from cache set */
604 + atomic_t detaching;
605 ++ int flush_done;
606 +
607 + atomic_long_t sectors_dirty;
608 + unsigned long sectors_dirty_gc;
609 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
610 +index 7a5658f..7b687a6 100644
611 +--- a/drivers/md/bcache/btree.c
612 ++++ b/drivers/md/bcache/btree.c
613 +@@ -1419,8 +1419,10 @@ static void btree_gc_start(struct cache_set *c)
614 + for_each_cache(ca, c, i)
615 + for_each_bucket(b, ca) {
616 + b->gc_gen = b->gen;
617 +- if (!atomic_read(&b->pin))
618 ++ if (!atomic_read(&b->pin)) {
619 + SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
620 ++ SET_GC_SECTORS_USED(b, 0);
621 ++ }
622 + }
623 +
624 + for (d = c->devices;
625 +diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
626 +index bd05a9a..9aba201 100644
627 +--- a/drivers/md/bcache/closure.c
628 ++++ b/drivers/md/bcache/closure.c
629 +@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
630 + } else {
631 + struct closure *parent = cl->parent;
632 + struct closure_waitlist *wait = closure_waitlist(cl);
633 ++ closure_fn *destructor = cl->fn;
634 +
635 + closure_debug_destroy(cl);
636 +
637 ++ smp_mb();
638 + atomic_set(&cl->remaining, -1);
639 +
640 + if (wait)
641 + closure_wake_up(wait);
642 +
643 +- if (cl->fn)
644 +- cl->fn(cl);
645 ++ if (destructor)
646 ++ destructor(cl);
647 +
648 + if (parent)
649 + closure_put(parent);
650 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
651 +index 8c8dfdc..8a54d3b 100644
652 +--- a/drivers/md/bcache/journal.c
653 ++++ b/drivers/md/bcache/journal.c
654 +@@ -182,9 +182,14 @@ bsearch:
655 + pr_debug("starting binary search, l %u r %u", l, r);
656 +
657 + while (l + 1 < r) {
658 ++ seq = list_entry(list->prev, struct journal_replay,
659 ++ list)->j.seq;
660 ++
661 + m = (l + r) >> 1;
662 ++ read_bucket(m);
663 +
664 +- if (read_bucket(m))
665 ++ if (seq != list_entry(list->prev, struct journal_replay,
666 ++ list)->j.seq)
667 + l = m;
668 + else
669 + r = m;
670 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
671 +index e5ff12e..2f36743 100644
672 +--- a/drivers/md/bcache/request.c
673 ++++ b/drivers/md/bcache/request.c
674 +@@ -489,6 +489,12 @@ static void bch_insert_data_loop(struct closure *cl)
675 + bch_queue_gc(op->c);
676 + }
677 +
678 ++ /*
679 ++ * Journal writes are marked REQ_FLUSH; if the original write was a
680 ++ * flush, it'll wait on the journal write.
681 ++ */
682 ++ bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
683 ++
684 + do {
685 + unsigned i;
686 + struct bkey *k;
687 +@@ -716,7 +722,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
688 + s->task = current;
689 + s->orig_bio = bio;
690 + s->write = (bio->bi_rw & REQ_WRITE) != 0;
691 +- s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
692 ++ s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
693 + s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
694 + s->recoverable = 1;
695 + s->start_time = jiffies;
696 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
697 +index f88e2b6..b4713ce 100644
698 +--- a/drivers/md/bcache/super.c
699 ++++ b/drivers/md/bcache/super.c
700 +@@ -704,7 +704,8 @@ static void bcache_device_detach(struct bcache_device *d)
701 + atomic_set(&d->detaching, 0);
702 + }
703 +
704 +- bcache_device_unlink(d);
705 ++ if (!d->flush_done)
706 ++ bcache_device_unlink(d);
707 +
708 + d->c->devices[d->id] = NULL;
709 + closure_put(&d->c->caching);
710 +@@ -781,6 +782,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
711 + set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
712 + set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
713 +
714 ++ blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
715 ++
716 + return 0;
717 + }
718 +
719 +@@ -1014,6 +1017,14 @@ static void cached_dev_flush(struct closure *cl)
720 + struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
721 + struct bcache_device *d = &dc->disk;
722 +
723 ++ mutex_lock(&bch_register_lock);
724 ++ d->flush_done = 1;
725 ++
726 ++ if (d->c)
727 ++ bcache_device_unlink(d);
728 ++
729 ++ mutex_unlock(&bch_register_lock);
730 ++
731 + bch_cache_accounting_destroy(&dc->accounting);
732 + kobject_del(&d->kobj);
733 +
734 +@@ -1303,18 +1314,22 @@ static void cache_set_flush(struct closure *cl)
735 + static void __cache_set_unregister(struct closure *cl)
736 + {
737 + struct cache_set *c = container_of(cl, struct cache_set, caching);
738 +- struct cached_dev *dc, *t;
739 ++ struct cached_dev *dc;
740 + size_t i;
741 +
742 + mutex_lock(&bch_register_lock);
743 +
744 +- if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
745 +- list_for_each_entry_safe(dc, t, &c->cached_devs, list)
746 +- bch_cached_dev_detach(dc);
747 +-
748 + for (i = 0; i < c->nr_uuids; i++)
749 +- if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
750 +- bcache_device_stop(c->devices[i]);
751 ++ if (c->devices[i]) {
752 ++ if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
753 ++ test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
754 ++ dc = container_of(c->devices[i],
755 ++ struct cached_dev, disk);
756 ++ bch_cached_dev_detach(dc);
757 ++ } else {
758 ++ bcache_device_stop(c->devices[i]);
759 ++ }
760 ++ }
761 +
762 + mutex_unlock(&bch_register_lock);
763 +
764 +diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
765 +index a1a3a51..0b4616b 100644
766 +--- a/drivers/media/dvb-core/dmxdev.c
767 ++++ b/drivers/media/dvb-core/dmxdev.c
768 +@@ -377,10 +377,8 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
769 + ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
770 + buffer2_len);
771 + }
772 +- if (ret < 0) {
773 +- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
774 ++ if (ret < 0)
775 + dmxdevfilter->buffer.error = ret;
776 +- }
777 + if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
778 + dmxdevfilter->state = DMXDEV_STATE_DONE;
779 + spin_unlock(&dmxdevfilter->dev->lock);
780 +@@ -416,10 +414,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
781 + ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
782 + if (ret == buffer1_len)
783 + ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
784 +- if (ret < 0) {
785 +- dvb_ringbuffer_flush(buffer);
786 ++ if (ret < 0)
787 + buffer->error = ret;
788 +- }
789 + spin_unlock(&dmxdevfilter->dev->lock);
790 + wake_up(&buffer->queue);
791 + return 0;
792 +diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
793 +index 10460fd..dbcdfbf 100644
794 +--- a/drivers/media/pci/saa7134/saa7134-alsa.c
795 ++++ b/drivers/media/pci/saa7134/saa7134-alsa.c
796 +@@ -172,7 +172,9 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev,
797 + dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
798 + dev->dmasound.bufsize, dev->dmasound.blocks);
799 + spin_unlock(&dev->slock);
800 ++ snd_pcm_stream_lock(dev->dmasound.substream);
801 + snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
802 ++ snd_pcm_stream_unlock(dev->dmasound.substream);
803 + return;
804 + }
805 +
806 +diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
807 +index 42aa54a..b710c6b 100644
808 +--- a/drivers/net/dummy.c
809 ++++ b/drivers/net/dummy.c
810 +@@ -185,6 +185,8 @@ static int __init dummy_init_module(void)
811 +
812 + rtnl_lock();
813 + err = __rtnl_link_register(&dummy_link_ops);
814 ++ if (err < 0)
815 ++ goto out;
816 +
817 + for (i = 0; i < numdummies && !err; i++) {
818 + err = dummy_init_one();
819 +@@ -192,6 +194,8 @@ static int __init dummy_init_module(void)
820 + }
821 + if (err < 0)
822 + __rtnl_link_unregister(&dummy_link_ops);
823 ++
824 ++out:
825 + rtnl_unlock();
826 +
827 + return err;
828 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
829 +index 418de8b..d30085c 100644
830 +--- a/drivers/net/ethernet/atheros/alx/main.c
831 ++++ b/drivers/net/ethernet/atheros/alx/main.c
832 +@@ -1303,6 +1303,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
833 +
834 + SET_NETDEV_DEV(netdev, &pdev->dev);
835 + alx = netdev_priv(netdev);
836 ++ spin_lock_init(&alx->hw.mdio_lock);
837 ++ spin_lock_init(&alx->irq_lock);
838 + alx->dev = netdev;
839 + alx->hw.pdev = pdev;
840 + alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
841 +@@ -1385,9 +1387,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
842 +
843 + INIT_WORK(&alx->link_check_wk, alx_link_check);
844 + INIT_WORK(&alx->reset_wk, alx_reset);
845 +- spin_lock_init(&alx->hw.mdio_lock);
846 +- spin_lock_init(&alx->irq_lock);
847 +-
848 + netif_carrier_off(netdev);
849 +
850 + err = register_netdev(netdev);
851 +diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
852 +index 0688bb8..c23bb02 100644
853 +--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
854 ++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
855 +@@ -1665,8 +1665,8 @@ check_sum:
856 + return 0;
857 + }
858 +
859 +-static void atl1e_tx_map(struct atl1e_adapter *adapter,
860 +- struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
861 ++static int atl1e_tx_map(struct atl1e_adapter *adapter,
862 ++ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
863 + {
864 + struct atl1e_tpd_desc *use_tpd = NULL;
865 + struct atl1e_tx_buffer *tx_buffer = NULL;
866 +@@ -1677,6 +1677,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
867 + u16 nr_frags;
868 + u16 f;
869 + int segment;
870 ++ int ring_start = adapter->tx_ring.next_to_use;
871 ++ int ring_end;
872 +
873 + nr_frags = skb_shinfo(skb)->nr_frags;
874 + segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
875 +@@ -1689,6 +1691,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
876 + tx_buffer->length = map_len;
877 + tx_buffer->dma = pci_map_single(adapter->pdev,
878 + skb->data, hdr_len, PCI_DMA_TODEVICE);
879 ++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
880 ++ return -ENOSPC;
881 ++
882 + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
883 + mapped_len += map_len;
884 + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
885 +@@ -1715,6 +1720,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
886 + tx_buffer->dma =
887 + pci_map_single(adapter->pdev, skb->data + mapped_len,
888 + map_len, PCI_DMA_TODEVICE);
889 ++
890 ++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
891 ++ /* We need to unwind the mappings we've done */
892 ++ ring_end = adapter->tx_ring.next_to_use;
893 ++ adapter->tx_ring.next_to_use = ring_start;
894 ++ while (adapter->tx_ring.next_to_use != ring_end) {
895 ++ tpd = atl1e_get_tpd(adapter);
896 ++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
897 ++ pci_unmap_single(adapter->pdev, tx_buffer->dma,
898 ++ tx_buffer->length, PCI_DMA_TODEVICE);
899 ++ }
900 ++ /* Reset the tx rings next pointer */
901 ++ adapter->tx_ring.next_to_use = ring_start;
902 ++ return -ENOSPC;
903 ++ }
904 ++
905 + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
906 + mapped_len += map_len;
907 + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
908 +@@ -1750,6 +1771,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
909 + (i * MAX_TX_BUF_LEN),
910 + tx_buffer->length,
911 + DMA_TO_DEVICE);
912 ++
913 ++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
914 ++ /* We need to unwind the mappings we've done */
915 ++ ring_end = adapter->tx_ring.next_to_use;
916 ++ adapter->tx_ring.next_to_use = ring_start;
917 ++ while (adapter->tx_ring.next_to_use != ring_end) {
918 ++ tpd = atl1e_get_tpd(adapter);
919 ++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
920 ++ dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
921 ++ tx_buffer->length, DMA_TO_DEVICE);
922 ++ }
923 ++
924 ++ /* Reset the ring next to use pointer */
925 ++ adapter->tx_ring.next_to_use = ring_start;
926 ++ return -ENOSPC;
927 ++ }
928 ++
929 + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
930 + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
931 + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
932 +@@ -1767,6 +1805,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
933 + /* The last buffer info contain the skb address,
934 + so it will be free after unmap */
935 + tx_buffer->skb = skb;
936 ++ return 0;
937 + }
938 +
939 + static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
940 +@@ -1834,10 +1873,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
941 + return NETDEV_TX_OK;
942 + }
943 +
944 +- atl1e_tx_map(adapter, skb, tpd);
945 ++ if (atl1e_tx_map(adapter, skb, tpd)) {
946 ++ dev_kfree_skb_any(skb);
947 ++ goto out;
948 ++ }
949 ++
950 + atl1e_tx_queue(adapter, tpd_req, tpd);
951 +
952 + netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
953 ++out:
954 + spin_unlock_irqrestore(&adapter->tx_lock, flags);
955 + return NETDEV_TX_OK;
956 + }
957 +diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
958 +index c89aa41..b4e0dc8 100644
959 +--- a/drivers/net/ethernet/cadence/macb.c
960 ++++ b/drivers/net/ethernet/cadence/macb.c
961 +@@ -1070,7 +1070,7 @@ static void macb_configure_dma(struct macb *bp)
962 + static void macb_configure_caps(struct macb *bp)
963 + {
964 + if (macb_is_gem(bp)) {
965 +- if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
966 ++ if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
967 + bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
968 + }
969 + }
970 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
971 +index a0b4be5..6e43426 100644
972 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
973 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
974 +@@ -782,16 +782,22 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
975 +
976 + if (vlan_tx_tag_present(skb))
977 + vlan_tag = be_get_tx_vlan_tag(adapter, skb);
978 +- else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
979 +- vlan_tag = adapter->pvid;
980 ++
981 ++ if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
982 ++ if (!vlan_tag)
983 ++ vlan_tag = adapter->pvid;
984 ++ /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
985 ++ * skip VLAN insertion
986 ++ */
987 ++ if (skip_hw_vlan)
988 ++ *skip_hw_vlan = true;
989 ++ }
990 +
991 + if (vlan_tag) {
992 + skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
993 + if (unlikely(!skb))
994 + return skb;
995 + skb->vlan_tci = 0;
996 +- if (skip_hw_vlan)
997 +- *skip_hw_vlan = true;
998 + }
999 +
1000 + /* Insert the outer VLAN, if any */
1001 +diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
1002 +index a7dfe36..5173eaa 100644
1003 +--- a/drivers/net/ethernet/sfc/rx.c
1004 ++++ b/drivers/net/ethernet/sfc/rx.c
1005 +@@ -282,9 +282,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
1006 + }
1007 +
1008 + /* Recycle the pages that are used by buffers that have just been received. */
1009 +-static void efx_recycle_rx_buffers(struct efx_channel *channel,
1010 +- struct efx_rx_buffer *rx_buf,
1011 +- unsigned int n_frags)
1012 ++static void efx_recycle_rx_pages(struct efx_channel *channel,
1013 ++ struct efx_rx_buffer *rx_buf,
1014 ++ unsigned int n_frags)
1015 + {
1016 + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
1017 +
1018 +@@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel,
1019 + } while (--n_frags);
1020 + }
1021 +
1022 ++static void efx_discard_rx_packet(struct efx_channel *channel,
1023 ++ struct efx_rx_buffer *rx_buf,
1024 ++ unsigned int n_frags)
1025 ++{
1026 ++ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
1027 ++
1028 ++ efx_recycle_rx_pages(channel, rx_buf, n_frags);
1029 ++
1030 ++ do {
1031 ++ efx_free_rx_buffer(rx_buf);
1032 ++ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
1033 ++ } while (--n_frags);
1034 ++}
1035 ++
1036 + /**
1037 + * efx_fast_push_rx_descriptors - push new RX descriptors quickly
1038 + * @rx_queue: RX descriptor queue
1039 +@@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
1040 + */
1041 + if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
1042 + efx_rx_flush_packet(channel);
1043 +- put_page(rx_buf->page);
1044 +- efx_recycle_rx_buffers(channel, rx_buf, n_frags);
1045 ++ efx_discard_rx_packet(channel, rx_buf, n_frags);
1046 + return;
1047 + }
1048 +
1049 +@@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
1050 + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
1051 + }
1052 +
1053 +- /* All fragments have been DMA-synced, so recycle buffers and pages. */
1054 ++ /* All fragments have been DMA-synced, so recycle pages. */
1055 + rx_buf = efx_rx_buffer(rx_queue, index);
1056 +- efx_recycle_rx_buffers(channel, rx_buf, n_frags);
1057 ++ efx_recycle_rx_pages(channel, rx_buf, n_frags);
1058 +
1059 + /* Pipeline receives so that we give time for packet headers to be
1060 + * prefetched into cache.
1061 +diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
1062 +index 1df0ff3..3df5684 100644
1063 +--- a/drivers/net/ethernet/sun/sunvnet.c
1064 ++++ b/drivers/net/ethernet/sun/sunvnet.c
1065 +@@ -1239,6 +1239,8 @@ static int vnet_port_remove(struct vio_dev *vdev)
1066 + dev_set_drvdata(&vdev->dev, NULL);
1067 +
1068 + kfree(port);
1069 ++
1070 ++ unregister_netdev(vp->dev);
1071 + }
1072 + return 0;
1073 + }
1074 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1075 +index 4dccead..23a0fff 100644
1076 +--- a/drivers/net/hyperv/netvsc_drv.c
1077 ++++ b/drivers/net/hyperv/netvsc_drv.c
1078 +@@ -431,8 +431,8 @@ static int netvsc_probe(struct hv_device *dev,
1079 + net->netdev_ops = &device_ops;
1080 +
1081 + /* TODO: Add GSO and Checksum offload */
1082 +- net->hw_features = NETIF_F_SG;
1083 +- net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
1084 ++ net->hw_features = 0;
1085 ++ net->features = NETIF_F_HW_VLAN_CTAG_TX;
1086 +
1087 + SET_ETHTOOL_OPS(net, &ethtool_ops);
1088 + SET_NETDEV_DEV(net, &dev->device);
1089 +diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
1090 +index dc9f6a4..a3bed28 100644
1091 +--- a/drivers/net/ifb.c
1092 ++++ b/drivers/net/ifb.c
1093 +@@ -291,11 +291,17 @@ static int __init ifb_init_module(void)
1094 +
1095 + rtnl_lock();
1096 + err = __rtnl_link_register(&ifb_link_ops);
1097 ++ if (err < 0)
1098 ++ goto out;
1099 +
1100 +- for (i = 0; i < numifbs && !err; i++)
1101 ++ for (i = 0; i < numifbs && !err; i++) {
1102 + err = ifb_init_one(i);
1103 ++ cond_resched();
1104 ++ }
1105 + if (err)
1106 + __rtnl_link_unregister(&ifb_link_ops);
1107 ++
1108 ++out:
1109 + rtnl_unlock();
1110 +
1111 + return err;
1112 +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
1113 +index b6dd6a7..523d6b2 100644
1114 +--- a/drivers/net/macvtap.c
1115 ++++ b/drivers/net/macvtap.c
1116 +@@ -633,6 +633,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
1117 + return 0;
1118 + }
1119 +
1120 ++static unsigned long iov_pages(const struct iovec *iv, int offset,
1121 ++ unsigned long nr_segs)
1122 ++{
1123 ++ unsigned long seg, base;
1124 ++ int pages = 0, len, size;
1125 ++
1126 ++ while (nr_segs && (offset >= iv->iov_len)) {
1127 ++ offset -= iv->iov_len;
1128 ++ ++iv;
1129 ++ --nr_segs;
1130 ++ }
1131 ++
1132 ++ for (seg = 0; seg < nr_segs; seg++) {
1133 ++ base = (unsigned long)iv[seg].iov_base + offset;
1134 ++ len = iv[seg].iov_len - offset;
1135 ++ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1136 ++ pages += size;
1137 ++ offset = 0;
1138 ++ }
1139 ++
1140 ++ return pages;
1141 ++}
1142 +
1143 + /* Get packet from user space buffer */
1144 + static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
1145 +@@ -647,6 +669,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
1146 + int vnet_hdr_len = 0;
1147 + int copylen = 0;
1148 + bool zerocopy = false;
1149 ++ size_t linear;
1150 +
1151 + if (q->flags & IFF_VNET_HDR) {
1152 + vnet_hdr_len = q->vnet_hdr_sz;
1153 +@@ -678,42 +701,35 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
1154 + if (unlikely(count > UIO_MAXIOV))
1155 + goto err;
1156 +
1157 +- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
1158 +- zerocopy = true;
1159 ++ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
1160 ++ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
1161 ++ linear = copylen;
1162 ++ if (iov_pages(iv, vnet_hdr_len + copylen, count)
1163 ++ <= MAX_SKB_FRAGS)
1164 ++ zerocopy = true;
1165 ++ }
1166 +
1167 +- if (zerocopy) {
1168 +- /* Userspace may produce vectors with count greater than
1169 +- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
1170 +- * to let the rest of data to be fit in the frags.
1171 +- */
1172 +- if (count > MAX_SKB_FRAGS) {
1173 +- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
1174 +- if (copylen < vnet_hdr_len)
1175 +- copylen = 0;
1176 +- else
1177 +- copylen -= vnet_hdr_len;
1178 +- }
1179 +- /* There are 256 bytes to be copied in skb, so there is enough
1180 +- * room for skb expand head in case it is used.
1181 +- * The rest buffer is mapped from userspace.
1182 +- */
1183 +- if (copylen < vnet_hdr.hdr_len)
1184 +- copylen = vnet_hdr.hdr_len;
1185 +- if (!copylen)
1186 +- copylen = GOODCOPY_LEN;
1187 +- } else
1188 ++ if (!zerocopy) {
1189 + copylen = len;
1190 ++ linear = vnet_hdr.hdr_len;
1191 ++ }
1192 +
1193 + skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
1194 +- vnet_hdr.hdr_len, noblock, &err);
1195 ++ linear, noblock, &err);
1196 + if (!skb)
1197 + goto err;
1198 +
1199 + if (zerocopy)
1200 + err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
1201 +- else
1202 ++ else {
1203 + err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
1204 + len);
1205 ++ if (!err && m && m->msg_control) {
1206 ++ struct ubuf_info *uarg = m->msg_control;
1207 ++ uarg->callback(uarg, false);
1208 ++ }
1209 ++ }
1210 ++
1211 + if (err)
1212 + goto err_kfree;
1213 +
1214 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1215 +index 9c61f87..2491eb2 100644
1216 +--- a/drivers/net/tun.c
1217 ++++ b/drivers/net/tun.c
1218 +@@ -1037,6 +1037,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
1219 + return 0;
1220 + }
1221 +
1222 ++static unsigned long iov_pages(const struct iovec *iv, int offset,
1223 ++ unsigned long nr_segs)
1224 ++{
1225 ++ unsigned long seg, base;
1226 ++ int pages = 0, len, size;
1227 ++
1228 ++ while (nr_segs && (offset >= iv->iov_len)) {
1229 ++ offset -= iv->iov_len;
1230 ++ ++iv;
1231 ++ --nr_segs;
1232 ++ }
1233 ++
1234 ++ for (seg = 0; seg < nr_segs; seg++) {
1235 ++ base = (unsigned long)iv[seg].iov_base + offset;
1236 ++ len = iv[seg].iov_len - offset;
1237 ++ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1238 ++ pages += size;
1239 ++ offset = 0;
1240 ++ }
1241 ++
1242 ++ return pages;
1243 ++}
1244 ++
1245 + /* Get packet from user space buffer */
1246 + static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1247 + void *msg_control, const struct iovec *iv,
1248 +@@ -1044,7 +1067,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1249 + {
1250 + struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1251 + struct sk_buff *skb;
1252 +- size_t len = total_len, align = NET_SKB_PAD;
1253 ++ size_t len = total_len, align = NET_SKB_PAD, linear;
1254 + struct virtio_net_hdr gso = { 0 };
1255 + int offset = 0;
1256 + int copylen;
1257 +@@ -1084,34 +1107,23 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1258 + return -EINVAL;
1259 + }
1260 +
1261 +- if (msg_control)
1262 +- zerocopy = true;
1263 +-
1264 +- if (zerocopy) {
1265 +- /* Userspace may produce vectors with count greater than
1266 +- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
1267 +- * to let the rest of data to be fit in the frags.
1268 +- */
1269 +- if (count > MAX_SKB_FRAGS) {
1270 +- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
1271 +- if (copylen < offset)
1272 +- copylen = 0;
1273 +- else
1274 +- copylen -= offset;
1275 +- } else
1276 +- copylen = 0;
1277 +- /* There are 256 bytes to be copied in skb, so there is enough
1278 +- * room for skb expand head in case it is used.
1279 ++ if (msg_control) {
1280 ++ /* There are 256 bytes to be copied in skb, so there is
1281 ++ * enough room for skb expand head in case it is used.
1282 + * The rest of the buffer is mapped from userspace.
1283 + */
1284 +- if (copylen < gso.hdr_len)
1285 +- copylen = gso.hdr_len;
1286 +- if (!copylen)
1287 +- copylen = GOODCOPY_LEN;
1288 +- } else
1289 ++ copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1290 ++ linear = copylen;
1291 ++ if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1292 ++ zerocopy = true;
1293 ++ }
1294 ++
1295 ++ if (!zerocopy) {
1296 + copylen = len;
1297 ++ linear = gso.hdr_len;
1298 ++ }
1299 +
1300 +- skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
1301 ++ skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1302 + if (IS_ERR(skb)) {
1303 + if (PTR_ERR(skb) != -EAGAIN)
1304 + tun->dev->stats.rx_dropped++;
1305 +@@ -1120,8 +1132,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1306 +
1307 + if (zerocopy)
1308 + err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1309 +- else
1310 ++ else {
1311 + err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1312 ++ if (!err && msg_control) {
1313 ++ struct ubuf_info *uarg = msg_control;
1314 ++ uarg->callback(uarg, false);
1315 ++ }
1316 ++ }
1317 +
1318 + if (err) {
1319 + tun->dev->stats.rx_dropped++;
1320 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1321 +index c9e0038..42d670a 100644
1322 +--- a/drivers/net/virtio_net.c
1323 ++++ b/drivers/net/virtio_net.c
1324 +@@ -602,7 +602,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1325 + container_of(napi, struct receive_queue, napi);
1326 + struct virtnet_info *vi = rq->vq->vdev->priv;
1327 + void *buf;
1328 +- unsigned int len, received = 0;
1329 ++ unsigned int r, len, received = 0;
1330 +
1331 + again:
1332 + while (received < budget &&
1333 +@@ -619,8 +619,9 @@ again:
1334 +
1335 + /* Out of packets? */
1336 + if (received < budget) {
1337 ++ r = virtqueue_enable_cb_prepare(rq->vq);
1338 + napi_complete(napi);
1339 +- if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
1340 ++ if (unlikely(virtqueue_poll(rq->vq, r)) &&
1341 + napi_schedule_prep(napi)) {
1342 + virtqueue_disable_cb(rq->vq);
1343 + __napi_schedule(napi);
1344 +diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
1345 +index 809b7a3..5d3b0f0 100644
1346 +--- a/drivers/rapidio/switches/idt_gen2.c
1347 ++++ b/drivers/rapidio/switches/idt_gen2.c
1348 +@@ -15,6 +15,8 @@
1349 + #include <linux/rio_drv.h>
1350 + #include <linux/rio_ids.h>
1351 + #include <linux/delay.h>
1352 ++
1353 ++#include <asm/page.h>
1354 + #include "../rio.h"
1355 +
1356 + #define LOCAL_RTE_CONF_DESTID_SEL 0x010070
1357 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1358 +index 3a9ddae..89178b8 100644
1359 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
1360 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1361 +@@ -4852,10 +4852,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
1362 + sense, sense_handle);
1363 + }
1364 +
1365 +- for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
1366 +- dma_free_coherent(&instance->pdev->dev,
1367 +- kern_sge32[i].length,
1368 +- kbuff_arr[i], kern_sge32[i].phys_addr);
1369 ++ for (i = 0; i < ioc->sge_count; i++) {
1370 ++ if (kbuff_arr[i])
1371 ++ dma_free_coherent(&instance->pdev->dev,
1372 ++ kern_sge32[i].length,
1373 ++ kbuff_arr[i],
1374 ++ kern_sge32[i].phys_addr);
1375 + }
1376 +
1377 + megasas_return_cmd(instance, cmd);
1378 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1379 +index dcbf7c8..f8c4b85 100644
1380 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1381 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1382 +@@ -1273,6 +1273,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1383 + struct MPT3SAS_DEVICE *sas_device_priv_data;
1384 + struct scsi_target *starget;
1385 + struct _raid_device *raid_device;
1386 ++ struct _sas_device *sas_device;
1387 + unsigned long flags;
1388 +
1389 + sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
1390 +@@ -1301,6 +1302,19 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1391 + spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1392 + }
1393 +
1394 ++ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1395 ++ spin_lock_irqsave(&ioc->sas_device_lock, flags);
1396 ++ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
1397 ++ sas_target_priv_data->sas_address);
1398 ++ if (sas_device && (sas_device->starget == NULL)) {
1399 ++ sdev_printk(KERN_INFO, sdev,
1400 ++ "%s : sas_device->starget set to starget @ %d\n",
1401 ++ __func__, __LINE__);
1402 ++ sas_device->starget = starget;
1403 ++ }
1404 ++ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1405 ++ }
1406 ++
1407 + return 0;
1408 + }
1409 +
1410 +@@ -6392,7 +6406,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
1411 + handle))) {
1412 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1413 + MPI2_IOCSTATUS_MASK;
1414 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1415 ++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1416 + break;
1417 + handle = le16_to_cpu(sas_device_pg0.DevHandle);
1418 + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1419 +@@ -6494,7 +6508,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
1420 + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1421 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1422 + MPI2_IOCSTATUS_MASK;
1423 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1424 ++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1425 + break;
1426 + handle = le16_to_cpu(volume_pg1.DevHandle);
1427 +
1428 +@@ -6518,7 +6532,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
1429 + phys_disk_num))) {
1430 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1431 + MPI2_IOCSTATUS_MASK;
1432 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1433 ++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1434 + break;
1435 + phys_disk_num = pd_pg0.PhysDiskNum;
1436 + handle = le16_to_cpu(pd_pg0.DevHandle);
1437 +@@ -6597,7 +6611,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
1438 +
1439 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1440 + MPI2_IOCSTATUS_MASK;
1441 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1442 ++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1443 + break;
1444 +
1445 + handle = le16_to_cpu(expander_pg0.DevHandle);
1446 +@@ -6742,8 +6756,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1447 + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
1448 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1449 + MPI2_IOCSTATUS_MASK;
1450 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1451 +- break;
1452 + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1453 + pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
1454 + "ioc_status(0x%04x), loginfo(0x%08x)\n",
1455 +@@ -6787,8 +6799,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1456 + phys_disk_num))) {
1457 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1458 + MPI2_IOCSTATUS_MASK;
1459 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1460 +- break;
1461 + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1462 + pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
1463 + "ioc_status(0x%04x), loginfo(0x%08x)\n",
1464 +@@ -6854,8 +6864,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1465 + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1466 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1467 + MPI2_IOCSTATUS_MASK;
1468 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1469 +- break;
1470 + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1471 + pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
1472 + "ioc_status(0x%04x), loginfo(0x%08x)\n",
1473 +@@ -6914,8 +6922,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1474 + handle))) {
1475 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1476 + MPI2_IOCSTATUS_MASK;
1477 +- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1478 +- break;
1479 + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1480 + pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
1481 + " ioc_status(0x%04x), loginfo(0x%08x)\n",
1482 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1483 +index 2c65955..c90d960 100644
1484 +--- a/drivers/usb/serial/cp210x.c
1485 ++++ b/drivers/usb/serial/cp210x.c
1486 +@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
1487 + { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
1488 + { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
1489 + { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
1490 ++ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
1491 + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
1492 + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
1493 + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
1494 +@@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = {
1495 + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1496 + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
1497 + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
1498 ++ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1499 ++ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1500 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1501 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
1502 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
1503 +@@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = {
1504 + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
1505 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1506 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1507 ++ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
1508 + { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
1509 + { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
1510 + { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
1511 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1512 +index 5dd857d..1cf6f12 100644
1513 +--- a/drivers/usb/serial/option.c
1514 ++++ b/drivers/usb/serial/option.c
1515 +@@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb);
1516 + #define OLIVETTI_VENDOR_ID 0x0b3c
1517 + #define OLIVETTI_PRODUCT_OLICARD100 0xc000
1518 + #define OLIVETTI_PRODUCT_OLICARD145 0xc003
1519 ++#define OLIVETTI_PRODUCT_OLICARD200 0xc005
1520 +
1521 + /* Celot products */
1522 + #define CELOT_VENDOR_ID 0x211f
1523 + #define CELOT_PRODUCT_CT680M 0x6801
1524 +
1525 +-/* ONDA Communication vendor id */
1526 +-#define ONDA_VENDOR_ID 0x1ee8
1527 +-
1528 +-/* ONDA MT825UP HSDPA 14.2 modem */
1529 +-#define ONDA_MT825UP 0x000b
1530 +-
1531 + /* Samsung products */
1532 + #define SAMSUNG_VENDOR_ID 0x04e8
1533 + #define SAMSUNG_PRODUCT_GT_B3730 0x6889
1534 +@@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb);
1535 +
1536 + /* Hyundai Petatel Inc. products */
1537 + #define PETATEL_VENDOR_ID 0x1ff4
1538 +-#define PETATEL_PRODUCT_NP10T 0x600e
1539 ++#define PETATEL_PRODUCT_NP10T_600A 0x600a
1540 ++#define PETATEL_PRODUCT_NP10T_600E 0x600e
1541 +
1542 + /* TP-LINK Incorporated products */
1543 + #define TPLINK_VENDOR_ID 0x2357
1544 +@@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = {
1545 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1546 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1547 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1548 ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1549 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1550 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
1551 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
1552 +@@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = {
1553 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
1554 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1555 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
1556 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
1557 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
1558 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1559 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
1560 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
1561 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1562 +@@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = {
1563 +
1564 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1565 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1566 ++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
1567 + { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1568 +- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
1569 + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1570 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1571 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
1572 +@@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = {
1573 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
1574 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
1575 + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
1576 +- { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
1577 ++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
1578 ++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
1579 + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
1580 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1581 ++ { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
1582 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1583 + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
1584 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
1585 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
1586 +@@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = {
1587 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1588 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1589 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1590 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1591 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1592 + { } /* Terminating entry */
1593 + };
1594 + MODULE_DEVICE_TABLE(usb, option_ids);
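The cp210x and option hunks above only grow the drivers' USB ID tables; all matching is driven by these entries. A minimal sketch of the two macros' semantics, assuming the standard USB core definitions: USB_DEVICE() matches on vendor/product IDs alone, while USB_DEVICE_AND_INTERFACE_INFO() additionally requires the interface class/subclass/protocol triple, which is why the vendor-specific D-Link entries pass 0xff, 0xff, 0xff.

    /* Illustrative table, not from the patch. */
    static const struct usb_device_id example_ids[] = {
        { USB_DEVICE(0x10C4, 0x88A4) },   /* VID/PID match only */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff),
          .driver_info = 0 },             /* plus the interface triple */
        { }                               /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_ids);
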
1595 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1596 +index f80d3dd..8ca5ac7 100644
1597 +--- a/drivers/vhost/net.c
1598 ++++ b/drivers/vhost/net.c
1599 +@@ -150,6 +150,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
1600 + {
1601 + kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
1602 + wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1603 ++}
1604 ++
1605 ++static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
1606 ++{
1607 ++ vhost_net_ubuf_put_and_wait(ubufs);
1608 + kfree(ubufs);
1609 + }
1610 +
1611 +@@ -948,7 +953,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1612 + mutex_unlock(&vq->mutex);
1613 +
1614 + if (oldubufs) {
1615 +- vhost_net_ubuf_put_and_wait(oldubufs);
1616 ++ vhost_net_ubuf_put_wait_and_free(oldubufs);
1617 + mutex_lock(&vq->mutex);
1618 + vhost_zerocopy_signal_used(n, vq);
1619 + mutex_unlock(&vq->mutex);
1620 +@@ -966,7 +971,7 @@ err_used:
1621 + rcu_assign_pointer(vq->private_data, oldsock);
1622 + vhost_net_enable_vq(n, vq);
1623 + if (ubufs)
1624 +- vhost_net_ubuf_put_and_wait(ubufs);
1625 ++ vhost_net_ubuf_put_wait_and_free(ubufs);
1626 + err_ubufs:
1627 + fput(sock->file);
1628 + err_vq:
1629 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1630 +index 5217baf..37d58f8 100644
1631 +--- a/drivers/virtio/virtio_ring.c
1632 ++++ b/drivers/virtio/virtio_ring.c
1633 +@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
1634 + EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1635 +
1636 + /**
1637 +- * virtqueue_enable_cb - restart callbacks after disable_cb.
1638 ++ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
1639 + * @vq: the struct virtqueue we're talking about.
1640 + *
1641 +- * This re-enables callbacks; it returns "false" if there are pending
1642 +- * buffers in the queue, to detect a possible race between the driver
1643 +- * checking for more work, and enabling callbacks.
1644 ++ * This re-enables callbacks; it returns current queue state
1645 ++ * in an opaque unsigned value. This value should be later tested by
1646 ++ * virtqueue_poll, to detect a possible race between the driver checking for
1647 ++ * more work, and enabling callbacks.
1648 + *
1649 + * Caller must ensure we don't call this with other virtqueue
1650 + * operations at the same time (except where noted).
1651 + */
1652 +-bool virtqueue_enable_cb(struct virtqueue *_vq)
1653 ++unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1654 + {
1655 + struct vring_virtqueue *vq = to_vvq(_vq);
1656 ++ u16 last_used_idx;
1657 +
1658 + START_USE(vq);
1659 +
1660 +@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
1661 + * either clear the flags bit or point the event index at the next
1662 + * entry. Always do both to keep code simple. */
1663 + vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
1664 +- vring_used_event(&vq->vring) = vq->last_used_idx;
1665 ++ vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
1666 ++ END_USE(vq);
1667 ++ return last_used_idx;
1668 ++}
1669 ++EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1670 ++
1671 ++/**
1672 ++ * virtqueue_poll - query pending used buffers
1673 ++ * @vq: the struct virtqueue we're talking about.
1674 ++ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1675 ++ *
1676 ++ * Returns "true" if there are pending used buffers in the queue.
1677 ++ *
1678 ++ * This does not need to be serialized.
1679 ++ */
1680 ++bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1681 ++{
1682 ++ struct vring_virtqueue *vq = to_vvq(_vq);
1683 ++
1684 + virtio_mb(vq->weak_barriers);
1685 +- if (unlikely(more_used(vq))) {
1686 +- END_USE(vq);
1687 +- return false;
1688 +- }
1689 ++ return (u16)last_used_idx != vq->vring.used->idx;
1690 ++}
1691 ++EXPORT_SYMBOL_GPL(virtqueue_poll);
1692 +
1693 +- END_USE(vq);
1694 +- return true;
1695 ++/**
1696 ++ * virtqueue_enable_cb - restart callbacks after disable_cb.
1697 ++ * @vq: the struct virtqueue we're talking about.
1698 ++ *
1699 ++ * This re-enables callbacks; it returns "false" if there are pending
1700 ++ * buffers in the queue, to detect a possible race between the driver
1701 ++ * checking for more work, and enabling callbacks.
1702 ++ *
1703 ++ * Caller must ensure we don't call this with other virtqueue
1704 ++ * operations at the same time (except where noted).
1705 ++ */
1706 ++bool virtqueue_enable_cb(struct virtqueue *_vq)
1707 ++{
1708 ++ unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
1709 ++ return !virtqueue_poll(_vq, last_used_idx);
1710 + }
1711 + EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
1712 +
1713 +diff --git a/fs/block_dev.c b/fs/block_dev.c
1714 +index 2091db8..85f5c85 100644
1715 +--- a/fs/block_dev.c
1716 ++++ b/fs/block_dev.c
1717 +@@ -58,17 +58,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
1718 + struct backing_dev_info *dst)
1719 + {
1720 + struct backing_dev_info *old = inode->i_data.backing_dev_info;
1721 ++ bool wakeup_bdi = false;
1722 +
1723 + if (unlikely(dst == old)) /* deadlock avoidance */
1724 + return;
1725 + bdi_lock_two(&old->wb, &dst->wb);
1726 + spin_lock(&inode->i_lock);
1727 + inode->i_data.backing_dev_info = dst;
1728 +- if (inode->i_state & I_DIRTY)
1729 ++ if (inode->i_state & I_DIRTY) {
1730 ++ if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
1731 ++ wakeup_bdi = true;
1732 + list_move(&inode->i_wb_list, &dst->wb.b_dirty);
1733 ++ }
1734 + spin_unlock(&inode->i_lock);
1735 + spin_unlock(&old->wb.list_lock);
1736 + spin_unlock(&dst->wb.list_lock);
1737 ++
1738 ++ if (wakeup_bdi)
1739 ++ bdi_wakeup_thread_delayed(dst);
1740 + }
1741 +
1742 + /* Kill _all_ buffers and pagecache , dirty or not.. */
1743 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1744 +index e49da58..fddf3d9 100644
1745 +--- a/fs/ext4/extents.c
1746 ++++ b/fs/ext4/extents.c
1747 +@@ -4386,9 +4386,20 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
1748 +
1749 + last_block = (inode->i_size + sb->s_blocksize - 1)
1750 + >> EXT4_BLOCK_SIZE_BITS(sb);
1751 ++retry:
1752 + err = ext4_es_remove_extent(inode, last_block,
1753 + EXT_MAX_BLOCKS - last_block);
1754 ++ if (err == ENOMEM) {
1755 ++ cond_resched();
1756 ++ congestion_wait(BLK_RW_ASYNC, HZ/50);
1757 ++ goto retry;
1758 ++ }
1759 ++ if (err) {
1760 ++ ext4_std_error(inode->i_sb, err);
1761 ++ return;
1762 ++ }
1763 + err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
1764 ++ ext4_std_error(inode->i_sb, err);
1765 + }
1766 +
1767 + static void ext4_falloc_update_inode(struct inode *inode,
1768 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
1769 +index f3f783d..5b12746 100644
1770 +--- a/fs/fuse/dir.c
1771 ++++ b/fs/fuse/dir.c
1772 +@@ -1225,13 +1225,29 @@ static int fuse_direntplus_link(struct file *file,
1773 + if (name.name[1] == '.' && name.len == 2)
1774 + return 0;
1775 + }
1776 ++
1777 ++ if (invalid_nodeid(o->nodeid))
1778 ++ return -EIO;
1779 ++ if (!fuse_valid_type(o->attr.mode))
1780 ++ return -EIO;
1781 ++
1782 + fc = get_fuse_conn(dir);
1783 +
1784 + name.hash = full_name_hash(name.name, name.len);
1785 + dentry = d_lookup(parent, &name);
1786 +- if (dentry && dentry->d_inode) {
1787 ++ if (dentry) {
1788 + inode = dentry->d_inode;
1789 +- if (get_node_id(inode) == o->nodeid) {
1790 ++ if (!inode) {
1791 ++ d_drop(dentry);
1792 ++ } else if (get_node_id(inode) != o->nodeid ||
1793 ++ ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
1794 ++ err = d_invalidate(dentry);
1795 ++ if (err)
1796 ++ goto out;
1797 ++ } else if (is_bad_inode(inode)) {
1798 ++ err = -EIO;
1799 ++ goto out;
1800 ++ } else {
1801 + struct fuse_inode *fi;
1802 + fi = get_fuse_inode(inode);
1803 + spin_lock(&fc->lock);
1804 +@@ -1244,9 +1260,6 @@ static int fuse_direntplus_link(struct file *file,
1805 + */
1806 + goto found;
1807 + }
1808 +- err = d_invalidate(dentry);
1809 +- if (err)
1810 +- goto out;
1811 + dput(dentry);
1812 + dentry = NULL;
1813 + }
1814 +@@ -1261,10 +1274,19 @@ static int fuse_direntplus_link(struct file *file,
1815 + if (!inode)
1816 + goto out;
1817 +
1818 +- alias = d_materialise_unique(dentry, inode);
1819 +- err = PTR_ERR(alias);
1820 +- if (IS_ERR(alias))
1821 +- goto out;
1822 ++ if (S_ISDIR(inode->i_mode)) {
1823 ++ mutex_lock(&fc->inst_mutex);
1824 ++ alias = fuse_d_add_directory(dentry, inode);
1825 ++ mutex_unlock(&fc->inst_mutex);
1826 ++ err = PTR_ERR(alias);
1827 ++ if (IS_ERR(alias)) {
1828 ++ iput(inode);
1829 ++ goto out;
1830 ++ }
1831 ++ } else {
1832 ++ alias = d_splice_alias(inode, dentry);
1833 ++ }
1834 ++
1835 + if (alias) {
1836 + dput(dentry);
1837 + dentry = alias;
1838 +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
1839 +index e703318..8ebd3f5 100644
1840 +--- a/fs/lockd/svclock.c
1841 ++++ b/fs/lockd/svclock.c
1842 +@@ -939,6 +939,7 @@ nlmsvc_retry_blocked(void)
1843 + unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
1844 + struct nlm_block *block;
1845 +
1846 ++ spin_lock(&nlm_blocked_lock);
1847 + while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
1848 + block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
1849 +
1850 +@@ -948,6 +949,7 @@ nlmsvc_retry_blocked(void)
1851 + timeout = block->b_when - jiffies;
1852 + break;
1853 + }
1854 ++ spin_unlock(&nlm_blocked_lock);
1855 +
1856 + dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
1857 + block, block->b_when);
1858 +@@ -957,7 +959,9 @@ nlmsvc_retry_blocked(void)
1859 + retry_deferred_block(block);
1860 + } else
1861 + nlmsvc_grant_blocked(block);
1862 ++ spin_lock(&nlm_blocked_lock);
1863 + }
1864 ++ spin_unlock(&nlm_blocked_lock);
1865 +
1866 + return timeout;
1867 + }
1868 +diff --git a/include/linux/edac.h b/include/linux/edac.h
1869 +index 0b76327..5c6d7fb 100644
1870 +--- a/include/linux/edac.h
1871 ++++ b/include/linux/edac.h
1872 +@@ -622,7 +622,7 @@ struct edac_raw_error_desc {
1873 + */
1874 + struct mem_ctl_info {
1875 + struct device dev;
1876 +- struct bus_type bus;
1877 ++ struct bus_type *bus;
1878 +
1879 + struct list_head link; /* for global list of mem_ctl_info structs */
1880 +
1881 +@@ -742,4 +742,9 @@ struct mem_ctl_info {
1882 + #endif
1883 + };
1884 +
1885 ++/*
1886 ++ * Maximum number of memory controllers in the coherent fabric.
1887 ++ */
1888 ++#define EDAC_MAX_MCS 16
1889 ++
1890 + #endif
1891 +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
1892 +index 637fa71d..0b34988 100644
1893 +--- a/include/linux/if_vlan.h
1894 ++++ b/include/linux/if_vlan.h
1895 +@@ -79,9 +79,8 @@ static inline int is_vlan_dev(struct net_device *dev)
1896 + }
1897 +
1898 + #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
1899 +-#define vlan_tx_nonzero_tag_present(__skb) \
1900 +- (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
1901 + #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
1902 ++#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
1903 +
1904 + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1905 +
1906 +diff --git a/include/linux/virtio.h b/include/linux/virtio.h
1907 +index 9ff8645..72398ee 100644
1908 +--- a/include/linux/virtio.h
1909 ++++ b/include/linux/virtio.h
1910 +@@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
1911 +
1912 + bool virtqueue_enable_cb(struct virtqueue *vq);
1913 +
1914 ++unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
1915 ++
1916 ++bool virtqueue_poll(struct virtqueue *vq, unsigned);
1917 ++
1918 + bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
1919 +
1920 + void *virtqueue_detach_unused_buf(struct virtqueue *vq);
1921 +diff --git a/include/net/addrconf.h b/include/net/addrconf.h
1922 +index 21f70270..01b1a1a 100644
1923 +--- a/include/net/addrconf.h
1924 ++++ b/include/net/addrconf.h
1925 +@@ -86,6 +86,9 @@ extern int ipv6_dev_get_saddr(struct net *net,
1926 + const struct in6_addr *daddr,
1927 + unsigned int srcprefs,
1928 + struct in6_addr *saddr);
1929 ++extern int __ipv6_get_lladdr(struct inet6_dev *idev,
1930 ++ struct in6_addr *addr,
1931 ++ unsigned char banned_flags);
1932 + extern int ipv6_get_lladdr(struct net_device *dev,
1933 + struct in6_addr *addr,
1934 + unsigned char banned_flags);
1935 +diff --git a/include/net/udp.h b/include/net/udp.h
1936 +index 065f379..ad99eed 100644
1937 +--- a/include/net/udp.h
1938 ++++ b/include/net/udp.h
1939 +@@ -181,6 +181,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
1940 + extern void udp_err(struct sk_buff *, u32);
1941 + extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
1942 + struct msghdr *msg, size_t len);
1943 ++extern int udp_push_pending_frames(struct sock *sk);
1944 + extern void udp_flush_pending_frames(struct sock *sk);
1945 + extern int udp_rcv(struct sk_buff *skb);
1946 + extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
1947 +diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
1948 +index 0b46fd5..e36a4ae 100644
1949 +--- a/include/uapi/linux/if_pppox.h
1950 ++++ b/include/uapi/linux/if_pppox.h
1951 +@@ -135,11 +135,11 @@ struct pppoe_tag {
1952 +
1953 + struct pppoe_hdr {
1954 + #if defined(__LITTLE_ENDIAN_BITFIELD)
1955 +- __u8 ver : 4;
1956 + __u8 type : 4;
1957 ++ __u8 ver : 4;
1958 + #elif defined(__BIG_ENDIAN_BITFIELD)
1959 +- __u8 type : 4;
1960 + __u8 ver : 4;
1961 ++ __u8 type : 4;
1962 + #else
1963 + #error "Please fix <asm/byteorder.h>"
1964 + #endif
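The pppoe_hdr fix swaps the declaration order of the two 4-bit bitfields in both endianness branches. On the wire, the first PPPoE header byte carries VER in the high nibble and TYPE in the low nibble (RFC 2516), and on little-endian Linux ABIs the first-declared bitfield occupies the least significant bits, so TYPE must come first there and VER first on big-endian. Worked through for the only conforming value, assuming that allocation rule:

    /* First header byte 0x11 (ver = 1, type = 1):
     * little-endian: first-declared field <- low nibble (wire TYPE)
     *   old order: ver read the TYPE nibble   (wrong)
     *   new order: type reads the TYPE nibble (right)
     * Latent in practice, since conforming packets have ver == type == 1.
     */
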
1965 +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
1966 +index fd4b13b..2288fbd 100644
1967 +--- a/kernel/hrtimer.c
1968 ++++ b/kernel/hrtimer.c
1969 +@@ -721,17 +721,20 @@ static int hrtimer_switch_to_hres(void)
1970 + return 1;
1971 + }
1972 +
1973 ++static void clock_was_set_work(struct work_struct *work)
1974 ++{
1975 ++ clock_was_set();
1976 ++}
1977 ++
1978 ++static DECLARE_WORK(hrtimer_work, clock_was_set_work);
1979 ++
1980 + /*
1981 +- * Called from timekeeping code to reprogramm the hrtimer interrupt
1982 +- * device. If called from the timer interrupt context we defer it to
1983 +- * softirq context.
1984 ++ * Called from timekeeping and resume code to reprogramm the hrtimer
1985 ++ * interrupt device on all cpus.
1986 + */
1987 + void clock_was_set_delayed(void)
1988 + {
1989 +- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1990 +-
1991 +- cpu_base->clock_was_set = 1;
1992 +- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1993 ++ schedule_work(&hrtimer_work);
1994 + }
1995 +
1996 + #else
1997 +@@ -780,8 +783,10 @@ void hrtimers_resume(void)
1998 + WARN_ONCE(!irqs_disabled(),
1999 + KERN_INFO "hrtimers_resume() called with IRQs enabled!");
2000 +
2001 ++ /* Retrigger on the local CPU */
2002 + retrigger_next_event(NULL);
2003 +- timerfd_clock_was_set();
2004 ++ /* And schedule a retrigger for all others */
2005 ++ clock_was_set_delayed();
2006 + }
2007 +
2008 + static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
2009 +@@ -1432,13 +1437,6 @@ void hrtimer_peek_ahead_timers(void)
2010 +
2011 + static void run_hrtimer_softirq(struct softirq_action *h)
2012 + {
2013 +- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
2014 +-
2015 +- if (cpu_base->clock_was_set) {
2016 +- cpu_base->clock_was_set = 0;
2017 +- clock_was_set();
2018 +- }
2019 +-
2020 + hrtimer_peek_ahead_timers();
2021 + }
2022 +
2023 +diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
2024 +index c6422ff..9012ecf 100644
2025 +--- a/kernel/power/autosleep.c
2026 ++++ b/kernel/power/autosleep.c
2027 +@@ -32,7 +32,8 @@ static void try_to_suspend(struct work_struct *work)
2028 +
2029 + mutex_lock(&autosleep_lock);
2030 +
2031 +- if (!pm_save_wakeup_count(initial_count)) {
2032 ++ if (!pm_save_wakeup_count(initial_count) ||
2033 ++ system_state != SYSTEM_RUNNING) {
2034 + mutex_unlock(&autosleep_lock);
2035 + goto out;
2036 + }
2037 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2038 +index 566cf2b..74fdc5c 100644
2039 +--- a/lib/Kconfig.debug
2040 ++++ b/lib/Kconfig.debug
2041 +@@ -1272,7 +1272,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
2042 + depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
2043 + depends on !X86_64
2044 + select STACKTRACE
2045 +- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
2046 ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
2047 + help
2048 + Provide stacktrace filter for fault-injection capabilities
2049 +
2050 +diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
2051 +index 8a15eaa..4a78c4d 100644
2052 +--- a/net/8021q/vlan_core.c
2053 ++++ b/net/8021q/vlan_core.c
2054 +@@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
2055 + {
2056 + struct sk_buff *skb = *skbp;
2057 + __be16 vlan_proto = skb->vlan_proto;
2058 +- u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
2059 ++ u16 vlan_id = vlan_tx_tag_get_id(skb);
2060 + struct net_device *vlan_dev;
2061 + struct vlan_pcpu_stats *rx_stats;
2062 +
2063 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2064 +index 3a8c8fd..1cd3d2a 100644
2065 +--- a/net/8021q/vlan_dev.c
2066 ++++ b/net/8021q/vlan_dev.c
2067 +@@ -73,6 +73,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
2068 + {
2069 + struct vlan_priority_tci_mapping *mp;
2070 +
2071 ++ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
2072 ++
2073 + mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
2074 + while (mp) {
2075 + if (mp->priority == skb->priority) {
2076 +@@ -249,6 +251,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
2077 + np->next = mp;
2078 + np->priority = skb_prio;
2079 + np->vlan_qos = vlan_qos;
2080 ++ /* Before inserting this element in hash table, make sure all its fields
2081 ++ * are committed to memory.
2082 ++ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
2083 ++ */
2084 ++ smp_wmb();
2085 + vlan->egress_priority_map[skb_prio & 0xF] = np;
2086 + if (vlan_qos)
2087 + vlan->nr_egress_mappings++;
2088 +diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
2089 +index de8df95..2ee3879 100644
2090 +--- a/net/9p/trans_common.c
2091 ++++ b/net/9p/trans_common.c
2092 +@@ -24,11 +24,11 @@
2093 + */
2094 + void p9_release_pages(struct page **pages, int nr_pages)
2095 + {
2096 +- int i = 0;
2097 +- while (pages[i] && nr_pages--) {
2098 +- put_page(pages[i]);
2099 +- i++;
2100 +- }
2101 ++ int i;
2102 ++
2103 ++ for (i = 0; i < nr_pages; i++)
2104 ++ if (pages[i])
2105 ++ put_page(pages[i]);
2106 + }
2107 + EXPORT_SYMBOL(p9_release_pages);
2108 +
2109 +diff --git a/net/core/dev.c b/net/core/dev.c
2110 +index faebb39..7ddbb31 100644
2111 +--- a/net/core/dev.c
2112 ++++ b/net/core/dev.c
2113 +@@ -3513,8 +3513,15 @@ ncls:
2114 + }
2115 + }
2116 +
2117 +- if (vlan_tx_nonzero_tag_present(skb))
2118 +- skb->pkt_type = PACKET_OTHERHOST;
2119 ++ if (unlikely(vlan_tx_tag_present(skb))) {
2120 ++ if (vlan_tx_tag_get_id(skb))
2121 ++ skb->pkt_type = PACKET_OTHERHOST;
2122 ++ /* Note: we might in the future use prio bits
2123 ++ * and set skb->priority like in vlan_do_receive()
2124 ++ * For the time being, just ignore Priority Code Point
2125 ++ */
2126 ++ skb->vlan_tci = 0;
2127 ++ }
2128 +
2129 + /* deliver only exact match when indicated */
2130 + null_or_dev = deliver_exact ? skb->dev : NULL;
2131 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2132 +index 5c56b21..ce90b02 100644
2133 +--- a/net/core/neighbour.c
2134 ++++ b/net/core/neighbour.c
2135 +@@ -231,7 +231,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
2136 + we must kill timers etc. and move
2137 + it to safe state.
2138 + */
2139 +- skb_queue_purge(&n->arp_queue);
2140 ++ __skb_queue_purge(&n->arp_queue);
2141 + n->arp_queue_len_bytes = 0;
2142 + n->output = neigh_blackhole;
2143 + if (n->nud_state & NUD_VALID)
2144 +@@ -286,7 +286,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
2145 + if (!n)
2146 + goto out_entries;
2147 +
2148 +- skb_queue_head_init(&n->arp_queue);
2149 ++ __skb_queue_head_init(&n->arp_queue);
2150 + rwlock_init(&n->lock);
2151 + seqlock_init(&n->ha_lock);
2152 + n->updated = n->used = now;
2153 +@@ -708,7 +708,9 @@ void neigh_destroy(struct neighbour *neigh)
2154 + if (neigh_del_timer(neigh))
2155 + pr_warn("Impossible event\n");
2156 +
2157 +- skb_queue_purge(&neigh->arp_queue);
2158 ++ write_lock_bh(&neigh->lock);
2159 ++ __skb_queue_purge(&neigh->arp_queue);
2160 ++ write_unlock_bh(&neigh->lock);
2161 + neigh->arp_queue_len_bytes = 0;
2162 +
2163 + if (dev->netdev_ops->ndo_neigh_destroy)
2164 +@@ -858,7 +860,7 @@ static void neigh_invalidate(struct neighbour *neigh)
2165 + neigh->ops->error_report(neigh, skb);
2166 + write_lock(&neigh->lock);
2167 + }
2168 +- skb_queue_purge(&neigh->arp_queue);
2169 ++ __skb_queue_purge(&neigh->arp_queue);
2170 + neigh->arp_queue_len_bytes = 0;
2171 + }
2172 +
2173 +@@ -1210,7 +1212,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
2174 +
2175 + write_lock_bh(&neigh->lock);
2176 + }
2177 +- skb_queue_purge(&neigh->arp_queue);
2178 ++ __skb_queue_purge(&neigh->arp_queue);
2179 + neigh->arp_queue_len_bytes = 0;
2180 + }
2181 + out:
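All four neighbour.c hunks move arp_queue purging to __skb_queue_purge(), with neigh_destroy() now taking neigh->lock around it. This follows the standard skbuff naming convention: skb_queue_purge() acquires the queue's internal spinlock itself, while the double-underscore variant leaves serialization to the caller, so arp_queue is now guarded consistently by neigh->lock alone rather than by two different locks. A sketch of the convention, assuming the usual skbuff semantics:

    /* Standalone use: the queue's own lock is taken internally. */
    skb_queue_purge(&n->arp_queue);

    /* Caller-serialized use, as the patch does in neigh_destroy(): */
    write_lock_bh(&n->lock);
    __skb_queue_purge(&n->arp_queue);
    n->arp_queue_len_bytes = 0;
    write_unlock_bh(&n->lock);
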
2182 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2183 +index 2a83591..855004f 100644
2184 +--- a/net/ipv4/ip_gre.c
2185 ++++ b/net/ipv4/ip_gre.c
2186 +@@ -503,10 +503,11 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
2187 +
2188 + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
2189 + return -EFAULT;
2190 +- if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
2191 +- p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
2192 +- ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) {
2193 +- return -EINVAL;
2194 ++ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
2195 ++ if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
2196 ++ p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
2197 ++ ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
2198 ++ return -EINVAL;
2199 + }
2200 + p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
2201 + p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
2202 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2203 +index 3da817b..15e3e68 100644
2204 +--- a/net/ipv4/ip_input.c
2205 ++++ b/net/ipv4/ip_input.c
2206 +@@ -190,10 +190,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
2207 + {
2208 + struct net *net = dev_net(skb->dev);
2209 +
2210 +- __skb_pull(skb, ip_hdrlen(skb));
2211 +-
2212 +- /* Point into the IP datagram, just past the header. */
2213 +- skb_reset_transport_header(skb);
2214 ++ __skb_pull(skb, skb_network_header_len(skb));
2215 +
2216 + rcu_read_lock();
2217 + {
2218 +@@ -437,6 +434,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
2219 + goto drop;
2220 + }
2221 +
2222 ++ skb->transport_header = skb->network_header + iph->ihl*4;
2223 ++
2224 + /* Remove any debris in the socket control block */
2225 + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
2226 +
2227 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2228 +index 7fa8f08..cbfc37f 100644
2229 +--- a/net/ipv4/ip_tunnel.c
2230 ++++ b/net/ipv4/ip_tunnel.c
2231 +@@ -486,6 +486,53 @@ drop:
2232 + }
2233 + EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
2234 +
2235 ++static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
2236 ++ struct rtable *rt, __be16 df)
2237 ++{
2238 ++ struct ip_tunnel *tunnel = netdev_priv(dev);
2239 ++ int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
2240 ++ int mtu;
2241 ++
2242 ++ if (df)
2243 ++ mtu = dst_mtu(&rt->dst) - dev->hard_header_len
2244 ++ - sizeof(struct iphdr) - tunnel->hlen;
2245 ++ else
2246 ++ mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2247 ++
2248 ++ if (skb_dst(skb))
2249 ++ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
2250 ++
2251 ++ if (skb->protocol == htons(ETH_P_IP)) {
2252 ++ if (!skb_is_gso(skb) &&
2253 ++ (df & htons(IP_DF)) && mtu < pkt_size) {
2254 ++ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
2255 ++ return -E2BIG;
2256 ++ }
2257 ++ }
2258 ++#if IS_ENABLED(CONFIG_IPV6)
2259 ++ else if (skb->protocol == htons(ETH_P_IPV6)) {
2260 ++ struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
2261 ++
2262 ++ if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
2263 ++ mtu >= IPV6_MIN_MTU) {
2264 ++ if ((tunnel->parms.iph.daddr &&
2265 ++ !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
2266 ++ rt6->rt6i_dst.plen == 128) {
2267 ++ rt6->rt6i_flags |= RTF_MODIFIED;
2268 ++ dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
2269 ++ }
2270 ++ }
2271 ++
2272 ++ if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
2273 ++ mtu < pkt_size) {
2274 ++ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
2275 ++ return -E2BIG;
2276 ++ }
2277 ++ }
2278 ++#endif
2279 ++ return 0;
2280 ++}
2281 ++
2282 + void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2283 + const struct iphdr *tnl_params)
2284 + {
2285 +@@ -499,7 +546,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2286 + struct net_device *tdev; /* Device to other host */
2287 + unsigned int max_headroom; /* The extra header space needed */
2288 + __be32 dst;
2289 +- int mtu;
2290 +
2291 + inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
2292 +
2293 +@@ -579,50 +625,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2294 + goto tx_error;
2295 + }
2296 +
2297 +- df = tnl_params->frag_off;
2298 +
2299 +- if (df)
2300 +- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
2301 +- - sizeof(struct iphdr);
2302 +- else
2303 +- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2304 +-
2305 +- if (skb_dst(skb))
2306 +- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
2307 +-
2308 +- if (skb->protocol == htons(ETH_P_IP)) {
2309 +- df |= (inner_iph->frag_off&htons(IP_DF));
2310 +-
2311 +- if (!skb_is_gso(skb) &&
2312 +- (inner_iph->frag_off&htons(IP_DF)) &&
2313 +- mtu < ntohs(inner_iph->tot_len)) {
2314 +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
2315 +- ip_rt_put(rt);
2316 +- goto tx_error;
2317 +- }
2318 +- }
2319 +-#if IS_ENABLED(CONFIG_IPV6)
2320 +- else if (skb->protocol == htons(ETH_P_IPV6)) {
2321 +- struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
2322 +-
2323 +- if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
2324 +- mtu >= IPV6_MIN_MTU) {
2325 +- if ((tunnel->parms.iph.daddr &&
2326 +- !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
2327 +- rt6->rt6i_dst.plen == 128) {
2328 +- rt6->rt6i_flags |= RTF_MODIFIED;
2329 +- dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
2330 +- }
2331 +- }
2332 +-
2333 +- if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
2334 +- mtu < skb->len) {
2335 +- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
2336 +- ip_rt_put(rt);
2337 +- goto tx_error;
2338 +- }
2339 ++ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
2340 ++ ip_rt_put(rt);
2341 ++ goto tx_error;
2342 + }
2343 +-#endif
2344 +
2345 + if (tunnel->err_count > 0) {
2346 + if (time_before(jiffies,
2347 +@@ -646,6 +653,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2348 + ttl = ip4_dst_hoplimit(&rt->dst);
2349 + }
2350 +
2351 ++ df = tnl_params->frag_off;
2352 ++ if (skb->protocol == htons(ETH_P_IP))
2353 ++ df |= (inner_iph->frag_off&htons(IP_DF));
2354 ++
2355 + max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
2356 + + rt->dst.header_len;
2357 + if (max_headroom > dev->needed_headroom) {
2358 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2359 +index c118f6b..17cc0ff 100644
2360 +--- a/net/ipv4/ip_vti.c
2361 ++++ b/net/ipv4/ip_vti.c
2362 +@@ -606,17 +606,10 @@ static int __net_init vti_fb_tunnel_init(struct net_device *dev)
2363 + struct iphdr *iph = &tunnel->parms.iph;
2364 + struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
2365 +
2366 +- tunnel->dev = dev;
2367 +- strcpy(tunnel->parms.name, dev->name);
2368 +-
2369 + iph->version = 4;
2370 + iph->protocol = IPPROTO_IPIP;
2371 + iph->ihl = 5;
2372 +
2373 +- dev->tstats = alloc_percpu(struct pcpu_tstats);
2374 +- if (!dev->tstats)
2375 +- return -ENOMEM;
2376 +-
2377 + dev_hold(dev);
2378 + rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
2379 + return 0;
2380 +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
2381 +index 77bfcce..7cfc456 100644
2382 +--- a/net/ipv4/ipip.c
2383 ++++ b/net/ipv4/ipip.c
2384 +@@ -240,11 +240,13 @@ ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2385 + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
2386 + return -EFAULT;
2387 +
2388 +- if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
2389 +- p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
2390 +- return -EINVAL;
2391 +- if (p.i_key || p.o_key || p.i_flags || p.o_flags)
2392 +- return -EINVAL;
2393 ++ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
2394 ++ if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
2395 ++ p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
2396 ++ return -EINVAL;
2397 ++ }
2398 ++
2399 ++ p.i_key = p.o_key = p.i_flags = p.o_flags = 0;
2400 + if (p.iph.ttl)
2401 + p.iph.frag_off |= htons(IP_DF);
2402 +
2403 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2404 +index 0bf5d39..93b731d 100644
2405 +--- a/net/ipv4/udp.c
2406 ++++ b/net/ipv4/udp.c
2407 +@@ -799,7 +799,7 @@ send:
2408 + /*
2409 + * Push out all pending data as one UDP datagram. Socket is locked.
2410 + */
2411 +-static int udp_push_pending_frames(struct sock *sk)
2412 ++int udp_push_pending_frames(struct sock *sk)
2413 + {
2414 + struct udp_sock *up = udp_sk(sk);
2415 + struct inet_sock *inet = inet_sk(sk);
2416 +@@ -818,6 +818,7 @@ out:
2417 + up->pending = 0;
2418 + return err;
2419 + }
2420 ++EXPORT_SYMBOL(udp_push_pending_frames);
2421 +
2422 + int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2423 + size_t len)
2424 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2425 +index 4ab4c38..fb8c94c 100644
2426 +--- a/net/ipv6/addrconf.c
2427 ++++ b/net/ipv6/addrconf.c
2428 +@@ -1448,6 +1448,23 @@ try_nextdev:
2429 + }
2430 + EXPORT_SYMBOL(ipv6_dev_get_saddr);
2431 +
2432 ++int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
2433 ++ unsigned char banned_flags)
2434 ++{
2435 ++ struct inet6_ifaddr *ifp;
2436 ++ int err = -EADDRNOTAVAIL;
2437 ++
2438 ++ list_for_each_entry(ifp, &idev->addr_list, if_list) {
2439 ++ if (ifp->scope == IFA_LINK &&
2440 ++ !(ifp->flags & banned_flags)) {
2441 ++ *addr = ifp->addr;
2442 ++ err = 0;
2443 ++ break;
2444 ++ }
2445 ++ }
2446 ++ return err;
2447 ++}
2448 ++
2449 + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
2450 + unsigned char banned_flags)
2451 + {
2452 +@@ -1457,17 +1474,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
2453 + rcu_read_lock();
2454 + idev = __in6_dev_get(dev);
2455 + if (idev) {
2456 +- struct inet6_ifaddr *ifp;
2457 +-
2458 + read_lock_bh(&idev->lock);
2459 +- list_for_each_entry(ifp, &idev->addr_list, if_list) {
2460 +- if (ifp->scope == IFA_LINK &&
2461 +- !(ifp->flags & banned_flags)) {
2462 +- *addr = ifp->addr;
2463 +- err = 0;
2464 +- break;
2465 +- }
2466 +- }
2467 ++ err = __ipv6_get_lladdr(idev, addr, banned_flags);
2468 + read_unlock_bh(&idev->lock);
2469 + }
2470 + rcu_read_unlock();
2471 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2472 +index 192dd1a..5fc9c7a 100644
2473 +--- a/net/ipv6/ip6_fib.c
2474 ++++ b/net/ipv6/ip6_fib.c
2475 +@@ -632,6 +632,12 @@ insert_above:
2476 + return ln;
2477 + }
2478 +
2479 ++static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
2480 ++{
2481 ++ return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
2482 ++ RTF_GATEWAY;
2483 ++}
2484 ++
2485 + /*
2486 + * Insert routing information in a node.
2487 + */
2488 +@@ -646,6 +652,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2489 + int add = (!info->nlh ||
2490 + (info->nlh->nlmsg_flags & NLM_F_CREATE));
2491 + int found = 0;
2492 ++ bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
2493 +
2494 + ins = &fn->leaf;
2495 +
2496 +@@ -691,9 +698,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2497 + * To avoid long list, we only had siblings if the
2498 + * route have a gateway.
2499 + */
2500 +- if (rt->rt6i_flags & RTF_GATEWAY &&
2501 +- !(rt->rt6i_flags & RTF_EXPIRES) &&
2502 +- !(iter->rt6i_flags & RTF_EXPIRES))
2503 ++ if (rt_can_ecmp &&
2504 ++ rt6_qualify_for_ecmp(iter))
2505 + rt->rt6i_nsiblings++;
2506 + }
2507 +
2508 +@@ -715,7 +721,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2509 + /* Find the first route that have the same metric */
2510 + sibling = fn->leaf;
2511 + while (sibling) {
2512 +- if (sibling->rt6i_metric == rt->rt6i_metric) {
2513 ++ if (sibling->rt6i_metric == rt->rt6i_metric &&
2514 ++ rt6_qualify_for_ecmp(sibling)) {
2515 + list_add_tail(&rt->rt6i_siblings,
2516 + &sibling->rt6i_siblings);
2517 + break;
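rt6_qualify_for_ecmp() admits a route into an ECMP sibling group only when RTF_GATEWAY is set and neither RTF_ADDRCONF (router-advertisement routes) nor RTF_DYNAMIC (dynamically created, e.g. by redirects) is, and the masked comparison folds all three tests into one. Worked out for a few flag combinations, using only the flags visible in the expression:

    /* (flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == RTF_GATEWAY ?
     * RTF_GATEWAY                -> masked = GATEWAY          -> true
     * RTF_GATEWAY | RTF_DYNAMIC  -> masked = GATEWAY|DYNAMIC  -> false
     * 0  (directly connected)    -> masked = 0                -> false
     */
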
2518 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2519 +index d5d20cd..6e3ddf8 100644
2520 +--- a/net/ipv6/ip6_output.c
2521 ++++ b/net/ipv6/ip6_output.c
2522 +@@ -1098,11 +1098,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
2523 + return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
2524 + }
2525 +
2526 +-static void ip6_append_data_mtu(int *mtu,
2527 ++static void ip6_append_data_mtu(unsigned int *mtu,
2528 + int *maxfraglen,
2529 + unsigned int fragheaderlen,
2530 + struct sk_buff *skb,
2531 +- struct rt6_info *rt)
2532 ++ struct rt6_info *rt,
2533 ++ bool pmtuprobe)
2534 + {
2535 + if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
2536 + if (skb == NULL) {
2537 +@@ -1114,7 +1115,9 @@ static void ip6_append_data_mtu(int *mtu,
2538 + * this fragment is not first, the headers
2539 + * space is regarded as data space.
2540 + */
2541 +- *mtu = dst_mtu(rt->dst.path);
2542 ++ *mtu = min(*mtu, pmtuprobe ?
2543 ++ rt->dst.dev->mtu :
2544 ++ dst_mtu(rt->dst.path));
2545 + }
2546 + *maxfraglen = ((*mtu - fragheaderlen) & ~7)
2547 + + fragheaderlen - sizeof(struct frag_hdr);
2548 +@@ -1131,11 +1134,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2549 + struct ipv6_pinfo *np = inet6_sk(sk);
2550 + struct inet_cork *cork;
2551 + struct sk_buff *skb, *skb_prev = NULL;
2552 +- unsigned int maxfraglen, fragheaderlen;
2553 ++ unsigned int maxfraglen, fragheaderlen, mtu;
2554 + int exthdrlen;
2555 + int dst_exthdrlen;
2556 + int hh_len;
2557 +- int mtu;
2558 + int copy;
2559 + int err;
2560 + int offset = 0;
2561 +@@ -1292,7 +1294,9 @@ alloc_new_skb:
2562 + /* update mtu and maxfraglen if necessary */
2563 + if (skb == NULL || skb_prev == NULL)
2564 + ip6_append_data_mtu(&mtu, &maxfraglen,
2565 +- fragheaderlen, skb, rt);
2566 ++ fragheaderlen, skb, rt,
2567 ++ np->pmtudisc ==
2568 ++ IPV6_PMTUDISC_PROBE);
2569 +
2570 + skb_prev = skb;
2571 +
2572 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
2573 +index bfa6cc3..c3998c2 100644
2574 +--- a/net/ipv6/mcast.c
2575 ++++ b/net/ipv6/mcast.c
2576 +@@ -1343,8 +1343,9 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
2577 + hdr->daddr = *daddr;
2578 + }
2579 +
2580 +-static struct sk_buff *mld_newpack(struct net_device *dev, int size)
2581 ++static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
2582 + {
2583 ++ struct net_device *dev = idev->dev;
2584 + struct net *net = dev_net(dev);
2585 + struct sock *sk = net->ipv6.igmp_sk;
2586 + struct sk_buff *skb;
2587 +@@ -1369,7 +1370,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
2588 +
2589 + skb_reserve(skb, hlen);
2590 +
2591 +- if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2592 ++ if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
2593 + /* <draft-ietf-magma-mld-source-05.txt>:
2594 + * use unspecified address as the source address
2595 + * when a valid link-local address is not available.
2596 +@@ -1465,7 +1466,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2597 + struct mld2_grec *pgr;
2598 +
2599 + if (!skb)
2600 +- skb = mld_newpack(dev, dev->mtu);
2601 ++ skb = mld_newpack(pmc->idev, dev->mtu);
2602 + if (!skb)
2603 + return NULL;
2604 + pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
2605 +@@ -1485,7 +1486,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2606 + static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2607 + int type, int gdeleted, int sdeleted)
2608 + {
2609 +- struct net_device *dev = pmc->idev->dev;
2610 ++ struct inet6_dev *idev = pmc->idev;
2611 ++ struct net_device *dev = idev->dev;
2612 + struct mld2_report *pmr;
2613 + struct mld2_grec *pgr = NULL;
2614 + struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2615 +@@ -1514,7 +1516,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2616 + AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2617 + if (skb)
2618 + mld_sendpack(skb);
2619 +- skb = mld_newpack(dev, dev->mtu);
2620 ++ skb = mld_newpack(idev, dev->mtu);
2621 + }
2622 + }
2623 + first = 1;
2624 +@@ -1541,7 +1543,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2625 + pgr->grec_nsrcs = htons(scount);
2626 + if (skb)
2627 + mld_sendpack(skb);
2628 +- skb = mld_newpack(dev, dev->mtu);
2629 ++ skb = mld_newpack(idev, dev->mtu);
2630 + first = 1;
2631 + scount = 0;
2632 + }
2633 +@@ -1596,8 +1598,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2634 + struct sk_buff *skb = NULL;
2635 + int type;
2636 +
2637 ++ read_lock_bh(&idev->lock);
2638 + if (!pmc) {
2639 +- read_lock_bh(&idev->lock);
2640 + for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2641 + if (pmc->mca_flags & MAF_NOREPORT)
2642 + continue;
2643 +@@ -1609,7 +1611,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2644 + skb = add_grec(skb, pmc, type, 0, 0);
2645 + spin_unlock_bh(&pmc->mca_lock);
2646 + }
2647 +- read_unlock_bh(&idev->lock);
2648 + } else {
2649 + spin_lock_bh(&pmc->mca_lock);
2650 + if (pmc->mca_sfcount[MCAST_EXCLUDE])
2651 +@@ -1619,6 +1620,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2652 + skb = add_grec(skb, pmc, type, 0, 0);
2653 + spin_unlock_bh(&pmc->mca_lock);
2654 + }
2655 ++ read_unlock_bh(&idev->lock);
2656 + if (skb)
2657 + mld_sendpack(skb);
2658 + }
2659 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2660 +index ad0aa6b..bacce6c 100644
2661 +--- a/net/ipv6/route.c
2662 ++++ b/net/ipv6/route.c
2663 +@@ -65,6 +65,12 @@
2664 + #include <linux/sysctl.h>
2665 + #endif
2666 +
2667 ++enum rt6_nud_state {
2668 ++ RT6_NUD_FAIL_HARD = -2,
2669 ++ RT6_NUD_FAIL_SOFT = -1,
2670 ++ RT6_NUD_SUCCEED = 1
2671 ++};
2672 ++
2673 + static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
2674 + const struct in6_addr *dest);
2675 + static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
2676 +@@ -527,26 +533,29 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
2677 + return 0;
2678 + }
2679 +
2680 +-static inline bool rt6_check_neigh(struct rt6_info *rt)
2681 ++static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
2682 + {
2683 + struct neighbour *neigh;
2684 +- bool ret = false;
2685 ++ enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
2686 +
2687 + if (rt->rt6i_flags & RTF_NONEXTHOP ||
2688 + !(rt->rt6i_flags & RTF_GATEWAY))
2689 +- return true;
2690 ++ return RT6_NUD_SUCCEED;
2691 +
2692 + rcu_read_lock_bh();
2693 + neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2694 + if (neigh) {
2695 + read_lock(&neigh->lock);
2696 + if (neigh->nud_state & NUD_VALID)
2697 +- ret = true;
2698 ++ ret = RT6_NUD_SUCCEED;
2699 + #ifdef CONFIG_IPV6_ROUTER_PREF
2700 + else if (!(neigh->nud_state & NUD_FAILED))
2701 +- ret = true;
2702 ++ ret = RT6_NUD_SUCCEED;
2703 + #endif
2704 + read_unlock(&neigh->lock);
2705 ++ } else {
2706 ++ ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
2707 ++ RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
2708 + }
2709 + rcu_read_unlock_bh();
2710 +
2711 +@@ -560,43 +569,52 @@ static int rt6_score_route(struct rt6_info *rt, int oif,
2712 +
2713 + m = rt6_check_dev(rt, oif);
2714 + if (!m && (strict & RT6_LOOKUP_F_IFACE))
2715 +- return -1;
2716 ++ return RT6_NUD_FAIL_HARD;
2717 + #ifdef CONFIG_IPV6_ROUTER_PREF
2718 + m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
2719 + #endif
2720 +- if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
2721 +- return -1;
2722 ++ if (strict & RT6_LOOKUP_F_REACHABLE) {
2723 ++ int n = rt6_check_neigh(rt);
2724 ++ if (n < 0)
2725 ++ return n;
2726 ++ }
2727 + return m;
2728 + }
2729 +
2730 + static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
2731 +- int *mpri, struct rt6_info *match)
2732 ++ int *mpri, struct rt6_info *match,
2733 ++ bool *do_rr)
2734 + {
2735 + int m;
2736 ++ bool match_do_rr = false;
2737 +
2738 + if (rt6_check_expired(rt))
2739 + goto out;
2740 +
2741 + m = rt6_score_route(rt, oif, strict);
2742 +- if (m < 0)
2743 ++ if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
2744 ++ match_do_rr = true;
2745 ++ m = 0; /* lowest valid score */
2746 ++ } else if (m < 0) {
2747 + goto out;
2748 ++ }
2749 ++
2750 ++ if (strict & RT6_LOOKUP_F_REACHABLE)
2751 ++ rt6_probe(rt);
2752 +
2753 + if (m > *mpri) {
2754 +- if (strict & RT6_LOOKUP_F_REACHABLE)
2755 +- rt6_probe(match);
2756 ++ *do_rr = match_do_rr;
2757 + *mpri = m;
2758 + match = rt;
2759 +- } else if (strict & RT6_LOOKUP_F_REACHABLE) {
2760 +- rt6_probe(rt);
2761 + }
2762 +-
2763 + out:
2764 + return match;
2765 + }
2766 +
2767 + static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
2768 + struct rt6_info *rr_head,
2769 +- u32 metric, int oif, int strict)
2770 ++ u32 metric, int oif, int strict,
2771 ++ bool *do_rr)
2772 + {
2773 + struct rt6_info *rt, *match;
2774 + int mpri = -1;
2775 +@@ -604,10 +622,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
2776 + match = NULL;
2777 + for (rt = rr_head; rt && rt->rt6i_metric == metric;
2778 + rt = rt->dst.rt6_next)
2779 +- match = find_match(rt, oif, strict, &mpri, match);
2780 ++ match = find_match(rt, oif, strict, &mpri, match, do_rr);
2781 + for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
2782 + rt = rt->dst.rt6_next)
2783 +- match = find_match(rt, oif, strict, &mpri, match);
2784 ++ match = find_match(rt, oif, strict, &mpri, match, do_rr);
2785 +
2786 + return match;
2787 + }
2788 +@@ -616,15 +634,16 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
2789 + {
2790 + struct rt6_info *match, *rt0;
2791 + struct net *net;
2792 ++ bool do_rr = false;
2793 +
2794 + rt0 = fn->rr_ptr;
2795 + if (!rt0)
2796 + fn->rr_ptr = rt0 = fn->leaf;
2797 +
2798 +- match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
2799 ++ match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
2800 ++ &do_rr);
2801 +
2802 +- if (!match &&
2803 +- (strict & RT6_LOOKUP_F_REACHABLE)) {
2804 ++ if (do_rr) {
2805 + struct rt6_info *next = rt0->dst.rt6_next;
2806 +
2807 + /* no entries matched; do round-robin */
2808 +@@ -1074,10 +1093,13 @@ static void ip6_link_failure(struct sk_buff *skb)
2809 +
2810 + rt = (struct rt6_info *) skb_dst(skb);
2811 + if (rt) {
2812 +- if (rt->rt6i_flags & RTF_CACHE)
2813 +- rt6_update_expires(rt, 0);
2814 +- else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
2815 ++ if (rt->rt6i_flags & RTF_CACHE) {
2816 ++ dst_hold(&rt->dst);
2817 ++ if (ip6_del_rt(rt))
2818 ++ dst_free(&rt->dst);
2819 ++ } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
2820 + rt->rt6i_node->fn_sernum = -1;
2821 ++ }
2822 + }
2823 + }
2824 +
2825 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2826 +index 3353634..60df36d 100644
2827 +--- a/net/ipv6/sit.c
2828 ++++ b/net/ipv6/sit.c
2829 +@@ -589,7 +589,7 @@ static int ipip6_rcv(struct sk_buff *skb)
2830 + tunnel->dev->stats.rx_errors++;
2831 + goto out;
2832 + }
2833 +- } else {
2834 ++ } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
2835 + if (is_spoofed_6rd(tunnel, iph->saddr,
2836 + &ipv6_hdr(skb)->saddr) ||
2837 + is_spoofed_6rd(tunnel, iph->daddr,
2838 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2839 +index 42923b1..e7b28f9 100644
2840 +--- a/net/ipv6/udp.c
2841 ++++ b/net/ipv6/udp.c
2842 +@@ -955,11 +955,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
2843 + struct udphdr *uh;
2844 + struct udp_sock *up = udp_sk(sk);
2845 + struct inet_sock *inet = inet_sk(sk);
2846 +- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
2847 ++ struct flowi6 *fl6;
2848 + int err = 0;
2849 + int is_udplite = IS_UDPLITE(sk);
2850 + __wsum csum = 0;
2851 +
2852 ++ if (up->pending == AF_INET)
2853 ++ return udp_push_pending_frames(sk);
2854 ++
2855 ++ fl6 = &inet->cork.fl.u.ip6;
2856 ++
2857 + /* Grab the skbuff where UDP header space exists. */
2858 + if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
2859 + goto out;
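The two udp.c hunks pair up: the IPv4 side un-statics and exports udp_push_pending_frames(), and the IPv6 side calls it when a dual-stack socket holds corked data on the AF_INET path, instead of running the v6 push over a v4 cork. One way to reach that path from userspace — a hedged sketch, not from the patch; on older libcs UDP_CORK may need <linux/udp.h>:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/udp.h>   /* UDP_CORK */
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Cork a dual-stack socket toward an IPv4-mapped peer, then uncork:
     * the cork lands on the v4 path, leaving up->pending == AF_INET. */
    int demo(void)
    {
        int s = socket(AF_INET6, SOCK_DGRAM, 0);
        int on = 1, off = 0;
        struct sockaddr_in6 dst;

        memset(&dst, 0, sizeof(dst));
        dst.sin6_family = AF_INET6;
        dst.sin6_port = htons(9);   /* discard */
        inet_pton(AF_INET6, "::ffff:127.0.0.1", &dst.sin6_addr);

        setsockopt(s, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
        sendto(s, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));
        setsockopt(s, IPPROTO_UDP, UDP_CORK, &off, sizeof(off)); /* uncork */
        close(s);
        return 0;
    }
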
2860 +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2861 +index 8dec687..5ebee2d 100644
2862 +--- a/net/l2tp/l2tp_ppp.c
2863 ++++ b/net/l2tp/l2tp_ppp.c
2864 +@@ -1793,7 +1793,8 @@ static const struct proto_ops pppol2tp_ops = {
2865 +
2866 + static const struct pppox_proto pppol2tp_proto = {
2867 + .create = pppol2tp_create,
2868 +- .ioctl = pppol2tp_ioctl
2869 ++ .ioctl = pppol2tp_ioctl,
2870 ++ .owner = THIS_MODULE,
2871 + };
2872 +
2873 + #ifdef CONFIG_L2TP_V3
2874 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
2875 +index d51852b..5792252 100644
2876 +--- a/net/sched/sch_qfq.c
2877 ++++ b/net/sched/sch_qfq.c
2878 +@@ -113,7 +113,6 @@
2879 +
2880 + #define FRAC_BITS 30 /* fixed point arithmetic */
2881 + #define ONE_FP (1UL << FRAC_BITS)
2882 +-#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
2883 +
2884 + #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
2885 + #define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */
2886 +@@ -189,6 +188,7 @@ struct qfq_sched {
2887 + struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
2888 + u32 num_active_agg; /* Num. of active aggregates */
2889 + u32 wsum; /* weight sum */
2890 ++ u32 iwsum; /* inverse weight sum */
2891 +
2892 + unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
2893 + struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
2894 +@@ -314,6 +314,7 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
2895 +
2896 + q->wsum +=
2897 + (int) agg->class_weight * (new_num_classes - agg->num_classes);
2898 ++ q->iwsum = ONE_FP / q->wsum;
2899 +
2900 + agg->num_classes = new_num_classes;
2901 + }
2902 +@@ -340,6 +341,10 @@ static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
2903 + {
2904 + if (!hlist_unhashed(&agg->nonfull_next))
2905 + hlist_del_init(&agg->nonfull_next);
2906 ++ q->wsum -= agg->class_weight;
2907 ++ if (q->wsum != 0)
2908 ++ q->iwsum = ONE_FP / q->wsum;
2909 ++
2910 + if (q->in_serv_agg == agg)
2911 + q->in_serv_agg = qfq_choose_next_agg(q);
2912 + kfree(agg);
2913 +@@ -827,38 +832,60 @@ static void qfq_make_eligible(struct qfq_sched *q)
2914 + }
2915 + }
2916 +
2917 +-
2918 + /*
2919 +- * The index of the slot in which the aggregate is to be inserted must
2920 +- * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
2921 +- * because the start time of the group may be moved backward by one
2922 +- * slot after the aggregate has been inserted, and this would cause
2923 +- * non-empty slots to be right-shifted by one position.
2924 ++ * The index of the slot in which the input aggregate agg is to be
2925 ++ * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
2926 ++ * and not a '-1' because the start time of the group may be moved
2927 ++ * backward by one slot after the aggregate has been inserted, and
2928 ++ * this would cause non-empty slots to be right-shifted by one
2929 ++ * position.
2930 ++ *
2931 ++ * QFQ+ fully satisfies this bound to the slot index if the parameters
2932 ++ * of the classes are not changed dynamically, and if QFQ+ never
2933 ++ * happens to postpone the service of agg unjustly, i.e., it never
2934 ++ * happens that the aggregate becomes backlogged and eligible, or just
2935 ++ * eligible, while an aggregate with a higher approximated finish time
2936 ++ * is being served. In particular, in this case QFQ+ guarantees that
2937 ++ * the timestamps of agg are low enough that the slot index is never
2938 ++ * higher than 2. Unfortunately, QFQ+ cannot provide the same
2939 ++ * guarantee if it happens to unjustly postpone the service of agg, or
2940 ++ * if the parameters of some class are changed.
2941 ++ *
2942 ++ * As for the first event, i.e., an out-of-order service, the
2943 ++ * upper bound to the slot index guaranteed by QFQ+ grows to
2944 ++ * 2 +
2945 ++ * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
2946 ++ * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
2947 + *
2948 +- * If the weight and lmax (max_pkt_size) of the classes do not change,
2949 +- * then QFQ+ does meet the above contraint according to the current
2950 +- * values of its parameters. In fact, if the weight and lmax of the
2951 +- * classes do not change, then, from the theory, QFQ+ guarantees that
2952 +- * the slot index is never higher than
2953 +- * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
2954 +- * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
2955 ++ * The following function deals with this problem by backward-shifting
2956 ++ * the timestamps of agg, if needed, so as to guarantee that the slot
2957 ++ * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
2958 ++ * cause the service of other aggregates to be postponed, yet the
2959 ++ * worst-case guarantees of these aggregates are not violated. In
2960 ++ * fact, in case of no out-of-order service, the timestamps of agg
2961 ++ * would have been even lower than they are after the backward shift,
2962 ++ * because QFQ+ would have guaranteed a maximum value equal to 2 for
2963 ++ * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
2964 ++ * service is postponed because of the backward-shift would have
2965 ++ * however waited for the service of agg before being served.
2966 + *
2967 +- * When the weight of a class is increased or the lmax of the class is
2968 +- * decreased, a new aggregate with smaller slot size than the original
2969 +- * parent aggregate of the class may happen to be activated. The
2970 +- * activation of this aggregate should be properly delayed to when the
2971 +- * service of the class has finished in the ideal system tracked by
2972 +- * QFQ+. If the activation of the aggregate is not delayed to this
2973 +- * reference time instant, then this aggregate may be unjustly served
2974 +- * before other aggregates waiting for service. This may cause the
2975 +- * above bound to the slot index to be violated for some of these
2976 +- * unlucky aggregates.
2977 ++ * The other event that may cause the slot index to be higher than 2
2978 ++ * for agg is a recent change of the parameters of some class. If the
2979 ++ * weight of a class is increased or the lmax (max_pkt_size) of the
2980 ++ * class is decreased, then a new aggregate with smaller slot size
2981 ++ * than the original parent aggregate of the class may happen to be
2982 ++ * activated. The activation of this aggregate should be properly
2983 ++ * delayed to when the service of the class has finished in the ideal
2984 ++ * system tracked by QFQ+. If the activation of the aggregate is not
2985 ++ * delayed to this reference time instant, then this aggregate may be
2986 ++ * unjustly served before other aggregates waiting for service. This
2987 ++ * may cause the above bound to the slot index to be violated for some
2988 ++ * of these unlucky aggregates.
2989 + *
2990 + * Instead of delaying the activation of the new aggregate, which is
2991 +- * quite complex, the following inaccurate but simple solution is used:
2992 +- * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
2993 +- * timestamps of the aggregate are shifted backward so as to let the
2994 +- * slot index become equal to QFQ_MAX_SLOTS-2.
2995 ++ * quite complex, the above-discussed capping of the slot index is
2996 ++ * used to handle also the consequences of a change of the parameters
2997 ++ * of a class.
2998 + */
2999 + static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
3000 + u64 roundedS)
3001 +@@ -1077,7 +1104,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
3002 + else
3003 + in_serv_agg->budget -= len;
3004 +
3005 +- q->V += (u64)len * IWSUM;
3006 ++ q->V += (u64)len * q->iwsum;
3007 + pr_debug("qfq dequeue: len %u F %lld now %lld\n",
3008 + len, (unsigned long long) in_serv_agg->F,
3009 + (unsigned long long) q->V);
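The sch_qfq change retires the compile-time IWSUM constant in favor of q->iwsum, recomputed whenever q->wsum changes (in qfq_update_agg() and, newly, qfq_destroy_agg()), and rewrites the slot-index comment around the weaker runtime bound. With FRAC_BITS = 30 and ONE_FP = 1 << 30 as defined above, the dequeue update q->V += len * q->iwsum now advances virtual time by len/wsum for the current weight sum instead of the worst-case QFQ_MAX_WSUM. A small worked example with assumed numbers:

    wsum = 4           -> iwsum = (1 << 30) / 4 = 1 << 28
    len  = 1000 bytes  -> V += 1000 * (1 << 28), i.e. 1000/4 = 250 in
                          whole units of the 30-bit fixed-point scale

The rewritten comment's bound follows the same arithmetic: with current_max_weight/current_wsum <= 1, the slot index may reach 2 + 8 * 128 = 1026, far beyond QFQ_MAX_SLOTS-2, which is why the timestamps are capped (backward-shifted) rather than the bound being guaranteed.
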
3010 +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
3011 +index 37ca969..22c88d2 100644
3012 +--- a/net/x25/af_x25.c
3013 ++++ b/net/x25/af_x25.c
3014 +@@ -1583,11 +1583,11 @@ out_cud_release:
3015 + case SIOCX25CALLACCPTAPPRV: {
3016 + rc = -EINVAL;
3017 + lock_sock(sk);
3018 +- if (sk->sk_state != TCP_CLOSE)
3019 +- break;
3020 +- clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
3021 ++ if (sk->sk_state == TCP_CLOSE) {
3022 ++ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
3023 ++ rc = 0;
3024 ++ }
3025 + release_sock(sk);
3026 +- rc = 0;
3027 + break;
3028 + }
3029 +
3030 +@@ -1595,14 +1595,15 @@ out_cud_release:
3031 + rc = -EINVAL;
3032 + lock_sock(sk);
3033 + if (sk->sk_state != TCP_ESTABLISHED)
3034 +- break;
3035 ++ goto out_sendcallaccpt_release;
3036 + /* must call accptapprv above */
3037 + if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
3038 +- break;
3039 ++ goto out_sendcallaccpt_release;
3040 + x25_write_internal(sk, X25_CALL_ACCEPTED);
3041 + x25->state = X25_STATE_3;
3042 +- release_sock(sk);
3043 + rc = 0;
3044 ++out_sendcallaccpt_release:
3045 ++ release_sock(sk);
3046 + break;
3047 + }
3048 +
3049 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3050 +index 1d9d642..e849e1e 100644
3051 +--- a/sound/pci/hda/patch_sigmatel.c
3052 ++++ b/sound/pci/hda/patch_sigmatel.c
3053 +@@ -417,9 +417,11 @@ static void stac_update_outputs(struct hda_codec *codec)
3054 + val &= ~spec->eapd_mask;
3055 + else
3056 + val |= spec->eapd_mask;
3057 +- if (spec->gpio_data != val)
3058 ++ if (spec->gpio_data != val) {
3059 ++ spec->gpio_data = val;
3060 + stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir,
3061 + val);
3062 ++ }
3063 + }
3064 + }
3065 +
3066 +@@ -3227,7 +3229,7 @@ static const struct hda_fixup stac927x_fixups[] = {
3067 + /* configure the analog microphone on some laptops */
3068 + { 0x0c, 0x90a79130 },
3069 + /* correct the front output jack as a hp out */
3070 +- { 0x0f, 0x0227011f },
3071 ++ { 0x0f, 0x0221101f },
3072 + /* correct the front input jack as a mic */
3073 + { 0x0e, 0x02a79130 },
3074 + {}
3075 +@@ -3608,20 +3610,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
3076 + static int stac_init(struct hda_codec *codec)
3077 + {
3078 + struct sigmatel_spec *spec = codec->spec;
3079 +- unsigned int gpio;
3080 + int i;
3081 +
3082 + /* override some hints */
3083 + stac_store_hints(codec);
3084 +
3085 + /* set up GPIO */
3086 +- gpio = spec->gpio_data;
3087 + /* turn on EAPD statically when spec->eapd_switch isn't set.
3088 + * otherwise, unsol event will turn it on/off dynamically
3089 + */
3090 + if (!spec->eapd_switch)
3091 +- gpio |= spec->eapd_mask;
3092 +- stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio);
3093 ++ spec->gpio_data |= spec->eapd_mask;
3094 ++ stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
3095 +
3096 + snd_hda_gen_init(codec);
3097 +
3098 +@@ -3921,6 +3921,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
3099 + {
3100 + struct sigmatel_spec *spec = codec->spec;
3101 +
3102 ++ spec->gpio_mask |= spec->eapd_mask;
3103 + if (spec->gpio_led) {
3104 + if (!spec->vref_mute_led_nid) {
3105 + spec->gpio_mask |= spec->gpio_led;
3106 +diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
3107 +index 8221ff2..074aaf7 100644
3108 +--- a/sound/usb/6fire/pcm.c
3109 ++++ b/sound/usb/6fire/pcm.c
3110 +@@ -543,7 +543,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
3111 + snd_pcm_uframes_t ret;
3112 +
3113 + if (rt->panic || !sub)
3114 +- return SNDRV_PCM_STATE_XRUN;
3115 ++ return SNDRV_PCM_POS_XRUN;
3116 +
3117 + spin_lock_irqsave(&sub->lock, flags);
3118 + ret = sub->dma_off;