Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.1 commit in: /
Date: Wed, 13 Jul 2016 23:38:25
Message-Id: 1468453106.051f9f633cfed6438da621f5ba235c50e1073ccf.mpagano@gentoo
1 commit: 051f9f633cfed6438da621f5ba235c50e1073ccf
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jul 13 23:38:26 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jul 13 23:38:26 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=051f9f63
7
8 Linux patch 4.1.28
9
10 0000_README | 4 +
11 1027_linux-4.1.28.patch | 12868 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 12872 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index b592a97..24c0a3c 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -151,6 +151,10 @@ Patch: 1026_linux-4.1.27.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.1.27
21
22 +Patch: 1027_linux-4.1.28.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.1.28
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1027_linux-4.1.28.patch b/1027_linux-4.1.28.patch
31 new file mode 100644
32 index 0000000..71127b1
33 --- /dev/null
34 +++ b/1027_linux-4.1.28.patch
35 @@ -0,0 +1,12868 @@
36 +diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
37 +index 8638f61c8c9d..37eca00796ee 100644
38 +--- a/Documentation/scsi/scsi_eh.txt
39 ++++ b/Documentation/scsi/scsi_eh.txt
40 +@@ -263,19 +263,23 @@ scmd->allowed.
41 +
42 + 3. scmd recovered
43 + ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
44 +- - shost->host_failed--
45 + - clear scmd->eh_eflags
46 + - scsi_setup_cmd_retry()
47 + - move from local eh_work_q to local eh_done_q
48 + LOCKING: none
49 ++ CONCURRENCY: at most one thread per separate eh_work_q to
50 ++ keep queue manipulation lockless
51 +
52 + 4. EH completes
53 + ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
54 +- layer of failure.
55 ++ layer of failure. May be called concurrently but must have
56 ++ no more than one thread per separate eh_work_q to
57 ++ manipulate the queue locklessly
58 + - scmd is removed from eh_done_q and scmd->eh_entry is cleared
59 + - if retry is necessary, scmd is requeued using
60 + scsi_queue_insert()
61 + - otherwise, scsi_finish_command() is invoked for scmd
62 ++ - zero shost->host_failed
63 + LOCKING: queue or finish function performs appropriate locking
64 +
65 +
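
The note added above is the key to why scsi_eh can manipulate its lists without locks: each eh_work_q/eh_done_q pair is only ever touched by one thread. Below is a minimal user-space sketch of that single-consumer pattern; the struct and function names are illustrative stand-ins, not the kernel's scsi_eh implementation.

/* Minimal user-space sketch of the single-threaded EH queue handling
 * described above: because only one thread ever touches a given
 * eh_work_q/eh_done_q pair, entries can be moved between the lists
 * without any locking.  All names are illustrative. */
#include <stdio.h>

struct scmd {                 /* stand-in for struct scsi_cmnd */
    int id;
    struct scmd *next;
};

/* Move one recovered command from the work queue to the done queue;
 * safe without locks only because a single thread owns both lists. */
static void eh_finish_cmd(struct scmd **work_q, struct scmd **done_q)
{
    struct scmd *s = *work_q;
    if (!s)
        return;
    *work_q = s->next;
    s->next = *done_q;
    *done_q = s;
}

int main(void)
{
    struct scmd a = { 1, NULL }, b = { 2, &a };
    struct scmd *work_q = &b, *done_q = NULL;

    while (work_q)
        eh_finish_cmd(&work_q, &done_q);
    for (struct scmd *s = done_q; s; s = s->next)
        printf("finished scmd %d\n", s->id);
    return 0;
}

Because exactly one thread owns both lists, no atomics or spinlocks are needed for the moves.
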
66 +diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
67 +index 88152f214f48..302b5ed616a6 100644
68 +--- a/Documentation/sysctl/fs.txt
69 ++++ b/Documentation/sysctl/fs.txt
70 +@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
71 + - nr_open
72 + - overflowuid
73 + - overflowgid
74 ++- pipe-user-pages-hard
75 ++- pipe-user-pages-soft
76 + - protected_hardlinks
77 + - protected_symlinks
78 + - suid_dumpable
79 +@@ -159,6 +161,27 @@ The default is 65534.
80 +
81 + ==============================================================
82 +
83 ++pipe-user-pages-hard:
84 ++
85 ++Maximum total number of pages a non-privileged user may allocate for pipes.
86 ++Once this limit is reached, no new pipes may be allocated until usage goes
87 ++below the limit again. When set to 0, no limit is applied, which is the default
88 ++setting.
89 ++
90 ++==============================================================
91 ++
92 ++pipe-user-pages-soft:
93 ++
94 ++Maximum total number of pages a non-privileged user may allocate for pipes
95 ++before the pipe size gets limited to a single page. Once this limit is reached,
96 ++new pipes will be limited to a single page in size for this user in order to
97 ++limit total memory usage, and trying to increase them using fcntl() will be
98 ++denied until usage goes below the limit again. The default value allows
99 ++allocating up to 1024 pipes at their default size. When set to 0, no limit is
100 ++applied.
101 ++
102 ++==============================================================
103 ++
104 + protected_hardlinks:
105 +
106 + A long-standing class of security issues is the hardlink-based
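
The new pipe-user-pages-soft knob is visible to applications through fcntl(): once an unprivileged user is over the soft limit, attempts to grow a pipe buffer are denied. A sketch of probing that behavior on Linux follows (F_SETPIPE_SZ and F_GETPIPE_SZ are the real fcntl commands; the 1 MiB size is an arbitrary choice for the demo):

/* Sketch: how the pipe-user-pages-soft limit shows up to user space.
 * Once an unprivileged user is over the soft limit, attempts to grow
 * a pipe with fcntl(F_SETPIPE_SZ) are denied (typically EPERM).
 * Requires Linux and _GNU_SOURCE for F_SETPIPE_SZ. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

int main(void)
{
    int fds[2];

    if (pipe(fds) < 0) {
        perror("pipe");
        return 1;
    }
    /* Try to grow the pipe to 1 MiB of buffer space. */
    if (fcntl(fds[0], F_SETPIPE_SZ, 1024 * 1024) < 0)
        fprintf(stderr, "F_SETPIPE_SZ denied: %s\n", strerror(errno));
    else
        printf("pipe buffer is now %d bytes\n",
               fcntl(fds[0], F_GETPIPE_SZ));
    close(fds[0]);
    close(fds[1]);
    return 0;
}

Run as an unprivileged user already over the soft limit, the F_SETPIPE_SZ call fails; otherwise it succeeds and reports the new buffer size.
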
107 +diff --git a/Makefile b/Makefile
108 +index 54b3d8ae8624..241237cd4ca6 100644
109 +--- a/Makefile
110 ++++ b/Makefile
111 +@@ -1,6 +1,6 @@
112 + VERSION = 4
113 + PATCHLEVEL = 1
114 +-SUBLEVEL = 27
115 ++SUBLEVEL = 28
116 + EXTRAVERSION =
117 + NAME = Series 4800
118 +
119 +diff --git a/arch/arc/Makefile b/arch/arc/Makefile
120 +index 2f21e1e0ecf7..305dbdf6c944 100644
121 +--- a/arch/arc/Makefile
122 ++++ b/arch/arc/Makefile
123 +@@ -34,7 +34,6 @@ cflags-$(atleast_gcc44) += -fsection-anchors
124 + cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
125 + cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
126 + cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
127 +-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
128 +
129 + # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
130 + ifeq ($(atleast_gcc48),y)
131 +diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
132 +index 92320d6f737c..5086cc767c0b 100644
133 +--- a/arch/arc/kernel/stacktrace.c
134 ++++ b/arch/arc/kernel/stacktrace.c
135 +@@ -144,7 +144,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
136 + * prologue is set up (callee regs saved and then fp set, not the other
137 + * way around)
138 + */
139 +- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
140 ++ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
141 + return 0;
142 +
143 + #endif
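
The one-line change above switches the unwinder warning from pr_warn() to pr_warn_once() so it fires a single time instead of on every backtrace attempt. The sketch below models the print-once idiom with a static flag; it is a user-space approximation, not the kernel macro.

/* Sketch of the print-once pattern behind pr_warn_once(): a static
 * flag ensures the message is emitted only on the first call.  This
 * is a user-space model, not the kernel's implementation. */
#include <stdio.h>

#define warn_once(msg)                          \
    do {                                        \
        static int warned;                      \
        if (!warned) {                          \
            warned = 1;                         \
            fputs(msg, stderr);                 \
        }                                       \
    } while (0)

int main(void)
{
    for (int i = 0; i < 3; i++)
        warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
    return 0;   /* the warning was printed exactly once */
}
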
144 +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
145 +index bfd662e49a25..89b5a0a00dc9 100644
146 +--- a/arch/arm/include/asm/pgtable-2level.h
147 ++++ b/arch/arm/include/asm/pgtable-2level.h
148 +@@ -164,6 +164,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
149 +
150 + #define pmd_large(pmd) (pmd_val(pmd) & 2)
151 + #define pmd_bad(pmd) (pmd_val(pmd) & 2)
152 ++#define pmd_present(pmd) (pmd_val(pmd))
153 +
154 + #define copy_pmd(pmdpd,pmdps) \
155 + do { \
156 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
157 +index a745a2a53853..fd929b5ded9e 100644
158 +--- a/arch/arm/include/asm/pgtable-3level.h
159 ++++ b/arch/arm/include/asm/pgtable-3level.h
160 +@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
161 + : !!(pmd_val(pmd) & (val)))
162 + #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
163 +
164 ++#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
165 + #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
166 + #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
167 + static inline pte_t pte_mkspecial(pte_t pte)
168 +@@ -257,10 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
169 + #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
170 + #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
171 +
172 +-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
173 ++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
174 + static inline pmd_t pmd_mknotpresent(pmd_t pmd)
175 + {
176 +- return __pmd(0);
177 ++ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
178 + }
179 +
180 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
181 +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
182 +index f40354198bad..7fa12e0f1bc9 100644
183 +--- a/arch/arm/include/asm/pgtable.h
184 ++++ b/arch/arm/include/asm/pgtable.h
185 +@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
186 + #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
187 +
188 + #define pmd_none(pmd) (!pmd_val(pmd))
189 +-#define pmd_present(pmd) (pmd_val(pmd))
190 +
191 + static inline pte_t *pmd_page_vaddr(pmd_t pmd)
192 + {
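
Taken together, the three ARM hunks above change how an invalidated huge pmd is represented: instead of zeroing the entry (which also made pmd_none() true), only the valid bit is cleared, so pmd_present() reports false while the rest of the entry survives pmdp_invalidate(). A tiny model of that distinction, using a made-up bit layout rather than the real L_PMD_SECT_VALID:

/* Model of the pmd fix above: an invalidated entry must read as
 * "not present" without becoming "none", so the rest of the entry
 * (e.g. the pfn) survives pmdp_invalidate().  Bit values here are
 * invented for illustration; the real L_PMD_SECT_VALID differs. */
#include <stdio.h>

#define VALID_BIT  0x1UL           /* stand-in for L_PMD_SECT_VALID */

typedef unsigned long pmd_t;

#define pmd_none(pmd)     ((pmd) == 0)
#define pmd_present(pmd)  ((pmd) & VALID_BIT)

static pmd_t pmd_mknotpresent(pmd_t pmd)
{
    return pmd & ~VALID_BIT;       /* keep everything but the valid bit */
}

int main(void)
{
    pmd_t pmd = 0x12345000UL | VALID_BIT;   /* fake mapped entry */

    pmd = pmd_mknotpresent(pmd);
    printf("present=%d none=%d payload=%#lx\n",
           !!pmd_present(pmd), !!pmd_none(pmd), pmd & ~VALID_BIT);
    return 0;
}
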
193 +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
194 +index d6223cbcb661..5414081c0bbf 100644
195 +--- a/arch/arm/kvm/arm.c
196 ++++ b/arch/arm/kvm/arm.c
197 +@@ -257,6 +257,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
198 + kvm_mmu_free_memory_caches(vcpu);
199 + kvm_timer_vcpu_terminate(vcpu);
200 + kvm_vgic_vcpu_destroy(vcpu);
201 ++ kvm_vcpu_uninit(vcpu);
202 + kmem_cache_free(kvm_vcpu_cache, vcpu);
203 + }
204 +
205 +diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
206 +index aa7b379e2661..2a3db0bd9e15 100644
207 +--- a/arch/arm/mach-omap2/cpuidle34xx.c
208 ++++ b/arch/arm/mach-omap2/cpuidle34xx.c
209 +@@ -34,6 +34,7 @@
210 + #include "pm.h"
211 + #include "control.h"
212 + #include "common.h"
213 ++#include "soc.h"
214 +
215 + /* Mach specific information to be recorded in the C-state driver_data */
216 + struct omap3_idle_statedata {
217 +@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
218 + .safe_state_index = 0,
219 + };
220 +
221 ++/*
222 ++ * Numbers based on measurements made in October 2009 for PM optimized kernel
223 ++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
224 ++ * and worst case latencies).
225 ++ */
226 ++static struct cpuidle_driver omap3430_idle_driver = {
227 ++ .name = "omap3430_idle",
228 ++ .owner = THIS_MODULE,
229 ++ .states = {
230 ++ {
231 ++ .enter = omap3_enter_idle_bm,
232 ++ .exit_latency = 110 + 162,
233 ++ .target_residency = 5,
234 ++ .name = "C1",
235 ++ .desc = "MPU ON + CORE ON",
236 ++ },
237 ++ {
238 ++ .enter = omap3_enter_idle_bm,
239 ++ .exit_latency = 106 + 180,
240 ++ .target_residency = 309,
241 ++ .name = "C2",
242 ++ .desc = "MPU ON + CORE ON",
243 ++ },
244 ++ {
245 ++ .enter = omap3_enter_idle_bm,
246 ++ .exit_latency = 107 + 410,
247 ++ .target_residency = 46057,
248 ++ .name = "C3",
249 ++ .desc = "MPU RET + CORE ON",
250 ++ },
251 ++ {
252 ++ .enter = omap3_enter_idle_bm,
253 ++ .exit_latency = 121 + 3374,
254 ++ .target_residency = 46057,
255 ++ .name = "C4",
256 ++ .desc = "MPU OFF + CORE ON",
257 ++ },
258 ++ {
259 ++ .enter = omap3_enter_idle_bm,
260 ++ .exit_latency = 855 + 1146,
261 ++ .target_residency = 46057,
262 ++ .name = "C5",
263 ++ .desc = "MPU RET + CORE RET",
264 ++ },
265 ++ {
266 ++ .enter = omap3_enter_idle_bm,
267 ++ .exit_latency = 7580 + 4134,
268 ++ .target_residency = 484329,
269 ++ .name = "C6",
270 ++ .desc = "MPU OFF + CORE RET",
271 ++ },
272 ++ {
273 ++ .enter = omap3_enter_idle_bm,
274 ++ .exit_latency = 7505 + 15274,
275 ++ .target_residency = 484329,
276 ++ .name = "C7",
277 ++ .desc = "MPU OFF + CORE OFF",
278 ++ },
279 ++ },
280 ++ .state_count = ARRAY_SIZE(omap3_idle_data),
281 ++ .safe_state_index = 0,
282 ++};
283 ++
284 + /* Public functions */
285 +
286 + /**
287 +@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
288 + if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
289 + return -ENODEV;
290 +
291 +- return cpuidle_register(&omap3_idle_driver, NULL);
292 ++ if (cpu_is_omap3430())
293 ++ return cpuidle_register(&omap3430_idle_driver, NULL);
294 ++ else
295 ++ return cpuidle_register(&omap3_idle_driver, NULL);
296 + }
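
The cpuidle hunk adds a dedicated C-state table for OMAP3430 and picks it at init time via cpu_is_omap3430(). The sketch below shows the general pattern of selecting one of several static parameter tables from a runtime SoC probe; the types and the is_omap3430() stub are illustrative, not the kernel's cpuidle API.

/* Sketch of the driver-selection pattern used above: two static
 * tables of C-state parameters, one chosen at init time by a runtime
 * SoC check.  Types and the is_omap3430() probe are stand-ins. */
#include <stdio.h>

struct idle_state {
    const char *name;
    unsigned int exit_latency_us;
    unsigned int target_residency_us;
};

static const struct idle_state omap3_states[] = {
    { "C1", 10, 30 },
    { "C7", 10000, 30000 },
};

static const struct idle_state omap3430_states[] = {
    { "C1", 110 + 162, 5 },            /* numbers from the table above */
    { "C7", 7505 + 15274, 484329 },
};

static int is_omap3430(void) { return 1; /* pretend we probed the SoC */ }

int main(void)
{
    const struct idle_state *tbl = is_omap3430() ? omap3430_states
                                                 : omap3_states;
    printf("registered %s table, deepest state %s\n",
           is_omap3430() ? "omap3430" : "omap3", tbl[1].name);
    return 0;
}
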
297 +diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
298 +index ff780a8d8366..9a42736ef4ac 100644
299 +--- a/arch/arm/mach-s3c64xx/dev-audio.c
300 ++++ b/arch/arm/mach-s3c64xx/dev-audio.c
301 +@@ -54,12 +54,12 @@ static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
302 +
303 + static struct resource s3c64xx_iis0_resource[] = {
304 + [0] = DEFINE_RES_MEM(S3C64XX_PA_IIS0, SZ_256),
305 +- [1] = DEFINE_RES_DMA(DMACH_I2S0_OUT),
306 +- [2] = DEFINE_RES_DMA(DMACH_I2S0_IN),
307 + };
308 +
309 +-static struct s3c_audio_pdata i2sv3_pdata = {
310 ++static struct s3c_audio_pdata i2s0_pdata = {
311 + .cfg_gpio = s3c64xx_i2s_cfg_gpio,
312 ++ .dma_playback = DMACH_I2S0_OUT,
313 ++ .dma_capture = DMACH_I2S0_IN,
314 + };
315 +
316 + struct platform_device s3c64xx_device_iis0 = {
317 +@@ -68,15 +68,19 @@ struct platform_device s3c64xx_device_iis0 = {
318 + .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
319 + .resource = s3c64xx_iis0_resource,
320 + .dev = {
321 +- .platform_data = &i2sv3_pdata,
322 ++ .platform_data = &i2s0_pdata,
323 + },
324 + };
325 + EXPORT_SYMBOL(s3c64xx_device_iis0);
326 +
327 + static struct resource s3c64xx_iis1_resource[] = {
328 + [0] = DEFINE_RES_MEM(S3C64XX_PA_IIS1, SZ_256),
329 +- [1] = DEFINE_RES_DMA(DMACH_I2S1_OUT),
330 +- [2] = DEFINE_RES_DMA(DMACH_I2S1_IN),
331 ++};
332 ++
333 ++static struct s3c_audio_pdata i2s1_pdata = {
334 ++ .cfg_gpio = s3c64xx_i2s_cfg_gpio,
335 ++ .dma_playback = DMACH_I2S1_OUT,
336 ++ .dma_capture = DMACH_I2S1_IN,
337 + };
338 +
339 + struct platform_device s3c64xx_device_iis1 = {
340 +@@ -85,19 +89,19 @@ struct platform_device s3c64xx_device_iis1 = {
341 + .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
342 + .resource = s3c64xx_iis1_resource,
343 + .dev = {
344 +- .platform_data = &i2sv3_pdata,
345 ++ .platform_data = &i2s1_pdata,
346 + },
347 + };
348 + EXPORT_SYMBOL(s3c64xx_device_iis1);
349 +
350 + static struct resource s3c64xx_iisv4_resource[] = {
351 + [0] = DEFINE_RES_MEM(S3C64XX_PA_IISV4, SZ_256),
352 +- [1] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_TX),
353 +- [2] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_RX),
354 + };
355 +
356 + static struct s3c_audio_pdata i2sv4_pdata = {
357 + .cfg_gpio = s3c64xx_i2s_cfg_gpio,
358 ++ .dma_playback = DMACH_HSI_I2SV40_TX,
359 ++ .dma_capture = DMACH_HSI_I2SV40_RX,
360 + .type = {
361 + .i2s = {
362 + .quirks = QUIRK_PRI_6CHAN,
363 +@@ -142,12 +146,12 @@ static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
364 +
365 + static struct resource s3c64xx_pcm0_resource[] = {
366 + [0] = DEFINE_RES_MEM(S3C64XX_PA_PCM0, SZ_256),
367 +- [1] = DEFINE_RES_DMA(DMACH_PCM0_TX),
368 +- [2] = DEFINE_RES_DMA(DMACH_PCM0_RX),
369 + };
370 +
371 + static struct s3c_audio_pdata s3c_pcm0_pdata = {
372 + .cfg_gpio = s3c64xx_pcm_cfg_gpio,
373 ++ .dma_capture = DMACH_PCM0_RX,
374 ++ .dma_playback = DMACH_PCM0_TX,
375 + };
376 +
377 + struct platform_device s3c64xx_device_pcm0 = {
378 +@@ -163,12 +167,12 @@ EXPORT_SYMBOL(s3c64xx_device_pcm0);
379 +
380 + static struct resource s3c64xx_pcm1_resource[] = {
381 + [0] = DEFINE_RES_MEM(S3C64XX_PA_PCM1, SZ_256),
382 +- [1] = DEFINE_RES_DMA(DMACH_PCM1_TX),
383 +- [2] = DEFINE_RES_DMA(DMACH_PCM1_RX),
384 + };
385 +
386 + static struct s3c_audio_pdata s3c_pcm1_pdata = {
387 + .cfg_gpio = s3c64xx_pcm_cfg_gpio,
388 ++ .dma_playback = DMACH_PCM1_TX,
389 ++ .dma_capture = DMACH_PCM1_RX,
390 + };
391 +
392 + struct platform_device s3c64xx_device_pcm1 = {
393 +@@ -196,13 +200,14 @@ static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
394 +
395 + static struct resource s3c64xx_ac97_resource[] = {
396 + [0] = DEFINE_RES_MEM(S3C64XX_PA_AC97, SZ_256),
397 +- [1] = DEFINE_RES_DMA(DMACH_AC97_PCMOUT),
398 +- [2] = DEFINE_RES_DMA(DMACH_AC97_PCMIN),
399 +- [3] = DEFINE_RES_DMA(DMACH_AC97_MICIN),
400 +- [4] = DEFINE_RES_IRQ(IRQ_AC97),
401 ++ [1] = DEFINE_RES_IRQ(IRQ_AC97),
402 + };
403 +
404 +-static struct s3c_audio_pdata s3c_ac97_pdata;
405 ++static struct s3c_audio_pdata s3c_ac97_pdata = {
406 ++ .dma_playback = DMACH_AC97_PCMOUT,
407 ++ .dma_capture = DMACH_AC97_PCMIN,
408 ++ .dma_capture_mic = DMACH_AC97_MICIN,
409 ++};
410 +
411 + static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
412 +
413 +diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
414 +index 096e14073bd9..9c739eafe95c 100644
415 +--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
416 ++++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
417 +@@ -14,38 +14,38 @@
418 + #define S3C64XX_DMA_CHAN(name) ((unsigned long)(name))
419 +
420 + /* DMA0/SDMA0 */
421 +-#define DMACH_UART0 S3C64XX_DMA_CHAN("uart0_tx")
422 +-#define DMACH_UART0_SRC2 S3C64XX_DMA_CHAN("uart0_rx")
423 +-#define DMACH_UART1 S3C64XX_DMA_CHAN("uart1_tx")
424 +-#define DMACH_UART1_SRC2 S3C64XX_DMA_CHAN("uart1_rx")
425 +-#define DMACH_UART2 S3C64XX_DMA_CHAN("uart2_tx")
426 +-#define DMACH_UART2_SRC2 S3C64XX_DMA_CHAN("uart2_rx")
427 +-#define DMACH_UART3 S3C64XX_DMA_CHAN("uart3_tx")
428 +-#define DMACH_UART3_SRC2 S3C64XX_DMA_CHAN("uart3_rx")
429 +-#define DMACH_PCM0_TX S3C64XX_DMA_CHAN("pcm0_tx")
430 +-#define DMACH_PCM0_RX S3C64XX_DMA_CHAN("pcm0_rx")
431 +-#define DMACH_I2S0_OUT S3C64XX_DMA_CHAN("i2s0_tx")
432 +-#define DMACH_I2S0_IN S3C64XX_DMA_CHAN("i2s0_rx")
433 ++#define DMACH_UART0 "uart0_tx"
434 ++#define DMACH_UART0_SRC2 "uart0_rx"
435 ++#define DMACH_UART1 "uart1_tx"
436 ++#define DMACH_UART1_SRC2 "uart1_rx"
437 ++#define DMACH_UART2 "uart2_tx"
438 ++#define DMACH_UART2_SRC2 "uart2_rx"
439 ++#define DMACH_UART3 "uart3_tx"
440 ++#define DMACH_UART3_SRC2 "uart3_rx"
441 ++#define DMACH_PCM0_TX "pcm0_tx"
442 ++#define DMACH_PCM0_RX "pcm0_rx"
443 ++#define DMACH_I2S0_OUT "i2s0_tx"
444 ++#define DMACH_I2S0_IN "i2s0_rx"
445 + #define DMACH_SPI0_TX S3C64XX_DMA_CHAN("spi0_tx")
446 + #define DMACH_SPI0_RX S3C64XX_DMA_CHAN("spi0_rx")
447 +-#define DMACH_HSI_I2SV40_TX S3C64XX_DMA_CHAN("i2s2_tx")
448 +-#define DMACH_HSI_I2SV40_RX S3C64XX_DMA_CHAN("i2s2_rx")
449 ++#define DMACH_HSI_I2SV40_TX "i2s2_tx"
450 ++#define DMACH_HSI_I2SV40_RX "i2s2_rx"
451 +
452 + /* DMA1/SDMA1 */
453 +-#define DMACH_PCM1_TX S3C64XX_DMA_CHAN("pcm1_tx")
454 +-#define DMACH_PCM1_RX S3C64XX_DMA_CHAN("pcm1_rx")
455 +-#define DMACH_I2S1_OUT S3C64XX_DMA_CHAN("i2s1_tx")
456 +-#define DMACH_I2S1_IN S3C64XX_DMA_CHAN("i2s1_rx")
457 ++#define DMACH_PCM1_TX "pcm1_tx"
458 ++#define DMACH_PCM1_RX "pcm1_rx"
459 ++#define DMACH_I2S1_OUT "i2s1_tx"
460 ++#define DMACH_I2S1_IN "i2s1_rx"
461 + #define DMACH_SPI1_TX S3C64XX_DMA_CHAN("spi1_tx")
462 + #define DMACH_SPI1_RX S3C64XX_DMA_CHAN("spi1_rx")
463 +-#define DMACH_AC97_PCMOUT S3C64XX_DMA_CHAN("ac97_out")
464 +-#define DMACH_AC97_PCMIN S3C64XX_DMA_CHAN("ac97_in")
465 +-#define DMACH_AC97_MICIN S3C64XX_DMA_CHAN("ac97_mic")
466 +-#define DMACH_PWM S3C64XX_DMA_CHAN("pwm")
467 +-#define DMACH_IRDA S3C64XX_DMA_CHAN("irda")
468 +-#define DMACH_EXTERNAL S3C64XX_DMA_CHAN("external")
469 +-#define DMACH_SECURITY_RX S3C64XX_DMA_CHAN("sec_rx")
470 +-#define DMACH_SECURITY_TX S3C64XX_DMA_CHAN("sec_tx")
471 ++#define DMACH_AC97_PCMOUT "ac97_out"
472 ++#define DMACH_AC97_PCMIN "ac97_in"
473 ++#define DMACH_AC97_MICIN "ac97_mic"
474 ++#define DMACH_PWM "pwm"
475 ++#define DMACH_IRDA "irda"
476 ++#define DMACH_EXTERNAL "external"
477 ++#define DMACH_SECURITY_RX "sec_rx"
478 ++#define DMACH_SECURITY_TX "sec_tx"
479 +
480 + enum dma_ch {
481 + DMACH_MAX = 32
482 +diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
483 +index 83c7d154bde0..8b67db8c1213 100644
484 +--- a/arch/arm/plat-samsung/devs.c
485 ++++ b/arch/arm/plat-samsung/devs.c
486 +@@ -65,6 +65,7 @@
487 + #include <linux/platform_data/usb-ohci-s3c2410.h>
488 + #include <plat/usb-phy.h>
489 + #include <plat/regs-spi.h>
490 ++#include <linux/platform_data/asoc-s3c.h>
491 + #include <linux/platform_data/spi-s3c64xx.h>
492 +
493 + static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
494 +@@ -74,9 +75,12 @@ static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
495 + static struct resource s3c_ac97_resource[] = {
496 + [0] = DEFINE_RES_MEM(S3C2440_PA_AC97, S3C2440_SZ_AC97),
497 + [1] = DEFINE_RES_IRQ(IRQ_S3C244X_AC97),
498 +- [2] = DEFINE_RES_DMA_NAMED(DMACH_PCM_OUT, "PCM out"),
499 +- [3] = DEFINE_RES_DMA_NAMED(DMACH_PCM_IN, "PCM in"),
500 +- [4] = DEFINE_RES_DMA_NAMED(DMACH_MIC_IN, "Mic in"),
501 ++};
502 ++
503 ++static struct s3c_audio_pdata s3c_ac97_pdata = {
504 ++ .dma_playback = (void *)DMACH_PCM_OUT,
505 ++ .dma_capture = (void *)DMACH_PCM_IN,
506 ++ .dma_capture_mic = (void *)DMACH_MIC_IN,
507 + };
508 +
509 + struct platform_device s3c_device_ac97 = {
510 +@@ -87,6 +91,7 @@ struct platform_device s3c_device_ac97 = {
511 + .dev = {
512 + .dma_mask = &samsung_device_dma_mask,
513 + .coherent_dma_mask = DMA_BIT_MASK(32),
514 ++ .platform_data = &s3c_ac97_pdata,
515 + }
516 + };
517 + #endif /* CONFIG_CPU_S3C2440 */
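
The Samsung audio changes above share one theme: DMA channel identifiers move out of IORESOURCE_DMA resources and into s3c_audio_pdata fields, with the s3c64xx variant passing plain dmaengine channel name strings. A minimal model of handing request names to a driver through platform data (the struct and probe function are illustrative, not the kernel's):

/* Sketch of the change above: instead of DMA resources attached to
 * the platform device, the audio platform data now carries the DMA
 * channel identifiers directly (name strings on s3c64xx). */
#include <stdio.h>

struct audio_pdata {
    const char *dma_playback;   /* e.g. "i2s0_tx" */
    const char *dma_capture;    /* e.g. "i2s0_rx" */
};

static const struct audio_pdata i2s0_pdata = {
    .dma_playback = "i2s0_tx",
    .dma_capture  = "i2s0_rx",
};

/* Driver-side: request channels by the names found in pdata. */
static void probe(const struct audio_pdata *pdata)
{
    printf("requesting playback channel '%s'\n", pdata->dma_playback);
    printf("requesting capture channel '%s'\n", pdata->dma_capture);
}

int main(void)
{
    probe(&i2s0_pdata);
    return 0;
}
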
518 +diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
519 +index b6f14e8d2121..bfb8eb168f2d 100644
520 +--- a/arch/arm64/mm/flush.c
521 ++++ b/arch/arm64/mm/flush.c
522 +@@ -74,10 +74,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
523 + {
524 + struct page *page = pte_page(pte);
525 +
526 +- /* no flushing needed for anonymous pages */
527 +- if (!page_mapping(page))
528 +- return;
529 +-
530 + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
531 + __flush_dcache_area(page_address(page),
532 + PAGE_SIZE << compound_order(page));
533 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
534 +index 3585af093576..ab518d14b7b0 100644
535 +--- a/arch/mips/include/asm/kvm_host.h
536 ++++ b/arch/mips/include/asm/kvm_host.h
537 +@@ -370,6 +370,7 @@ struct kvm_mips_tlb {
538 + #define KVM_MIPS_GUEST_TLB_SIZE 64
539 + struct kvm_vcpu_arch {
540 + void *host_ebase, *guest_ebase;
541 ++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
542 + unsigned long host_stack;
543 + unsigned long host_gp;
544 +
545 +diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
546 +index 9b3b48e21c22..6ec90a19be70 100644
547 +--- a/arch/mips/include/asm/processor.h
548 ++++ b/arch/mips/include/asm/processor.h
549 +@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
550 + * User space process size: 2GB. This is hardcoded into a few places,
551 + * so don't change it unless you know what you are doing.
552 + */
553 +-#define TASK_SIZE 0x7fff8000UL
554 ++#define TASK_SIZE 0x80000000UL
555 + #endif
556 +
557 + #define STACK_TOP_MAX TASK_SIZE
558 +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
559 +index be73c491182b..49b52035226c 100644
560 +--- a/arch/mips/kernel/setup.c
561 ++++ b/arch/mips/kernel/setup.c
562 +@@ -686,6 +686,9 @@ static void __init arch_mem_init(char **cmdline_p)
563 + for_each_memblock(reserved, reg)
564 + if (reg->size != 0)
565 + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
566 ++
567 ++ reserve_bootmem_region(__pa_symbol(&__nosave_begin),
568 ++ __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
569 + }
570 +
571 + static void __init resource_init(void)
572 +diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
573 +index 4ab4bdfad703..2143884709e4 100644
574 +--- a/arch/mips/kvm/interrupt.h
575 ++++ b/arch/mips/kvm/interrupt.h
576 +@@ -28,6 +28,7 @@
577 + #define MIPS_EXC_MAX 12
578 + /* XXXSL More to follow */
579 +
580 ++extern char __kvm_mips_vcpu_run_end[];
581 + extern char mips32_exception[], mips32_exceptionEnd[];
582 + extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
583 +
584 +diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
585 +index d1ee95a7f7dd..ae01e7fe4e1c 100644
586 +--- a/arch/mips/kvm/locore.S
587 ++++ b/arch/mips/kvm/locore.S
588 +@@ -235,6 +235,7 @@ FEXPORT(__kvm_mips_load_k0k1)
589 +
590 + /* Jump to guest */
591 + eret
592 ++EXPORT(__kvm_mips_vcpu_run_end)
593 +
594 + VECTOR(MIPSX(exception), unknown)
595 + /* Find out what mode we came from and jump to the proper handler. */
596 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
597 +index ace4ed7d41c6..485fdc462243 100644
598 +--- a/arch/mips/kvm/mips.c
599 ++++ b/arch/mips/kvm/mips.c
600 +@@ -312,6 +312,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
601 + memcpy(gebase + offset, mips32_GuestException,
602 + mips32_GuestExceptionEnd - mips32_GuestException);
603 +
604 ++#ifdef MODULE
605 ++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
606 ++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
607 ++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
608 ++ vcpu->arch.vcpu_run = gebase + offset;
609 ++#else
610 ++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
611 ++#endif
612 ++
613 + /* Invalidate the icache for these ranges */
614 + local_flush_icache_range((unsigned long)gebase,
615 + (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
616 +@@ -401,7 +410,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
617 + /* Disable hardware page table walking while in guest */
618 + htw_stop();
619 +
620 +- r = __kvm_mips_vcpu_run(run, vcpu);
621 ++ r = vcpu->arch.vcpu_run(run, vcpu);
622 +
623 + /* Re-enable HTW before enabling interrupts */
624 + htw_start();
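
For modular KVM, the hunk above copies __kvm_mips_vcpu_run into the exception base (gebase) next to the handlers and reaches it through vcpu->arch.vcpu_run. The sketch below models only the per-vCPU function-pointer indirection; actually relocating machine code additionally needs executable memory and an icache flush, as the patch's local_flush_icache_range() call shows.

/* Sketch of the indirection added above: the routine used to run a
 * vCPU is reached through a per-vCPU function pointer, which can
 * point either at the built-in entry or at a relocated copy inside
 * the exception base (the modular case).  User-space model only. */
#include <stdio.h>

struct vcpu {
    int (*vcpu_run)(struct vcpu *v);   /* like vcpu->arch.vcpu_run */
    int id;
};

static int builtin_vcpu_run(struct vcpu *v)
{
    printf("running vcpu %d via builtin entry\n", v->id);
    return 0;
}

int main(void)
{
    struct vcpu v = { .vcpu_run = builtin_vcpu_run, .id = 0 };

    /* In the modular case the patch would instead point v.vcpu_run
     * at the copy placed inside gebase. */
    return v.vcpu_run(&v);
}
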
625 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
626 +index c8c8275765e7..dd023904bac5 100644
627 +--- a/arch/powerpc/kernel/process.c
628 ++++ b/arch/powerpc/kernel/process.c
629 +@@ -1240,6 +1240,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
630 + current->thread.regs = regs - 1;
631 + }
632 +
633 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
634 ++ /*
635 ++ * Clear any transactional state, we're exec()ing. The cause is
636 ++ * not important as there will never be a recheckpoint so it's not
637 ++ * user visible.
638 ++ */
639 ++ if (MSR_TM_SUSPENDED(mfmsr()))
640 ++ tm_reclaim_current(0);
641 ++#endif
642 ++
643 + memset(regs->gpr, 0, sizeof(regs->gpr));
644 + regs->ctr = 0;
645 + regs->link = 0;
646 +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
647 +index abe9cdc390a5..28dbbb0d12c4 100644
648 +--- a/arch/powerpc/kernel/prom.c
649 ++++ b/arch/powerpc/kernel/prom.c
650 +@@ -162,11 +162,12 @@ static struct ibm_pa_feature {
651 + {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
652 + {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
653 + /*
654 +- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
655 +- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
656 +- * which is 0 if the kernel doesn't support TM.
657 ++ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
658 ++ * we don't want to turn on TM here, so we use the *_COMP versions
659 ++ * which are 0 if the kernel doesn't support TM.
660 + */
661 +- {CPU_FTR_TM_COMP, 0, 0, 0, 22, 0, 0},
662 ++ {CPU_FTR_TM_COMP, 0, 0,
663 ++ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
664 + };
665 +
666 + static void __init scan_features(unsigned long node, const unsigned char *ftrs,
667 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
668 +index 3385e3d0506e..4d29154a4987 100644
669 +--- a/arch/powerpc/mm/hugetlbpage.c
670 ++++ b/arch/powerpc/mm/hugetlbpage.c
671 +@@ -472,13 +472,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
672 + {
673 + struct hugepd_freelist **batchp;
674 +
675 +- batchp = this_cpu_ptr(&hugepd_freelist_cur);
676 ++ batchp = &get_cpu_var(hugepd_freelist_cur);
677 +
678 + if (atomic_read(&tlb->mm->mm_users) < 2 ||
679 + cpumask_equal(mm_cpumask(tlb->mm),
680 + cpumask_of(smp_processor_id()))) {
681 + kmem_cache_free(hugepte_cache, hugepte);
682 +- put_cpu_var(hugepd_freelist_cur);
683 ++ put_cpu_var(hugepd_freelist_cur);
684 + return;
685 + }
686 +
687 +diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
688 +index d29ad9545b41..081b2ad99d73 100644
689 +--- a/arch/s390/include/asm/mmu.h
690 ++++ b/arch/s390/include/asm/mmu.h
691 +@@ -11,7 +11,7 @@ typedef struct {
692 + spinlock_t list_lock;
693 + struct list_head pgtable_list;
694 + struct list_head gmap_list;
695 +- unsigned long asce_bits;
696 ++ unsigned long asce;
697 + unsigned long asce_limit;
698 + unsigned long vdso_base;
699 + /* The mmu context allocates 4K page tables. */
700 +diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
701 +index e485817f7b1a..22877c9440ea 100644
702 +--- a/arch/s390/include/asm/mmu_context.h
703 ++++ b/arch/s390/include/asm/mmu_context.h
704 +@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
705 + mm->context.has_pgste = 0;
706 + mm->context.use_skey = 0;
707 + #endif
708 +- if (mm->context.asce_limit == 0) {
709 ++ switch (mm->context.asce_limit) {
710 ++ case 1UL << 42:
711 ++ /*
712 ++ * forked 3-level task, fall through to set new asce with new
713 ++ * mm->pgd
714 ++ */
715 ++ case 0:
716 + /* context created by exec, set asce limit to 4TB */
717 +- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
718 +- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
719 + mm->context.asce_limit = STACK_TOP_MAX;
720 +- } else if (mm->context.asce_limit == (1UL << 31)) {
721 ++ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
722 ++ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
723 ++ break;
724 ++ case 1UL << 53:
725 ++ /* forked 4-level task, set new asce with new mm->pgd */
726 ++ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
727 ++ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
728 ++ break;
729 ++ case 1UL << 31:
730 ++ /* forked 2-level compat task, set new asce with new mm->pgd */
731 ++ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
732 ++ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
733 ++ /* pgd_alloc() did not increase mm->nr_pmds */
734 + mm_inc_nr_pmds(mm);
735 + }
736 + crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
737 +@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
738 +
739 + static inline void set_user_asce(struct mm_struct *mm)
740 + {
741 +- S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
742 ++ S390_lowcore.user_asce = mm->context.asce;
743 + if (current->thread.mm_segment.ar4)
744 + __ctl_load(S390_lowcore.user_asce, 7, 7);
745 + set_cpu_flag(CIF_ASCE);
746 +@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
747 + {
748 + int cpu = smp_processor_id();
749 +
750 +- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
751 ++ S390_lowcore.user_asce = next->context.asce;
752 + if (prev == next)
753 + return;
754 + if (MACHINE_HAS_TLB_LC)
755 +diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
756 +index d7cc79fb6191..5991cdcb5b40 100644
757 +--- a/arch/s390/include/asm/pgalloc.h
758 ++++ b/arch/s390/include/asm/pgalloc.h
759 +@@ -56,8 +56,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
760 + return _REGION2_ENTRY_EMPTY;
761 + }
762 +
763 +-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
764 +-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
765 ++int crst_table_upgrade(struct mm_struct *);
766 ++void crst_table_downgrade(struct mm_struct *);
767 +
768 + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
769 + {
770 +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
771 +index dedb6218544b..7ce53f682ec9 100644
772 +--- a/arch/s390/include/asm/processor.h
773 ++++ b/arch/s390/include/asm/processor.h
774 +@@ -155,7 +155,7 @@ struct stack_frame {
775 + regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
776 + regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
777 + regs->gprs[15] = new_stackp; \
778 +- crst_table_downgrade(current->mm, 1UL << 31); \
779 ++ crst_table_downgrade(current->mm); \
780 + execve_tail(); \
781 + } while (0)
782 +
783 +diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
784 +index ca148f7c3eaa..a2e6ef32e054 100644
785 +--- a/arch/s390/include/asm/tlbflush.h
786 ++++ b/arch/s390/include/asm/tlbflush.h
787 +@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
788 + static inline void __tlb_flush_kernel(void)
789 + {
790 + if (MACHINE_HAS_IDTE)
791 +- __tlb_flush_idte((unsigned long) init_mm.pgd |
792 +- init_mm.context.asce_bits);
793 ++ __tlb_flush_idte(init_mm.context.asce);
794 + else
795 + __tlb_flush_global();
796 + }
797 +@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
798 + static inline void __tlb_flush_kernel(void)
799 + {
800 + if (MACHINE_HAS_TLB_LC)
801 +- __tlb_flush_idte_local((unsigned long) init_mm.pgd |
802 +- init_mm.context.asce_bits);
803 ++ __tlb_flush_idte_local(init_mm.context.asce);
804 + else
805 + __tlb_flush_local();
806 + }
807 +@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
808 + * only ran on the local cpu.
809 + */
810 + if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
811 +- __tlb_flush_asce(mm, (unsigned long) mm->pgd |
812 +- mm->context.asce_bits);
813 ++ __tlb_flush_asce(mm, mm->context.asce);
814 + else
815 + __tlb_flush_full(mm);
816 + }
817 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
818 +index 52fbef91d1d9..7963c6aa1196 100644
819 +--- a/arch/s390/kernel/ipl.c
820 ++++ b/arch/s390/kernel/ipl.c
821 +@@ -2044,13 +2044,6 @@ void s390_reset_system(void (*fn_pre)(void),
822 + S390_lowcore.program_new_psw.addr =
823 + PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
824 +
825 +- /*
826 +- * Clear subchannel ID and number to signal new kernel that no CCW or
827 +- * SCSI IPL has been done (for kexec and kdump)
828 +- */
829 +- S390_lowcore.subchannel_id = 0;
830 +- S390_lowcore.subchannel_nr = 0;
831 +-
832 + /* Store status at absolute zero */
833 + store_status();
834 +
835 +diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
836 +index 80875c43a4a4..728744918a07 100644
837 +--- a/arch/s390/mm/init.c
838 ++++ b/arch/s390/mm/init.c
839 +@@ -112,7 +112,8 @@ void __init paging_init(void)
840 + asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
841 + pgd_type = _REGION3_ENTRY_EMPTY;
842 + }
843 +- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
844 ++ init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
845 ++ S390_lowcore.kernel_asce = init_mm.context.asce;
846 + clear_table((unsigned long *) init_mm.pgd, pgd_type,
847 + sizeof(unsigned long)*2048);
848 + vmem_map_init();
849 +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
850 +index 6e552af08c76..e2f8685d9981 100644
851 +--- a/arch/s390/mm/mmap.c
852 ++++ b/arch/s390/mm/mmap.c
853 +@@ -184,7 +184,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
854 + if (!(flags & MAP_FIXED))
855 + addr = 0;
856 + if ((addr + len) >= TASK_SIZE)
857 +- return crst_table_upgrade(current->mm, 1UL << 53);
858 ++ return crst_table_upgrade(current->mm);
859 + return 0;
860 + }
861 +
862 +@@ -201,7 +201,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
863 + return area;
864 + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
865 + /* Upgrade the page table to 4 levels and retry. */
866 +- rc = crst_table_upgrade(mm, 1UL << 53);
867 ++ rc = crst_table_upgrade(mm);
868 + if (rc)
869 + return (unsigned long) rc;
870 + area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
871 +@@ -223,7 +223,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
872 + return area;
873 + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
874 + /* Upgrade the page table to 4 levels and retry. */
875 +- rc = crst_table_upgrade(mm, 1UL << 53);
876 ++ rc = crst_table_upgrade(mm);
877 + if (rc)
878 + return (unsigned long) rc;
879 + area = arch_get_unmapped_area_topdown(filp, addr, len,
880 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
881 +index b33f66110ca9..ebf82a99df45 100644
882 +--- a/arch/s390/mm/pgtable.c
883 ++++ b/arch/s390/mm/pgtable.c
884 +@@ -56,81 +56,52 @@ static void __crst_table_upgrade(void *arg)
885 + __tlb_flush_local();
886 + }
887 +
888 +-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
889 ++int crst_table_upgrade(struct mm_struct *mm)
890 + {
891 + unsigned long *table, *pgd;
892 +- unsigned long entry;
893 +- int flush;
894 +
895 +- BUG_ON(limit > (1UL << 53));
896 +- flush = 0;
897 +-repeat:
898 ++ /* upgrade should only happen from 3 to 4 levels */
899 ++ BUG_ON(mm->context.asce_limit != (1UL << 42));
900 ++
901 + table = crst_table_alloc(mm);
902 + if (!table)
903 + return -ENOMEM;
904 ++
905 + spin_lock_bh(&mm->page_table_lock);
906 +- if (mm->context.asce_limit < limit) {
907 +- pgd = (unsigned long *) mm->pgd;
908 +- if (mm->context.asce_limit <= (1UL << 31)) {
909 +- entry = _REGION3_ENTRY_EMPTY;
910 +- mm->context.asce_limit = 1UL << 42;
911 +- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
912 +- _ASCE_USER_BITS |
913 +- _ASCE_TYPE_REGION3;
914 +- } else {
915 +- entry = _REGION2_ENTRY_EMPTY;
916 +- mm->context.asce_limit = 1UL << 53;
917 +- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
918 +- _ASCE_USER_BITS |
919 +- _ASCE_TYPE_REGION2;
920 +- }
921 +- crst_table_init(table, entry);
922 +- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
923 +- mm->pgd = (pgd_t *) table;
924 +- mm->task_size = mm->context.asce_limit;
925 +- table = NULL;
926 +- flush = 1;
927 +- }
928 ++ pgd = (unsigned long *) mm->pgd;
929 ++ crst_table_init(table, _REGION2_ENTRY_EMPTY);
930 ++ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
931 ++ mm->pgd = (pgd_t *) table;
932 ++ mm->context.asce_limit = 1UL << 53;
933 ++ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
934 ++ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
935 ++ mm->task_size = mm->context.asce_limit;
936 + spin_unlock_bh(&mm->page_table_lock);
937 +- if (table)
938 +- crst_table_free(mm, table);
939 +- if (mm->context.asce_limit < limit)
940 +- goto repeat;
941 +- if (flush)
942 +- on_each_cpu(__crst_table_upgrade, mm, 0);
943 ++
944 ++ on_each_cpu(__crst_table_upgrade, mm, 0);
945 + return 0;
946 + }
947 +
948 +-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
949 ++void crst_table_downgrade(struct mm_struct *mm)
950 + {
951 + pgd_t *pgd;
952 +
953 ++ /* downgrade should only happen from 3 to 2 levels (compat only) */
954 ++ BUG_ON(mm->context.asce_limit != (1UL << 42));
955 ++
956 + if (current->active_mm == mm) {
957 + clear_user_asce();
958 + __tlb_flush_mm(mm);
959 + }
960 +- while (mm->context.asce_limit > limit) {
961 +- pgd = mm->pgd;
962 +- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
963 +- case _REGION_ENTRY_TYPE_R2:
964 +- mm->context.asce_limit = 1UL << 42;
965 +- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
966 +- _ASCE_USER_BITS |
967 +- _ASCE_TYPE_REGION3;
968 +- break;
969 +- case _REGION_ENTRY_TYPE_R3:
970 +- mm->context.asce_limit = 1UL << 31;
971 +- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
972 +- _ASCE_USER_BITS |
973 +- _ASCE_TYPE_SEGMENT;
974 +- break;
975 +- default:
976 +- BUG();
977 +- }
978 +- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
979 +- mm->task_size = mm->context.asce_limit;
980 +- crst_table_free(mm, (unsigned long *) pgd);
981 +- }
982 ++
983 ++ pgd = mm->pgd;
984 ++ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
985 ++ mm->context.asce_limit = 1UL << 31;
986 ++ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
987 ++ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
988 ++ mm->task_size = mm->context.asce_limit;
989 ++ crst_table_free(mm, (unsigned long *) pgd);
990 ++
991 + if (current->active_mm == mm)
992 + set_user_asce(mm);
993 + }
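
The rewritten s390 code assumes upgrades go only from 3 to 4 page-table levels: allocate one new top-level table, make the old pgd its child, and recompute the ASCE from the new root. Here is a toy model of growing a tree by one root level; the structures are illustrative, not the s390 CRST layout.

/* Model of the simplified crst_table_upgrade() above: grow the
 * page-table tree by exactly one top level.  The new table becomes
 * the root, the old root becomes its child, and the root descriptor
 * ("asce") would be recomputed from the new root. */
#include <stdio.h>
#include <stdlib.h>

struct table {
    struct table *child;   /* old root after an upgrade */
    int levels;
};

static int table_upgrade(struct table **root)
{
    struct table *t = calloc(1, sizeof(*t));

    if (!t)
        return -1;
    t->child = *root;                /* like pgd_populate(new, old) */
    t->levels = (*root)->levels + 1;
    *root = t;                       /* like mm->pgd = new table    */
    return 0;
}

int main(void)
{
    struct table three = { .child = NULL, .levels = 3 };
    struct table *root = &three;

    if (table_upgrade(&root) == 0)
        printf("root now has %d levels (child has %d)\n",
               root->levels, root->child->levels);
    return 0;
}
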
994 +diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
995 +index 10e9dabc4c41..f0700cfeedd7 100644
996 +--- a/arch/sparc/include/asm/head_64.h
997 ++++ b/arch/sparc/include/asm/head_64.h
998 +@@ -15,6 +15,10 @@
999 +
1000 + #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
1001 +
1002 ++#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
1003 ++#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
1004 ++#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
1005 ++
1006 + #define __CHEETAH_ID 0x003e0014
1007 + #define __JALAPENO_ID 0x003e0016
1008 + #define __SERRANO_ID 0x003e0022
1009 +diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
1010 +index 71b5a67522ab..781b9f1dbdc2 100644
1011 +--- a/arch/sparc/include/asm/ttable.h
1012 ++++ b/arch/sparc/include/asm/ttable.h
1013 +@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
1014 + restored; \
1015 + nop; nop; nop; nop; nop; nop; \
1016 + nop; nop; nop; nop; nop; \
1017 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
1018 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
1019 ++ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
1020 ++ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
1021 + ba,a,pt %xcc, user_rtt_fill_fixup;
1022 +
1023 +
1024 +@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
1025 + restored; \
1026 + nop; nop; nop; nop; nop; \
1027 + nop; nop; nop; \
1028 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
1029 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
1030 ++ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
1031 ++ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
1032 + ba,a,pt %xcc, user_rtt_fill_fixup;
1033 +
1034 +
1035 +diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
1036 +index 7cf9c6ea3f1f..fdb13327fded 100644
1037 +--- a/arch/sparc/kernel/Makefile
1038 ++++ b/arch/sparc/kernel/Makefile
1039 +@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
1040 + CFLAGS_REMOVE_pcr.o := -pg
1041 + endif
1042 +
1043 ++obj-$(CONFIG_SPARC64) += urtt_fill.o
1044 + obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
1045 + obj-$(CONFIG_SPARC32) += etrap_32.o
1046 + obj-$(CONFIG_SPARC32) += rtrap_32.o
1047 +diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
1048 +index 4ee1ad420862..655628def68e 100644
1049 +--- a/arch/sparc/kernel/cherrs.S
1050 ++++ b/arch/sparc/kernel/cherrs.S
1051 +@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
1052 + subcc %g1, %g2, %g1 ! Next cacheline
1053 + bge,pt %icc, 1b
1054 + nop
1055 +- ba,pt %xcc, dcpe_icpe_tl1_common
1056 +- nop
1057 ++ ba,a,pt %xcc, dcpe_icpe_tl1_common
1058 +
1059 + do_dcpe_tl1_fatal:
1060 + sethi %hi(1f), %g7
1061 +@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
1062 + mov 0x2, %o0
1063 + call cheetah_plus_parity_error
1064 + add %sp, PTREGS_OFF, %o1
1065 +- ba,pt %xcc, rtrap
1066 +- nop
1067 ++ ba,a,pt %xcc, rtrap
1068 + .size do_dcpe_tl1,.-do_dcpe_tl1
1069 +
1070 + .globl do_icpe_tl1
1071 +@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
1072 + subcc %g1, %g2, %g1
1073 + bge,pt %icc, 1b
1074 + nop
1075 +- ba,pt %xcc, dcpe_icpe_tl1_common
1076 +- nop
1077 ++ ba,a,pt %xcc, dcpe_icpe_tl1_common
1078 +
1079 + do_icpe_tl1_fatal:
1080 + sethi %hi(1f), %g7
1081 +@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
1082 + mov 0x3, %o0
1083 + call cheetah_plus_parity_error
1084 + add %sp, PTREGS_OFF, %o1
1085 +- ba,pt %xcc, rtrap
1086 +- nop
1087 ++ ba,a,pt %xcc, rtrap
1088 + .size do_icpe_tl1,.-do_icpe_tl1
1089 +
1090 + .type dcpe_icpe_tl1_common,#function
1091 +@@ -456,7 +452,7 @@ __cheetah_log_error:
1092 + cmp %g2, 0x63
1093 + be c_cee
1094 + nop
1095 +- ba,pt %xcc, c_deferred
1096 ++ ba,a,pt %xcc, c_deferred
1097 + .size __cheetah_log_error,.-__cheetah_log_error
1098 +
1099 + /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
1100 +diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
1101 +index 33c02b15f478..a83707c83be8 100644
1102 +--- a/arch/sparc/kernel/entry.S
1103 ++++ b/arch/sparc/kernel/entry.S
1104 +@@ -948,7 +948,24 @@ linux_syscall_trace:
1105 + cmp %o0, 0
1106 + bne 3f
1107 + mov -ENOSYS, %o0
1108 ++
1109 ++ /* Syscall tracing can modify the registers. */
1110 ++ ld [%sp + STACKFRAME_SZ + PT_G1], %g1
1111 ++ sethi %hi(sys_call_table), %l7
1112 ++ ld [%sp + STACKFRAME_SZ + PT_I0], %i0
1113 ++ or %l7, %lo(sys_call_table), %l7
1114 ++ ld [%sp + STACKFRAME_SZ + PT_I1], %i1
1115 ++ ld [%sp + STACKFRAME_SZ + PT_I2], %i2
1116 ++ ld [%sp + STACKFRAME_SZ + PT_I3], %i3
1117 ++ ld [%sp + STACKFRAME_SZ + PT_I4], %i4
1118 ++ ld [%sp + STACKFRAME_SZ + PT_I5], %i5
1119 ++ cmp %g1, NR_syscalls
1120 ++ bgeu 3f
1121 ++ mov -ENOSYS, %o0
1122 ++
1123 ++ sll %g1, 2, %l4
1124 + mov %i0, %o0
1125 ++ ld [%l7 + %l4], %l7
1126 + mov %i1, %o1
1127 + mov %i2, %o2
1128 + mov %i3, %o3
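
The entry.S hunk exists because a ptrace tracer may rewrite the syscall number and arguments, so after tracing they must be reloaded from the saved frame and the number re-checked against NR_syscalls before indexing the table. A compact C model of that reload-after-tracing rule, with stand-in structures for the real trap frame:

/* Model of the entry.S change above: after the tracer runs, reload
 * the syscall number and arguments from the saved registers (ptrace
 * may have rewritten them) and re-validate the number before using
 * it as a table index.  All structures are illustrative. */
#include <stdio.h>

#define NR_SYSCALLS 3
#define ENOSYS_RET  (-38)   /* ENOSYS is 38 on Linux */

struct saved_regs { long g1; long i0; };   /* number, first arg */

static long sys_zero(long a) { return 0 * a; }
static long sys_one(long a)  { return 1 + a; }
static long sys_two(long a)  { return 2 + a; }

static long (*const table[NR_SYSCALLS])(long) = {
    sys_zero, sys_one, sys_two,
};

static void tracer(struct saved_regs *r)
{
    r->g1 = 2;      /* a ptrace-like tracer rewrites the syscall */
    r->i0 = 40;
}

int main(void)
{
    struct saved_regs regs = { .g1 = 1, .i0 = 5 };

    tracer(&regs);
    /* Reload from the frame *after* tracing, then re-validate. */
    long nr = regs.g1, arg = regs.i0;
    long ret = (nr < 0 || nr >= NR_SYSCALLS) ? ENOSYS_RET
                                             : table[nr](arg);
    printf("syscall %ld -> %ld\n", nr, ret);
    return 0;
}
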
1129 +diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
1130 +index a6864826a4bd..336d2750fe78 100644
1131 +--- a/arch/sparc/kernel/fpu_traps.S
1132 ++++ b/arch/sparc/kernel/fpu_traps.S
1133 +@@ -100,8 +100,8 @@ do_fpdis:
1134 + fmuld %f0, %f2, %f26
1135 + faddd %f0, %f2, %f28
1136 + fmuld %f0, %f2, %f30
1137 +- b,pt %xcc, fpdis_exit
1138 +- nop
1139 ++ ba,a,pt %xcc, fpdis_exit
1140 ++
1141 + 2: andcc %g5, FPRS_DU, %g0
1142 + bne,pt %icc, 3f
1143 + fzero %f32
1144 +@@ -144,8 +144,8 @@ do_fpdis:
1145 + fmuld %f32, %f34, %f58
1146 + faddd %f32, %f34, %f60
1147 + fmuld %f32, %f34, %f62
1148 +- ba,pt %xcc, fpdis_exit
1149 +- nop
1150 ++ ba,a,pt %xcc, fpdis_exit
1151 ++
1152 + 3: mov SECONDARY_CONTEXT, %g3
1153 + add %g6, TI_FPREGS, %g1
1154 +
1155 +@@ -197,8 +197,7 @@ fpdis_exit2:
1156 + fp_other_bounce:
1157 + call do_fpother
1158 + add %sp, PTREGS_OFF, %o0
1159 +- ba,pt %xcc, rtrap
1160 +- nop
1161 ++ ba,a,pt %xcc, rtrap
1162 + .size fp_other_bounce,.-fp_other_bounce
1163 +
1164 + .align 32
1165 +diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
1166 +index 3d61fcae7ee3..8ff57630a486 100644
1167 +--- a/arch/sparc/kernel/head_64.S
1168 ++++ b/arch/sparc/kernel/head_64.S
1169 +@@ -461,9 +461,8 @@ sun4v_chip_type:
1170 + subcc %g3, 1, %g3
1171 + bne,pt %xcc, 41b
1172 + add %g1, 1, %g1
1173 +- mov SUN4V_CHIP_SPARC64X, %g4
1174 + ba,pt %xcc, 5f
1175 +- nop
1176 ++ mov SUN4V_CHIP_SPARC64X, %g4
1177 +
1178 + 49:
1179 + mov SUN4V_CHIP_UNKNOWN, %g4
1180 +@@ -548,8 +547,7 @@ sun4u_init:
1181 + stxa %g0, [%g7] ASI_DMMU
1182 + membar #Sync
1183 +
1184 +- ba,pt %xcc, sun4u_continue
1185 +- nop
1186 ++ ba,a,pt %xcc, sun4u_continue
1187 +
1188 + sun4v_init:
1189 + /* Set ctx 0 */
1190 +@@ -560,14 +558,12 @@ sun4v_init:
1191 + mov SECONDARY_CONTEXT, %g7
1192 + stxa %g0, [%g7] ASI_MMU
1193 + membar #Sync
1194 +- ba,pt %xcc, niagara_tlb_fixup
1195 +- nop
1196 ++ ba,a,pt %xcc, niagara_tlb_fixup
1197 +
1198 + sun4u_continue:
1199 + BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
1200 +
1201 +- ba,pt %xcc, spitfire_tlb_fixup
1202 +- nop
1203 ++ ba,a,pt %xcc, spitfire_tlb_fixup
1204 +
1205 + niagara_tlb_fixup:
1206 + mov 3, %g2 /* Set TLB type to hypervisor. */
1207 +@@ -639,8 +635,7 @@ niagara_patch:
1208 + call hypervisor_patch_cachetlbops
1209 + nop
1210 +
1211 +- ba,pt %xcc, tlb_fixup_done
1212 +- nop
1213 ++ ba,a,pt %xcc, tlb_fixup_done
1214 +
1215 + cheetah_tlb_fixup:
1216 + mov 2, %g2 /* Set TLB type to cheetah+. */
1217 +@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
1218 + call cheetah_patch_cachetlbops
1219 + nop
1220 +
1221 +- ba,pt %xcc, tlb_fixup_done
1222 +- nop
1223 ++ ba,a,pt %xcc, tlb_fixup_done
1224 +
1225 + spitfire_tlb_fixup:
1226 + /* Set TLB type to spitfire. */
1227 +@@ -782,8 +776,7 @@ setup_trap_table:
1228 + call %o1
1229 + add %sp, (2047 + 128), %o0
1230 +
1231 +- ba,pt %xcc, 2f
1232 +- nop
1233 ++ ba,a,pt %xcc, 2f
1234 +
1235 + 1: sethi %hi(sparc64_ttable_tl0), %o0
1236 + set prom_set_trap_table_name, %g2
1237 +@@ -822,8 +815,7 @@ setup_trap_table:
1238 +
1239 + BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
1240 +
1241 +- ba,pt %xcc, 2f
1242 +- nop
1243 ++ ba,a,pt %xcc, 2f
1244 +
1245 + /* Disable STICK_INT interrupts. */
1246 + 1:
1247 +diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
1248 +index 753b4f031bfb..34b4933900bf 100644
1249 +--- a/arch/sparc/kernel/misctrap.S
1250 ++++ b/arch/sparc/kernel/misctrap.S
1251 +@@ -18,8 +18,7 @@ __do_privact:
1252 + 109: or %g7, %lo(109b), %g7
1253 + call do_privact
1254 + add %sp, PTREGS_OFF, %o0
1255 +- ba,pt %xcc, rtrap
1256 +- nop
1257 ++ ba,a,pt %xcc, rtrap
1258 + .size __do_privact,.-__do_privact
1259 +
1260 + .type do_mna,#function
1261 +@@ -46,8 +45,7 @@ do_mna:
1262 + mov %l5, %o2
1263 + call mem_address_unaligned
1264 + add %sp, PTREGS_OFF, %o0
1265 +- ba,pt %xcc, rtrap
1266 +- nop
1267 ++ ba,a,pt %xcc, rtrap
1268 + .size do_mna,.-do_mna
1269 +
1270 + .type do_lddfmna,#function
1271 +@@ -65,8 +63,7 @@ do_lddfmna:
1272 + mov %l5, %o2
1273 + call handle_lddfmna
1274 + add %sp, PTREGS_OFF, %o0
1275 +- ba,pt %xcc, rtrap
1276 +- nop
1277 ++ ba,a,pt %xcc, rtrap
1278 + .size do_lddfmna,.-do_lddfmna
1279 +
1280 + .type do_stdfmna,#function
1281 +@@ -84,8 +81,7 @@ do_stdfmna:
1282 + mov %l5, %o2
1283 + call handle_stdfmna
1284 + add %sp, PTREGS_OFF, %o0
1285 +- ba,pt %xcc, rtrap
1286 +- nop
1287 ++ ba,a,pt %xcc, rtrap
1288 + .size do_stdfmna,.-do_stdfmna
1289 +
1290 + .type breakpoint_trap,#function
1291 +diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
1292 +index c928bc64b4ba..991ba90beb04 100644
1293 +--- a/arch/sparc/kernel/pci.c
1294 ++++ b/arch/sparc/kernel/pci.c
1295 +@@ -994,6 +994,23 @@ void pcibios_set_master(struct pci_dev *dev)
1296 + /* No special bus mastering setup handling */
1297 + }
1298 +
1299 ++#ifdef CONFIG_PCI_IOV
1300 ++int pcibios_add_device(struct pci_dev *dev)
1301 ++{
1302 ++ struct pci_dev *pdev;
1303 ++
1304 ++ /* Add sriov arch specific initialization here.
1305 ++ * Copy dev_archdata from PF to VF
1306 ++ */
1307 ++ if (dev->is_virtfn) {
1308 ++ pdev = dev->physfn;
1309 ++ memcpy(&dev->dev.archdata, &pdev->dev.archdata,
1310 ++ sizeof(struct dev_archdata));
1311 ++ }
1312 ++ return 0;
1313 ++}
1314 ++#endif /* CONFIG_PCI_IOV */
1315 ++
1316 + static int __init pcibios_init(void)
1317 + {
1318 + pci_dfl_cache_line_size = 64 >> 2;
1319 +diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
1320 +index 39f0c662f4c8..8de386dc8150 100644
1321 +--- a/arch/sparc/kernel/rtrap_64.S
1322 ++++ b/arch/sparc/kernel/rtrap_64.S
1323 +@@ -14,10 +14,6 @@
1324 + #include <asm/visasm.h>
1325 + #include <asm/processor.h>
1326 +
1327 +-#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
1328 +-#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
1329 +-#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
1330 +-
1331 + #ifdef CONFIG_CONTEXT_TRACKING
1332 + # define SCHEDULE_USER schedule_user
1333 + #else
1334 +@@ -236,52 +232,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
1335 + wrpr %g1, %cwp
1336 + ba,a,pt %xcc, user_rtt_fill_64bit
1337 +
1338 +-user_rtt_fill_fixup:
1339 +- rdpr %cwp, %g1
1340 +- add %g1, 1, %g1
1341 +- wrpr %g1, 0x0, %cwp
1342 +-
1343 +- rdpr %wstate, %g2
1344 +- sll %g2, 3, %g2
1345 +- wrpr %g2, 0x0, %wstate
1346 +-
1347 +- /* We know %canrestore and %otherwin are both zero. */
1348 +-
1349 +- sethi %hi(sparc64_kern_pri_context), %g2
1350 +- ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
1351 +- mov PRIMARY_CONTEXT, %g1
1352 +-
1353 +-661: stxa %g2, [%g1] ASI_DMMU
1354 +- .section .sun4v_1insn_patch, "ax"
1355 +- .word 661b
1356 +- stxa %g2, [%g1] ASI_MMU
1357 +- .previous
1358 +-
1359 +- sethi %hi(KERNBASE), %g1
1360 +- flush %g1
1361 ++user_rtt_fill_fixup_dax:
1362 ++ ba,pt %xcc, user_rtt_fill_fixup_common
1363 ++ mov 1, %g3
1364 +
1365 +- or %g4, FAULT_CODE_WINFIXUP, %g4
1366 +- stb %g4, [%g6 + TI_FAULT_CODE]
1367 +- stx %g5, [%g6 + TI_FAULT_ADDR]
1368 ++user_rtt_fill_fixup_mna:
1369 ++ ba,pt %xcc, user_rtt_fill_fixup_common
1370 ++ mov 2, %g3
1371 +
1372 +- mov %g6, %l1
1373 +- wrpr %g0, 0x0, %tl
1374 +-
1375 +-661: nop
1376 +- .section .sun4v_1insn_patch, "ax"
1377 +- .word 661b
1378 +- SET_GL(0)
1379 +- .previous
1380 +-
1381 +- wrpr %g0, RTRAP_PSTATE, %pstate
1382 +-
1383 +- mov %l1, %g6
1384 +- ldx [%g6 + TI_TASK], %g4
1385 +- LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
1386 +- call do_sparc64_fault
1387 +- add %sp, PTREGS_OFF, %o0
1388 +- ba,pt %xcc, rtrap
1389 +- nop
1390 ++user_rtt_fill_fixup:
1391 ++ ba,pt %xcc, user_rtt_fill_fixup_common
1392 ++ clr %g3
1393 +
1394 + user_rtt_pre_restore:
1395 + add %g1, 1, %g1
1396 +diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
1397 +index 4eed773a7735..77655f0f0fc7 100644
1398 +--- a/arch/sparc/kernel/signal32.c
1399 ++++ b/arch/sparc/kernel/signal32.c
1400 +@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
1401 + return 0;
1402 + }
1403 +
1404 ++/* Checks if the fp is valid. We always build signal frames which are
1405 ++ * 16-byte aligned, therefore we can always enforce that the restore
1406 ++ * frame has that property as well.
1407 ++ */
1408 ++static bool invalid_frame_pointer(void __user *fp, int fplen)
1409 ++{
1410 ++ if ((((unsigned long) fp) & 15) ||
1411 ++ ((unsigned long)fp) > 0x100000000ULL - fplen)
1412 ++ return true;
1413 ++ return false;
1414 ++}
1415 ++
1416 + void do_sigreturn32(struct pt_regs *regs)
1417 + {
1418 + struct signal_frame32 __user *sf;
1419 + compat_uptr_t fpu_save;
1420 + compat_uptr_t rwin_save;
1421 +- unsigned int psr;
1422 ++ unsigned int psr, ufp;
1423 + unsigned pc, npc;
1424 + sigset_t set;
1425 + compat_sigset_t seta;
1426 +@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
1427 + sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
1428 +
1429 + /* 1. Make sure we are not getting garbage from the user */
1430 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
1431 +- (((unsigned long) sf) & 3))
1432 ++ if (invalid_frame_pointer(sf, sizeof(*sf)))
1433 ++ goto segv;
1434 ++
1435 ++ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
1436 ++ goto segv;
1437 ++
1438 ++ if (ufp & 0x7)
1439 + goto segv;
1440 +
1441 +- if (get_user(pc, &sf->info.si_regs.pc) ||
1442 ++ if (__get_user(pc, &sf->info.si_regs.pc) ||
1443 + __get_user(npc, &sf->info.si_regs.npc))
1444 + goto segv;
1445 +
1446 +@@ -227,7 +244,7 @@ segv:
1447 + asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
1448 + {
1449 + struct rt_signal_frame32 __user *sf;
1450 +- unsigned int psr, pc, npc;
1451 ++ unsigned int psr, pc, npc, ufp;
1452 + compat_uptr_t fpu_save;
1453 + compat_uptr_t rwin_save;
1454 + sigset_t set;
1455 +@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
1456 + sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
1457 +
1458 + /* 1. Make sure we are not getting garbage from the user */
1459 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
1460 +- (((unsigned long) sf) & 3))
1461 ++ if (invalid_frame_pointer(sf, sizeof(*sf)))
1462 + goto segv;
1463 +
1464 +- if (get_user(pc, &sf->regs.pc) ||
1465 ++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1466 ++ goto segv;
1467 ++
1468 ++ if (ufp & 0x7)
1469 ++ goto segv;
1470 ++
1471 ++ if (__get_user(pc, &sf->regs.pc) ||
1472 + __get_user(npc, &sf->regs.npc))
1473 + goto segv;
1474 +
1475 +@@ -307,14 +329,6 @@ segv:
1476 + force_sig(SIGSEGV, current);
1477 + }
1478 +
1479 +-/* Checks if the fp is valid */
1480 +-static int invalid_frame_pointer(void __user *fp, int fplen)
1481 +-{
1482 +- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
1483 +- return 1;
1484 +- return 0;
1485 +-}
1486 +-
1487 + static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
1488 + {
1489 + unsigned long sp;
1490 +diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
1491 +index 52aa5e4ce5e7..c3c12efe0bc0 100644
1492 +--- a/arch/sparc/kernel/signal_32.c
1493 ++++ b/arch/sparc/kernel/signal_32.c
1494 +@@ -60,10 +60,22 @@ struct rt_signal_frame {
1495 + #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
1496 + #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
1497 +
1498 ++/* Checks if the fp is valid. We always build signal frames which are
1499 ++ * 16-byte aligned, therefore we can always enforce that the restore
1500 ++ * frame has that property as well.
1501 ++ */
1502 ++static inline bool invalid_frame_pointer(void __user *fp, int fplen)
1503 ++{
1504 ++ if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
1505 ++ return true;
1506 ++
1507 ++ return false;
1508 ++}
1509 ++
1510 + asmlinkage void do_sigreturn(struct pt_regs *regs)
1511 + {
1512 ++ unsigned long up_psr, pc, npc, ufp;
1513 + struct signal_frame __user *sf;
1514 +- unsigned long up_psr, pc, npc;
1515 + sigset_t set;
1516 + __siginfo_fpu_t __user *fpu_save;
1517 + __siginfo_rwin_t __user *rwin_save;
1518 +@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
1519 + sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
1520 +
1521 + /* 1. Make sure we are not getting garbage from the user */
1522 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
1523 ++ if (!invalid_frame_pointer(sf, sizeof(*sf)))
1524 ++ goto segv_and_exit;
1525 ++
1526 ++ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
1527 + goto segv_and_exit;
1528 +
1529 +- if (((unsigned long) sf) & 3)
1530 ++ if (ufp & 0x7)
1531 + goto segv_and_exit;
1532 +
1533 + err = __get_user(pc, &sf->info.si_regs.pc);
1534 +@@ -127,7 +142,7 @@ segv_and_exit:
1535 + asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
1536 + {
1537 + struct rt_signal_frame __user *sf;
1538 +- unsigned int psr, pc, npc;
1539 ++ unsigned int psr, pc, npc, ufp;
1540 + __siginfo_fpu_t __user *fpu_save;
1541 + __siginfo_rwin_t __user *rwin_save;
1542 + sigset_t set;
1543 +@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
1544 +
1545 + synchronize_user_stack();
1546 + sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
1547 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
1548 +- (((unsigned long) sf) & 0x03))
1549 ++ if (invalid_frame_pointer(sf, sizeof(*sf)))
1550 ++ goto segv;
1551 ++
1552 ++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1553 ++ goto segv;
1554 ++
1555 ++ if (ufp & 0x7)
1556 + goto segv;
1557 +
1558 + err = __get_user(pc, &sf->regs.pc);
1559 +@@ -178,15 +198,6 @@ segv:
1560 + force_sig(SIGSEGV, current);
1561 + }
1562 +
1563 +-/* Checks if the fp is valid */
1564 +-static inline int invalid_frame_pointer(void __user *fp, int fplen)
1565 +-{
1566 +- if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
1567 +- return 1;
1568 +-
1569 +- return 0;
1570 +-}
1571 +-
1572 + static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
1573 + {
1574 + unsigned long sp = regs->u_regs[UREG_FP];
1575 +diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
1576 +index d88beff47bab..5ee930c48f4c 100644
1577 +--- a/arch/sparc/kernel/signal_64.c
1578 ++++ b/arch/sparc/kernel/signal_64.c
1579 +@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
1580 + unsigned char fenab;
1581 + int err;
1582 +
1583 +- flush_user_windows();
1584 ++ synchronize_user_stack();
1585 + if (get_thread_wsaved() ||
1586 + (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
1587 + (!__access_ok(ucp, sizeof(*ucp))))
1588 +@@ -234,6 +234,17 @@ do_sigsegv:
1589 + goto out;
1590 + }
1591 +
1592 ++/* Checks if the fp is valid. We always build rt signal frames which
1593 ++ * are 16-byte aligned, therefore we can always enforce that the
1594 ++ * restore frame has that property as well.
1595 ++ */
1596 ++static bool invalid_frame_pointer(void __user *fp)
1597 ++{
1598 ++ if (((unsigned long) fp) & 15)
1599 ++ return true;
1600 ++ return false;
1601 ++}
1602 ++
1603 + struct rt_signal_frame {
1604 + struct sparc_stackf ss;
1605 + siginfo_t info;
1606 +@@ -246,8 +257,8 @@ struct rt_signal_frame {
1607 +
1608 + void do_rt_sigreturn(struct pt_regs *regs)
1609 + {
1610 ++ unsigned long tpc, tnpc, tstate, ufp;
1611 + struct rt_signal_frame __user *sf;
1612 +- unsigned long tpc, tnpc, tstate;
1613 + __siginfo_fpu_t __user *fpu_save;
1614 + __siginfo_rwin_t __user *rwin_save;
1615 + sigset_t set;
1616 +@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
1617 + (regs->u_regs [UREG_FP] + STACK_BIAS);
1618 +
1619 + /* 1. Make sure we are not getting garbage from the user */
1620 +- if (((unsigned long) sf) & 3)
1621 ++ if (invalid_frame_pointer(sf))
1622 ++ goto segv;
1623 ++
1624 ++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1625 + goto segv;
1626 +
1627 +- err = get_user(tpc, &sf->regs.tpc);
1628 ++ if ((ufp + STACK_BIAS) & 0x7)
1629 ++ goto segv;
1630 ++
1631 ++ err = __get_user(tpc, &sf->regs.tpc);
1632 + err |= __get_user(tnpc, &sf->regs.tnpc);
1633 + if (test_thread_flag(TIF_32BIT)) {
1634 + tpc &= 0xffffffff;
1635 +@@ -308,14 +325,6 @@ segv:
1636 + force_sig(SIGSEGV, current);
1637 + }
1638 +
1639 +-/* Checks if the fp is valid */
1640 +-static int invalid_frame_pointer(void __user *fp)
1641 +-{
1642 +- if (((unsigned long) fp) & 15)
1643 +- return 1;
1644 +- return 0;
1645 +-}
1646 +-
1647 + static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
1648 + {
1649 + unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
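All three sigreturn paths above now share one shape: reject any frame that lacks the 16-byte alignment the kernel itself uses when building signal frames, then separately fetch and alignment-check the saved frame pointer word before trusting it. A minimal user-space sketch of the alignment test, with an invented frame struct standing in for the real layout:

    /* Sketch of the frame validation pattern; struct and sizes are
     * illustrative, not the kernel's real signal frame layout. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_frame { unsigned long regs[16]; };

    /* Frames are built 16-byte aligned, so anything else is forged. */
    static bool invalid_frame_pointer(const void *fp, size_t fplen)
    {
        (void)fplen;                     /* range check omitted in sketch */
        return ((uintptr_t)fp & 15) != 0;
    }

    int main(void)
    {
        struct fake_frame f __attribute__((aligned(16)));

        printf("aligned frame invalid?  %d\n",
               invalid_frame_pointer(&f, sizeof(f)));
        printf("offset frame invalid?   %d\n",
               invalid_frame_pointer((char *)&f + 4, sizeof(f)));
        return 0;
    }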
1650 +diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
1651 +index 0f6eebe71e6c..e5fe8cef9a69 100644
1652 +--- a/arch/sparc/kernel/sigutil_32.c
1653 ++++ b/arch/sparc/kernel/sigutil_32.c
1654 +@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1655 + int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1656 + {
1657 + int err;
1658 ++
1659 ++ if (((unsigned long) fpu) & 3)
1660 ++ return -EFAULT;
1661 ++
1662 + #ifdef CONFIG_SMP
1663 + if (test_tsk_thread_flag(current, TIF_USEDFPU))
1664 + regs->psr &= ~PSR_EF;
1665 +@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
1666 + struct thread_info *t = current_thread_info();
1667 + int i, wsaved, err;
1668 +
1669 +- __get_user(wsaved, &rp->wsaved);
1670 ++ if (((unsigned long) rp) & 3)
1671 ++ return -EFAULT;
1672 ++
1673 ++ get_user(wsaved, &rp->wsaved);
1674 + if (wsaved > NSWINS)
1675 + return -EFAULT;
1676 +
1677 +diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
1678 +index 387834a9c56a..36aadcbeac69 100644
1679 +--- a/arch/sparc/kernel/sigutil_64.c
1680 ++++ b/arch/sparc/kernel/sigutil_64.c
1681 +@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1682 + unsigned long fprs;
1683 + int err;
1684 +
1685 +- err = __get_user(fprs, &fpu->si_fprs);
1686 ++ if (((unsigned long) fpu) & 7)
1687 ++ return -EFAULT;
1688 ++
1689 ++ err = get_user(fprs, &fpu->si_fprs);
1690 + fprs_write(0);
1691 + regs->tstate &= ~TSTATE_PEF;
1692 + if (fprs & FPRS_DL)
1693 +@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
1694 + struct thread_info *t = current_thread_info();
1695 + int i, wsaved, err;
1696 +
1697 +- __get_user(wsaved, &rp->wsaved);
1698 ++ if (((unsigned long) rp) & 7)
1699 ++ return -EFAULT;
1700 ++
1701 ++ get_user(wsaved, &rp->wsaved);
1702 + if (wsaved > NSWINS)
1703 + return -EFAULT;
1704 +
1705 +diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
1706 +index c357e40ffd01..4a73009f66a5 100644
1707 +--- a/arch/sparc/kernel/spiterrs.S
1708 ++++ b/arch/sparc/kernel/spiterrs.S
1709 +@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
1710 + ba,pt %xcc, etraptl1
1711 + rd %pc, %g7
1712 +
1713 +- ba,pt %xcc, 2f
1714 +- nop
1715 ++ ba,a,pt %xcc, 2f
1716 +
1717 + 1: ba,pt %xcc, etrap_irq
1718 + rd %pc, %g7
1719 +@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
1720 + mov %l5, %o2
1721 + call spitfire_access_error
1722 + add %sp, PTREGS_OFF, %o0
1723 +- ba,pt %xcc, rtrap
1724 +- nop
1725 ++ ba,a,pt %xcc, rtrap
1726 + .size __spitfire_access_error,.-__spitfire_access_error
1727 +
1728 + /* This is the trap handler entry point for ECC correctable
1729 +@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
1730 + mov %l5, %o2
1731 + call spitfire_data_access_exception_tl1
1732 + add %sp, PTREGS_OFF, %o0
1733 +- ba,pt %xcc, rtrap
1734 +- nop
1735 ++ ba,a,pt %xcc, rtrap
1736 + .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
1737 +
1738 + .type __spitfire_data_access_exception,#function
1739 +@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
1740 + mov %l5, %o2
1741 + call spitfire_data_access_exception
1742 + add %sp, PTREGS_OFF, %o0
1743 +- ba,pt %xcc, rtrap
1744 +- nop
1745 ++ ba,a,pt %xcc, rtrap
1746 + .size __spitfire_data_access_exception,.-__spitfire_data_access_exception
1747 +
1748 + .type __spitfire_insn_access_exception_tl1,#function
1749 +@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
1750 + mov %l5, %o2
1751 + call spitfire_insn_access_exception_tl1
1752 + add %sp, PTREGS_OFF, %o0
1753 +- ba,pt %xcc, rtrap
1754 +- nop
1755 ++ ba,a,pt %xcc, rtrap
1756 + .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
1757 +
1758 + .type __spitfire_insn_access_exception,#function
1759 +@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
1760 + mov %l5, %o2
1761 + call spitfire_insn_access_exception
1762 + add %sp, PTREGS_OFF, %o0
1763 +- ba,pt %xcc, rtrap
1764 +- nop
1765 ++ ba,a,pt %xcc, rtrap
1766 + .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
1767 +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
1768 +index bb0008927598..c4a1b5c40e4e 100644
1769 +--- a/arch/sparc/kernel/syscalls.S
1770 ++++ b/arch/sparc/kernel/syscalls.S
1771 +@@ -158,7 +158,25 @@ linux_syscall_trace32:
1772 + add %sp, PTREGS_OFF, %o0
1773 + brnz,pn %o0, 3f
1774 + mov -ENOSYS, %o0
1775 ++
1776 ++ /* Syscall tracing can modify the registers. */
1777 ++ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
1778 ++ sethi %hi(sys_call_table32), %l7
1779 ++ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
1780 ++ or %l7, %lo(sys_call_table32), %l7
1781 ++ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
1782 ++ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
1783 ++ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
1784 ++ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
1785 ++ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
1786 ++
1787 ++ cmp %g1, NR_syscalls
1788 ++ bgeu,pn %xcc, 3f
1789 ++ mov -ENOSYS, %o0
1790 ++
1791 ++ sll %g1, 2, %l4
1792 + srl %i0, 0, %o0
1793 ++ lduw [%l7 + %l4], %l7
1794 + srl %i4, 0, %o4
1795 + srl %i1, 0, %o1
1796 + srl %i2, 0, %o2
1797 +@@ -170,7 +188,25 @@ linux_syscall_trace:
1798 + add %sp, PTREGS_OFF, %o0
1799 + brnz,pn %o0, 3f
1800 + mov -ENOSYS, %o0
1801 ++
1802 ++ /* Syscall tracing can modify the registers. */
1803 ++ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
1804 ++ sethi %hi(sys_call_table64), %l7
1805 ++ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
1806 ++ or %l7, %lo(sys_call_table64), %l7
1807 ++ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
1808 ++ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
1809 ++ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
1810 ++ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
1811 ++ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
1812 ++
1813 ++ cmp %g1, NR_syscalls
1814 ++ bgeu,pn %xcc, 3f
1815 ++ mov -ENOSYS, %o0
1816 ++
1817 ++ sll %g1, 2, %l4
1818 + mov %i0, %o0
1819 ++ lduw [%l7 + %l4], %l7
1820 + mov %i1, %o1
1821 + mov %i2, %o2
1822 + mov %i3, %o3
1823 +diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
1824 +new file mode 100644
1825 +index 000000000000..5604a2b051d4
1826 +--- /dev/null
1827 ++++ b/arch/sparc/kernel/urtt_fill.S
1828 +@@ -0,0 +1,98 @@
1829 ++#include <asm/thread_info.h>
1830 ++#include <asm/trap_block.h>
1831 ++#include <asm/spitfire.h>
1832 ++#include <asm/ptrace.h>
1833 ++#include <asm/head.h>
1834 ++
1835 ++ .text
1836 ++ .align 8
1837 ++ .globl user_rtt_fill_fixup_common
1838 ++user_rtt_fill_fixup_common:
1839 ++ rdpr %cwp, %g1
1840 ++ add %g1, 1, %g1
1841 ++ wrpr %g1, 0x0, %cwp
1842 ++
1843 ++ rdpr %wstate, %g2
1844 ++ sll %g2, 3, %g2
1845 ++ wrpr %g2, 0x0, %wstate
1846 ++
1847 ++ /* We know %canrestore and %otherwin are both zero. */
1848 ++
1849 ++ sethi %hi(sparc64_kern_pri_context), %g2
1850 ++ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
1851 ++ mov PRIMARY_CONTEXT, %g1
1852 ++
1853 ++661: stxa %g2, [%g1] ASI_DMMU
1854 ++ .section .sun4v_1insn_patch, "ax"
1855 ++ .word 661b
1856 ++ stxa %g2, [%g1] ASI_MMU
1857 ++ .previous
1858 ++
1859 ++ sethi %hi(KERNBASE), %g1
1860 ++ flush %g1
1861 ++
1862 ++ mov %g4, %l4
1863 ++ mov %g5, %l5
1864 ++ brnz,pn %g3, 1f
1865 ++ mov %g3, %l3
1866 ++
1867 ++ or %g4, FAULT_CODE_WINFIXUP, %g4
1868 ++ stb %g4, [%g6 + TI_FAULT_CODE]
1869 ++ stx %g5, [%g6 + TI_FAULT_ADDR]
1870 ++1:
1871 ++ mov %g6, %l1
1872 ++ wrpr %g0, 0x0, %tl
1873 ++
1874 ++661: nop
1875 ++ .section .sun4v_1insn_patch, "ax"
1876 ++ .word 661b
1877 ++ SET_GL(0)
1878 ++ .previous
1879 ++
1880 ++ wrpr %g0, RTRAP_PSTATE, %pstate
1881 ++
1882 ++ mov %l1, %g6
1883 ++ ldx [%g6 + TI_TASK], %g4
1884 ++ LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
1885 ++
1886 ++ brnz,pn %l3, 1f
1887 ++ nop
1888 ++
1889 ++ call do_sparc64_fault
1890 ++ add %sp, PTREGS_OFF, %o0
1891 ++ ba,pt %xcc, rtrap
1892 ++ nop
1893 ++
1894 ++1: cmp %g3, 2
1895 ++ bne,pn %xcc, 2f
1896 ++ nop
1897 ++
1898 ++ sethi %hi(tlb_type), %g1
1899 ++ lduw [%g1 + %lo(tlb_type)], %g1
1900 ++ cmp %g1, 3
1901 ++ bne,pt %icc, 1f
1902 ++ add %sp, PTREGS_OFF, %o0
1903 ++ mov %l4, %o2
1904 ++ call sun4v_do_mna
1905 ++ mov %l5, %o1
1906 ++ ba,a,pt %xcc, rtrap
1907 ++1: mov %l4, %o1
1908 ++ mov %l5, %o2
1909 ++ call mem_address_unaligned
1910 ++ nop
1911 ++ ba,a,pt %xcc, rtrap
1912 ++
1913 ++2: sethi %hi(tlb_type), %g1
1914 ++ mov %l4, %o1
1915 ++ lduw [%g1 + %lo(tlb_type)], %g1
1916 ++ mov %l5, %o2
1917 ++ cmp %g1, 3
1918 ++ bne,pt %icc, 1f
1919 ++ add %sp, PTREGS_OFF, %o0
1920 ++ call sun4v_data_access_exception
1921 ++ nop
1922 ++ ba,a,pt %xcc, rtrap
1923 ++
1924 ++1: call spitfire_data_access_exception
1925 ++ nop
1926 ++ ba,a,pt %xcc, rtrap
1927 +diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
1928 +index b7f0f3f3a909..c731e8023d3e 100644
1929 +--- a/arch/sparc/kernel/utrap.S
1930 ++++ b/arch/sparc/kernel/utrap.S
1931 +@@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */
1932 + mov %l4, %o1
1933 + call bad_trap
1934 + add %sp, PTREGS_OFF, %o0
1935 +- ba,pt %xcc, rtrap
1936 +- nop
1937 ++ ba,a,pt %xcc, rtrap
1938 +
1939 + invoke_utrap:
1940 + sllx %g3, 3, %g3
1941 +diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
1942 +index f1a2f688b28a..4a41d412dd3d 100644
1943 +--- a/arch/sparc/kernel/vmlinux.lds.S
1944 ++++ b/arch/sparc/kernel/vmlinux.lds.S
1945 +@@ -33,6 +33,10 @@ ENTRY(_start)
1946 + jiffies = jiffies_64;
1947 + #endif
1948 +
1949 ++#ifdef CONFIG_SPARC64
1950 ++ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
1951 ++#endif
1952 ++
1953 + SECTIONS
1954 + {
1955 + #ifdef CONFIG_SPARC64
1956 +diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
1957 +index 1e67ce958369..855019a8590e 100644
1958 +--- a/arch/sparc/kernel/winfixup.S
1959 ++++ b/arch/sparc/kernel/winfixup.S
1960 +@@ -32,8 +32,7 @@ fill_fixup:
1961 + rd %pc, %g7
1962 + call do_sparc64_fault
1963 + add %sp, PTREGS_OFF, %o0
1964 +- ba,pt %xcc, rtrap
1965 +- nop
1966 ++ ba,a,pt %xcc, rtrap
1967 +
1968 + /* Be very careful about usage of the trap globals here.
1969 + * You cannot touch %g5 as that has the fault information.
1970 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1971 +index 559cb744112c..71c7ace855d7 100644
1972 +--- a/arch/sparc/mm/init_64.c
1973 ++++ b/arch/sparc/mm/init_64.c
1974 +@@ -1301,10 +1301,18 @@ static int __init numa_parse_sun4u(void)
1975 +
1976 + static int __init bootmem_init_numa(void)
1977 + {
1978 ++ int i, j;
1979 + int err = -1;
1980 +
1981 + numadbg("bootmem_init_numa()\n");
1982 +
1983 ++ /* Some sane defaults for numa latency values */
1984 ++ for (i = 0; i < MAX_NUMNODES; i++) {
1985 ++ for (j = 0; j < MAX_NUMNODES; j++)
1986 ++ numa_latency[i][j] = (i == j) ?
1987 ++ LOCAL_DISTANCE : REMOTE_DISTANCE;
1988 ++ }
1989 ++
1990 + if (numa_enabled) {
1991 + if (tlb_type == hypervisor)
1992 + err = numa_parse_mdesc();
1993 +@@ -2762,9 +2770,10 @@ void hugetlb_setup(struct pt_regs *regs)
1994 + * the Data-TLB for huge pages.
1995 + */
1996 + if (tlb_type == cheetah_plus) {
1997 ++ bool need_context_reload = false;
1998 + unsigned long ctx;
1999 +
2000 +- spin_lock(&ctx_alloc_lock);
2001 ++ spin_lock_irq(&ctx_alloc_lock);
2002 + ctx = mm->context.sparc64_ctx_val;
2003 + ctx &= ~CTX_PGSZ_MASK;
2004 + ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2005 +@@ -2783,9 +2792,12 @@ void hugetlb_setup(struct pt_regs *regs)
2006 + * also executing in this address space.
2007 + */
2008 + mm->context.sparc64_ctx_val = ctx;
2009 +- on_each_cpu(context_reload, mm, 0);
2010 ++ need_context_reload = true;
2011 + }
2012 +- spin_unlock(&ctx_alloc_lock);
2013 ++ spin_unlock_irq(&ctx_alloc_lock);
2014 ++
2015 ++ if (need_context_reload)
2016 ++ on_each_cpu(context_reload, mm, 0);
2017 + }
2018 + }
2019 + #endif
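Seeding numa_latency before the firmware parsers run means nodes the parser never mentions still get usable distances. The same default fill in isolation, assuming the kernel's conventional LOCAL_DISTANCE/REMOTE_DISTANCE values of 10 and 20 and a small node count picked for the sketch:

    #include <stdio.h>

    #define MAX_NUMNODES    4    /* illustrative; kernel value is config-dependent */
    #define LOCAL_DISTANCE  10
    #define REMOTE_DISTANCE 20

    static int numa_latency[MAX_NUMNODES][MAX_NUMNODES];

    int main(void)
    {
        /* Same shape as the patch: diagonal = local, everything else remote. */
        for (int i = 0; i < MAX_NUMNODES; i++)
            for (int j = 0; j < MAX_NUMNODES; j++)
                numa_latency[i][j] = (i == j) ? LOCAL_DISTANCE : REMOTE_DISTANCE;

        for (int i = 0; i < MAX_NUMNODES; i++, puts(""))
            for (int j = 0; j < MAX_NUMNODES; j++)
                printf("%3d", numa_latency[i][j]);
        return 0;
    }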
2020 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
2021 +index 1af51b1586d7..56270f0f05e6 100644
2022 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
2023 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
2024 +@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
2025 + {
2026 + __u64 msr_val;
2027 +
2028 ++ if (static_cpu_has(X86_FEATURE_HWP))
2029 ++ wrmsrl_safe(MSR_HWP_STATUS, 0);
2030 ++
2031 + rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
2032 +
2033 + /* Check for violation of core thermal thresholds*/
2034 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
2035 +index 1deffe6cc873..023c442c33bb 100644
2036 +--- a/arch/x86/kernel/kprobes/core.c
2037 ++++ b/arch/x86/kernel/kprobes/core.c
2038 +@@ -959,7 +959,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
2039 + * normal page fault.
2040 + */
2041 + regs->ip = (unsigned long)cur->addr;
2042 ++ /*
2043 ++ * Trap flag (TF) has been set here because this fault
2044 ++ * happened where the single stepping will be done.
2045 ++ * So clear it by resetting the current kprobe:
2046 ++ */
2047 ++ regs->flags &= ~X86_EFLAGS_TF;
2048 ++
2049 ++ /*
2050 ++ * If the TF flag was set before the kprobe hit,
2051 ++ * don't touch it:
2052 ++ */
2053 + regs->flags |= kcb->kprobe_old_flags;
2054 ++
2055 + if (kcb->kprobe_status == KPROBE_REENTER)
2056 + restore_previous_kprobe(kcb);
2057 + else
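The kprobes change is pure flag arithmetic: unconditionally drop the TF bit the single-step machinery set, then OR back the flags saved when the probe fired, so a TF the user had already set survives. The bit logic on its own, with only the TF constant taken from the x86 definition:

    #include <stdio.h>

    #define X86_EFLAGS_TF 0x100UL   /* trap flag, bit 8 */

    int main(void)
    {
        unsigned long kprobe_old_flags = X86_EFLAGS_TF; /* TF set before probe */
        unsigned long flags = X86_EFLAGS_TF | 0x2;      /* TF set by single-step */

        flags &= ~X86_EFLAGS_TF;    /* drop the single-step TF ...        */
        flags |= kprobe_old_flags;  /* ... but restore the user's own TF  */

        printf("TF preserved: %s\n", (flags & X86_EFLAGS_TF) ? "yes" : "no");
        return 0;
    }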
2058 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
2059 +index 1d08ad3582d0..090aa5c1d6b1 100644
2060 +--- a/arch/x86/kvm/cpuid.c
2061 ++++ b/arch/x86/kvm/cpuid.c
2062 +@@ -501,6 +501,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2063 + do_cpuid_1_ent(&entry[i], function, idx);
2064 + if (idx == 1) {
2065 + entry[i].eax &= kvm_supported_word10_x86_features;
2066 ++ cpuid_mask(&entry[i].eax, 10);
2067 + entry[i].ebx = 0;
2068 + if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
2069 + entry[i].ebx =
2070 +diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
2071 +index 637ab34ed632..ddb2244b06a1 100644
2072 +--- a/arch/x86/mm/kmmio.c
2073 ++++ b/arch/x86/mm/kmmio.c
2074 +@@ -33,7 +33,7 @@
2075 + struct kmmio_fault_page {
2076 + struct list_head list;
2077 + struct kmmio_fault_page *release_next;
2078 +- unsigned long page; /* location of the fault page */
2079 ++ unsigned long addr; /* the requested address */
2080 + pteval_t old_presence; /* page presence prior to arming */
2081 + bool armed;
2082 +
2083 +@@ -70,9 +70,16 @@ unsigned int kmmio_count;
2084 + static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
2085 + static LIST_HEAD(kmmio_probes);
2086 +
2087 +-static struct list_head *kmmio_page_list(unsigned long page)
2088 ++static struct list_head *kmmio_page_list(unsigned long addr)
2089 + {
2090 +- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
2091 ++ unsigned int l;
2092 ++ pte_t *pte = lookup_address(addr, &l);
2093 ++
2094 ++ if (!pte)
2095 ++ return NULL;
2096 ++ addr &= page_level_mask(l);
2097 ++
2098 ++ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
2099 + }
2100 +
2101 + /* Accessed per-cpu */
2102 +@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
2103 + }
2104 +
2105 + /* You must be holding RCU read lock. */
2106 +-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
2107 ++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
2108 + {
2109 + struct list_head *head;
2110 + struct kmmio_fault_page *f;
2111 ++ unsigned int l;
2112 ++ pte_t *pte = lookup_address(addr, &l);
2113 +
2114 +- page &= PAGE_MASK;
2115 +- head = kmmio_page_list(page);
2116 ++ if (!pte)
2117 ++ return NULL;
2118 ++ addr &= page_level_mask(l);
2119 ++ head = kmmio_page_list(addr);
2120 + list_for_each_entry_rcu(f, head, list) {
2121 +- if (f->page == page)
2122 ++ if (f->addr == addr)
2123 + return f;
2124 + }
2125 + return NULL;
2126 +@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
2127 + static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
2128 + {
2129 + unsigned int level;
2130 +- pte_t *pte = lookup_address(f->page, &level);
2131 ++ pte_t *pte = lookup_address(f->addr, &level);
2132 +
2133 + if (!pte) {
2134 +- pr_err("no pte for page 0x%08lx\n", f->page);
2135 ++ pr_err("no pte for addr 0x%08lx\n", f->addr);
2136 + return -1;
2137 + }
2138 +
2139 +@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
2140 + return -1;
2141 + }
2142 +
2143 +- __flush_tlb_one(f->page);
2144 ++ __flush_tlb_one(f->addr);
2145 + return 0;
2146 + }
2147 +
2148 +@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
2149 + int ret;
2150 + WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
2151 + if (f->armed) {
2152 +- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
2153 +- f->page, f->count, !!f->old_presence);
2154 ++ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
2155 ++ f->addr, f->count, !!f->old_presence);
2156 + }
2157 + ret = clear_page_presence(f, true);
2158 +- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
2159 +- f->page);
2160 ++ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
2161 ++ f->addr);
2162 + f->armed = true;
2163 + return ret;
2164 + }
2165 +@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
2166 + {
2167 + int ret = clear_page_presence(f, false);
2168 + WARN_ONCE(ret < 0,
2169 +- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
2170 ++ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
2171 + f->armed = false;
2172 + }
2173 +
2174 +@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
2175 + struct kmmio_context *ctx;
2176 + struct kmmio_fault_page *faultpage;
2177 + int ret = 0; /* default to fault not handled */
2178 ++ unsigned long page_base = addr;
2179 ++ unsigned int l;
2180 ++ pte_t *pte = lookup_address(addr, &l);
2181 ++ if (!pte)
2182 ++ return -EINVAL;
2183 ++ page_base &= page_level_mask(l);
2184 +
2185 + /*
2186 + * Preemption is now disabled to prevent process switch during
2187 +@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
2188 + preempt_disable();
2189 + rcu_read_lock();
2190 +
2191 +- faultpage = get_kmmio_fault_page(addr);
2192 ++ faultpage = get_kmmio_fault_page(page_base);
2193 + if (!faultpage) {
2194 + /*
2195 + * Either this page fault is not caused by kmmio, or
2196 +@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
2197 +
2198 + ctx = &get_cpu_var(kmmio_ctx);
2199 + if (ctx->active) {
2200 +- if (addr == ctx->addr) {
2201 ++ if (page_base == ctx->addr) {
2202 + /*
2203 + * A second fault on the same page means some other
2204 + * condition needs handling by do_page_fault(), the
2205 +@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
2206 + ctx->active++;
2207 +
2208 + ctx->fpage = faultpage;
2209 +- ctx->probe = get_kmmio_probe(addr);
2210 ++ ctx->probe = get_kmmio_probe(page_base);
2211 + ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
2212 +- ctx->addr = addr;
2213 ++ ctx->addr = page_base;
2214 +
2215 + if (ctx->probe && ctx->probe->pre_handler)
2216 + ctx->probe->pre_handler(ctx->probe, regs, addr);
2217 +@@ -354,12 +371,11 @@ out:
2218 + }
2219 +
2220 + /* You must be holding kmmio_lock. */
2221 +-static int add_kmmio_fault_page(unsigned long page)
2222 ++static int add_kmmio_fault_page(unsigned long addr)
2223 + {
2224 + struct kmmio_fault_page *f;
2225 +
2226 +- page &= PAGE_MASK;
2227 +- f = get_kmmio_fault_page(page);
2228 ++ f = get_kmmio_fault_page(addr);
2229 + if (f) {
2230 + if (!f->count)
2231 + arm_kmmio_fault_page(f);
2232 +@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
2233 + return -1;
2234 +
2235 + f->count = 1;
2236 +- f->page = page;
2237 ++ f->addr = addr;
2238 +
2239 + if (arm_kmmio_fault_page(f)) {
2240 + kfree(f);
2241 + return -1;
2242 + }
2243 +
2244 +- list_add_rcu(&f->list, kmmio_page_list(f->page));
2245 ++ list_add_rcu(&f->list, kmmio_page_list(f->addr));
2246 +
2247 + return 0;
2248 + }
2249 +
2250 + /* You must be holding kmmio_lock. */
2251 +-static void release_kmmio_fault_page(unsigned long page,
2252 ++static void release_kmmio_fault_page(unsigned long addr,
2253 + struct kmmio_fault_page **release_list)
2254 + {
2255 + struct kmmio_fault_page *f;
2256 +
2257 +- page &= PAGE_MASK;
2258 +- f = get_kmmio_fault_page(page);
2259 ++ f = get_kmmio_fault_page(addr);
2260 + if (!f)
2261 + return;
2262 +
2263 +@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
2264 + int ret = 0;
2265 + unsigned long size = 0;
2266 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
2267 ++ unsigned int l;
2268 ++ pte_t *pte;
2269 +
2270 + spin_lock_irqsave(&kmmio_lock, flags);
2271 + if (get_kmmio_probe(p->addr)) {
2272 + ret = -EEXIST;
2273 + goto out;
2274 + }
2275 ++
2276 ++ pte = lookup_address(p->addr, &l);
2277 ++ if (!pte) {
2278 ++ ret = -EINVAL;
2279 ++ goto out;
2280 ++ }
2281 ++
2282 + kmmio_count++;
2283 + list_add_rcu(&p->list, &kmmio_probes);
2284 + while (size < size_lim) {
2285 + if (add_kmmio_fault_page(p->addr + size))
2286 + pr_err("Unable to set page fault.\n");
2287 +- size += PAGE_SIZE;
2288 ++ size += page_level_size(l);
2289 + }
2290 + out:
2291 + spin_unlock_irqrestore(&kmmio_lock, flags);
2292 +@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
2293 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
2294 + struct kmmio_fault_page *release_list = NULL;
2295 + struct kmmio_delayed_release *drelease;
2296 ++ unsigned int l;
2297 ++ pte_t *pte;
2298 ++
2299 ++ pte = lookup_address(p->addr, &l);
2300 ++ if (!pte)
2301 ++ return;
2302 +
2303 + spin_lock_irqsave(&kmmio_lock, flags);
2304 + while (size < size_lim) {
2305 + release_kmmio_fault_page(p->addr + size, &release_list);
2306 +- size += PAGE_SIZE;
2307 ++ size += page_level_size(l);
2308 + }
2309 + list_del_rcu(&p->list);
2310 + kmmio_count--;
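Every kmmio hunk above applies one idea: ask the page tables what mapping size backs an address, then mask and step by that size rather than assuming 4 KiB pages. A sketch with invented per-level sizes standing in for lookup_address()/page_level_mask()/page_level_size():

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the x86 page-table level helpers. */
    static const uintptr_t level_size[] = { 4096, 2UL << 20, 1UL << 30 };

    static uintptr_t page_level_mask(int level) { return ~(level_size[level] - 1); }
    static uintptr_t page_level_size(int level) { return level_size[level]; }

    int main(void)
    {
        uintptr_t addr = 0x40212345;
        int level = 1;                 /* pretend lookup_address() said 2 MiB */

        uintptr_t base = addr & page_level_mask(level);
        printf("probe base 0x%lx, arming in 0x%lx-byte steps\n",
               (unsigned long)base, (unsigned long)page_level_size(level));
        return 0;
    }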
2311 +diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
2312 +index 0f6463b6692b..44c8447eb97e 100644
2313 +--- a/crypto/asymmetric_keys/pkcs7_trust.c
2314 ++++ b/crypto/asymmetric_keys/pkcs7_trust.c
2315 +@@ -174,6 +174,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
2316 + int cached_ret = -ENOKEY;
2317 + int ret;
2318 +
2319 ++ *_trusted = false;
2320 ++
2321 + for (p = pkcs7->certs; p; p = p->next)
2322 + p->seen = false;
2323 +
2324 +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
2325 +index 58f335ca2e75..568f2b942aac 100644
2326 +--- a/drivers/acpi/acpi_processor.c
2327 ++++ b/drivers/acpi/acpi_processor.c
2328 +@@ -475,6 +475,58 @@ static void acpi_processor_remove(struct acpi_device *device)
2329 + }
2330 + #endif /* CONFIG_ACPI_HOTPLUG_CPU */
2331 +
2332 ++#ifdef CONFIG_X86
2333 ++static bool acpi_hwp_native_thermal_lvt_set;
2334 ++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
2335 ++ u32 lvl,
2336 ++ void *context,
2337 ++ void **rv)
2338 ++{
2339 ++ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
2340 ++ u32 capbuf[2];
2341 ++ struct acpi_osc_context osc_context = {
2342 ++ .uuid_str = sb_uuid_str,
2343 ++ .rev = 1,
2344 ++ .cap.length = 8,
2345 ++ .cap.pointer = capbuf,
2346 ++ };
2347 ++
2348 ++ if (acpi_hwp_native_thermal_lvt_set)
2349 ++ return AE_CTRL_TERMINATE;
2350 ++
2351 ++ capbuf[0] = 0x0000;
2352 ++ capbuf[1] = 0x1000; /* set bit 12 */
2353 ++
2354 ++ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
2355 ++ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
2356 ++ u32 *capbuf_ret = osc_context.ret.pointer;
2357 ++
2358 ++ if (capbuf_ret[1] & 0x1000) {
2359 ++ acpi_handle_info(handle,
2360 ++ "_OSC native thermal LVT Acked\n");
2361 ++ acpi_hwp_native_thermal_lvt_set = true;
2362 ++ }
2363 ++ }
2364 ++ kfree(osc_context.ret.pointer);
2365 ++ }
2366 ++
2367 ++ return AE_OK;
2368 ++}
2369 ++
2370 ++void __init acpi_early_processor_osc(void)
2371 ++{
2372 ++ if (boot_cpu_has(X86_FEATURE_HWP)) {
2373 ++ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
2374 ++ ACPI_UINT32_MAX,
2375 ++ acpi_hwp_native_thermal_lvt_osc,
2376 ++ NULL, NULL, NULL);
2377 ++ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
2378 ++ acpi_hwp_native_thermal_lvt_osc,
2379 ++ NULL, NULL);
2380 ++ }
2381 ++}
2382 ++#endif
2383 ++
2384 + /*
2385 + * The following ACPI IDs are known to be suitable for representing as
2386 + * processor devices.
2387 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
2388 +index 513e7230e3d0..fd6053908d24 100644
2389 +--- a/drivers/acpi/bus.c
2390 ++++ b/drivers/acpi/bus.c
2391 +@@ -612,6 +612,9 @@ static int __init acpi_bus_init(void)
2392 + goto error1;
2393 + }
2394 +
2395 ++ /* Set capability bits for _OSC under processor scope */
2396 ++ acpi_early_processor_osc();
2397 ++
2398 + /*
2399 + * _OSC method may exist in module level code,
2400 + * so it must be run after ACPI_FULL_INITIALIZATION
2401 +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
2402 +index ba4a61e964be..7db7f9dd7c47 100644
2403 +--- a/drivers/acpi/internal.h
2404 ++++ b/drivers/acpi/internal.h
2405 +@@ -121,6 +121,12 @@ void acpi_early_processor_set_pdc(void);
2406 + static inline void acpi_early_processor_set_pdc(void) {}
2407 + #endif
2408 +
2409 ++#ifdef CONFIG_X86
2410 ++void acpi_early_processor_osc(void);
2411 ++#else
2412 ++static inline void acpi_early_processor_osc(void) {}
2413 ++#endif
2414 ++
2415 + /* --------------------------------------------------------------------------
2416 + Embedded Controller
2417 + -------------------------------------------------------------------------- */
2418 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
2419 +index cb0508af1459..5ab6fa9cfc2f 100644
2420 +--- a/drivers/ata/libata-eh.c
2421 ++++ b/drivers/ata/libata-eh.c
2422 +@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
2423 + ata_scsi_port_error_handler(host, ap);
2424 +
2425 + /* finish or retry handled scmd's and clean up */
2426 +- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
2427 ++ WARN_ON(!list_empty(&eh_work_q));
2428 +
2429 + DPRINTK("EXIT\n");
2430 + }
2431 +diff --git a/drivers/base/module.c b/drivers/base/module.c
2432 +index db930d3ee312..2a215780eda2 100644
2433 +--- a/drivers/base/module.c
2434 ++++ b/drivers/base/module.c
2435 +@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
2436 +
2437 + static void module_create_drivers_dir(struct module_kobject *mk)
2438 + {
2439 +- if (!mk || mk->drivers_dir)
2440 +- return;
2441 ++ static DEFINE_MUTEX(drivers_dir_mutex);
2442 +
2443 +- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
2444 ++ mutex_lock(&drivers_dir_mutex);
2445 ++ if (mk && !mk->drivers_dir)
2446 ++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
2447 ++ mutex_unlock(&drivers_dir_mutex);
2448 + }
2449 +
2450 + void module_add_driver(struct module *mod, struct device_driver *drv)
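The module.c bug is an unlocked check-then-create: two concurrent registrations can both observe drivers_dir == NULL and both create it. The fix does the test and the creation under one function-local mutex; the same pattern in portable C with pthreads, all names illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static char *drivers_dir;   /* stands in for mk->drivers_dir */

    static void create_drivers_dir(void)
    {
        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        /* Test and create under the lock, so only one thread creates. */
        pthread_mutex_lock(&lock);
        if (!drivers_dir)
            drivers_dir = strdup("drivers");
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        create_drivers_dir();
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("dir: %s\n", drivers_dir);
        return 0;
    }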
2451 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
2452 +index 2af8b29656af..eecaa02ec222 100644
2453 +--- a/drivers/block/mtip32xx/mtip32xx.c
2454 ++++ b/drivers/block/mtip32xx/mtip32xx.c
2455 +@@ -2946,9 +2946,15 @@ static int mtip_service_thread(void *data)
2456 + * is in progress nor error handling is active
2457 + */
2458 + wait_event_interruptible(port->svc_wait, (port->flags) &&
2459 +- !(port->flags & MTIP_PF_PAUSE_IO));
2460 ++ (port->flags & MTIP_PF_SVC_THD_WORK));
2461 +
2462 +- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2463 ++ if (kthread_should_stop() ||
2464 ++ test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2465 ++ goto st_out;
2466 ++
2467 ++ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2468 ++ &dd->dd_flag)))
2469 ++ goto st_out;
2470 +
2471 + if (kthread_should_stop() ||
2472 + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2473 +@@ -2962,6 +2968,8 @@ static int mtip_service_thread(void *data)
2474 + &dd->dd_flag)))
2475 + goto st_out;
2476 +
2477 ++ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2478 ++
2479 + restart_eh:
2480 + /* Demux bits: start with error handling */
2481 + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
2482 +@@ -3004,10 +3012,8 @@ restart_eh:
2483 + }
2484 +
2485 + if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
2486 +- if (mtip_ftl_rebuild_poll(dd) < 0)
2487 +- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
2488 +- &dd->dd_flag);
2489 +- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2490 ++ if (mtip_ftl_rebuild_poll(dd) == 0)
2491 ++ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2492 + }
2493 + }
2494 +
2495 +@@ -3887,7 +3893,6 @@ static int mtip_block_initialize(struct driver_data *dd)
2496 +
2497 + mtip_hw_debugfs_init(dd);
2498 +
2499 +-skip_create_disk:
2500 + memset(&dd->tags, 0, sizeof(dd->tags));
2501 + dd->tags.ops = &mtip_mq_ops;
2502 + dd->tags.nr_hw_queues = 1;
2503 +@@ -3917,6 +3922,7 @@ skip_create_disk:
2504 + dd->disk->queue = dd->queue;
2505 + dd->queue->queuedata = dd;
2506 +
2507 ++skip_create_disk:
2508 + /* Initialize the protocol layer. */
2509 + wait_for_rebuild = mtip_hw_get_identify(dd);
2510 + if (wait_for_rebuild < 0) {
2511 +@@ -4078,7 +4084,8 @@ static int mtip_block_remove(struct driver_data *dd)
2512 + dd->bdev = NULL;
2513 + }
2514 + if (dd->disk) {
2515 +- del_gendisk(dd->disk);
2516 ++ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
2517 ++ del_gendisk(dd->disk);
2518 + if (dd->disk->queue) {
2519 + blk_cleanup_queue(dd->queue);
2520 + blk_mq_free_tag_set(&dd->tags);
2521 +@@ -4119,7 +4126,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
2522 + dev_info(&dd->pdev->dev,
2523 + "Shutting down %s ...\n", dd->disk->disk_name);
2524 +
2525 +- del_gendisk(dd->disk);
2526 ++ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
2527 ++ del_gendisk(dd->disk);
2528 + if (dd->disk->queue) {
2529 + blk_cleanup_queue(dd->queue);
2530 + blk_mq_free_tag_set(&dd->tags);
2531 +diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
2532 +index 76695265dffb..578ad36c9913 100644
2533 +--- a/drivers/block/mtip32xx/mtip32xx.h
2534 ++++ b/drivers/block/mtip32xx/mtip32xx.h
2535 +@@ -145,6 +145,11 @@ enum {
2536 + MTIP_PF_SR_CLEANUP_BIT = 7,
2537 + MTIP_PF_SVC_THD_STOP_BIT = 8,
2538 +
2539 ++ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
2540 ++ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
2541 ++ (1 << MTIP_PF_REBUILD_BIT) |
2542 ++ (1 << MTIP_PF_SVC_THD_STOP_BIT)),
2543 ++
2544 + /* below are bit numbers in 'dd_flag' defined in driver_data */
2545 + MTIP_DDF_SEC_LOCK_BIT = 0,
2546 + MTIP_DDF_REMOVE_PENDING_BIT = 1,
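MTIP_PF_SVC_THD_WORK collects exactly the bit positions the service thread acts on, so the reworked wait condition only wakes for actionable flags instead of any flag at all. Composing and testing such a mask, sketched with made-up bit numbers:

    #include <stdio.h>

    enum {
        PF_EH_ACTIVE_BIT    = 1,   /* bit numbers, illustrative values */
        PF_ISSUE_CMDS_BIT   = 4,
        PF_REBUILD_BIT      = 5,
        PF_SVC_THD_STOP_BIT = 8,
    };

    #define PF_SVC_THD_WORK ((1u << PF_EH_ACTIVE_BIT)  | \
                             (1u << PF_ISSUE_CMDS_BIT) | \
                             (1u << PF_REBUILD_BIT)    | \
                             (1u << PF_SVC_THD_STOP_BIT))

    int main(void)
    {
        unsigned long flags = 1u << PF_REBUILD_BIT;

        /* Equivalent of: wait_event(..., flags & MTIP_PF_SVC_THD_WORK) */
        printf("should wake: %s\n", (flags & PF_SVC_THD_WORK) ? "yes" : "no");
        return 0;
    }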
2547 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2548 +index 39e5f7fae3ef..9911b2067286 100644
2549 +--- a/drivers/block/nbd.c
2550 ++++ b/drivers/block/nbd.c
2551 +@@ -557,8 +557,8 @@ static void do_nbd_request(struct request_queue *q)
2552 + req, req->cmd_type);
2553 +
2554 + if (unlikely(!nbd->sock)) {
2555 +- dev_err(disk_to_dev(nbd->disk),
2556 +- "Attempted send on closed socket\n");
2557 ++ dev_err_ratelimited(disk_to_dev(nbd->disk),
2558 ++ "Attempted send on closed socket\n");
2559 + req->errors++;
2560 + nbd_end_request(nbd, req);
2561 + spin_lock_irq(q->queue_lock);
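Rate-limiting the dead-socket error keeps a request flood from filling the log. A crude user-space analogue of dev_err_ratelimited(), with a fixed once-per-interval policy invented for the sketch:

    #include <stdio.h>
    #include <time.h>

    /* Log at most once per 'interval' seconds; silently drop the rest. */
    static void err_ratelimited(const char *msg, int interval)
    {
        static time_t last;
        time_t now = time(NULL);

        if (now - last >= interval) {
            last = now;
            fprintf(stderr, "%s\n", msg);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 1000; i++)
            err_ratelimited("Attempted send on closed socket", 5);
        return 0;
    }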
2562 +diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
2563 +index d48715b287e6..b0414702e61a 100644
2564 +--- a/drivers/block/paride/pd.c
2565 ++++ b/drivers/block/paride/pd.c
2566 +@@ -126,7 +126,7 @@
2567 + */
2568 + #include <linux/types.h>
2569 +
2570 +-static bool verbose = 0;
2571 ++static int verbose = 0;
2572 + static int major = PD_MAJOR;
2573 + static char *name = PD_NAME;
2574 + static int cluster = 64;
2575 +@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
2576 + static DEFINE_MUTEX(pd_mutex);
2577 + static DEFINE_SPINLOCK(pd_lock);
2578 +
2579 +-module_param(verbose, bool, 0);
2580 ++module_param(verbose, int, 0);
2581 + module_param(major, int, 0);
2582 + module_param(name, charp, 0);
2583 + module_param(cluster, int, 0);
2584 +diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
2585 +index 2596042eb987..ada45058e04d 100644
2586 +--- a/drivers/block/paride/pt.c
2587 ++++ b/drivers/block/paride/pt.c
2588 +@@ -117,7 +117,7 @@
2589 +
2590 + */
2591 +
2592 +-static bool verbose = 0;
2593 ++static int verbose = 0;
2594 + static int major = PT_MAJOR;
2595 + static char *name = PT_NAME;
2596 + static int disable = 0;
2597 +@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
2598 +
2599 + #include <asm/uaccess.h>
2600 +
2601 +-module_param(verbose, bool, 0);
2602 ++module_param(verbose, int, 0);
2603 + module_param(major, int, 0);
2604 + module_param(name, charp, 0);
2605 + module_param_array(drive0, int, NULL, 0);
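The paride changes restore the type these parameters need: verbose is used as a numeric level rather than a simple switch, and a C99 bool collapses every nonzero assignment to 1, so levels above 1 were unreachable while the variable and its module_param() type also disagreed. The collapse, shown in portable C:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool bverbose = 2;   /* _Bool collapses any nonzero value to 1 */
        int  iverbose = 2;

        printf("sizeof(bool)=%zu sizeof(int)=%zu\n", sizeof(bool), sizeof(int));
        printf("bool verbose=%d, int verbose=%d\n", bverbose, iverbose);
        /* A ">1 means extra-chatty" test only works with the int: */
        printf("bool >1? %d   int >1? %d\n", bverbose > 1, iverbose > 1);
        return 0;
    }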
2606 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
2607 +index bf75f6361773..4bc508c14900 100644
2608 +--- a/drivers/char/ipmi/ipmi_msghandler.c
2609 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
2610 +@@ -3813,6 +3813,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
2611 + while (!list_empty(&intf->waiting_rcv_msgs)) {
2612 + smi_msg = list_entry(intf->waiting_rcv_msgs.next,
2613 + struct ipmi_smi_msg, link);
2614 ++ list_del(&smi_msg->link);
2615 + if (!run_to_completion)
2616 + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
2617 + flags);
2618 +@@ -3822,11 +3823,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
2619 + if (rv > 0) {
2620 + /*
2621 + * To preserve message order, quit if we
2622 +- * can't handle a message.
2623 ++ * can't handle a message. Add the message
2624 ++ * back at the head; this is safe because this
2625 ++ * tasklet is the only thing that pulls
2626 ++ * messages off the list.
2627 + */
2628 ++ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
2629 + break;
2630 + } else {
2631 +- list_del(&smi_msg->link);
2632 + if (rv == 0)
2633 + /* Message handled */
2634 + ipmi_free_smi_msg(smi_msg);
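The message-handling fix pops the message off the queue before delivering it, and only on a deferred delivery re-adds it at the head and stops, which preserves ordering because this tasklet is the queue's only consumer. The same drain loop over a minimal singly linked list, with a toy handler standing in for handle_one_recv_msg():

    #include <stdio.h>

    struct msg { int id; struct msg *next; };

    /* Returns nonzero when the consumer can't take the message yet. */
    static int handle(struct msg *m) { return m->id == 3; }

    int main(void)
    {
        struct msg c = {3, NULL}, b = {2, &c}, a = {1, &b};
        struct msg *head = &a;

        while (head) {
            struct msg *m = head;
            head = m->next;          /* list_del() */
            if (handle(m)) {
                m->next = head;      /* put it back at the head ... */
                head = m;
                break;               /* ... and quit to preserve order */
            }
            printf("handled %d\n", m->id);
        }
        printf("left at head: %d\n", head ? head->id : -1);
        return 0;
    }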
2635 +diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
2636 +index 1082d4bb016a..591629cc32d5 100644
2637 +--- a/drivers/char/tpm/tpm-chip.c
2638 ++++ b/drivers/char/tpm/tpm-chip.c
2639 +@@ -133,6 +133,8 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
2640 + chip->cdev.owner = chip->pdev->driver->owner;
2641 + chip->cdev.kobj.parent = &chip->dev.kobj;
2642 +
2643 ++ devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
2644 ++
2645 + return chip;
2646 + }
2647 + EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
2648 +@@ -168,7 +170,7 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
2649 + static void tpm_dev_del_device(struct tpm_chip *chip)
2650 + {
2651 + cdev_del(&chip->cdev);
2652 +- device_unregister(&chip->dev);
2653 ++ device_del(&chip->dev);
2654 + }
2655 +
2656 + static int tpm1_chip_register(struct tpm_chip *chip)
2657 +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
2658 +index 5d75bffab141..0f721998b2d4 100644
2659 +--- a/drivers/clk/qcom/gcc-msm8916.c
2660 ++++ b/drivers/clk/qcom/gcc-msm8916.c
2661 +@@ -1961,6 +1961,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
2662 + "pcnoc_bfdcd_clk_src",
2663 + },
2664 + .num_parents = 1,
2665 ++ .flags = CLK_SET_RATE_PARENT,
2666 + .ops = &clk_branch2_ops,
2667 + },
2668 + },
2669 +@@ -1996,6 +1997,7 @@ static struct clk_branch gcc_crypto_clk = {
2670 + "crypto_clk_src",
2671 + },
2672 + .num_parents = 1,
2673 ++ .flags = CLK_SET_RATE_PARENT,
2674 + .ops = &clk_branch2_ops,
2675 + },
2676 + },
2677 +diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
2678 +index eb6a4f9fa107..476d2090c96b 100644
2679 +--- a/drivers/clk/qcom/gcc-msm8960.c
2680 ++++ b/drivers/clk/qcom/gcc-msm8960.c
2681 +@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
2682 + },
2683 + .freq_tbl = clk_tbl_ce3,
2684 + .clkr = {
2685 +- .enable_reg = 0x2c08,
2686 ++ .enable_reg = 0x36c0,
2687 + .enable_mask = BIT(7),
2688 + .hw.init = &(struct clk_init_data){
2689 + .name = "ce3_src",
2690 +@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
2691 + .halt_reg = 0x2fdc,
2692 + .halt_bit = 5,
2693 + .clkr = {
2694 +- .enable_reg = 0x36c4,
2695 ++ .enable_reg = 0x36cc,
2696 + .enable_mask = BIT(4),
2697 + .hw.init = &(struct clk_init_data){
2698 + .name = "ce3_core_clk",
2699 +diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
2700 +index c842e3b60f21..2c111c2cdccc 100644
2701 +--- a/drivers/clk/rockchip/clk-mmc-phase.c
2702 ++++ b/drivers/clk/rockchip/clk-mmc-phase.c
2703 +@@ -131,6 +131,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
2704 + if (!mmc_clock)
2705 + return NULL;
2706 +
2707 ++ init.flags = 0;
2708 + init.num_parents = num_parents;
2709 + init.parent_names = parent_names;
2710 + init.ops = &rockchip_mmc_clk_ops;
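The one-line rockchip fix exists because clk_init_data is a stack variable here: every field not explicitly assigned holds stack garbage, and the mmc clock was registering with random flags. A designated initializer zeroes the rest of the struct for free; sketched with an invented subset of the struct:

    #include <stdio.h>

    struct clk_init_data {            /* invented subset for the sketch */
        const char *name;
        unsigned long flags;
        int num_parents;
    };

    int main(void)
    {
        /* Unlisted members are zeroed, so 'flags' can't leak stack junk. */
        struct clk_init_data init = {
            .name = "sdmmc_sample",
            .num_parents = 1,
        };

        printf("%s flags=%lu parents=%d\n",
               init.name, init.flags, init.num_parents);
        return 0;
    }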
2711 +diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
2712 +index edb5d489ae61..3b9de3264534 100644
2713 +--- a/drivers/clk/rockchip/clk.c
2714 ++++ b/drivers/clk/rockchip/clk.c
2715 +@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2716 + if (gate_offset >= 0) {
2717 + gate = kzalloc(sizeof(*gate), GFP_KERNEL);
2718 + if (!gate)
2719 +- return ERR_PTR(-ENOMEM);
2720 ++ goto err_gate;
2721 +
2722 + gate->flags = gate_flags;
2723 + gate->reg = base + gate_offset;
2724 +@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2725 + if (div_width > 0) {
2726 + div = kzalloc(sizeof(*div), GFP_KERNEL);
2727 + if (!div)
2728 +- return ERR_PTR(-ENOMEM);
2729 ++ goto err_div;
2730 +
2731 + div->flags = div_flags;
2732 + div->reg = base + muxdiv_offset;
2733 +@@ -100,6 +100,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2734 + flags);
2735 +
2736 + return clk;
2737 ++err_div:
2738 ++ kfree(gate);
2739 ++err_gate:
2740 ++ kfree(mux);
2741 ++ return ERR_PTR(-ENOMEM);
2742 + }
2743 +
2744 + static struct clk *rockchip_clk_register_frac_branch(const char *name,
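rockchip_clk_register_branch() used to return on a later allocation failure without freeing the earlier allocations. The replacement is the usual reverse-order goto unwind, sketched with plain malloc/free and labels mirroring the patch (free(NULL) being a no-op is what makes shared labels safe):

    #include <stdio.h>
    #include <stdlib.h>

    static void *register_branch(void)
    {
        void *mux = NULL, *gate = NULL, *div = NULL;

        mux = malloc(32);
        if (!mux)
            goto err;
        gate = malloc(32);
        if (!gate)
            goto err_gate;
        div = malloc(32);
        if (!div)
            goto err_div;
        /* In the real code gate and div are handed to the composite clk;
         * the sketch only demonstrates the error paths. */
        return mux;

    err_div:
        free(gate);
    err_gate:
        free(mux);
    err:
        return NULL;
    }

    int main(void)
    {
        void *clk = register_branch();
        printf("register_branch: %s\n", clk ? "ok" : "failed");
        free(clk);
        return 0;
    }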
2745 +diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
2746 +index 5122ef25f595..e63c3ef9b5ec 100644
2747 +--- a/drivers/clk/versatile/clk-sp810.c
2748 ++++ b/drivers/clk/versatile/clk-sp810.c
2749 +@@ -141,6 +141,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
2750 + const char *parent_names[2];
2751 + char name[12];
2752 + struct clk_init_data init;
2753 ++ static int instance;
2754 + int i;
2755 +
2756 + if (!sp810) {
2757 +@@ -172,7 +173,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
2758 + init.num_parents = ARRAY_SIZE(parent_names);
2759 +
2760 + for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
2761 +- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
2762 ++ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
2763 +
2764 + sp810->timerclken[i].sp810 = sp810;
2765 + sp810->timerclken[i].channel = i;
2766 +@@ -184,5 +185,6 @@ void __init clk_sp810_of_setup(struct device_node *node)
2767 + }
2768 +
2769 + of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
2770 ++ instance++;
2771 + }
2772 + CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
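Because clocks are looked up by globally unique name and clk_sp810_of_setup() can run once per SP810 instance, the old "timerclken%d" names collided on multi-instance systems. A static counter folded into the name is the whole fix; the naming scheme in isolation:

    #include <stdio.h>

    /* Called once per device instance; names must be globally unique. */
    static void setup_instance(void)
    {
        static int instance;
        char name[12];

        for (int i = 0; i < 2; i++) {
            snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
            printf("registered %s\n", name);
        }
        instance++;
    }

    int main(void)
    {
        setup_instance();   /* sp810_0_0, sp810_0_1 */
        setup_instance();   /* sp810_1_0, sp810_1_1 */
        return 0;
    }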
2773 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
2774 +index 5f5f360628fc..c5e6222a6ca2 100644
2775 +--- a/drivers/crypto/ux500/hash/hash_core.c
2776 ++++ b/drivers/crypto/ux500/hash/hash_core.c
2777 +@@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data,
2778 + &device_data->state);
2779 + memmove(req_ctx->state.buffer,
2780 + device_data->state.buffer,
2781 +- HASH_BLOCK_SIZE / sizeof(u32));
2782 ++ HASH_BLOCK_SIZE);
2783 + if (ret) {
2784 + dev_err(device_data->dev,
2785 + "%s: hash_resume_state() failed!\n",
2786 +@@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data,
2787 +
2788 + memmove(device_data->state.buffer,
2789 + req_ctx->state.buffer,
2790 +- HASH_BLOCK_SIZE / sizeof(u32));
2791 ++ HASH_BLOCK_SIZE);
2792 + if (ret) {
2793 + dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
2794 + __func__);
2795 +diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
2796 +index c8e7f653e5d3..f508cea02039 100644
2797 +--- a/drivers/crypto/vmx/aes_cbc.c
2798 ++++ b/drivers/crypto/vmx/aes_cbc.c
2799 +@@ -167,7 +167,7 @@ struct crypto_alg p8_aes_cbc_alg = {
2800 + .cra_name = "cbc(aes)",
2801 + .cra_driver_name = "p8_aes_cbc",
2802 + .cra_module = THIS_MODULE,
2803 +- .cra_priority = 1000,
2804 ++ .cra_priority = 2000,
2805 + .cra_type = &crypto_blkcipher_type,
2806 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
2807 + .cra_alignmask = 0,
2808 +diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
2809 +index 266e708d63df..d8fa3b4ec17f 100644
2810 +--- a/drivers/crypto/vmx/aes_ctr.c
2811 ++++ b/drivers/crypto/vmx/aes_ctr.c
2812 +@@ -151,7 +151,7 @@ struct crypto_alg p8_aes_ctr_alg = {
2813 + .cra_name = "ctr(aes)",
2814 + .cra_driver_name = "p8_aes_ctr",
2815 + .cra_module = THIS_MODULE,
2816 +- .cra_priority = 1000,
2817 ++ .cra_priority = 2000,
2818 + .cra_type = &crypto_blkcipher_type,
2819 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
2820 + .cra_alignmask = 0,
2821 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
2822 +index ffa809f30b19..c5e6c82516ce 100644
2823 +--- a/drivers/dma/at_xdmac.c
2824 ++++ b/drivers/dma/at_xdmac.c
2825 +@@ -238,7 +238,7 @@ struct at_xdmac_lld {
2826 + u32 mbr_cfg; /* Configuration Register */
2827 + };
2828 +
2829 +-
2830 ++/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
2831 + struct at_xdmac_desc {
2832 + struct at_xdmac_lld lld;
2833 + enum dma_transfer_direction direction;
2834 +@@ -249,7 +249,7 @@ struct at_xdmac_desc {
2835 + unsigned int xfer_size;
2836 + struct list_head descs_list;
2837 + struct list_head xfer_node;
2838 +-};
2839 ++} __aligned(sizeof(u64));
2840 +
2841 + static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
2842 + {
2843 +@@ -930,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2844 + u32 cur_nda, check_nda, cur_ubc, mask, value;
2845 + u8 dwidth = 0;
2846 + unsigned long flags;
2847 ++ bool initd;
2848 +
2849 + ret = dma_cookie_status(chan, cookie, txstate);
2850 + if (ret == DMA_COMPLETE)
2851 +@@ -954,7 +955,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2852 + residue = desc->xfer_size;
2853 + /*
2854 + * Flush FIFO: only relevant when the transfer is source peripheral
2855 +- * synchronized.
2856 ++ * synchronized. Flush is needed before reading CUBC because data in
2857 ++ * the FIFO are not reported by CUBC. Reporting a residue of the
2858 ++ * transfer length while we have data in the FIFO can cause issues.
2859 ++ * Use case: the Atmel USART has a timeout which fires when characters
2860 ++ * have been received but no new character arrives for a while. On
2861 ++ * timeout, it requests the residue. If the data are in the DMA FIFO,
2862 ++ * we will return a residue equal to the transfer length, which means
2863 ++ * no data were received. If an application is waiting for these data,
2864 ++ * it will hang since we won't get another USART timeout without
2865 ++ * receiving new data.
2866 + */
2867 + mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
2868 + value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
2869 +@@ -965,34 +975,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2870 + }
2871 +
2872 + /*
2873 +- * When processing the residue, we need to read two registers but we
2874 +- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
2875 +- * we stand in the descriptor list and AT_XDMAC_CUBC is used
2876 +- * to know how many data are remaining for the current descriptor.
2877 +- * Since the dma channel is not paused to not loose data, between the
2878 +- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
2879 +- * descriptor.
2880 +- * For that reason, after reading AT_XDMAC_CUBC, we check if we are
2881 +- * still using the same descriptor by reading a second time
2882 +- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
2883 +- * read again AT_XDMAC_CUBC.
2884 ++ * The easiest way to compute the residue would be to pause the DMA,
2885 ++ * but doing so can lead to losing data, as some devices don't
2886 ++ * have a FIFO.
2887 ++ * We need to read several registers because:
2888 ++ * - DMA is running therefore a descriptor change is possible while
2889 ++ * reading these registers
2890 ++ * - When the block transfer is done, the value of the CUBC register
2891 ++ * is set to its initial value until the fetch of the next descriptor.
2892 ++ * This value will corrupt the residue calculation so we have to skip
2893 ++ * it.
2894 ++ *
2895 ++ * INITD -------- ------------
2896 ++ * |____________________|
2897 ++ * _______________________ _______________
2898 ++ * NDA @desc2 \/ @desc3
2899 ++ * _______________________/\_______________
2900 ++ * __________ ___________ _______________
2901 ++ * CUBC 0 \/ MAX desc1 \/ MAX desc2
2902 ++ * __________/\___________/\_______________
2903 ++ *
2904 ++ * Since descriptors are aligned on 64 bits, we can assume that
2905 ++ * the update of NDA and CUBC is atomic.
2906 + * Memory barriers are used to ensure the read order of the registers.
2907 +- * A max number of retries is set because unlikely it can never ends if
2908 +- * we are transferring a lot of data with small buffers.
2909 ++ * A max number of retries is set because unlikely it could never ends.
2910 + */
2911 +- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
2912 +- rmb();
2913 +- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
2914 + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
2915 +- rmb();
2916 + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
2917 +-
2918 +- if (likely(cur_nda == check_nda))
2919 +- break;
2920 +-
2921 +- cur_nda = check_nda;
2922 ++ rmb();
2923 ++ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
2924 + rmb();
2925 + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
2926 ++ rmb();
2927 ++ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
2928 ++ rmb();
2929 ++
2930 ++ if ((check_nda == cur_nda) && initd)
2931 ++ break;
2932 + }
2933 +
2934 + if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
2935 +@@ -1001,6 +1020,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2936 + }
2937 +
2938 + /*
2939 ++ * Flush FIFO: only relevant when the transfer is source peripheral
2940 ++ * synchronized. Another flush is needed here because CUBC is updated
2941 ++ * when the controller sends the data write command. It can lead to
2942 ++ * reporting data that are not yet written to memory or the device. The
2943 ++ * FIFO flush ensures that the data have really been written.
2944 ++ */
2945 ++ if ((desc->lld.mbr_cfg & mask) == value) {
2946 ++ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
2947 ++ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
2948 ++ cpu_relax();
2949 ++ }
2950 ++
2951 ++ /*
2952 + * Remove size of all microblocks already transferred and the current
2953 + * one. Then add the remaining size to transfer of the current
2954 + * microblock.
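The new residue loop is a lock-free read-until-stable snapshot: read CNDA, then INITD, then CUBC, then CNDA again, and trust the values only when CNDA is unchanged and INITD shows no descriptor fetch in flight. The retry structure, with the hardware registers replaced by ordinary variables and the barriers reduced to compiler/CPU fences, purely as a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RETRIES 5

    /* Stand-ins for the CNDA/CC/CUBC registers; in a real system another
     * agent (the DMA controller) updates these concurrently. */
    static volatile unsigned long reg_nda = 0x1000;
    static volatile bool reg_initd = true;
    static volatile unsigned long reg_ubc = 512;

    #define rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

    int main(void)
    {
        unsigned long check_nda, cur_nda, cur_ubc;
        bool initd;
        int retry;

        for (retry = 0; retry < MAX_RETRIES; retry++) {
            check_nda = reg_nda;
            rmb();
            initd = reg_initd;
            rmb();
            cur_ubc = reg_ubc;
            rmb();
            cur_nda = reg_nda;
            rmb();
            /* Snapshot is coherent only if no descriptor switch happened. */
            if (check_nda == cur_nda && initd)
                break;
        }
        if (retry == MAX_RETRIES)
            return 1;
        printf("stable: nda=0x%lx ubc=%lu\n", cur_nda, cur_ubc);
        return 0;
    }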
2955 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
2956 +index 63226e9036a1..1f2c86d81176 100644
2957 +--- a/drivers/firmware/efi/efi.c
2958 ++++ b/drivers/firmware/efi/efi.c
2959 +@@ -170,6 +170,7 @@ static int generic_ops_register(void)
2960 + {
2961 + generic_ops.get_variable = efi.get_variable;
2962 + generic_ops.set_variable = efi.set_variable;
2963 ++ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
2964 + generic_ops.get_next_variable = efi.get_next_variable;
2965 + generic_ops.query_variable_store = efi_query_variable_store;
2966 +
2967 +diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
2968 +index be9fa8220499..767d0eaabe97 100644
2969 +--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
2970 ++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
2971 +@@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
2972 +
2973 + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
2974 + factor_reg);
2975 ++ } else {
2976 ++ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
2977 + }
2978 + }
2979 +
2980 +diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
2981 +index 71dcbc64ae98..7f0356ea0bbf 100644
2982 +--- a/drivers/gpu/drm/drm_dp_helper.c
2983 ++++ b/drivers/gpu/drm/drm_dp_helper.c
2984 +@@ -432,7 +432,7 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
2985 + */
2986 + static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
2987 + {
2988 +- unsigned int retry;
2989 ++ unsigned int retry, defer_i2c;
2990 + int ret;
2991 +
2992 + /*
2993 +@@ -440,7 +440,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
2994 + * is required to retry at least seven times upon receiving AUX_DEFER
2995 + * before giving up the AUX transaction.
2996 + */
2997 +- for (retry = 0; retry < 7; retry++) {
2998 ++ for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) {
2999 + mutex_lock(&aux->hw_mutex);
3000 + ret = aux->transfer(aux, msg);
3001 + mutex_unlock(&aux->hw_mutex);
3002 +@@ -499,7 +499,13 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
3003 +
3004 + case DP_AUX_I2C_REPLY_DEFER:
3005 + DRM_DEBUG_KMS("I2C defer\n");
3006 ++ /* DP Compliance Test 4.2.2.5 Requirement:
3007 ++ * Must have at least 7 retries for I2C defers on the
3008 ++ * transaction to pass this test
3009 ++ */
3010 + aux->i2c_defer_count++;
3011 ++ if (defer_i2c < 7)
3012 ++ defer_i2c++;
3013 + usleep_range(400, 500);
3014 + continue;
3015 +
3016 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
3017 +index 10b8839cbd0c..52dea773bb1b 100644
3018 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
3019 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
3020 +@@ -2862,11 +2862,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3021 + drm_dp_port_teardown_pdt(port, port->pdt);
3022 +
3023 + if (!port->input && port->vcpi.vcpi > 0) {
3024 +- if (mgr->mst_state) {
3025 +- drm_dp_mst_reset_vcpi_slots(mgr, port);
3026 +- drm_dp_update_payload_part1(mgr);
3027 +- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3028 +- }
3029 ++ drm_dp_mst_reset_vcpi_slots(mgr, port);
3030 ++ drm_dp_update_payload_part1(mgr);
3031 ++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3032 + }
3033 +
3034 + kref_put(&port->kref, drm_dp_free_mst_port);
3035 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3036 +index 56323732c748..5250596a612e 100644
3037 +--- a/drivers/gpu/drm/i915/intel_display.c
3038 ++++ b/drivers/gpu/drm/i915/intel_display.c
3039 +@@ -7129,12 +7129,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
3040 + {
3041 + struct drm_i915_private *dev_priv = dev->dev_private;
3042 + struct intel_encoder *encoder;
3043 ++ int i;
3044 + u32 val, final;
3045 + bool has_lvds = false;
3046 + bool has_cpu_edp = false;
3047 + bool has_panel = false;
3048 + bool has_ck505 = false;
3049 + bool can_ssc = false;
3050 ++ bool using_ssc_source = false;
3051 +
3052 + /* We need to take the global config into account */
3053 + for_each_intel_encoder(dev, encoder) {
3054 +@@ -7161,8 +7163,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
3055 + can_ssc = true;
3056 + }
3057 +
3058 +- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
3059 +- has_panel, has_lvds, has_ck505);
3060 ++ /* Check if any DPLLs are using the SSC source */
3061 ++ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3062 ++ u32 temp = I915_READ(PCH_DPLL(i));
3063 ++
3064 ++ if (!(temp & DPLL_VCO_ENABLE))
3065 ++ continue;
3066 ++
3067 ++ if ((temp & PLL_REF_INPUT_MASK) ==
3068 ++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
3069 ++ using_ssc_source = true;
3070 ++ break;
3071 ++ }
3072 ++ }
3073 ++
3074 ++ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
3075 ++ has_panel, has_lvds, has_ck505, using_ssc_source);
3076 +
3077 + /* Ironlake: try to setup display ref clock before DPLL
3078 + * enabling. This is only under driver's control after
3079 +@@ -7199,9 +7215,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
3080 + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
3081 + } else
3082 + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
3083 +- } else {
3084 +- final |= DREF_SSC_SOURCE_DISABLE;
3085 +- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
3086 ++ } else if (using_ssc_source) {
3087 ++ final |= DREF_SSC_SOURCE_ENABLE;
3088 ++ final |= DREF_SSC1_ENABLE;
3089 + }
3090 +
3091 + if (final == val)
3092 +@@ -7247,7 +7263,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
3093 + POSTING_READ(PCH_DREF_CONTROL);
3094 + udelay(200);
3095 + } else {
3096 +- DRM_DEBUG_KMS("Disabling SSC entirely\n");
3097 ++ DRM_DEBUG_KMS("Disabling CPU source output\n");
3098 +
3099 + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
3100 +
3101 +@@ -7258,16 +7274,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
3102 + POSTING_READ(PCH_DREF_CONTROL);
3103 + udelay(200);
3104 +
3105 +- /* Turn off the SSC source */
3106 +- val &= ~DREF_SSC_SOURCE_MASK;
3107 +- val |= DREF_SSC_SOURCE_DISABLE;
3108 ++ if (!using_ssc_source) {
3109 ++ DRM_DEBUG_KMS("Disabling SSC source\n");
3110 +
3111 +- /* Turn off SSC1 */
3112 +- val &= ~DREF_SSC1_ENABLE;
3113 ++ /* Turn off the SSC source */
3114 ++ val &= ~DREF_SSC_SOURCE_MASK;
3115 ++ val |= DREF_SSC_SOURCE_DISABLE;
3116 +
3117 +- I915_WRITE(PCH_DREF_CONTROL, val);
3118 +- POSTING_READ(PCH_DREF_CONTROL);
3119 +- udelay(200);
3120 ++ /* Turn off SSC1 */
3121 ++ val &= ~DREF_SSC1_ENABLE;
3122 ++
3123 ++ I915_WRITE(PCH_DREF_CONTROL, val);
3124 ++ POSTING_READ(PCH_DREF_CONTROL);
3125 ++ udelay(200);
3126 ++ }
3127 + }
3128 +
3129 + BUG_ON(val != final);
3130 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
3131 +index b7e20dee64c4..09844b5fe250 100644
3132 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
3133 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
3134 +@@ -1814,6 +1814,17 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
3135 + return 0;
3136 + }
3137 +
3138 ++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
3139 ++{
3140 ++ struct drm_i915_private *dev_priv = to_i915(ring->dev);
3141 ++
3142 ++ if (!dev_priv->status_page_dmah)
3143 ++ return;
3144 ++
3145 ++ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
3146 ++ ring->status_page.page_addr = NULL;
3147 ++}
3148 ++
3149 + static void cleanup_status_page(struct intel_engine_cs *ring)
3150 + {
3151 + struct drm_i915_gem_object *obj;
3152 +@@ -1830,9 +1841,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
3153 +
3154 + static int init_status_page(struct intel_engine_cs *ring)
3155 + {
3156 +- struct drm_i915_gem_object *obj;
3157 ++ struct drm_i915_gem_object *obj = ring->status_page.obj;
3158 +
3159 +- if ((obj = ring->status_page.obj) == NULL) {
3160 ++ if (obj == NULL) {
3161 + unsigned flags;
3162 + int ret;
3163 +
3164 +@@ -1985,7 +1996,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
3165 + if (ret)
3166 + goto error;
3167 + } else {
3168 +- BUG_ON(ring->id != RCS);
3169 ++ WARN_ON(ring->id != RCS);
3170 + ret = init_phys_status_page(ring);
3171 + if (ret)
3172 + goto error;
3173 +@@ -2049,7 +2060,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
3174 + if (ring->cleanup)
3175 + ring->cleanup(ring);
3176 +
3177 +- cleanup_status_page(ring);
3178 ++ if (I915_NEED_GFX_HWS(ring->dev)) {
3179 ++ cleanup_status_page(ring);
3180 ++ } else {
3181 ++ WARN_ON(ring->id != RCS);
3182 ++ cleanup_phys_status_page(ring);
3183 ++ }
3184 +
3185 + i915_cmd_parser_fini_ring(ring);
3186 +
3187 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
3188 +index cf43f77be254..bb29f1e482d7 100644
3189 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
3190 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
3191 +@@ -572,7 +572,8 @@ nouveau_fbcon_init(struct drm_device *dev)
3192 + if (ret)
3193 + goto fini;
3194 +
3195 +- fbcon->helper.fbdev->pixmap.buf_align = 4;
3196 ++ if (fbcon->helper.fbdev)
3197 ++ fbcon->helper.fbdev->pixmap.buf_align = 4;
3198 + return 0;
3199 +
3200 + fini:
3201 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
3202 +index dac78ad24b31..79bab6fd76bb 100644
3203 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
3204 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
3205 +@@ -1739,6 +1739,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
3206 + static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
3207 + {
3208 + struct drm_device *dev = crtc->dev;
3209 ++ struct radeon_device *rdev = dev->dev_private;
3210 + struct drm_crtc *test_crtc;
3211 + struct radeon_crtc *test_radeon_crtc;
3212 +
3213 +@@ -1748,6 +1749,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
3214 + test_radeon_crtc = to_radeon_crtc(test_crtc);
3215 + if (test_radeon_crtc->encoder &&
3216 + ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
3217 ++ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
3218 ++ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
3219 ++ test_radeon_crtc->pll_id == ATOM_PPLL2)
3220 ++ continue;
3221 + /* for DP use the same PLL for all */
3222 + if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
3223 + return test_radeon_crtc->pll_id;
3224 +@@ -1769,6 +1774,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
3225 + {
3226 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
3227 + struct drm_device *dev = crtc->dev;
3228 ++ struct radeon_device *rdev = dev->dev_private;
3229 + struct drm_crtc *test_crtc;
3230 + struct radeon_crtc *test_radeon_crtc;
3231 + u32 adjusted_clock, test_adjusted_clock;
3232 +@@ -1784,6 +1790,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
3233 + test_radeon_crtc = to_radeon_crtc(test_crtc);
3234 + if (test_radeon_crtc->encoder &&
3235 + !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
3236 ++ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
3237 ++ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
3238 ++ test_radeon_crtc->pll_id == ATOM_PPLL2)
3239 ++ continue;
3240 + /* check if we are already driving this connector with another crtc */
3241 + if (test_radeon_crtc->connector == radeon_crtc->connector) {
3242 + /* if we are, return that pll */
3243 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
3244 +index 604c44d88e7a..83b3eb2e444a 100644
3245 +--- a/drivers/gpu/drm/radeon/radeon_device.c
3246 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
3247 +@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
3248 + /*
3249 + * GPU helpers function.
3250 + */
3251 ++
3252 ++/**
3253 ++ * radeon_device_is_virtual - check if we are running in a virtual environment
3254 ++ *
3255 ++ * Check if the asic has been passed through to a VM (all asics).
3256 ++ * Used at driver startup.
3257 ++ * Returns true if virtual or false if not.
3258 ++ */
3259 ++static bool radeon_device_is_virtual(void)
3260 ++{
3261 ++#ifdef CONFIG_X86
3262 ++ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
3263 ++#else
3264 ++ return false;
3265 ++#endif
3266 ++}
3267 ++
3268 + /**
3269 + * radeon_card_posted - check if the hw has already been initialized
3270 + *
3271 +@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
3272 + {
3273 + uint32_t reg;
3274 +
3275 ++ /* for pass through, always force asic_init */
3276 ++ if (radeon_device_is_virtual())
3277 ++ return false;
3278 ++
3279 + /* required for EFI mode on macbook2,1 which uses an r5xx asic */
3280 + if (efi_enabled(EFI_BOOT) &&
3281 + (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
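
Note on the hunk above: radeon_device_is_virtual() keys off the hypervisor
CPUID feature bit, so a GPU passed through to a guest always gets re-POSTed
instead of being treated as already initialized. The same test can be
reproduced from userspace; a minimal sketch (x86 with GCC/Clang only):

    #include <stdio.h>
    #include <cpuid.h>

    /* X86_FEATURE_HYPERVISOR is CPUID leaf 1, ECX bit 31, which every
     * hypervisor sets for its guests; this is the same test the driver
     * performs with boot_cpu_has(). */
    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("running under a hypervisor: %s\n",
               (ecx & (1u << 31)) ? "yes" : "no");
        return 0;
    }
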
3282 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
3283 +index c4e0e69b688d..f666277a8993 100644
3284 +--- a/drivers/gpu/drm/radeon/si_dpm.c
3285 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
3286 +@@ -2925,6 +2925,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
3287 + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
3288 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
3289 + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
3290 ++ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
3291 + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
3292 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
3293 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
3294 +@@ -2959,6 +2960,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3295 + }
3296 + ++p;
3297 + }
3298 ++ /* limit mclk on all R7 370 parts for stability */
3299 ++ if (rdev->pdev->device == 0x6811 &&
3300 ++ rdev->pdev->revision == 0x81)
3301 ++ max_mclk = 120000;
3302 +
3303 + if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
3304 + ni_dpm_vblank_too_short(rdev))
3305 +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
3306 +index 5fc16cecd3ba..cd8d183dcfe5 100644
3307 +--- a/drivers/gpu/drm/udl/udl_fb.c
3308 ++++ b/drivers/gpu/drm/udl/udl_fb.c
3309 +@@ -546,7 +546,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
3310 +
3311 + return ret;
3312 + out_gfree:
3313 +- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
3314 ++ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
3315 + out:
3316 + return ret;
3317 + }
3318 +diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
3319 +index 2a0a784ab6ee..d7528e0d8442 100644
3320 +--- a/drivers/gpu/drm/udl/udl_gem.c
3321 ++++ b/drivers/gpu/drm/udl/udl_gem.c
3322 +@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
3323 + return ret;
3324 + }
3325 +
3326 +- drm_gem_object_unreference(&obj->base);
3327 ++ drm_gem_object_unreference_unlocked(&obj->base);
3328 + *handle_p = handle;
3329 + return 0;
3330 + }
3331 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
3332 +index bc23db196930..bf039dbaa7eb 100644
3333 +--- a/drivers/hid/hid-core.c
3334 ++++ b/drivers/hid/hid-core.c
3335 +@@ -1864,6 +1864,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
3336 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
3337 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
3338 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
3339 ++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
3340 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
3341 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
3342 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
3343 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
3344 +index 4e49462870ab..d0c8a1c1e1fe 100644
3345 +--- a/drivers/hid/hid-elo.c
3346 ++++ b/drivers/hid/hid-elo.c
3347 +@@ -259,7 +259,7 @@ static void elo_remove(struct hid_device *hdev)
3348 + struct elo_priv *priv = hid_get_drvdata(hdev);
3349 +
3350 + hid_hw_stop(hdev);
3351 +- flush_workqueue(wq);
3352 ++ cancel_delayed_work_sync(&priv->work);
3353 + kfree(priv);
3354 + }
3355 +
3356 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
3357 +index 2f1ddca6f2e0..700145b15088 100644
3358 +--- a/drivers/hid/usbhid/hiddev.c
3359 ++++ b/drivers/hid/usbhid/hiddev.c
3360 +@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
3361 + goto inval;
3362 + } else if (uref->usage_index >= field->report_count)
3363 + goto inval;
3364 +-
3365 +- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
3366 +- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
3367 +- uref->usage_index + uref_multi->num_values > field->report_count))
3368 +- goto inval;
3369 + }
3370 +
3371 ++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
3372 ++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
3373 ++ uref->usage_index + uref_multi->num_values > field->report_count))
3374 ++ goto inval;
3375 ++
3376 + switch (cmd) {
3377 + case HIDIOCGUSAGE:
3378 + uref->value = field->value[uref->usage_index];
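
Note on the hiddev hunk: it hoists the HIDIOCGUSAGES/HIDIOCSUSAGES bounds
test out of the else-if chain so it runs on every path, not only when the
earlier field checks fell through; before this, a crafted usage_index and
num_values pair could index past the report buffer. A standalone sketch of
the check with hypothetical names, written without the sum so the test also
sidesteps integer overflow:

    #include <stdbool.h>
    #include <stdio.h>

    #define HID_MAX_MULTI_USAGES 1024

    /* Simplified model of the hoisted multi-usage bounds check. */
    static bool multi_usages_in_bounds(unsigned int num_values,
                                       unsigned int usage_index,
                                       unsigned int report_count)
    {
        if (num_values > HID_MAX_MULTI_USAGES)
            return false;
        return usage_index <= report_count &&
               num_values <= report_count - usage_index;
    }

    int main(void)
    {
        printf("%d\n", multi_usages_in_bounds(4, 2, 8)); /* 1: 2..6 fits in 8 */
        printf("%d\n", multi_usages_in_bounds(8, 4, 8)); /* 0: 4 + 8 > 8      */
        return 0;
    }
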
3379 +diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
3380 +index f67d71ee8386..159f50d0ae39 100644
3381 +--- a/drivers/hwmon/max1111.c
3382 ++++ b/drivers/hwmon/max1111.c
3383 +@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
3384 +
3385 + int max1111_read_channel(int channel)
3386 + {
3387 ++ if (!the_max1111 || !the_max1111->spi)
3388 ++ return -ENODEV;
3389 ++
3390 + return max1111_read(&the_max1111->spi->dev, channel);
3391 + }
3392 + EXPORT_SYMBOL(max1111_read_channel);
3393 +@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
3394 + {
3395 + struct max1111_data *data = spi_get_drvdata(spi);
3396 +
3397 ++#ifdef CONFIG_SHARPSL_PM
3398 ++ the_max1111 = NULL;
3399 ++#endif
3400 + hwmon_device_unregister(data->hwmon_dev);
3401 + sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
3402 + sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
3403 +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
3404 +index b29c7500461a..f54ece8fce78 100644
3405 +--- a/drivers/i2c/busses/i2c-exynos5.c
3406 ++++ b/drivers/i2c/busses/i2c-exynos5.c
3407 +@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
3408 + return -EIO;
3409 + }
3410 +
3411 +- clk_prepare_enable(i2c->clk);
3412 ++ ret = clk_enable(i2c->clk);
3413 ++ if (ret)
3414 ++ return ret;
3415 +
3416 + for (i = 0; i < num; i++, msgs++) {
3417 + stop = (i == num - 1);
3418 +@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
3419 + }
3420 +
3421 + out:
3422 +- clk_disable_unprepare(i2c->clk);
3423 ++ clk_disable(i2c->clk);
3424 + return ret;
3425 + }
3426 +
3427 +@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
3428 + return -ENOENT;
3429 + }
3430 +
3431 +- clk_prepare_enable(i2c->clk);
3432 ++ ret = clk_prepare_enable(i2c->clk);
3433 ++ if (ret)
3434 ++ return ret;
3435 +
3436 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3437 + i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
3438 +@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
3439 +
3440 + platform_set_drvdata(pdev, i2c);
3441 +
3442 ++ clk_disable(i2c->clk);
3443 ++
3444 ++ return 0;
3445 ++
3446 + err_clk:
3447 + clk_disable_unprepare(i2c->clk);
3448 + return ret;
3449 +@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
3450 +
3451 + i2c_del_adapter(&i2c->adap);
3452 +
3453 ++ clk_unprepare(i2c->clk);
3454 ++
3455 + return 0;
3456 + }
3457 +
3458 +@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
3459 +
3460 + i2c->suspended = 1;
3461 +
3462 ++ clk_unprepare(i2c->clk);
3463 ++
3464 + return 0;
3465 + }
3466 +
3467 +@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
3468 + struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
3469 + int ret = 0;
3470 +
3471 +- clk_prepare_enable(i2c->clk);
3472 ++ ret = clk_prepare_enable(i2c->clk);
3473 ++ if (ret)
3474 ++ return ret;
3475 +
3476 + ret = exynos5_hsi2c_clock_setup(i2c);
3477 + if (ret) {
3478 +@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
3479 + }
3480 +
3481 + exynos5_i2c_init(i2c);
3482 +- clk_disable_unprepare(i2c->clk);
3483 ++ clk_disable(i2c->clk);
3484 + i2c->suspended = 0;
3485 +
3486 + return 0;
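
Note on the exynos5 i2c hunks: clk_prepare()/clk_unprepare() may sleep and
therefore belong in probe/remove and the suspend/resume path, while
clk_enable()/clk_disable() are cheap and atomic-safe, so only they wrap each
transfer, with their return codes now checked. A toy refcount model of the
balance the patch establishes (assumed semantics, not the real clk
framework):

    #include <assert.h>
    #include <stdio.h>

    static int prepared, enabled;

    static void clk_prepare_(void)   { prepared++; }
    static void clk_unprepare_(void) { assert(enabled == 0 && prepared > 0); prepared--; }
    static void clk_enable_(void)    { assert(prepared > 0); enabled++; }
    static void clk_disable_(void)   { assert(enabled > 0); enabled--; }

    int main(void)
    {
        clk_prepare_(); clk_enable_(); clk_disable_(); /* probe            */
        clk_enable_();  clk_disable_();                /* one i2c transfer */
        clk_unprepare_();                              /* suspend/remove   */
        printf("balanced: prepared=%d enabled=%d\n", prepared, enabled);
        return 0;
    }
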
3487 +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
3488 +index fd780bbcd07e..f2a7f72f7aa6 100644
3489 +--- a/drivers/iio/magnetometer/ak8975.c
3490 ++++ b/drivers/iio/magnetometer/ak8975.c
3491 +@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
3492 + int eoc_gpio;
3493 + int err;
3494 + const char *name = NULL;
3495 +- enum asahi_compass_chipset chipset;
3496 ++ enum asahi_compass_chipset chipset = AK_MAX_TYPE;
3497 +
3498 + /* Grab and set up the supplied GPIO. */
3499 + if (client->dev.platform_data)
3500 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
3501 +index 33fdd50123f7..9fa27b0cda32 100644
3502 +--- a/drivers/infiniband/hw/mlx4/ah.c
3503 ++++ b/drivers/infiniband/hw/mlx4/ah.c
3504 +@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
3505 +
3506 + ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
3507 + ah->av.ib.g_slid = ah_attr->src_path_bits;
3508 ++ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
3509 + if (ah_attr->ah_flags & IB_AH_GRH) {
3510 + ah->av.ib.g_slid |= 0x80;
3511 + ah->av.ib.gid_index = ah_attr->grh.sgid_index;
3512 +@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
3513 + !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
3514 + --ah->av.ib.stat_rate;
3515 + }
3516 +- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
3517 +
3518 + return &ah->ibah;
3519 + }
3520 +diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
3521 +index c4ca20e63221..b6d14bba6645 100644
3522 +--- a/drivers/input/misc/pmic8xxx-pwrkey.c
3523 ++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
3524 +@@ -92,7 +92,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
3525 + if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
3526 + kpd_delay = 15625;
3527 +
3528 +- if (kpd_delay > 62500 || kpd_delay == 0) {
3529 ++ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
3530 ++ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
3531 + dev_err(&pdev->dev, "invalid power key trigger delay\n");
3532 + return -EINVAL;
3533 + }
3534 +@@ -122,8 +123,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
3535 + pwr->name = "pmic8xxx_pwrkey";
3536 + pwr->phys = "pmic8xxx_pwrkey/input0";
3537 +
3538 +- delay = (kpd_delay << 10) / USEC_PER_SEC;
3539 +- delay = 1 + ilog2(delay);
3540 ++ delay = (kpd_delay << 6) / USEC_PER_SEC;
3541 ++ delay = ilog2(delay);
3542 +
3543 + err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
3544 + if (err < 0) {
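
Note on the pwrkey hunk: the conversion implies the PON_CNTL_1 delay field
encodes the trigger delay as (1 << n)/64 seconds, which is why the accepted
range becomes 1/64 s to 2 s (n = 0..7); the old "<< 10" plus "1 + ilog2()"
math produced an off-by-one exponent. A standalone check of the new formula
at the range limits:

    #include <stdio.h>

    /* ilog2() equivalent for the sketch */
    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int us[] = { 15625, 1000000, 2000000 }; /* 1/64 s, 1 s, 2 s */

        for (int i = 0; i < 3; i++) {
            unsigned int delay =
                (unsigned int)(((unsigned long long)us[i] << 6) / 1000000);

            printf("%7u us -> field %u\n", us[i], ilog2_u(delay));
        }
        return 0; /* prints fields 0, 6 and 7 */
    }
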
3545 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
3546 +index 0f5b400706d7..c3c5d492cba0 100644
3547 +--- a/drivers/input/mouse/elantech.c
3548 ++++ b/drivers/input/mouse/elantech.c
3549 +@@ -1550,13 +1550,7 @@ static int elantech_set_properties(struct elantech_data *etd)
3550 + case 5:
3551 + etd->hw_version = 3;
3552 + break;
3553 +- case 6:
3554 +- case 7:
3555 +- case 8:
3556 +- case 9:
3557 +- case 10:
3558 +- case 13:
3559 +- case 14:
3560 ++ case 6 ... 14:
3561 + etd->hw_version = 4;
3562 + break;
3563 + default:
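
Note on the elantech hunk: "case 6 ... 14:" is the GCC/Clang case-range
extension used throughout the kernel. Worth noting that the range also
matches 11 and 12, which the old explicit list rejected, so the patch
deliberately widens the set of version bytes treated as hw_version 4. A
compilable illustration:

    #include <stdio.h>

    static int hw_version(int byte)
    {
        switch (byte) {
        case 6 ... 14:
            return 4;
        default:
            return -1;
        }
    }

    int main(void)
    {
        for (int b = 5; b <= 15; b++)
            printf("version byte %2d -> hw_version %d\n", b, hw_version(b));
        return 0;
    }
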
3564 +diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
3565 +index a3f0f5a47490..0f586780ceb4 100644
3566 +--- a/drivers/input/mouse/vmmouse.c
3567 ++++ b/drivers/input/mouse/vmmouse.c
3568 +@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
3569 + return -ENXIO;
3570 + }
3571 +
3572 +- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
3573 +- psmouse_dbg(psmouse, "VMMouse port in use.\n");
3574 +- return -EBUSY;
3575 +- }
3576 +-
3577 + /* Check if the device is present */
3578 + response = ~VMMOUSE_PROTO_MAGIC;
3579 + VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
3580 +- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
3581 +- release_region(VMMOUSE_PROTO_PORT, 4);
3582 ++ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
3583 + return -ENXIO;
3584 +- }
3585 +
3586 + if (set_properties) {
3587 + psmouse->vendor = VMMOUSE_VENDOR;
3588 +@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
3589 + psmouse->model = version;
3590 + }
3591 +
3592 +- release_region(VMMOUSE_PROTO_PORT, 4);
3593 +-
3594 + return 0;
3595 + }
3596 +
3597 +@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
3598 + psmouse_reset(psmouse);
3599 + input_unregister_device(priv->abs_dev);
3600 + kfree(priv);
3601 +- release_region(VMMOUSE_PROTO_PORT, 4);
3602 + }
3603 +
3604 + /**
3605 +@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
3606 + struct input_dev *rel_dev = psmouse->dev, *abs_dev;
3607 + int error;
3608 +
3609 +- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
3610 +- psmouse_dbg(psmouse, "VMMouse port in use.\n");
3611 +- return -EBUSY;
3612 +- }
3613 +-
3614 + psmouse_reset(psmouse);
3615 + error = vmmouse_enable(psmouse);
3616 + if (error)
3617 +- goto release_region;
3618 ++ return error;
3619 +
3620 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3621 + abs_dev = input_allocate_device();
3622 +@@ -502,8 +487,5 @@ init_fail:
3623 + kfree(priv);
3624 + psmouse->private = NULL;
3625 +
3626 +-release_region:
3627 +- release_region(VMMOUSE_PROTO_PORT, 4);
3628 +-
3629 + return error;
3630 + }
3631 +diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
3632 +index 2792ca397dd0..3ed0ce1e4dcb 100644
3633 +--- a/drivers/input/touchscreen/wacom_w8001.c
3634 ++++ b/drivers/input/touchscreen/wacom_w8001.c
3635 +@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@×××××.com>");
3636 + MODULE_DESCRIPTION(DRIVER_DESC);
3637 + MODULE_LICENSE("GPL");
3638 +
3639 +-#define W8001_MAX_LENGTH 11
3640 ++#define W8001_MAX_LENGTH 13
3641 + #define W8001_LEAD_MASK 0x80
3642 + #define W8001_LEAD_BYTE 0x80
3643 + #define W8001_TAB_MASK 0x40
3644 +diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
3645 +index 19880c7385e3..d618f67a9f48 100644
3646 +--- a/drivers/input/touchscreen/zforce_ts.c
3647 ++++ b/drivers/input/touchscreen/zforce_ts.c
3648 +@@ -359,8 +359,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
3649 + point.coord_x = point.coord_y = 0;
3650 + }
3651 +
3652 +- point.state = payload[9 * i + 5] & 0x03;
3653 +- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
3654 ++ point.state = payload[9 * i + 5] & 0x0f;
3655 ++ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
3656 +
3657 + /* determine touch major, minor and orientation */
3658 + point.area_major = max(payload[9 * i + 6],
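
Note on the zforce hunk: the status byte actually carries a 4-bit touch
state in the low nibble and a 4-bit finger id in the high nibble; the old
2/6-bit masks mis-decoded both fields once ids exceeded what two state bits
implied. Decoding one example byte both ways:

    #include <stdio.h>

    int main(void)
    {
        unsigned char b = 0x4b; /* example status byte from the payload */

        printf("old: state=%d id=%d\n", b & 0x03, (b & 0xfc) >> 2); /* 3, 18 */
        printf("new: state=%d id=%d\n", b & 0x0f, (b & 0xf0) >> 4); /* 11, 4 */
        return 0;
    }
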
3659 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
3660 +index 11ec9d2a27df..38f375516ae6 100644
3661 +--- a/drivers/md/dm-snap.c
3662 ++++ b/drivers/md/dm-snap.c
3663 +@@ -1099,6 +1099,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3664 + int i;
3665 + int r = -EINVAL;
3666 + char *origin_path, *cow_path;
3667 ++ dev_t origin_dev, cow_dev;
3668 + unsigned args_used, num_flush_bios = 1;
3669 + fmode_t origin_mode = FMODE_READ;
3670 +
3671 +@@ -1129,11 +1130,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3672 + ti->error = "Cannot get origin device";
3673 + goto bad_origin;
3674 + }
3675 ++ origin_dev = s->origin->bdev->bd_dev;
3676 +
3677 + cow_path = argv[0];
3678 + argv++;
3679 + argc--;
3680 +
3681 ++ cow_dev = dm_get_dev_t(cow_path);
3682 ++ if (cow_dev && cow_dev == origin_dev) {
3683 ++ ti->error = "COW device cannot be the same as origin device";
3684 ++ r = -EINVAL;
3685 ++ goto bad_cow;
3686 ++ }
3687 ++
3688 + r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
3689 + if (r) {
3690 + ti->error = "Cannot get COW device";
3691 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
3692 +index 16ba55ad7089..e411ccba0af6 100644
3693 +--- a/drivers/md/dm-table.c
3694 ++++ b/drivers/md/dm-table.c
3695 +@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
3696 + }
3697 +
3698 + /*
3699 ++ * Convert the path to a device
3700 ++ */
3701 ++dev_t dm_get_dev_t(const char *path)
3702 ++{
3703 ++ dev_t uninitialized_var(dev);
3704 ++ struct block_device *bdev;
3705 ++
3706 ++ bdev = lookup_bdev(path);
3707 ++ if (IS_ERR(bdev))
3708 ++ dev = name_to_dev_t(path);
3709 ++ else {
3710 ++ dev = bdev->bd_dev;
3711 ++ bdput(bdev);
3712 ++ }
3713 ++
3714 ++ return dev;
3715 ++}
3716 ++EXPORT_SYMBOL_GPL(dm_get_dev_t);
3717 ++
3718 ++/*
3719 + * Add a device to the list, or just increment the usage count if
3720 + * it's already present.
3721 + */
3722 +@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
3723 + struct dm_dev **result)
3724 + {
3725 + int r;
3726 +- dev_t uninitialized_var(dev);
3727 ++ dev_t dev;
3728 + struct dm_dev_internal *dd;
3729 + struct dm_table *t = ti->table;
3730 +- struct block_device *bdev;
3731 +
3732 + BUG_ON(!t);
3733 +
3734 +- /* convert the path to a device */
3735 +- bdev = lookup_bdev(path);
3736 +- if (IS_ERR(bdev)) {
3737 +- dev = name_to_dev_t(path);
3738 +- if (!dev)
3739 +- return -ENODEV;
3740 +- } else {
3741 +- dev = bdev->bd_dev;
3742 +- bdput(bdev);
3743 +- }
3744 ++ dev = dm_get_dev_t(path);
3745 ++ if (!dev)
3746 ++ return -ENODEV;
3747 +
3748 + dd = find_device(&t->devices, dev);
3749 + if (!dd) {
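
Note on the dm-table hunk: dm_get_dev_t() centralizes the path-to-dev_t
lookup so snapshot_ctr() above can compare the COW and origin devices before
opening them. A rough userspace analogue of the same resolution step, using
stat() where the kernel uses lookup_bdev()/name_to_dev_t():

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>

    /* Resolve a path to a device number so two paths naming the same
     * block device compare equal. */
    int main(int argc, char **argv)
    {
        struct stat st;

        if (argc != 2 || stat(argv[1], &st) != 0 || !S_ISBLK(st.st_mode)) {
            fprintf(stderr, "usage: %s /dev/<blockdev>\n", argv[0]);
            return 1;
        }
        printf("%s -> %u:%u\n", argv[1],
               major(st.st_rdev), minor(st.st_rdev));
        return 0;
    }
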
3750 +diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
3751 +index 8e6fe0200117..e14b1a19f4e6 100644
3752 +--- a/drivers/media/platform/coda/coda-common.c
3753 ++++ b/drivers/media/platform/coda/coda-common.c
3754 +@@ -2100,14 +2100,12 @@ static int coda_probe(struct platform_device *pdev)
3755 +
3756 + pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
3757 +
3758 +- if (of_id) {
3759 ++ if (of_id)
3760 + dev->devtype = of_id->data;
3761 +- } else if (pdev_id) {
3762 ++ else if (pdev_id)
3763 + dev->devtype = &coda_devdata[pdev_id->driver_data];
3764 +- } else {
3765 +- ret = -EINVAL;
3766 +- goto err_v4l2_register;
3767 +- }
3768 ++ else
3769 ++ return -EINVAL;
3770 +
3771 + spin_lock_init(&dev->irqlock);
3772 + INIT_LIST_HEAD(&dev->instances);
3773 +diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
3774 +index 6310acab60e7..d41ae950d1a1 100644
3775 +--- a/drivers/media/platform/vsp1/vsp1_sru.c
3776 ++++ b/drivers/media/platform/vsp1/vsp1_sru.c
3777 +@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
3778 + mutex_lock(sru->ctrls.lock);
3779 + ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
3780 + & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
3781 ++ vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
3782 + mutex_unlock(sru->ctrls.lock);
3783 +
3784 + vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
3785 +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
3786 +index c94ea0d68746..2c51acce4b34 100644
3787 +--- a/drivers/memory/omap-gpmc.c
3788 ++++ b/drivers/memory/omap-gpmc.c
3789 +@@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
3790 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
3791 + GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
3792 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
3793 +- GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
3794 ++ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
3795 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
3796 + GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
3797 + p->cycle2cyclesamecsen);
3798 +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
3799 +index 006242c8bca0..b3c10b7dae1f 100644
3800 +--- a/drivers/misc/Kconfig
3801 ++++ b/drivers/misc/Kconfig
3802 +@@ -429,7 +429,7 @@ config ARM_CHARLCD
3803 + still useful.
3804 +
3805 + config BMP085
3806 +- bool
3807 ++ tristate
3808 + depends on SYSFS
3809 +
3810 + config BMP085_I2C
3811 +diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
3812 +index 15e88078ba1e..f1a0b99f5a9a 100644
3813 +--- a/drivers/misc/ad525x_dpot.c
3814 ++++ b/drivers/misc/ad525x_dpot.c
3815 +@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
3816 + */
3817 + value = swab16(value);
3818 +
3819 +- if (dpot->uid == DPOT_UID(AD5271_ID))
3820 ++ if (dpot->uid == DPOT_UID(AD5274_ID))
3821 + value = value >> 2;
3822 + return value;
3823 + default:
3824 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
3825 +index 1c73ba6efdbd..f109aeed9883 100644
3826 +--- a/drivers/mtd/ubi/eba.c
3827 ++++ b/drivers/mtd/ubi/eba.c
3828 +@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
3829 + int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
3830 + struct ubi_volume *vol = ubi->volumes[idx];
3831 + struct ubi_vid_hdr *vid_hdr;
3832 ++ uint32_t crc;
3833 +
3834 + vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
3835 + if (!vid_hdr)
3836 +@@ -599,14 +600,8 @@ retry:
3837 + goto out_put;
3838 + }
3839 +
3840 +- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
3841 +- err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
3842 +- if (err) {
3843 +- up_read(&ubi->fm_eba_sem);
3844 +- goto write_error;
3845 +- }
3846 ++ ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
3847 +
3848 +- data_size = offset + len;
3849 + mutex_lock(&ubi->buf_mutex);
3850 + memset(ubi->peb_buf + offset, 0xFF, len);
3851 +
3852 +@@ -621,6 +616,19 @@ retry:
3853 +
3854 + memcpy(ubi->peb_buf + offset, buf, len);
3855 +
3856 ++ data_size = offset + len;
3857 ++ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
3858 ++ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
3859 ++ vid_hdr->copy_flag = 1;
3860 ++ vid_hdr->data_size = cpu_to_be32(data_size);
3861 ++ vid_hdr->data_crc = cpu_to_be32(crc);
3862 ++ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
3863 ++ if (err) {
3864 ++ mutex_unlock(&ubi->buf_mutex);
3865 ++ up_read(&ubi->fm_eba_sem);
3866 ++ goto write_error;
3867 ++ }
3868 ++
3869 + err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
3870 + if (err) {
3871 + mutex_unlock(&ubi->buf_mutex);
3872 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
3873 +index bd744e31c434..9ba92e23e67f 100644
3874 +--- a/drivers/net/bonding/bond_main.c
3875 ++++ b/drivers/net/bonding/bond_main.c
3876 +@@ -3246,6 +3246,30 @@ static int bond_close(struct net_device *bond_dev)
3877 + return 0;
3878 + }
3879 +
3880 ++/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
3881 ++ * that some drivers can provide 32bit values only.
3882 ++ */
3883 ++static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3884 ++ const struct rtnl_link_stats64 *_new,
3885 ++ const struct rtnl_link_stats64 *_old)
3886 ++{
3887 ++ const u64 *new = (const u64 *)_new;
3888 ++ const u64 *old = (const u64 *)_old;
3889 ++ u64 *res = (u64 *)_res;
3890 ++ int i;
3891 ++
3892 ++ for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
3893 ++ u64 nv = new[i];
3894 ++ u64 ov = old[i];
3895 ++
3896 ++ /* detects if this particular field is 32bit only */
3897 ++ if (((nv | ov) >> 32) == 0)
3898 ++ res[i] += (u32)nv - (u32)ov;
3899 ++ else
3900 ++ res[i] += nv - ov;
3901 ++ }
3902 ++}
3903 ++
3904 + static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3905 + struct rtnl_link_stats64 *stats)
3906 + {
3907 +@@ -3254,43 +3278,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3908 + struct list_head *iter;
3909 + struct slave *slave;
3910 +
3911 ++ spin_lock(&bond->stats_lock);
3912 + memcpy(stats, &bond->bond_stats, sizeof(*stats));
3913 +
3914 +- bond_for_each_slave(bond, slave, iter) {
3915 +- const struct rtnl_link_stats64 *sstats =
3916 ++ rcu_read_lock();
3917 ++ bond_for_each_slave_rcu(bond, slave, iter) {
3918 ++ const struct rtnl_link_stats64 *new =
3919 + dev_get_stats(slave->dev, &temp);
3920 +- struct rtnl_link_stats64 *pstats = &slave->slave_stats;
3921 +-
3922 +- stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
3923 +- stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
3924 +- stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
3925 +- stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
3926 +-
3927 +- stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
3928 +- stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
3929 +- stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
3930 +- stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
3931 +-
3932 +- stats->multicast += sstats->multicast - pstats->multicast;
3933 +- stats->collisions += sstats->collisions - pstats->collisions;
3934 +-
3935 +- stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
3936 +- stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
3937 +- stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
3938 +- stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
3939 +- stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
3940 +- stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
3941 +-
3942 +- stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
3943 +- stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
3944 +- stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
3945 +- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
3946 +- stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
3947 ++
3948 ++ bond_fold_stats(stats, new, &slave->slave_stats);
3949 +
3950 + /* save off the slave stats for the next run */
3951 +- memcpy(pstats, sstats, sizeof(*sstats));
3952 ++ memcpy(&slave->slave_stats, new, sizeof(*new));
3953 + }
3954 ++ rcu_read_unlock();
3955 ++
3956 + memcpy(&bond->bond_stats, stats, sizeof(*stats));
3957 ++ spin_unlock(&bond->stats_lock);
3958 +
3959 + return stats;
3960 + }
3961 +@@ -4102,6 +4106,7 @@ void bond_setup(struct net_device *bond_dev)
3962 + struct bonding *bond = netdev_priv(bond_dev);
3963 +
3964 + spin_lock_init(&bond->mode_lock);
3965 ++ spin_lock_init(&bond->stats_lock);
3966 + bond->params = bonding_defaults;
3967 +
3968 + /* Initialize pointers */
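
Note on the bonding hunks: bond_fold_stats() treats a field as 32-bit-only
when the high words of both samples are zero, a heuristic that keeps deltas
correct across u32 counter wraparound; the new stats_lock plus RCU traversal
also replaces the open-coded per-field arithmetic. The folding rule in
isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* If both samples fit in 32 bits, compute the delta in 32-bit
     * arithmetic so wraparound of 32-bit-only drivers still yields the
     * right (small) difference. */
    static uint64_t fold_delta(uint64_t nv, uint64_t ov)
    {
        if (((nv | ov) >> 32) == 0)
            return (uint32_t)((uint32_t)nv - (uint32_t)ov);
        return nv - ov;
    }

    int main(void)
    {
        /* counter wrapped from 0xfffffff0 to 0x10: real delta is 0x20 */
        printf("0x%llx\n",
               (unsigned long long)fold_delta(0x10, 0xfffffff0u));
        return 0;
    }
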
3969 +diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
3970 +index f4e40aa4d2a2..35233aa5a88a 100644
3971 +--- a/drivers/net/can/at91_can.c
3972 ++++ b/drivers/net/can/at91_can.c
3973 +@@ -733,9 +733,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
3974 +
3975 + /* upper group completed, look again in lower */
3976 + if (priv->rx_next > get_mb_rx_low_last(priv) &&
3977 +- quota > 0 && mb > get_mb_rx_last(priv)) {
3978 ++ mb > get_mb_rx_last(priv)) {
3979 + priv->rx_next = get_mb_rx_first(priv);
3980 +- goto again;
3981 ++ if (quota > 0)
3982 ++ goto again;
3983 + }
3984 +
3985 + return received;
3986 +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
3987 +index 5d214d135332..c076414103d2 100644
3988 +--- a/drivers/net/can/c_can/c_can.c
3989 ++++ b/drivers/net/can/c_can/c_can.c
3990 +@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
3991 +
3992 + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
3993 +
3994 +- for (i = 0; i < frame->can_dlc; i += 2) {
3995 +- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
3996 +- frame->data[i] | (frame->data[i + 1] << 8));
3997 ++ if (priv->type == BOSCH_D_CAN) {
3998 ++ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
3999 ++
4000 ++ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
4001 ++ data = (u32)frame->data[i];
4002 ++ data |= (u32)frame->data[i + 1] << 8;
4003 ++ data |= (u32)frame->data[i + 2] << 16;
4004 ++ data |= (u32)frame->data[i + 3] << 24;
4005 ++ priv->write_reg32(priv, dreg, data);
4006 ++ }
4007 ++ } else {
4008 ++ for (i = 0; i < frame->can_dlc; i += 2) {
4009 ++ priv->write_reg(priv,
4010 ++ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
4011 ++ frame->data[i] |
4012 ++ (frame->data[i + 1] << 8));
4013 ++ }
4014 + }
4015 + }
4016 +
4017 +@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
4018 + } else {
4019 + int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
4020 +
4021 +- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
4022 +- data = priv->read_reg(priv, dreg);
4023 +- frame->data[i] = data;
4024 +- frame->data[i + 1] = data >> 8;
4025 ++ if (priv->type == BOSCH_D_CAN) {
4026 ++ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
4027 ++ data = priv->read_reg32(priv, dreg);
4028 ++ frame->data[i] = data;
4029 ++ frame->data[i + 1] = data >> 8;
4030 ++ frame->data[i + 2] = data >> 16;
4031 ++ frame->data[i + 3] = data >> 24;
4032 ++ }
4033 ++ } else {
4034 ++ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
4035 ++ data = priv->read_reg(priv, dreg);
4036 ++ frame->data[i] = data;
4037 ++ frame->data[i + 1] = data >> 8;
4038 ++ }
4039 + }
4040 + }
4041 +
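
Note on the c_can hunks: on D_CAN the interface data registers are accessed
as 32-bit quantities, so the payload is packed four bytes at a time,
little-endian, instead of the 16-bit packing kept for plain C_CAN. The
packing from the new loop body, standalone:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t data[4] = { 0x11, 0x22, 0x33, 0x44 };
        uint32_t word = (uint32_t)data[0] |
                        (uint32_t)data[1] << 8 |
                        (uint32_t)data[2] << 16 |
                        (uint32_t)data[3] << 24;

        printf("0x%08x\n", word); /* 0x44332211: byte 0 in the low lane */
        return 0;
    }
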
4042 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
4043 +index 910c12e2638e..ad535a854e5c 100644
4044 +--- a/drivers/net/can/dev.c
4045 ++++ b/drivers/net/can/dev.c
4046 +@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
4047 + * - control mode with CAN_CTRLMODE_FD set
4048 + */
4049 +
4050 ++ if (!data)
4051 ++ return 0;
4052 ++
4053 + if (data[IFLA_CAN_CTRLMODE]) {
4054 + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
4055 +
4056 +@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
4057 + return -EOPNOTSUPP;
4058 + }
4059 +
4060 ++static void can_dellink(struct net_device *dev, struct list_head *head)
4061 ++{
4062 ++ return;
4063 ++}
4064 ++
4065 + static struct rtnl_link_ops can_link_ops __read_mostly = {
4066 + .kind = "can",
4067 + .maxtype = IFLA_CAN_MAX,
4068 +@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
4069 + .validate = can_validate,
4070 + .newlink = can_newlink,
4071 + .changelink = can_changelink,
4072 ++ .dellink = can_dellink,
4073 + .get_size = can_get_size,
4074 + .fill_info = can_fill_info,
4075 + .get_xstats_size = can_get_xstats_size,
4076 +diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
4077 +index 46a535318c7a..972ee645fac6 100644
4078 +--- a/drivers/net/ethernet/atheros/atlx/atl2.c
4079 ++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
4080 +@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4081 +
4082 + err = -EIO;
4083 +
4084 +- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
4085 ++ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
4086 + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4087 +
4088 + /* Init PHY as early as possible due to power saving issue */
4089 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
4090 +index 6043734ea613..a9fcac044e9e 100644
4091 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
4092 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
4093 +@@ -1048,7 +1048,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
4094 + dev->stats.tx_bytes += tx_cb_ptr->skb->len;
4095 + dma_unmap_single(&dev->dev,
4096 + dma_unmap_addr(tx_cb_ptr, dma_addr),
4097 +- tx_cb_ptr->skb->len,
4098 ++ dma_unmap_len(tx_cb_ptr, dma_len),
4099 + DMA_TO_DEVICE);
4100 + bcmgenet_free_cb(tx_cb_ptr);
4101 + } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
4102 +@@ -1159,7 +1159,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
4103 + }
4104 +
4105 + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
4106 +- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
4107 ++ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
4108 + length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
4109 + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
4110 + DMA_TX_APPEND_CRC;
4111 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
4112 +index 570390b5cd42..67aec18dd76c 100644
4113 +--- a/drivers/net/ethernet/freescale/fec_main.c
4114 ++++ b/drivers/net/ethernet/freescale/fec_main.c
4115 +@@ -1546,9 +1546,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
4116 + struct fec_enet_private *fep = netdev_priv(ndev);
4117 +
4118 + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
4119 +- clear_bit(queue_id, &fep->work_rx);
4120 +- pkt_received += fec_enet_rx_queue(ndev,
4121 ++ int ret;
4122 ++
4123 ++ ret = fec_enet_rx_queue(ndev,
4124 + budget - pkt_received, queue_id);
4125 ++
4126 ++ if (ret < budget - pkt_received)
4127 ++ clear_bit(queue_id, &fep->work_rx);
4128 ++
4129 ++ pkt_received += ret;
4130 + }
4131 + return pkt_received;
4132 + }
4133 +diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
4134 +index 6e9a792097d3..46e8d5b12c1a 100644
4135 +--- a/drivers/net/ethernet/jme.c
4136 ++++ b/drivers/net/ethernet/jme.c
4137 +@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
4138 + }
4139 +
4140 + static inline void
4141 +-jme_clear_pm(struct jme_adapter *jme)
4142 ++jme_clear_pm_enable_wol(struct jme_adapter *jme)
4143 + {
4144 + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
4145 + }
4146 +
4147 ++static inline void
4148 ++jme_clear_pm_disable_wol(struct jme_adapter *jme)
4149 ++{
4150 ++ jwrite32(jme, JME_PMCS, PMCS_STMASK);
4151 ++}
4152 ++
4153 + static int
4154 + jme_reload_eeprom(struct jme_adapter *jme)
4155 + {
4156 +@@ -1857,7 +1863,7 @@ jme_open(struct net_device *netdev)
4157 + struct jme_adapter *jme = netdev_priv(netdev);
4158 + int rc;
4159 +
4160 +- jme_clear_pm(jme);
4161 ++ jme_clear_pm_disable_wol(jme);
4162 + JME_NAPI_ENABLE(jme);
4163 +
4164 + tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
4165 +@@ -1929,11 +1935,11 @@ jme_wait_link(struct jme_adapter *jme)
4166 + static void
4167 + jme_powersave_phy(struct jme_adapter *jme)
4168 + {
4169 +- if (jme->reg_pmcs) {
4170 ++ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
4171 + jme_set_100m_half(jme);
4172 + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
4173 + jme_wait_link(jme);
4174 +- jme_clear_pm(jme);
4175 ++ jme_clear_pm_enable_wol(jme);
4176 + } else {
4177 + jme_phy_off(jme);
4178 + }
4179 +@@ -2650,9 +2656,6 @@ jme_set_wol(struct net_device *netdev,
4180 + if (wol->wolopts & WAKE_MAGIC)
4181 + jme->reg_pmcs |= PMCS_MFEN;
4182 +
4183 +- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
4184 +- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
4185 +-
4186 + return 0;
4187 + }
4188 +
4189 +@@ -3176,8 +3179,8 @@ jme_init_one(struct pci_dev *pdev,
4190 + jme->mii_if.mdio_read = jme_mdio_read;
4191 + jme->mii_if.mdio_write = jme_mdio_write;
4192 +
4193 +- jme_clear_pm(jme);
4194 +- device_set_wakeup_enable(&pdev->dev, true);
4195 ++ jme_clear_pm_disable_wol(jme);
4196 ++ device_init_wakeup(&pdev->dev, true);
4197 +
4198 + jme_set_phyfifo_5level(jme);
4199 + jme->pcirev = pdev->revision;
4200 +@@ -3308,7 +3311,7 @@ jme_resume(struct device *dev)
4201 + if (!netif_running(netdev))
4202 + return 0;
4203 +
4204 +- jme_clear_pm(jme);
4205 ++ jme_clear_pm_disable_wol(jme);
4206 + jme_phy_on(jme);
4207 + if (test_bit(JME_FLAG_SSET, &jme->flags))
4208 + jme_set_settings(netdev, &jme->old_ecmd);
4209 +@@ -3316,13 +3319,14 @@ jme_resume(struct device *dev)
4210 + jme_reset_phy_processor(jme);
4211 + jme_phy_calibration(jme);
4212 + jme_phy_setEA(jme);
4213 +- jme_start_irq(jme);
4214 + netif_device_attach(netdev);
4215 +
4216 + atomic_inc(&jme->link_changing);
4217 +
4218 + jme_reset_link(jme);
4219 +
4220 ++ jme_start_irq(jme);
4221 ++
4222 + return 0;
4223 + }
4224 +
4225 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
4226 +index 80aac20104de..f6095d2b77de 100644
4227 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
4228 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
4229 +@@ -710,7 +710,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
4230 +
4231 + if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
4232 + return -1;
4233 +- hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
4234 ++ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
4235 +
4236 + csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
4237 + sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
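
Note on the en_rx hunk: in the IPv6 pseudo-header the next-header value
occupies the low byte of a big-endian 16-bit word. On little-endian hosts
htons(nexthdr) and nexthdr << 8 happen to produce the same value, so the old
expression only misbehaved on big-endian; htons() makes the checksum folding
portable. A quick comparison:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        unsigned char nexthdr = 6; /* IPPROTO_TCP */

        printf("nexthdr << 8   = 0x%04x\n", nexthdr << 8);
        printf("htons(nexthdr) = 0x%04x\n", (unsigned)htons(nexthdr));
        return 0; /* equal on little-endian, different on big-endian */
    }
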
4238 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4239 +index c10d98f6ad96..a1b4301f719a 100644
4240 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4241 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4242 +@@ -400,7 +400,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
4243 + u32 packets = 0;
4244 + u32 bytes = 0;
4245 + int factor = priv->cqe_factor;
4246 +- u64 timestamp = 0;
4247 + int done = 0;
4248 + int budget = priv->tx_work_limit;
4249 + u32 last_nr_txbb;
4250 +@@ -440,9 +439,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
4251 + new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
4252 +
4253 + do {
4254 ++ u64 timestamp = 0;
4255 ++
4256 + txbbs_skipped += last_nr_txbb;
4257 + ring_index = (ring_index + last_nr_txbb) & size_mask;
4258 +- if (ring->tx_info[ring_index].ts_requested)
4259 ++
4260 ++ if (unlikely(ring->tx_info[ring_index].ts_requested))
4261 + timestamp = mlx4_en_get_cqe_ts(cqe);
4262 +
4263 + /* free next descriptor */
4264 +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4265 +index bafe2180cf0c..e662ab39499e 100644
4266 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4267 ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4268 +@@ -2960,7 +2960,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
4269 + case QP_TRANS_RTS2RTS:
4270 + case QP_TRANS_SQD2SQD:
4271 + case QP_TRANS_SQD2RTS:
4272 +- if (slave != mlx4_master_func_num(dev))
4273 ++ if (slave != mlx4_master_func_num(dev)) {
4274 + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
4275 + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
4276 + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
4277 +@@ -2979,6 +2979,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
4278 + if (qp_ctx->alt_path.mgid_index >= num_gids)
4279 + return -EINVAL;
4280 + }
4281 ++ }
4282 + break;
4283 + default:
4284 + break;
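
Note on the resource_tracker hunk: the added braces matter because only the
first statement after "if (slave != mlx4_master_func_num(dev))" was guarded;
the alt-path GID checks beneath it ran unconditionally despite their
indentation. The pitfall in miniature:

    #include <stdio.h>

    int main(void)
    {
        int slave = 0, master = 0; /* pretend we ARE the master function */

        if (slave != master)
            printf("guarded check\n");
            printf("looks guarded, always runs\n"); /* indentation lies */
        return 0;
    }
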
4285 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
4286 +index f221126a5c4e..d0992825c47c 100644
4287 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
4288 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
4289 +@@ -567,6 +567,7 @@ struct qlcnic_adapter_stats {
4290 + u64 tx_dma_map_error;
4291 + u64 spurious_intr;
4292 + u64 mac_filter_limit_overrun;
4293 ++ u64 mbx_spurious_intr;
4294 + };
4295 +
4296 + /*
4297 +@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
4298 + unsigned long status;
4299 + spinlock_t queue_lock; /* Mailbox queue lock */
4300 + spinlock_t aen_lock; /* Mailbox response/AEN lock */
4301 +- atomic_t rsp_status;
4302 ++ u32 rsp_status;
4303 + u32 num_cmds;
4304 + };
4305 +
4306 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4307 +index 840bf36b5e9d..dd618d7ed257 100644
4308 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4309 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4310 +@@ -489,7 +489,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
4311 +
4312 + static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
4313 + {
4314 +- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
4315 ++ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
4316 + complete(&mbx->completion);
4317 + }
4318 +
4319 +@@ -508,7 +508,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
4320 + if (event & QLCNIC_MBX_ASYNC_EVENT) {
4321 + __qlcnic_83xx_process_aen(adapter);
4322 + } else {
4323 +- if (atomic_read(&mbx->rsp_status) != rsp_status)
4324 ++ if (mbx->rsp_status != rsp_status)
4325 + qlcnic_83xx_notify_mbx_response(mbx);
4326 + }
4327 + out:
4328 +@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
4329 + if (event & QLCNIC_MBX_ASYNC_EVENT) {
4330 + __qlcnic_83xx_process_aen(adapter);
4331 + } else {
4332 +- if (atomic_read(&mbx->rsp_status) != rsp_status)
4333 ++ if (mbx->rsp_status != rsp_status)
4334 + qlcnic_83xx_notify_mbx_response(mbx);
4335 + }
4336 + }
4337 +@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
4338 +
4339 + static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
4340 + {
4341 ++ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
4342 + struct qlcnic_adapter *adapter = data;
4343 + struct qlcnic_mailbox *mbx;
4344 +- u32 mask, resp, event;
4345 + unsigned long flags;
4346 +
4347 + mbx = adapter->ahw->mailbox;
4348 +@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
4349 + goto out;
4350 +
4351 + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
4352 +- if (event & QLCNIC_MBX_ASYNC_EVENT)
4353 ++ if (event & QLCNIC_MBX_ASYNC_EVENT) {
4354 + __qlcnic_83xx_process_aen(adapter);
4355 +- else
4356 +- qlcnic_83xx_notify_mbx_response(mbx);
4357 ++ } else {
4358 ++ if (mbx->rsp_status != rsp_status)
4359 ++ qlcnic_83xx_notify_mbx_response(mbx);
4360 ++ else
4361 ++ adapter->stats.mbx_spurious_intr++;
4362 ++ }
4363 +
4364 + out:
4365 + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
4366 +@@ -4025,10 +4029,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
4367 + struct qlcnic_adapter *adapter = mbx->adapter;
4368 + struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
4369 + struct device *dev = &adapter->pdev->dev;
4370 +- atomic_t *rsp_status = &mbx->rsp_status;
4371 + struct list_head *head = &mbx->cmd_q;
4372 + struct qlcnic_hardware_context *ahw;
4373 + struct qlcnic_cmd_args *cmd = NULL;
4374 ++ unsigned long flags;
4375 +
4376 + ahw = adapter->ahw;
4377 +
4378 +@@ -4038,7 +4042,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
4379 + return;
4380 + }
4381 +
4382 +- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
4383 ++ spin_lock_irqsave(&mbx->aen_lock, flags);
4384 ++ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
4385 ++ spin_unlock_irqrestore(&mbx->aen_lock, flags);
4386 +
4387 + spin_lock(&mbx->queue_lock);
4388 +
4389 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
4390 +index 494e8105adee..0a2318cad34d 100644
4391 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
4392 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
4393 +@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
4394 + QLC_OFF(stats.mac_filter_limit_overrun)},
4395 + {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
4396 + QLC_OFF(stats.spurious_intr)},
4397 +-
4398 ++ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
4399 ++ QLC_OFF(stats.mbx_spurious_intr)},
4400 + };
4401 +
4402 + static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
4403 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
4404 +index 25800a1dedcb..b915de060a42 100644
4405 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
4406 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
4407 +@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
4408 + return;
4409 + }
4410 + skb_reserve(new_skb, NET_IP_ALIGN);
4411 ++
4412 ++ pci_dma_sync_single_for_cpu(qdev->pdev,
4413 ++ dma_unmap_addr(sbq_desc, mapaddr),
4414 ++ dma_unmap_len(sbq_desc, maplen),
4415 ++ PCI_DMA_FROMDEVICE);
4416 ++
4417 + memcpy(skb_put(new_skb, length), skb->data, length);
4418 ++
4419 ++ pci_dma_sync_single_for_device(qdev->pdev,
4420 ++ dma_unmap_addr(sbq_desc, mapaddr),
4421 ++ dma_unmap_len(sbq_desc, maplen),
4422 ++ PCI_DMA_FROMDEVICE);
4423 + skb = new_skb;
4424 +
4425 + /* Frame error, so drop the packet. */
4426 +diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
4427 +index 97e4df9bf407..cba41860167c 100644
4428 +--- a/drivers/net/ethernet/qualcomm/qca_spi.c
4429 ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
4430 +@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
4431 + dev->netdev_ops = &qcaspi_netdev_ops;
4432 + qcaspi_set_ethtool_ops(dev);
4433 + dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
4434 +- dev->flags = IFF_MULTICAST;
4435 ++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
4436 + dev->tx_queue_len = 100;
4437 +
4438 + qca = netdev_priv(dev);
4439 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
4440 +index 13463c4acc86..c93a458f96f7 100644
4441 +--- a/drivers/net/ethernet/renesas/sh_eth.c
4442 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
4443 +@@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
4444 +
4445 + /* RX descriptor */
4446 + rxdesc = &mdp->rx_ring[i];
4447 +- /* The size of the buffer is a multiple of 16 bytes. */
4448 +- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
4449 ++ /* The size of the buffer is a multiple of 32 bytes. */
4450 ++ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
4451 + dma_addr = dma_map_single(&ndev->dev, skb->data,
4452 + rxdesc->buffer_length,
4453 + DMA_FROM_DEVICE);
4454 +@@ -1173,7 +1173,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
4455 + mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
4456 +
4457 + /* Mark the last entry as wrapping the ring. */
4458 +- rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
4459 ++ if (rxdesc)
4460 ++ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
4461 +
4462 + memset(mdp->tx_ring, 0, tx_ringsize);
4463 +
4464 +@@ -1506,7 +1507,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
4465 + if (mdp->cd->rpadir)
4466 + skb_reserve(skb, NET_IP_ALIGN);
4467 + dma_unmap_single(&ndev->dev, rxdesc->addr,
4468 +- ALIGN(mdp->rx_buf_sz, 16),
4469 ++ ALIGN(mdp->rx_buf_sz, 32),
4470 + DMA_FROM_DEVICE);
4471 + skb_put(skb, pkt_len);
4472 + skb->protocol = eth_type_trans(skb, ndev);
4473 +@@ -1524,8 +1525,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
4474 + for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
4475 + entry = mdp->dirty_rx % mdp->num_rx_ring;
4476 + rxdesc = &mdp->rx_ring[entry];
4477 +- /* The size of the buffer is 16 byte boundary. */
4478 +- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
4479 ++ /* The size of the buffer is 32 byte boundary. */
4480 ++ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
4481 +
4482 + if (mdp->rx_skbuff[entry] == NULL) {
4483 + skb = netdev_alloc_skb(ndev, skbuff_size);
4484 +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
4485 +index feca46efa12f..c642e201a45e 100644
4486 +--- a/drivers/net/ethernet/sfc/ef10.c
4487 ++++ b/drivers/net/ethernet/sfc/ef10.c
4488 +@@ -452,6 +452,17 @@ fail:
4489 + return rc;
4490 + }
4491 +
4492 ++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
4493 ++{
4494 ++ struct efx_channel *channel;
4495 ++ struct efx_tx_queue *tx_queue;
4496 ++
4497 ++ /* All our existing PIO buffers went away */
4498 ++ efx_for_each_channel(channel, efx)
4499 ++ efx_for_each_channel_tx_queue(tx_queue, channel)
4500 ++ tx_queue->piobuf = NULL;
4501 ++}
4502 ++
4503 + #else /* !EFX_USE_PIO */
4504 +
4505 + static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
4506 +@@ -468,6 +479,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
4507 + {
4508 + }
4509 +
4510 ++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
4511 ++{
4512 ++}
4513 ++
4514 + #endif /* EFX_USE_PIO */
4515 +
4516 + static void efx_ef10_remove(struct efx_nic *efx)
4517 +@@ -699,6 +714,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
4518 + nic_data->must_realloc_vis = true;
4519 + nic_data->must_restore_filters = true;
4520 + nic_data->must_restore_piobufs = true;
4521 ++ efx_ef10_forget_old_piobufs(efx);
4522 + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
4523 + }
4524 +
4525 +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
4526 +index 4dba5fbc735e..2b212f3e140c 100644
4527 +--- a/drivers/net/macvtap.c
4528 ++++ b/drivers/net/macvtap.c
4529 +@@ -710,6 +710,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
4530 + macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
4531 + if (copylen > good_linear)
4532 + copylen = good_linear;
4533 ++ else if (copylen < ETH_HLEN)
4534 ++ copylen = ETH_HLEN;
4535 + linear = copylen;
4536 + i = *from;
4537 + iov_iter_advance(&i, copylen);
4538 +@@ -719,10 +721,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
4539 +
4540 + if (!zerocopy) {
4541 + copylen = len;
4542 +- if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
4543 ++ linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
4544 ++ if (linear > good_linear)
4545 + linear = good_linear;
4546 +- else
4547 +- linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
4548 ++ else if (linear < ETH_HLEN)
4549 ++ linear = ETH_HLEN;
4550 + }
4551 +
4552 + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
4553 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
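
Note on the macvtap hunks: both clamp the linear/copy length into the range
[ETH_HLEN, good_linear], since below ETH_HLEN the skb's linear area cannot
even hold the Ethernet header that later code dereferences, and above
good_linear the copy defeats the zerocopy path. The clamp in isolation:

    #include <stdio.h>

    #define ETH_HLEN 14

    static int clamp_linear(int want, int good_linear)
    {
        if (want > good_linear)
            return good_linear;
        if (want < ETH_HLEN)
            return ETH_HLEN;
        return want;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               clamp_linear(4, 512),     /* 14  */
               clamp_linear(100, 512),   /* 100 */
               clamp_linear(9000, 512)); /* 512 */
        return 0;
    }
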
4554 +index cfe49a07c7c1..51ba895f0522 100644
4555 +--- a/drivers/net/ppp/ppp_generic.c
4556 ++++ b/drivers/net/ppp/ppp_generic.c
4557 +@@ -563,7 +563,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
4558 +
4559 + static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4560 + {
4561 +- struct ppp_file *pf = file->private_data;
4562 ++ struct ppp_file *pf;
4563 + struct ppp *ppp;
4564 + int err = -EFAULT, val, val2, i;
4565 + struct ppp_idle idle;
4566 +@@ -573,9 +573,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4567 + void __user *argp = (void __user *)arg;
4568 + int __user *p = argp;
4569 +
4570 +- if (!pf)
4571 +- return ppp_unattached_ioctl(current->nsproxy->net_ns,
4572 +- pf, file, cmd, arg);
4573 ++ mutex_lock(&ppp_mutex);
4574 ++
4575 ++ pf = file->private_data;
4576 ++ if (!pf) {
4577 ++ err = ppp_unattached_ioctl(current->nsproxy->net_ns,
4578 ++ pf, file, cmd, arg);
4579 ++ goto out;
4580 ++ }
4581 +
4582 + if (cmd == PPPIOCDETACH) {
4583 + /*
4584 +@@ -590,7 +595,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4585 + * this fd and reopening /dev/ppp.
4586 + */
4587 + err = -EINVAL;
4588 +- mutex_lock(&ppp_mutex);
4589 + if (pf->kind == INTERFACE) {
4590 + ppp = PF_TO_PPP(pf);
4591 + if (file == ppp->owner)
4592 +@@ -602,15 +606,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4593 + } else
4594 + pr_warn("PPPIOCDETACH file->f_count=%ld\n",
4595 + atomic_long_read(&file->f_count));
4596 +- mutex_unlock(&ppp_mutex);
4597 +- return err;
4598 ++ goto out;
4599 + }
4600 +
4601 + if (pf->kind == CHANNEL) {
4602 + struct channel *pch;
4603 + struct ppp_channel *chan;
4604 +
4605 +- mutex_lock(&ppp_mutex);
4606 + pch = PF_TO_CHANNEL(pf);
4607 +
4608 + switch (cmd) {
4609 +@@ -632,17 +634,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4610 + err = chan->ops->ioctl(chan, cmd, arg);
4611 + up_read(&pch->chan_sem);
4612 + }
4613 +- mutex_unlock(&ppp_mutex);
4614 +- return err;
4615 ++ goto out;
4616 + }
4617 +
4618 + if (pf->kind != INTERFACE) {
4619 + /* can't happen */
4620 + pr_err("PPP: not interface or channel??\n");
4621 +- return -EINVAL;
4622 ++ err = -EINVAL;
4623 ++ goto out;
4624 + }
4625 +
4626 +- mutex_lock(&ppp_mutex);
4627 + ppp = PF_TO_PPP(pf);
4628 + switch (cmd) {
4629 + case PPPIOCSMRU:
4630 +@@ -817,7 +818,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4631 + default:
4632 + err = -ENOTTY;
4633 + }
4634 ++
4635 ++out:
4636 + mutex_unlock(&ppp_mutex);
4637 ++
4638 + return err;
4639 + }
4640 +
4641 +@@ -830,7 +834,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
4642 + struct ppp_net *pn;
4643 + int __user *p = (int __user *)arg;
4644 +
4645 +- mutex_lock(&ppp_mutex);
4646 + switch (cmd) {
4647 + case PPPIOCNEWUNIT:
4648 + /* Create a new ppp unit */
4649 +@@ -881,7 +884,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
4650 + default:
4651 + err = -ENOTTY;
4652 + }
4653 +- mutex_unlock(&ppp_mutex);
4654 ++
4655 + return err;
4656 + }
4657 +
4658 +@@ -2244,7 +2247,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
4659 +
4660 + pch->ppp = NULL;
4661 + pch->chan = chan;
4662 +- pch->chan_net = net;
4663 ++ pch->chan_net = get_net(net);
4664 + chan->ppp = pch;
4665 + init_ppp_file(&pch->file, CHANNEL);
4666 + pch->file.hdrlen = chan->hdrlen;
4667 +@@ -2341,6 +2344,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
4668 + spin_lock_bh(&pn->all_channels_lock);
4669 + list_del(&pch->list);
4670 + spin_unlock_bh(&pn->all_channels_lock);
4671 ++ put_net(pch->chan_net);
4672 ++ pch->chan_net = NULL;
4673 +
4674 + pch->file.dead = 1;
4675 + wake_up_interruptible(&pch->file.rwait);
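The ppp_ioctl() rework above collapses several mutex_lock()/mutex_unlock() pairs scattered across the branches into one lock at entry and one unlock at a shared out: label; it also closes the race where file->private_data was read before any lock was held. A minimal single-threaded sketch of the lock-once/goto-out shape, using a pthread mutex and hypothetical command values:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;

    /* One lock at entry, one unlock at the exit label: every early
     * return becomes "err = ...; goto out;". */
    static int do_ioctl(int cmd)
    {
        int err;

        pthread_mutex_lock(&ioctl_lock);

        if (cmd == 0) {          /* e.g. the "unattached" case */
            err = 0;
            goto out;
        }
        if (cmd < 0) {           /* e.g. an invalid command */
            err = -22;           /* -EINVAL */
            goto out;
        }
        err = cmd;               /* the common path */
    out:
        pthread_mutex_unlock(&ioctl_lock);
        return err;
    }

    int main(void)
    {
        printf("%d %d %d\n", do_ioctl(0), do_ioctl(-5), do_ioctl(7));
        return 0;
    }

With a single exit point, no branch can return while still holding the mutex, which is exactly the class of bug the multi-unlock version invites.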
4676 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
4677 +index e470ae59d405..01f5ff84cf6b 100644
4678 +--- a/drivers/net/tun.c
4679 ++++ b/drivers/net/tun.c
4680 +@@ -516,11 +516,13 @@ static void tun_detach_all(struct net_device *dev)
4681 + for (i = 0; i < n; i++) {
4682 + tfile = rtnl_dereference(tun->tfiles[i]);
4683 + BUG_ON(!tfile);
4684 ++ tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
4685 + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
4686 + RCU_INIT_POINTER(tfile->tun, NULL);
4687 + --tun->numqueues;
4688 + }
4689 + list_for_each_entry(tfile, &tun->disabled, next) {
4690 ++ tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
4691 + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
4692 + RCU_INIT_POINTER(tfile->tun, NULL);
4693 + }
4694 +@@ -575,6 +577,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
4695 + goto out;
4696 + }
4697 + tfile->queue_index = tun->numqueues;
4698 ++ tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
4699 + rcu_assign_pointer(tfile->tun, tun);
4700 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
4701 + tun->numqueues++;
4702 +@@ -1357,9 +1360,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
4703 + if (!iov_iter_count(to))
4704 + return 0;
4705 +
4706 +- if (tun->dev->reg_state != NETREG_REGISTERED)
4707 +- return -EIO;
4708 +-
4709 + /* Read frames from queue */
4710 + skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
4711 + &peeked, &off, &err);
4712 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
4713 +index 0b481c30979b..5db25e46a962 100644
4714 +--- a/drivers/net/usb/cdc_ncm.c
4715 ++++ b/drivers/net/usb/cdc_ncm.c
4716 +@@ -843,7 +843,11 @@ advance:
4717 +
4718 + iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
4719 +
4720 +- /* reset data interface */
4721 ++ /* Reset data interface. Some devices will not reset properly
4722 ++ * unless they are configured first. Toggle the altsetting to
4723 ++ * force a reset
4724 ++ */
4725 ++ usb_set_interface(dev->udev, iface_no, data_altsetting);
4726 + temp = usb_set_interface(dev->udev, iface_no, 0);
4727 + if (temp) {
4728 + dev_dbg(&intf->dev, "set interface failed\n");
4729 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
4730 +index cffb25280a3b..8153e97408e7 100644
4731 +--- a/drivers/net/usb/qmi_wwan.c
4732 ++++ b/drivers/net/usb/qmi_wwan.c
4733 +@@ -749,6 +749,7 @@ static const struct usb_device_id products[] = {
4734 + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
4735 + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
4736 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
4737 ++ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
4738 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
4739 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
4740 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
4741 +@@ -767,8 +768,10 @@ static const struct usb_device_id products[] = {
4742 + {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
4743 + {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
4744 + {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
4745 +- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
4746 +- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
4747 ++ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
4748 ++ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
4749 ++ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
4750 ++ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
4751 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
4752 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
4753 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
4754 +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
4755 +index e0498571ae26..edbb2f389337 100644
4756 +--- a/drivers/net/usb/usbnet.c
4757 ++++ b/drivers/net/usb/usbnet.c
4758 +@@ -1754,6 +1754,13 @@ out3:
4759 + if (info->unbind)
4760 + info->unbind (dev, udev);
4761 + out1:
4762 ++ /* subdrivers must undo all they did in bind() if they
4763 ++ * fail it, but we may fail later and a deferred kevent
4764 ++ * may trigger an error resubmitting itself and, worse,
4765 ++ * schedule a timer. So we kill it all just in case.
4766 ++ */
4767 ++ cancel_work_sync(&dev->kevent);
4768 ++ del_timer_sync(&dev->delay);
4769 + free_netdev(net);
4770 + out:
4771 + return status;
4772 +diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
4773 +index 44541dbc5c28..69b994f3b8c5 100644
4774 +--- a/drivers/net/wan/farsync.c
4775 ++++ b/drivers/net/wan/farsync.c
4776 +@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4777 + dev->mem_start = card->phys_mem
4778 + + BUF_OFFSET ( txBuffer[i][0][0]);
4779 + dev->mem_end = card->phys_mem
4780 +- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
4781 ++ + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
4782 + dev->base_addr = card->pci_conf;
4783 + dev->irq = card->irq;
4784 +
4785 +diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
4786 +index cc81482c934d..113a43fca9cf 100644
4787 +--- a/drivers/net/wireless/ath/ath9k/eeprom.c
4788 ++++ b/drivers/net/wireless/ath/ath9k/eeprom.c
4789 +@@ -403,10 +403,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
4790 +
4791 + if (match) {
4792 + if (AR_SREV_9287(ah)) {
4793 +- /* FIXME: array overrun? */
4794 + for (i = 0; i < numXpdGains; i++) {
4795 + minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
4796 +- maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
4797 ++ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
4798 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
4799 + data_9287[idxL].pwrPdg[i],
4800 + data_9287[idxL].vpdPdg[i],
4801 +@@ -416,7 +415,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
4802 + } else if (eeprom_4k) {
4803 + for (i = 0; i < numXpdGains; i++) {
4804 + minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
4805 +- maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
4806 ++ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
4807 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
4808 + data_4k[idxL].pwrPdg[i],
4809 + data_4k[idxL].vpdPdg[i],
4810 +@@ -426,7 +425,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
4811 + } else {
4812 + for (i = 0; i < numXpdGains; i++) {
4813 + minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
4814 +- maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
4815 ++ maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
4816 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
4817 + data_def[idxL].pwrPdg[i],
4818 + data_def[idxL].vpdPdg[i],
4819 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4820 +index c44393f26fd3..4e720ed402ef 100644
4821 +--- a/drivers/pci/pci.c
4822 ++++ b/drivers/pci/pci.c
4823 +@@ -4520,8 +4520,10 @@ int pci_get_new_domain_nr(void)
4824 + void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4825 + {
4826 + static int use_dt_domains = -1;
4827 +- int domain = of_get_pci_domain_nr(parent->of_node);
4828 ++ int domain = -1;
4829 +
4830 ++ if (parent)
4831 ++ domain = of_get_pci_domain_nr(parent->of_node);
4832 + /*
4833 + * Check DT domain and use_dt_domains values.
4834 + *
4835 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
4836 +index e261f1cf85c6..09c05bffe026 100644
4837 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
4838 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
4839 +@@ -205,9 +205,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
4840 + pin_reg = &info->pin_regs[pin_id];
4841 +
4842 + if (pin_reg->mux_reg == -1) {
4843 +- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
4844 ++ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
4845 + info->pins[pin_id].name);
4846 +- return -EINVAL;
4847 ++ continue;
4848 + }
4849 +
4850 + if (info->flags & SHARE_MUX_CONF_REG) {
4851 +diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
4852 +index a6a22054c0ba..f4b1dac45aca 100644
4853 +--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
4854 ++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
4855 +@@ -1025,7 +1025,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
4856 + int pullidx = 0;
4857 +
4858 + if (pull)
4859 +- pullidx = data_out ? 1 : 2;
4860 ++ pullidx = data_out ? 2 : 1;
4861 +
4862 + seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
4863 + gpio,
4864 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
4865 +index 13b45f297727..f2e4232ea98d 100644
4866 +--- a/drivers/pinctrl/pinctrl-single.c
4867 ++++ b/drivers/pinctrl/pinctrl-single.c
4868 +@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
4869 +
4870 + /* Parse pins in each row from LSB */
4871 + while (mask) {
4872 +- bit_pos = ffs(mask);
4873 ++ bit_pos = __ffs(mask);
4874 + pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
4875 +- mask_pos = ((pcs->fmask) << (bit_pos - 1));
4876 ++ mask_pos = ((pcs->fmask) << bit_pos);
4877 + val_pos = val & mask_pos;
4878 + submask = mask & mask_pos;
4879 +
4880 +@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
4881 + else
4882 + mask &= ~soc_mask;
4883 + pcs->write(mask, pcswi->reg);
4884 ++
4885 ++ /* flush posted write */
4886 ++ mask = pcs->read(pcswi->reg);
4887 + raw_spin_unlock(&pcs->lock);
4888 + }
4889 +
4890 +@@ -1851,7 +1854,7 @@ static int pcs_probe(struct platform_device *pdev)
4891 + ret = of_property_read_u32(np, "pinctrl-single,function-mask",
4892 + &pcs->fmask);
4893 + if (!ret) {
4894 +- pcs->fshift = ffs(pcs->fmask) - 1;
4895 ++ pcs->fshift = __ffs(pcs->fmask);
4896 + pcs->fmax = pcs->fmask >> pcs->fshift;
4897 + } else {
4898 + /* If mask property doesn't exist, function mux is invalid. */
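Both pinctrl-single hunks above fix the same 1-based/0-based confusion: the kernel's ffs() returns the 1-based position of the lowest set bit (0 when none is set), while __ffs() returns the 0-based index. The old code shifted by ffs(mask) - 1 but then divided the 1-based position by bits_per_pin, mis-numbering pins. A userspace check using the GCC/Clang builtins (assumes a nonzero mask):

    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 0x8;  /* only bit 3 set; with 4 bits per
                                   * pin, that bit belongs to pin 0 */
        int bits_per_pin = 4;

        int pos1 = __builtin_ffs(mask);  /* like ffs():   1-based -> 4 */
        int pos0 = __builtin_ctz(mask);  /* like __ffs(): 0-based -> 3 */

        printf("ffs=%d __ffs=%d\n", pos1, pos0);
        printf("pin from ffs: %d, pin from __ffs: %d\n",
               pos1 / bits_per_pin,      /* 1: the wrong pin */
               pos0 / bits_per_pin);     /* 0: correct       */
        return 0;
    }

The read-back after pcs->write() in the middle hunk is the usual flush of a posted MMIO write: the read cannot complete until the preceding write has actually reached the device.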
4899 +diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
4900 +index 4bc0c7f459a5..b2bf48c7dc36 100644
4901 +--- a/drivers/power/power_supply_core.c
4902 ++++ b/drivers/power/power_supply_core.c
4903 +@@ -526,11 +526,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
4904 +
4905 + WARN_ON(tzd == NULL);
4906 + psy = tzd->devdata;
4907 +- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
4908 ++ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
4909 ++ if (ret)
4910 ++ return ret;
4911 +
4912 + /* Convert tenths of degree Celsius to milli degree Celsius. */
4913 +- if (!ret)
4914 +- *temp = val.intval * 100;
4915 ++ *temp = val.intval * 100;
4916 +
4917 + return ret;
4918 + }
4919 +@@ -573,10 +574,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
4920 + int ret;
4921 +
4922 + psy = tcd->devdata;
4923 +- ret = psy->desc->get_property(psy,
4924 +- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
4925 +- if (!ret)
4926 +- *state = val.intval;
4927 ++ ret = power_supply_get_property(psy,
4928 ++ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
4929 ++ if (ret)
4930 ++ return ret;
4931 ++
4932 ++ *state = val.intval;
4933 +
4934 + return ret;
4935 + }
4936 +@@ -589,10 +592,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
4937 + int ret;
4938 +
4939 + psy = tcd->devdata;
4940 +- ret = psy->desc->get_property(psy,
4941 +- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
4942 +- if (!ret)
4943 +- *state = val.intval;
4944 ++ ret = power_supply_get_property(psy,
4945 ++ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
4946 ++ if (ret)
4947 ++ return ret;
4948 ++
4949 ++ *state = val.intval;
4950 +
4951 + return ret;
4952 + }
4953 +diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
4954 +index 58f5d3b8e981..27343e1c43ef 100644
4955 +--- a/drivers/regulator/s5m8767.c
4956 ++++ b/drivers/regulator/s5m8767.c
4957 +@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
4958 + }
4959 + }
4960 +
4961 +- if (i < s5m8767->num_regulators)
4962 +- *enable_ctrl =
4963 +- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
4964 ++ if (i >= s5m8767->num_regulators)
4965 ++ return -EINVAL;
4966 ++
4967 ++ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
4968 +
4969 + return 0;
4970 + }
4971 +@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
4972 + else
4973 + regulators[id].vsel_mask = 0xff;
4974 +
4975 +- s5m8767_get_register(s5m8767, id, &enable_reg,
4976 ++ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
4977 + &enable_val);
4978 ++ if (ret) {
4979 ++ dev_err(s5m8767->dev, "error reading registers\n");
4980 ++ return ret;
4981 ++ }
4982 + regulators[id].enable_reg = enable_reg;
4983 + regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
4984 + regulators[id].enable_val = enable_val;
4985 +diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
4986 +index 818a3635a8c8..86865881ce4b 100644
4987 +--- a/drivers/rtc/rtc-ds1685.c
4988 ++++ b/drivers/rtc/rtc-ds1685.c
4989 +@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
4990 + * Only use this where you are certain another lock will not be held.
4991 + */
4992 + static inline void
4993 +-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
4994 ++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
4995 + {
4996 +- spin_lock_irqsave(&rtc->lock, flags);
4997 ++ spin_lock_irqsave(&rtc->lock, *flags);
4998 + ds1685_rtc_switch_to_bank1(rtc);
4999 + }
5000 +
5001 +@@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
5002 + {
5003 + struct ds1685_priv *rtc = dev_get_drvdata(dev);
5004 + u8 reg = 0, bit = 0, tmp;
5005 +- unsigned long flags = 0;
5006 ++ unsigned long flags;
5007 + long int val = 0;
5008 + const struct ds1685_rtc_ctrl_regs *reg_info =
5009 + ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
5010 +@@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
5011 + bit = reg_info->bit;
5012 +
5013 + /* Safe to spinlock during a write. */
5014 +- ds1685_rtc_begin_ctrl_access(rtc, flags);
5015 ++ ds1685_rtc_begin_ctrl_access(rtc, &flags);
5016 + tmp = rtc->read(rtc, reg);
5017 + rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
5018 + ds1685_rtc_end_ctrl_access(rtc, flags);
5019 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
5020 +index 0f710e98538f..3db1557e5394 100644
5021 +--- a/drivers/rtc/rtc-hym8563.c
5022 ++++ b/drivers/rtc/rtc-hym8563.c
5023 +@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
5024 + * it does not seem to carry it over a subsequent write/read.
5025 + * So we'll limit ourself to 100 years, starting at 2000 for now.
5026 + */
5027 +- buf[6] = tm->tm_year - 100;
5028 ++ buf[6] = bin2bcd(tm->tm_year - 100);
5029 +
5030 + /*
5031 + * CTL1 only contains TEST-mode bits apart from stop,
5032 +diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
5033 +index 7632a87784c3..d42cef0ca939 100644
5034 +--- a/drivers/rtc/rtc-max77686.c
5035 ++++ b/drivers/rtc/rtc-max77686.c
5036 +@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
5037 +
5038 + info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
5039 + MAX77686_RTCIRQ_RTCA1);
5040 +- if (!info->virq) {
5041 ++ if (info->virq <= 0) {
5042 + ret = -ENXIO;
5043 + goto err_rtc;
5044 + }
5045 +diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
5046 +index f64c282275b3..e1b86bb01062 100644
5047 +--- a/drivers/rtc/rtc-vr41xx.c
5048 ++++ b/drivers/rtc/rtc-vr41xx.c
5049 +@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
5050 + }
5051 +
5052 + static const struct rtc_class_ops vr41xx_rtc_ops = {
5053 +- .release = vr41xx_rtc_release,
5054 +- .ioctl = vr41xx_rtc_ioctl,
5055 +- .read_time = vr41xx_rtc_read_time,
5056 +- .set_time = vr41xx_rtc_set_time,
5057 +- .read_alarm = vr41xx_rtc_read_alarm,
5058 +- .set_alarm = vr41xx_rtc_set_alarm,
5059 ++ .release = vr41xx_rtc_release,
5060 ++ .ioctl = vr41xx_rtc_ioctl,
5061 ++ .read_time = vr41xx_rtc_read_time,
5062 ++ .set_time = vr41xx_rtc_set_time,
5063 ++ .read_alarm = vr41xx_rtc_read_alarm,
5064 ++ .set_alarm = vr41xx_rtc_set_alarm,
5065 ++ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
5066 + };
5067 +
5068 + static int rtc_probe(struct platform_device *pdev)
5069 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
5070 +index e8c8c1ecc1f5..bf8fd38abbbd 100644
5071 +--- a/drivers/scsi/lpfc/lpfc_init.c
5072 ++++ b/drivers/scsi/lpfc/lpfc_init.c
5073 +@@ -2848,7 +2848,7 @@ lpfc_online(struct lpfc_hba *phba)
5074 + }
5075 +
5076 + vports = lpfc_create_vport_work_array(phba);
5077 +- if (vports != NULL)
5078 ++ if (vports != NULL) {
5079 + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5080 + struct Scsi_Host *shost;
5081 + shost = lpfc_shost_from_vport(vports[i]);
5082 +@@ -2865,7 +2865,8 @@ lpfc_online(struct lpfc_hba *phba)
5083 + }
5084 + spin_unlock_irq(shost->host_lock);
5085 + }
5086 +- lpfc_destroy_vport_work_array(phba, vports);
5087 ++ }
5088 ++ lpfc_destroy_vport_work_array(phba, vports);
5089 +
5090 + lpfc_unblock_mgmt_io(phba);
5091 + return 0;
5092 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5093 +index 890637fdd61e..7a1c4b4e764b 100644
5094 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
5095 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5096 +@@ -6203,12 +6203,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5097 + }
5098 +
5099 + for (i = 0; i < ioc->sge_count; i++) {
5100 +- if (kbuff_arr[i])
5101 ++ if (kbuff_arr[i]) {
5102 + dma_free_coherent(&instance->pdev->dev,
5103 + le32_to_cpu(kern_sge32[i].length),
5104 + kbuff_arr[i],
5105 + le32_to_cpu(kern_sge32[i].phys_addr));
5106 + kbuff_arr[i] = NULL;
5107 ++ }
5108 + }
5109 +
5110 + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
5111 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
5112 +index c6b93d273799..841fdf745fcf 100644
5113 +--- a/drivers/scsi/scsi_error.c
5114 ++++ b/drivers/scsi/scsi_error.c
5115 +@@ -1122,7 +1122,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
5116 + */
5117 + void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
5118 + {
5119 +- scmd->device->host->host_failed--;
5120 + scmd->eh_eflags = 0;
5121 + list_move_tail(&scmd->eh_entry, done_q);
5122 + }
5123 +@@ -2216,6 +2215,9 @@ int scsi_error_handler(void *data)
5124 + else
5125 + scsi_unjam_host(shost);
5126 +
5127 ++ /* All scmds have been handled */
5128 ++ shost->host_failed = 0;
5129 ++
5130 + /*
5131 + * Note - if the above fails completely, the action is to take
5132 + * individual devices offline and flush the queue of any
5133 +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
5134 +index 68e7efeb9a27..1d308cba29b1 100644
5135 +--- a/drivers/spi/spi-rockchip.c
5136 ++++ b/drivers/spi/spi-rockchip.c
5137 +@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
5138 + static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
5139 + {
5140 + u32 ser;
5141 +- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
5142 ++ struct spi_master *master = spi->master;
5143 ++ struct rockchip_spi *rs = spi_master_get_devdata(master);
5144 ++
5145 ++ pm_runtime_get_sync(rs->dev);
5146 +
5147 + ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
5148 +
5149 +@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
5150 + ser &= ~(1 << spi->chip_select);
5151 +
5152 + writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
5153 ++
5154 ++ pm_runtime_put_sync(rs->dev);
5155 + }
5156 +
5157 + static int rockchip_spi_prepare_message(struct spi_master *master,
5158 +diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
5159 +index fbb0a4d74e91..39d7c7c70112 100644
5160 +--- a/drivers/spi/spi-sun4i.c
5161 ++++ b/drivers/spi/spi-sun4i.c
5162 +@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
5163 + {
5164 + struct sun4i_spi *sspi = spi_master_get_devdata(master);
5165 + unsigned int mclk_rate, div, timeout;
5166 ++ unsigned int start, end, tx_time;
5167 + unsigned int tx_len = 0;
5168 + int ret = 0;
5169 + u32 reg;
5170 +
5171 + /* We don't support transfer larger than the FIFO */
5172 + if (tfr->len > SUN4I_FIFO_DEPTH)
5173 +- return -EINVAL;
5174 ++ return -EMSGSIZE;
5175 ++
5176 ++ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
5177 ++ return -EMSGSIZE;
5178 +
5179 + reinit_completion(&sspi->done);
5180 + sspi->tx_buf = tfr->tx_buf;
5181 +@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
5182 + sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
5183 + sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
5184 +
5185 +- /* Fill the TX FIFO */
5186 +- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
5187 ++ /*
5188 ++ * Fill the TX FIFO
5189 ++ * Filling the FIFO fully causes a timeout for some reason,
5190 ++ * at least on spi2 on the A10s
5191 ++ */
5192 ++ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
5193 +
5194 + /* Enable the interrupts */
5195 + sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
5196 +@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
5197 + reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
5198 + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
5199 +
5200 ++ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
5201 ++ start = jiffies;
5202 + timeout = wait_for_completion_timeout(&sspi->done,
5203 +- msecs_to_jiffies(1000));
5204 ++ msecs_to_jiffies(tx_time));
5205 ++ end = jiffies;
5206 + if (!timeout) {
5207 ++ dev_warn(&master->dev,
5208 ++ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
5209 ++ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
5210 ++ jiffies_to_msecs(end - start), tx_time);
5211 + ret = -ETIMEDOUT;
5212 + goto out;
5213 + }
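The fixed 1000 ms wait above becomes a timeout derived from the transfer itself: len bytes times 8 bits, doubled for slack, divided by the clock rate in kHz yields milliseconds, floored at 100 ms; the sun6i hunk that follows applies the same formula. A standalone check of the arithmetic (assumes speed_hz >= 1000, since the expression divides by speed_hz / 1000):

    #include <stdio.h>

    /* tx_time = max(len * 8 * 2 / (speed_hz / 1000), 100) ms:
     * twice the ideal wire time for len bytes, floored at 100 ms. */
    static unsigned int tx_time_ms(unsigned int len, unsigned int speed_hz)
    {
        unsigned int t = len * 8 * 2 / (speed_hz / 1000);

        return t > 100 ? t : 100;
    }

    int main(void)
    {
        /* 64 bytes at 1 MHz: ~1 ms of wire time -> floored to 100 ms. */
        printf("%u ms\n", tx_time_ms(64, 1000000));

        /* 64 bytes at 5 kHz: 64*8*2/5 = 204 ms -> used as computed. */
        printf("%u ms\n", tx_time_ms(64, 5000));
        return 0;
    }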
5214 +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
5215 +index ac48f59705a8..e77add01b0e9 100644
5216 +--- a/drivers/spi/spi-sun6i.c
5217 ++++ b/drivers/spi/spi-sun6i.c
5218 +@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
5219 + {
5220 + struct sun6i_spi *sspi = spi_master_get_devdata(master);
5221 + unsigned int mclk_rate, div, timeout;
5222 ++ unsigned int start, end, tx_time;
5223 + unsigned int tx_len = 0;
5224 + int ret = 0;
5225 + u32 reg;
5226 +@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
5227 + reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
5228 + sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
5229 +
5230 ++ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
5231 ++ start = jiffies;
5232 + timeout = wait_for_completion_timeout(&sspi->done,
5233 +- msecs_to_jiffies(1000));
5234 ++ msecs_to_jiffies(tx_time));
5235 ++ end = jiffies;
5236 + if (!timeout) {
5237 ++ dev_warn(&master->dev,
5238 ++ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
5239 ++ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
5240 ++ jiffies_to_msecs(end - start), tx_time);
5241 + ret = -ETIMEDOUT;
5242 + goto out;
5243 + }
5244 +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
5245 +index b4fd8debf941..a64d53f7b1d1 100644
5246 +--- a/drivers/tty/serial/8250/8250_core.c
5247 ++++ b/drivers/tty/serial/8250/8250_core.c
5248 +@@ -791,22 +791,16 @@ static int size_fifo(struct uart_8250_port *up)
5249 + */
5250 + static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
5251 + {
5252 +- unsigned char old_dll, old_dlm, old_lcr;
5253 +- unsigned int id;
5254 ++ unsigned char old_lcr;
5255 ++ unsigned int id, old_dl;
5256 +
5257 + old_lcr = serial_in(p, UART_LCR);
5258 + serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
5259 ++ old_dl = serial_dl_read(p);
5260 ++ serial_dl_write(p, 0);
5261 ++ id = serial_dl_read(p);
5262 ++ serial_dl_write(p, old_dl);
5263 +
5264 +- old_dll = serial_in(p, UART_DLL);
5265 +- old_dlm = serial_in(p, UART_DLM);
5266 +-
5267 +- serial_out(p, UART_DLL, 0);
5268 +- serial_out(p, UART_DLM, 0);
5269 +-
5270 +- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
5271 +-
5272 +- serial_out(p, UART_DLL, old_dll);
5273 +- serial_out(p, UART_DLM, old_dlm);
5274 + serial_out(p, UART_LCR, old_lcr);
5275 +
5276 + return id;
5277 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
5278 +index 1e0d9b8c48c9..e42cb6bdd31d 100644
5279 +--- a/drivers/tty/serial/samsung.c
5280 ++++ b/drivers/tty/serial/samsung.c
5281 +@@ -1288,6 +1288,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
5282 + /* check to see if we need to change clock source */
5283 +
5284 + if (ourport->baudclk != clk) {
5285 ++ clk_prepare_enable(clk);
5286 ++
5287 + s3c24xx_serial_setsource(port, clk_sel);
5288 +
5289 + if (!IS_ERR(ourport->baudclk)) {
5290 +@@ -1295,8 +1297,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
5291 + ourport->baudclk = ERR_PTR(-EINVAL);
5292 + }
5293 +
5294 +- clk_prepare_enable(clk);
5295 +-
5296 + ourport->baudclk = clk;
5297 + ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
5298 + }
5299 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
5300 +index 4a24eb2b0ede..ba86956ef4b5 100644
5301 +--- a/drivers/tty/vt/vt.c
5302 ++++ b/drivers/tty/vt/vt.c
5303 +@@ -3587,9 +3587,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
5304 + goto err;
5305 +
5306 + desc = csw->con_startup();
5307 +-
5308 +- if (!desc)
5309 ++ if (!desc) {
5310 ++ retval = -ENODEV;
5311 + goto err;
5312 ++ }
5313 +
5314 + retval = -EINVAL;
5315 +
5316 +diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
5317 +index 61d538aa2346..4f4f06a5889f 100644
5318 +--- a/drivers/usb/common/usb-otg-fsm.c
5319 ++++ b/drivers/usb/common/usb-otg-fsm.c
5320 +@@ -21,6 +21,7 @@
5321 + * 675 Mass Ave, Cambridge, MA 02139, USA.
5322 + */
5323 +
5324 ++#include <linux/module.h>
5325 + #include <linux/kernel.h>
5326 + #include <linux/types.h>
5327 + #include <linux/mutex.h>
5328 +@@ -365,3 +366,4 @@ int otg_statemachine(struct otg_fsm *fsm)
5329 + return state_changed;
5330 + }
5331 + EXPORT_SYMBOL_GPL(otg_statemachine);
5332 ++MODULE_LICENSE("GPL");
5333 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
5334 +index e47cfcd5640c..3a49ba2910df 100644
5335 +--- a/drivers/usb/core/hcd.c
5336 ++++ b/drivers/usb/core/hcd.c
5337 +@@ -2522,26 +2522,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
5338 + * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
5339 + * deallocated.
5340 + *
5341 +- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
5342 +- * freed. When hcd_release() is called for either hcd in a peer set
5343 +- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
5344 +- * block new peering attempts
5345 ++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
5346 ++ * freed. When hcd_release() is called for either hcd in a peer set,
5347 ++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
5348 + */
5349 + static void hcd_release(struct kref *kref)
5350 + {
5351 + struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
5352 +
5353 + mutex_lock(&usb_port_peer_mutex);
5354 +- if (usb_hcd_is_primary_hcd(hcd)) {
5355 +- kfree(hcd->address0_mutex);
5356 +- kfree(hcd->bandwidth_mutex);
5357 +- }
5358 + if (hcd->shared_hcd) {
5359 + struct usb_hcd *peer = hcd->shared_hcd;
5360 +
5361 + peer->shared_hcd = NULL;
5362 +- if (peer->primary_hcd == hcd)
5363 +- peer->primary_hcd = NULL;
5364 ++ peer->primary_hcd = NULL;
5365 ++ } else {
5366 ++ kfree(hcd->address0_mutex);
5367 ++ kfree(hcd->bandwidth_mutex);
5368 + }
5369 + mutex_unlock(&usb_port_peer_mutex);
5370 + kfree(hcd);
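The hcd_release() rework above changes the ownership rule for the two mutexes shared by a primary/peer pair: instead of freeing them whenever the primary goes away (possibly under the feet of a still-live peer), whichever hcd is released last frees them; an earlier release only severs the cross-links. A compact sketch of that last-one-out rule with an illustrative struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct hc {
        struct hc *peer;   /* the other half of the pair, or NULL */
        int *shared;       /* allocation both halves point at     */
    };

    static void hc_release(struct hc *hc)
    {
        if (hc->peer) {
            /* A peer survives: unlink it; it will free 'shared'. */
            hc->peer->peer = NULL;
        } else {
            /* Last one out frees the shared allocation. */
            free(hc->shared);
        }
        free(hc);
    }

    int main(void)
    {
        int *shared = malloc(sizeof(*shared));
        struct hc *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

        a->peer = b;
        b->peer = a;
        a->shared = b->shared = shared;

        hc_release(a);   /* b survives; 'shared' stays allocated */
        hc_release(b);   /* last release frees 'shared'          */
        return 0;
    }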
5371 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5372 +index 017c1de53aa5..f28b5375e2c8 100644
5373 +--- a/drivers/usb/core/quirks.c
5374 ++++ b/drivers/usb/core/quirks.c
5375 +@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
5376 + /* Creative SB Audigy 2 NX */
5377 + { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
5378 +
5379 ++ /* USB3503 */
5380 ++ { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
5381 ++
5382 + /* Microsoft Wireless Laser Mouse 6000 Receiver */
5383 + { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
5384 +
5385 +@@ -170,6 +173,10 @@ static const struct usb_device_id usb_quirk_list[] = {
5386 + /* MAYA44USB sound device */
5387 + { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
5388 +
5389 ++ /* ASUS Base Station(T100) */
5390 ++ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
5391 ++ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
5392 ++
5393 + /* Action Semiconductor flash disk */
5394 + { USB_DEVICE(0x10d6, 0x2200), .driver_info =
5395 + USB_QUIRK_STRING_FETCH_255 },
5396 +@@ -185,26 +192,22 @@ static const struct usb_device_id usb_quirk_list[] = {
5397 + { USB_DEVICE(0x1908, 0x1315), .driver_info =
5398 + USB_QUIRK_HONOR_BNUMINTERFACES },
5399 +
5400 +- /* INTEL VALUE SSD */
5401 +- { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
5402 +-
5403 +- /* USB3503 */
5404 +- { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
5405 +-
5406 +- /* ASUS Base Station(T100) */
5407 +- { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
5408 +- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
5409 +-
5410 + /* Protocol and OTG Electrical Test Device */
5411 + { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
5412 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
5413 +
5414 ++ /* Acer C120 LED Projector */
5415 ++ { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
5416 ++
5417 + /* Blackmagic Design Intensity Shuttle */
5418 + { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
5419 +
5420 + /* Blackmagic Design UltraStudio SDI */
5421 + { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
5422 +
5423 ++ /* INTEL VALUE SSD */
5424 ++ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
5425 ++
5426 + { } /* terminating entry must be last */
5427 + };
5428 +
5429 +diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
5430 +index 7bd0a95b2815..0a465a90f0d6 100644
5431 +--- a/drivers/usb/dwc3/dwc3-exynos.c
5432 ++++ b/drivers/usb/dwc3/dwc3-exynos.c
5433 +@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
5434 +
5435 + platform_set_drvdata(pdev, exynos);
5436 +
5437 +- ret = dwc3_exynos_register_phys(exynos);
5438 +- if (ret) {
5439 +- dev_err(dev, "couldn't register PHYs\n");
5440 +- return ret;
5441 +- }
5442 +-
5443 + exynos->dev = dev;
5444 +
5445 + exynos->clk = devm_clk_get(dev, "usbdrd30");
5446 +@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
5447 + goto err3;
5448 + }
5449 +
5450 ++ ret = dwc3_exynos_register_phys(exynos);
5451 ++ if (ret) {
5452 ++ dev_err(dev, "couldn't register PHYs\n");
5453 ++ goto err4;
5454 ++ }
5455 ++
5456 + if (node) {
5457 + ret = of_platform_populate(node, NULL, NULL, dev);
5458 + if (ret) {
5459 + dev_err(dev, "failed to add dwc3 core\n");
5460 +- goto err4;
5461 ++ goto err5;
5462 + }
5463 + } else {
5464 + dev_err(dev, "no device node, failed to add dwc3 core\n");
5465 + ret = -ENODEV;
5466 +- goto err4;
5467 ++ goto err5;
5468 + }
5469 +
5470 + return 0;
5471 +
5472 ++err5:
5473 ++ platform_device_unregister(exynos->usb2_phy);
5474 ++ platform_device_unregister(exynos->usb3_phy);
5475 + err4:
5476 + regulator_disable(exynos->vdd10);
5477 + err3:
5478 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
5479 +index 82240dbdf6dd..db9433eed2cc 100644
5480 +--- a/drivers/usb/gadget/function/f_fs.c
5481 ++++ b/drivers/usb/gadget/function/f_fs.c
5482 +@@ -651,7 +651,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
5483 + if (io_data->read && ret > 0) {
5484 + use_mm(io_data->mm);
5485 + ret = copy_to_iter(io_data->buf, ret, &io_data->data);
5486 +- if (iov_iter_count(&io_data->data))
5487 ++ if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
5488 + ret = -EFAULT;
5489 + unuse_mm(io_data->mm);
5490 + }
5491 +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5492 +index 2030565c6789..bccc5788bb98 100644
5493 +--- a/drivers/usb/gadget/legacy/inode.c
5494 ++++ b/drivers/usb/gadget/legacy/inode.c
5495 +@@ -934,8 +934,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
5496 + struct usb_ep *ep = dev->gadget->ep0;
5497 + struct usb_request *req = dev->req;
5498 +
5499 +- if ((retval = setup_req (ep, req, 0)) == 0)
5500 +- retval = usb_ep_queue (ep, req, GFP_ATOMIC);
5501 ++ if ((retval = setup_req (ep, req, 0)) == 0) {
5502 ++ spin_unlock_irq (&dev->lock);
5503 ++ retval = usb_ep_queue (ep, req, GFP_KERNEL);
5504 ++ spin_lock_irq (&dev->lock);
5505 ++ }
5506 + dev->state = STATE_DEV_CONNECTED;
5507 +
5508 + /* assume that was SET_CONFIGURATION */
5509 +@@ -1453,8 +1456,11 @@ delegate:
5510 + w_length);
5511 + if (value < 0)
5512 + break;
5513 ++
5514 ++ spin_unlock (&dev->lock);
5515 + value = usb_ep_queue (gadget->ep0, dev->req,
5516 +- GFP_ATOMIC);
5517 ++ GFP_KERNEL);
5518 ++ spin_lock (&dev->lock);
5519 + if (value < 0) {
5520 + clean_req (gadget->ep0, dev->req);
5521 + break;
5522 +@@ -1477,11 +1483,14 @@ delegate:
5523 + if (value >= 0 && dev->state != STATE_DEV_SETUP) {
5524 + req->length = value;
5525 + req->zero = value < w_length;
5526 +- value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
5527 ++
5528 ++ spin_unlock (&dev->lock);
5529 ++ value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
5530 + if (value < 0) {
5531 + DBG (dev, "ep_queue --> %d\n", value);
5532 + req->status = 0;
5533 + }
5534 ++ return value;
5535 + }
5536 +
5537 + /* device stalls when value < 0 */
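Both inode.c hunks above follow from the switch to GFP_KERNEL: usb_ep_queue() may now sleep, so the spinlock is dropped across the call and retaken afterwards. A userspace sketch of the unlock/call/relock shape, using a pthread mutex in place of the spinlock; queue_request() stands in for any callee that may block:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

    static int queue_request(void)
    {
        usleep(1000);   /* stands in for a call that may sleep */
        return 0;
    }

    static int submit(void)
    {
        int value;

        pthread_mutex_lock(&dev_lock);
        /* ...prepare the request while protected... */

        pthread_mutex_unlock(&dev_lock);  /* never sleep holding it */
        value = queue_request();
        pthread_mutex_lock(&dev_lock);

        /* ...state may have changed meanwhile; re-check it here... */
        pthread_mutex_unlock(&dev_lock);
        return value;
    }

    int main(void)
    {
        printf("%d\n", submit());
        return 0;
    }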
5538 +diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
5539 +index ff9af29b4e9f..d888a00195ac 100644
5540 +--- a/drivers/usb/host/ehci-tegra.c
5541 ++++ b/drivers/usb/host/ehci-tegra.c
5542 +@@ -89,7 +89,7 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
5543 + if (!usb1_reset_attempted) {
5544 + struct reset_control *usb1_reset;
5545 +
5546 +- usb1_reset = of_reset_control_get(phy_np, "usb");
5547 ++ usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
5548 + if (IS_ERR(usb1_reset)) {
5549 + dev_warn(&pdev->dev,
5550 + "can't get utmi-pads reset from the PHY\n");
5551 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
5552 +index c6027acb6263..54caaf87c567 100644
5553 +--- a/drivers/usb/host/xhci-pci.c
5554 ++++ b/drivers/usb/host/xhci-pci.c
5555 +@@ -37,6 +37,7 @@
5556 + /* Device for a quirk */
5557 + #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
5558 + #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
5559 ++#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
5560 + #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
5561 +
5562 + #define PCI_VENDOR_ID_ETRON 0x1b6f
5563 +@@ -108,6 +109,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
5564 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
5565 + }
5566 +
5567 ++ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
5568 ++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
5569 ++ xhci->quirks |= XHCI_BROKEN_STREAMS;
5570 ++
5571 + if (pdev->vendor == PCI_VENDOR_ID_NEC)
5572 + xhci->quirks |= XHCI_NEC_HOST;
5573 +
5574 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
5575 +index 783e819139a7..7606710baf43 100644
5576 +--- a/drivers/usb/host/xhci-plat.c
5577 ++++ b/drivers/usb/host/xhci-plat.c
5578 +@@ -116,6 +116,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
5579 + ret = clk_prepare_enable(clk);
5580 + if (ret)
5581 + goto put_hcd;
5582 ++ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
5583 ++ ret = -EPROBE_DEFER;
5584 ++ goto put_hcd;
5585 + }
5586 +
5587 + if (of_device_is_compatible(pdev->dev.of_node,
5588 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
5589 +index 6fe0377ec5cf..6ef255142e01 100644
5590 +--- a/drivers/usb/host/xhci-ring.c
5591 ++++ b/drivers/usb/host/xhci-ring.c
5592 +@@ -289,6 +289,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
5593 +
5594 + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
5595 + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
5596 ++
5597 ++ /*
5598 ++ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
5599 ++ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
5600 ++ * but the completion event is never sent. Use the cmd timeout timer to
5601 ++ * handle those cases. Use twice the time to cover the bit polling retry
5602 ++ */
5603 ++ mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
5604 + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
5605 + &xhci->op_regs->cmd_ring);
5606 +
5607 +@@ -313,6 +321,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
5608 +
5609 + xhci_err(xhci, "Stopped the command ring failed, "
5610 + "maybe the host is dead\n");
5611 ++ del_timer(&xhci->cmd_timer);
5612 + xhci->xhc_state |= XHCI_STATE_DYING;
5613 + xhci_quiesce(xhci);
5614 + xhci_halt(xhci);
5615 +@@ -1252,22 +1261,21 @@ void xhci_handle_command_timeout(unsigned long data)
5616 + int ret;
5617 + unsigned long flags;
5618 + u64 hw_ring_state;
5619 +- struct xhci_command *cur_cmd = NULL;
5620 ++ bool second_timeout = false;
5621 + xhci = (struct xhci_hcd *) data;
5622 +
5623 + /* mark this command to be cancelled */
5624 + spin_lock_irqsave(&xhci->lock, flags);
5625 + if (xhci->current_cmd) {
5626 +- cur_cmd = xhci->current_cmd;
5627 +- cur_cmd->status = COMP_CMD_ABORT;
5628 ++ if (xhci->current_cmd->status == COMP_CMD_ABORT)
5629 ++ second_timeout = true;
5630 ++ xhci->current_cmd->status = COMP_CMD_ABORT;
5631 + }
5632 +
5633 +-
5634 + /* Make sure command ring is running before aborting it */
5635 + hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
5636 + if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
5637 + (hw_ring_state & CMD_RING_RUNNING)) {
5638 +-
5639 + spin_unlock_irqrestore(&xhci->lock, flags);
5640 + xhci_dbg(xhci, "Command timeout\n");
5641 + ret = xhci_abort_cmd_ring(xhci);
5642 +@@ -1279,6 +1287,15 @@ void xhci_handle_command_timeout(unsigned long data)
5643 + }
5644 + return;
5645 + }
5646 ++
5647 ++ /* command ring failed to restart, or host removed. Bail out */
5648 ++ if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
5649 ++ spin_unlock_irqrestore(&xhci->lock, flags);
5650 ++ xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
5651 ++ xhci_cleanup_command_queue(xhci);
5652 ++ return;
5653 ++ }
5654 ++
5655 + /* command timeout on stopped ring, ring can't be aborted */
5656 + xhci_dbg(xhci, "Command timeout on stopped ring\n");
5657 + xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
5658 +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
5659 +index c3d5fc9dfb5b..06853d7c89fd 100644
5660 +--- a/drivers/usb/musb/musb_host.c
5661 ++++ b/drivers/usb/musb/musb_host.c
5662 +@@ -583,14 +583,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
5663 + musb_writew(ep->regs, MUSB_TXCSR, 0);
5664 +
5665 + /* scrub all previous state, clearing toggle */
5666 +- } else {
5667 +- csr = musb_readw(ep->regs, MUSB_RXCSR);
5668 +- if (csr & MUSB_RXCSR_RXPKTRDY)
5669 +- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
5670 +- musb_readw(ep->regs, MUSB_RXCOUNT));
5671 +-
5672 +- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
5673 + }
5674 ++ csr = musb_readw(ep->regs, MUSB_RXCSR);
5675 ++ if (csr & MUSB_RXCSR_RXPKTRDY)
5676 ++ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
5677 ++ musb_readw(ep->regs, MUSB_RXCOUNT));
5678 ++
5679 ++ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
5680 +
5681 + /* target addr and (for multipoint) hub addr/port */
5682 + if (musb->is_multipoint) {
5683 +@@ -950,9 +949,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
5684 + if (is_in) {
5685 + dma = is_dma_capable() ? ep->rx_channel : NULL;
5686 +
5687 +- /* clear nak timeout bit */
5688 ++ /*
5689 ++ * Need to stop the transaction by clearing REQPKT first,
5690 ++ * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
5691 ++ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
5692 ++ */
5693 + rx_csr = musb_readw(epio, MUSB_RXCSR);
5694 + rx_csr |= MUSB_RXCSR_H_WZC_BITS;
5695 ++ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
5696 ++ musb_writew(epio, MUSB_RXCSR, rx_csr);
5697 + rx_csr &= ~MUSB_RXCSR_DATAERROR;
5698 + musb_writew(epio, MUSB_RXCSR, rx_csr);
5699 +
5700 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
5701 +index facaaf003f19..e40da7759a0e 100644
5702 +--- a/drivers/usb/usbip/usbip_common.c
5703 ++++ b/drivers/usb/usbip/usbip_common.c
5704 +@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
5705 + if (!(size > 0))
5706 + return 0;
5707 +
5708 ++ if (size > urb->transfer_buffer_length) {
5709 ++ /* should not happen, probably malicious packet */
5710 ++ if (ud->side == USBIP_STUB) {
5711 ++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
5712 ++ return 0;
5713 ++ } else {
5714 ++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
5715 ++ return -EPIPE;
5716 ++ }
5717 ++ }
5718 ++
5719 + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
5720 + if (ret != size) {
5721 + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
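The usbip hunk above adds the missing bounds check before copying network-supplied data: the size advertised in the packet header must not exceed the URB's transfer buffer, or a malicious or corrupt peer could overrun it. The shape of the check, with an illustrative error value:

    #include <stdio.h>
    #include <string.h>

    /* Copy src into a fixed buffer only if the advertised size fits. */
    static int recv_xbuff(char *buf, size_t buf_len,
                          const char *src, size_t size)
    {
        if (size > buf_len)
            return -32;        /* like -EPIPE: reject, don't overrun */
        memcpy(buf, src, size);
        return (int)size;
    }

    int main(void)
    {
        char urb_buf[16];
        const char payload[64] = "data";

        printf("%d\n", recv_xbuff(urb_buf, sizeof(urb_buf), payload, 8));
        printf("%d\n", recv_xbuff(urb_buf, sizeof(urb_buf), payload, 64));
        return 0;
    }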
5722 +diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
5723 +index d1e1e1704da1..44eb7c737ea2 100644
5724 +--- a/drivers/video/fbdev/Kconfig
5725 ++++ b/drivers/video/fbdev/Kconfig
5726 +@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
5727 + select FB_SYS_IMAGEBLIT
5728 + select FB_SYS_FOPS
5729 + select FB_DEFERRED_IO
5730 +- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
5731 + select XEN_XENBUS_FRONTEND
5732 + default y
5733 + help
5734 +diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
5735 +index 0081725c6b5b..d00510029c93 100644
5736 +--- a/drivers/video/fbdev/da8xx-fb.c
5737 ++++ b/drivers/video/fbdev/da8xx-fb.c
5738 +@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
5739 + .lower_margin = 2,
5740 + .hsync_len = 0,
5741 + .vsync_len = 0,
5742 +- .sync = FB_SYNC_CLK_INVERT |
5743 +- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
5744 ++ .sync = FB_SYNC_CLK_INVERT,
5745 + },
5746 + /* Sharp LK043T1DG01 */
5747 + [1] = {
5748 +@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
5749 + .lower_margin = 2,
5750 + .hsync_len = 41,
5751 + .vsync_len = 10,
5752 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
5753 ++ .sync = 0,
5754 + .flag = 0,
5755 + },
5756 + [2] = {
5757 +@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
5758 + .lower_margin = 10,
5759 + .hsync_len = 10,
5760 + .vsync_len = 10,
5761 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
5762 ++ .sync = 0,
5763 + .flag = 0,
5764 + },
5765 + [3] = {
5766 +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
5767 +index 9c234209d8b5..47a4177b16d2 100644
5768 +--- a/drivers/xen/xen-pciback/conf_space.c
5769 ++++ b/drivers/xen/xen-pciback/conf_space.c
5770 +@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
5771 + field_start = OFFSET(cfg_entry);
5772 + field_end = OFFSET(cfg_entry) + field->size;
5773 +
5774 +- if ((req_start >= field_start && req_start < field_end)
5775 +- || (req_end > field_start && req_end <= field_end)) {
5776 ++ if (req_end > field_start && field_end > req_start) {
5777 + err = conf_space_read(dev, cfg_entry, field_start,
5778 + &tmp_val);
5779 + if (err)
5780 +@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
5781 + field_start = OFFSET(cfg_entry);
5782 + field_end = OFFSET(cfg_entry) + field->size;
5783 +
5784 +- if ((req_start >= field_start && req_start < field_end)
5785 +- || (req_end > field_start && req_end <= field_end)) {
5786 ++ if (req_end > field_start && field_end > req_start) {
5787 + tmp_val = 0;
5788 +
5789 + err = xen_pcibk_config_read(dev, field_start,
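The rewritten condition in both xen-pciback hunks is the canonical overlap test for half-open intervals: [req_start, req_end) and [field_start, field_end) intersect exactly when each starts before the other ends. The old two-clause form missed a request that strictly contains a field. A quick demonstration:

    #include <stdio.h>

    /* Half-open intervals [s1, e1) and [s2, e2) overlap iff each
     * starts before the other ends. */
    static int overlaps(int s1, int e1, int s2, int e2)
    {
        return e1 > s2 && e2 > s1;
    }

    /* The pre-patch condition, for comparison. */
    static int old_test(int rs, int re, int fs, int fe)
    {
        return (rs >= fs && rs < fe) || (re > fs && re <= fe);
    }

    int main(void)
    {
        /* Request [0,8) fully contains field [2,4): the old test
         * reports no overlap, the interval test reports overlap. */
        printf("old=%d new=%d\n",
               old_test(0, 8, 2, 4), overlaps(0, 8, 2, 4));
        return 0;
    }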
5790 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
5791 +index 0f11ebc92f02..844c883a7169 100644
5792 +--- a/fs/btrfs/ctree.c
5793 ++++ b/fs/btrfs/ctree.c
5794 +@@ -1548,6 +1548,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
5795 + trans->transid, root->fs_info->generation);
5796 +
5797 + if (!should_cow_block(trans, root, buf)) {
5798 ++ trans->dirty = true;
5799 + *cow_ret = buf;
5800 + return 0;
5801 + }
5802 +@@ -2767,8 +2768,10 @@ again:
5803 + * then we don't want to set the path blocking,
5804 + * so we test it here
5805 + */
5806 +- if (!should_cow_block(trans, root, b))
5807 ++ if (!should_cow_block(trans, root, b)) {
5808 ++ trans->dirty = true;
5809 + goto cow_done;
5810 ++ }
5811 +
5812 + /*
5813 + * must have write locks on this node and the
5814 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
5815 +index d1ae1322648a..2771bc32dbd9 100644
5816 +--- a/fs/btrfs/extent-tree.c
5817 ++++ b/fs/btrfs/extent-tree.c
5818 +@@ -7504,7 +7504,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5819 + set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5820 + buf->start + buf->len - 1, GFP_NOFS);
5821 + }
5822 +- trans->blocks_used++;
5823 ++ trans->dirty = true;
5824 + /* this returns a buffer locked for blocking */
5825 + return buf;
5826 + }
5827 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
5828 +index 43502247e176..2eca30adb3e3 100644
5829 +--- a/fs/btrfs/ioctl.c
5830 ++++ b/fs/btrfs/ioctl.c
5831 +@@ -1639,7 +1639,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
5832 +
5833 + src_inode = file_inode(src.file);
5834 + if (src_inode->i_sb != file_inode(file)->i_sb) {
5835 +- btrfs_info(BTRFS_I(src_inode)->root->fs_info,
5836 ++ btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
5837 + "Snapshot src from another FS");
5838 + ret = -EXDEV;
5839 + } else if (!inode_owner_or_capable(src_inode)) {
5840 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
5841 +index 70734d89193a..a40b454aea44 100644
5842 +--- a/fs/btrfs/super.c
5843 ++++ b/fs/btrfs/super.c
5844 +@@ -262,7 +262,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
5845 + trans->aborted = errno;
5846 + /* Nothing used. The other threads that have joined this
5847 + * transaction may be able to continue. */
5848 +- if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
5849 ++ if (!trans->dirty && list_empty(&trans->new_bgs)) {
5850 + const char *errstr;
5851 +
5852 + errstr = btrfs_decode_error(errno);
5853 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
5854 +index 00d18c2bdb0f..6d43b2ab183b 100644
5855 +--- a/fs/btrfs/transaction.c
5856 ++++ b/fs/btrfs/transaction.c
5857 +@@ -507,7 +507,6 @@ again:
5858 +
5859 + h->transid = cur_trans->transid;
5860 + h->transaction = cur_trans;
5861 +- h->blocks_used = 0;
5862 + h->bytes_reserved = 0;
5863 + h->root = root;
5864 + h->delayed_ref_updates = 0;
5865 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
5866 +index 0b24755596ba..4ce102be6d6b 100644
5867 +--- a/fs/btrfs/transaction.h
5868 ++++ b/fs/btrfs/transaction.h
5869 +@@ -105,7 +105,6 @@ struct btrfs_trans_handle {
5870 + u64 qgroup_reserved;
5871 + unsigned long use_count;
5872 + unsigned long blocks_reserved;
5873 +- unsigned long blocks_used;
5874 + unsigned long delayed_ref_updates;
5875 + struct btrfs_transaction *transaction;
5876 + struct btrfs_block_rsv *block_rsv;
5877 +@@ -115,6 +114,7 @@ struct btrfs_trans_handle {
5878 + bool allocating_chunk;
5879 + bool reloc_reserved;
5880 + bool sync;
5881 ++ bool dirty;
5882 + unsigned int type;
5883 + /*
5884 + * this root is only needed to validate that the root passed to
5885 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
5886 +index 5a53ac6b1e02..02b071bf3732 100644
5887 +--- a/fs/cifs/cifs_unicode.c
5888 ++++ b/fs/cifs/cifs_unicode.c
5889 +@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
5890 + case SFM_SLASH:
5891 + *target = '\\';
5892 + break;
5893 ++ case SFM_SPACE:
5894 ++ *target = ' ';
5895 ++ break;
5896 ++ case SFM_PERIOD:
5897 ++ *target = '.';
5898 ++ break;
5899 + default:
5900 + return false;
5901 + }
5902 +@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
5903 + return dest_char;
5904 + }
5905 +
5906 +-static __le16 convert_to_sfm_char(char src_char)
5907 ++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
5908 + {
5909 + __le16 dest_char;
5910 +
5911 +@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
5912 + case '|':
5913 + dest_char = cpu_to_le16(SFM_PIPE);
5914 + break;
5915 ++ case '.':
5916 ++ if (end_of_string)
5917 ++ dest_char = cpu_to_le16(SFM_PERIOD);
5918 ++ else
5919 ++ dest_char = 0;
5920 ++ break;
5921 ++ case ' ':
5922 ++ if (end_of_string)
5923 ++ dest_char = cpu_to_le16(SFM_SPACE);
5924 ++ else
5925 ++ dest_char = 0;
5926 ++ break;
5927 + default:
5928 + dest_char = 0;
5929 + }
5930 +@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
5931 + /* see if we must remap this char */
5932 + if (map_chars == SFU_MAP_UNI_RSVD)
5933 + dst_char = convert_to_sfu_char(src_char);
5934 +- else if (map_chars == SFM_MAP_UNI_RSVD)
5935 +- dst_char = convert_to_sfm_char(src_char);
5936 +- else
5937 ++ else if (map_chars == SFM_MAP_UNI_RSVD) {
5938 ++ bool end_of_string;
5939 ++
5940 ++ if (i == srclen - 1)
5941 ++ end_of_string = true;
5942 ++ else
5943 ++ end_of_string = false;
5944 ++
5945 ++ dst_char = convert_to_sfm_char(src_char, end_of_string);
5946 ++ } else
5947 + dst_char = 0;
5948 + /*
5949 + * FIXME: We can not handle remapping backslash (UNI_SLASH)
5950 +diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
5951 +index bdc52cb9a676..479bc0a941f3 100644
5952 +--- a/fs/cifs/cifs_unicode.h
5953 ++++ b/fs/cifs/cifs_unicode.h
5954 +@@ -64,6 +64,8 @@
5955 + #define SFM_LESSTHAN ((__u16) 0xF023)
5956 + #define SFM_PIPE ((__u16) 0xF027)
5957 + #define SFM_SLASH ((__u16) 0xF026)
5958 ++#define SFM_PERIOD ((__u16) 0xF028)
5959 ++#define SFM_SPACE ((__u16) 0xF029)
5960 +
5961 + /*
5962 + * Mapping mechanism to use when one of the seven reserved characters is
5963 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5964 +index de626b939811..17998d19b166 100644
5965 +--- a/fs/cifs/connect.c
5966 ++++ b/fs/cifs/connect.c
5967 +@@ -414,7 +414,9 @@ cifs_echo_request(struct work_struct *work)
5968 + * server->ops->need_neg() == true. Also, no need to ping if
5969 + * we got a response recently.
5970 + */
5971 +- if (!server->ops->need_neg || server->ops->need_neg(server) ||
5972 ++
5973 ++ if (server->tcpStatus == CifsNeedReconnect ||
5974 ++ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
5975 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
5976 + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
5977 + goto requeue_echo;
5978 +diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
5979 +index 848249fa120f..3079b38f0afb 100644
5980 +--- a/fs/cifs/ntlmssp.h
5981 ++++ b/fs/cifs/ntlmssp.h
5982 +@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
5983 +
5984 + int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
5985 + void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
5986 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
5987 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
5988 + struct cifs_ses *ses,
5989 + const struct nls_table *nls_cp);
5990 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
5991 +index 8ffda5084dbf..5f9229ddf335 100644
5992 +--- a/fs/cifs/sess.c
5993 ++++ b/fs/cifs/sess.c
5994 +@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
5995 + sec_blob->DomainName.MaximumLength = 0;
5996 + }
5997 +
5998 +-/* We do not malloc the blob, it is passed in pbuffer, because its
5999 +- maximum possible size is fixed and small, making this approach cleaner.
6000 +- This function returns the length of the data in the blob */
6001 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6002 ++static int size_of_ntlmssp_blob(struct cifs_ses *ses)
6003 ++{
6004 ++ int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
6005 ++ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
6006 ++
6007 ++ if (ses->domainName)
6008 ++ sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
6009 ++ else
6010 ++ sz += 2;
6011 ++
6012 ++ if (ses->user_name)
6013 ++ sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
6014 ++ else
6015 ++ sz += 2;
6016 ++
6017 ++ return sz;
6018 ++}
6019 ++
6020 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
6021 + u16 *buflen,
6022 + struct cifs_ses *ses,
6023 + const struct nls_table *nls_cp)
6024 + {
6025 + int rc;
6026 +- AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
6027 ++ AUTHENTICATE_MESSAGE *sec_blob;
6028 + __u32 flags;
6029 + unsigned char *tmp;
6030 +
6031 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
6032 ++ if (rc) {
6033 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
6034 ++ *buflen = 0;
6035 ++ goto setup_ntlmv2_ret;
6036 ++ }
6037 ++ *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
6038 ++ sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
6039 ++
6040 + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
6041 + sec_blob->MessageType = NtLmAuthenticate;
6042 +
6043 +@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6044 + flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
6045 + }
6046 +
6047 +- tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
6048 ++ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
6049 + sec_blob->NegotiateFlags = cpu_to_le32(flags);
6050 +
6051 + sec_blob->LmChallengeResponse.BufferOffset =
6052 +@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6053 + sec_blob->LmChallengeResponse.Length = 0;
6054 + sec_blob->LmChallengeResponse.MaximumLength = 0;
6055 +
6056 +- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
6057 ++ sec_blob->NtChallengeResponse.BufferOffset =
6058 ++ cpu_to_le32(tmp - *pbuffer);
6059 + if (ses->user_name != NULL) {
6060 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
6061 +- if (rc) {
6062 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
6063 +- goto setup_ntlmv2_ret;
6064 +- }
6065 + memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
6066 + ses->auth_key.len - CIFS_SESS_KEY_SIZE);
6067 + tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
6068 +@@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6069 + }
6070 +
6071 + if (ses->domainName == NULL) {
6072 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
6073 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6074 + sec_blob->DomainName.Length = 0;
6075 + sec_blob->DomainName.MaximumLength = 0;
6076 + tmp += 2;
6077 +@@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6078 + len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
6079 + CIFS_MAX_USERNAME_LEN, nls_cp);
6080 + len *= 2; /* unicode is 2 bytes each */
6081 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
6082 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6083 + sec_blob->DomainName.Length = cpu_to_le16(len);
6084 + sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
6085 + tmp += len;
6086 + }
6087 +
6088 + if (ses->user_name == NULL) {
6089 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
6090 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6091 + sec_blob->UserName.Length = 0;
6092 + sec_blob->UserName.MaximumLength = 0;
6093 + tmp += 2;
6094 +@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6095 + len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
6096 + CIFS_MAX_USERNAME_LEN, nls_cp);
6097 + len *= 2; /* unicode is 2 bytes each */
6098 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
6099 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6100 + sec_blob->UserName.Length = cpu_to_le16(len);
6101 + sec_blob->UserName.MaximumLength = cpu_to_le16(len);
6102 + tmp += len;
6103 + }
6104 +
6105 +- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
6106 ++ sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6107 + sec_blob->WorkstationName.Length = 0;
6108 + sec_blob->WorkstationName.MaximumLength = 0;
6109 + tmp += 2;
6110 +@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
6111 + (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
6112 + && !calc_seckey(ses)) {
6113 + memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
6114 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
6115 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6116 + sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
6117 + sec_blob->SessionKey.MaximumLength =
6118 + cpu_to_le16(CIFS_CPHTXT_SIZE);
6119 + tmp += CIFS_CPHTXT_SIZE;
6120 + } else {
6121 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
6122 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
6123 + sec_blob->SessionKey.Length = 0;
6124 + sec_blob->SessionKey.MaximumLength = 0;
6125 + }
6126 +
6127 ++ *buflen = tmp - *pbuffer;
6128 + setup_ntlmv2_ret:
6129 +- *buflen = tmp - pbuffer;
6130 + return rc;
6131 + }
6132 +
6133 +@@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
6134 + struct cifs_ses *ses = sess_data->ses;
6135 + __u16 bytes_remaining;
6136 + char *bcc_ptr;
6137 +- char *ntlmsspblob = NULL;
6138 ++ unsigned char *ntlmsspblob = NULL;
6139 + u16 blob_len;
6140 +
6141 + cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
6142 +@@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
6143 + /* Build security blob before we assemble the request */
6144 + pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
6145 + smb_buf = (struct smb_hdr *)pSMB;
6146 +- /*
6147 +- * 5 is an empirical value, large enough to hold
6148 +- * authenticate message plus max 10 of av paris,
6149 +- * domain, user, workstation names, flags, etc.
6150 +- */
6151 +- ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
6152 +- GFP_KERNEL);
6153 +- if (!ntlmsspblob) {
6154 +- rc = -ENOMEM;
6155 +- goto out;
6156 +- }
6157 +-
6158 +- rc = build_ntlmssp_auth_blob(ntlmsspblob,
6159 ++ rc = build_ntlmssp_auth_blob(&ntlmsspblob,
6160 + &blob_len, ses, sess_data->nls_cp);
6161 + if (rc)
6162 + goto out_free_ntlmsspblob;
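
After this change the blob is sized with size_of_ntlmssp_blob() and allocated inside build_ntlmssp_auth_blob() itself, replacing the old fixed guesses (5 * sizeof(struct _AUTHENTICATE_MESSAGE) here, sizeof(struct _NEGOTIATE_MESSAGE) + 500 in the SMB2 hunk below), so every caller now passes a pointer-to-pointer and owns the result. A sketch of the resulting caller pattern (labels hypothetical); kfree(NULL) is a no-op, so cleanup stays simple even when setup_ntlmv2_rsp() fails before the allocation:

  unsigned char *ntlmsspblob = NULL;
  u16 blob_len = 0;
  int rc;

  rc = build_ntlmssp_auth_blob(&ntlmsspblob, &blob_len, ses, nls_cp);
  if (rc)
  	goto out_free_ntlmsspblob;
  /* ... send blob_len bytes of ntlmsspblob ... */
  out_free_ntlmsspblob:
  	kfree(ntlmsspblob);

Note that the kmalloc() return value is left unchecked in the hunk above.
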
6163 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
6164 +index 14e845e8996f..8f527c867f78 100644
6165 +--- a/fs/cifs/smb2pdu.c
6166 ++++ b/fs/cifs/smb2pdu.c
6167 +@@ -532,7 +532,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
6168 + u16 blob_length = 0;
6169 + struct key *spnego_key = NULL;
6170 + char *security_blob = NULL;
6171 +- char *ntlmssp_blob = NULL;
6172 ++ unsigned char *ntlmssp_blob = NULL;
6173 + bool use_spnego = false; /* else use raw ntlmssp */
6174 +
6175 + cifs_dbg(FYI, "Session Setup\n");
6176 +@@ -657,13 +657,7 @@ ssetup_ntlmssp_authenticate:
6177 + iov[1].iov_len = blob_length;
6178 + } else if (phase == NtLmAuthenticate) {
6179 + req->hdr.SessionId = ses->Suid;
6180 +- ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
6181 +- GFP_KERNEL);
6182 +- if (ntlmssp_blob == NULL) {
6183 +- rc = -ENOMEM;
6184 +- goto ssetup_exit;
6185 +- }
6186 +- rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
6187 ++ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
6188 + nls_cp);
6189 + if (rc) {
6190 + cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
6191 +@@ -1632,6 +1626,33 @@ SMB2_echo(struct TCP_Server_Info *server)
6192 +
6193 + cifs_dbg(FYI, "In echo request\n");
6194 +
6195 ++ if (server->tcpStatus == CifsNeedNegotiate) {
6196 ++ struct list_head *tmp, *tmp2;
6197 ++ struct cifs_ses *ses;
6198 ++ struct cifs_tcon *tcon;
6199 ++
6200 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
6201 ++ spin_lock(&cifs_tcp_ses_lock);
6202 ++ list_for_each(tmp, &server->smb_ses_list) {
6203 ++ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
6204 ++ list_for_each(tmp2, &ses->tcon_list) {
6205 ++ tcon = list_entry(tmp2, struct cifs_tcon,
6206 ++ tcon_list);
6207 ++ /* add check for persistent handle reconnect */
6208 ++ if (tcon && tcon->need_reconnect) {
6209 ++ spin_unlock(&cifs_tcp_ses_lock);
6210 ++ rc = smb2_reconnect(SMB2_ECHO, tcon);
6211 ++ spin_lock(&cifs_tcp_ses_lock);
6212 ++ }
6213 ++ }
6214 ++ }
6215 ++ spin_unlock(&cifs_tcp_ses_lock);
6216 ++ }
6217 ++
6218 ++ /* if no session, renegotiate failed above */
6219 ++ if (server->tcpStatus == CifsNeedNegotiate)
6220 ++ return -EIO;
6221 ++
6222 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
6223 + if (rc)
6224 + return rc;
6225 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
6226 +index f43996884242..ba12e2953aec 100644
6227 +--- a/fs/ext4/inode.c
6228 ++++ b/fs/ext4/inode.c
6229 +@@ -5094,6 +5094,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
6230 + might_sleep();
6231 + trace_ext4_mark_inode_dirty(inode, _RET_IP_);
6232 + err = ext4_reserve_inode_write(handle, inode, &iloc);
6233 ++ if (err)
6234 ++ return err;
6235 + if (ext4_handle_valid(handle) &&
6236 + EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
6237 + !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
6238 +@@ -5124,9 +5126,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
6239 + }
6240 + }
6241 + }
6242 +- if (!err)
6243 +- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
6244 +- return err;
6245 ++ return ext4_mark_iloc_dirty(handle, inode, &iloc);
6246 + }
6247 +
6248 + /*
6249 +diff --git a/fs/locks.c b/fs/locks.c
6250 +index 8501eecb2af0..3c234b9fbdd9 100644
6251 +--- a/fs/locks.c
6252 ++++ b/fs/locks.c
6253 +@@ -1596,7 +1596,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
6254 + {
6255 + struct file_lock *fl, *my_fl = NULL, *lease;
6256 + struct dentry *dentry = filp->f_path.dentry;
6257 +- struct inode *inode = dentry->d_inode;
6258 ++ struct inode *inode = file_inode(filp);
6259 + struct file_lock_context *ctx;
6260 + bool is_deleg = (*flp)->fl_flags & FL_DELEG;
6261 + int error;
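
The one-line locks.c change swaps a dentry-based inode lookup for file_inode(). The two are not always equivalent: on stacked filesystems such as overlayfs, filp->f_path.dentry can belong to a different layer than the inode the file was actually opened against, and file_inode() always names the latter. In short:

  /* Preferred: the inode the file was opened against. */
  struct inode *inode = file_inode(filp);

  /* Fragile on stacked filesystems: */
  struct inode *inode_via_dentry = filp->f_path.dentry->d_inode;
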
6262 +diff --git a/fs/namespace.c b/fs/namespace.c
6263 +index 6257268147ee..556721fb0cf6 100644
6264 +--- a/fs/namespace.c
6265 ++++ b/fs/namespace.c
6266 +@@ -1551,6 +1551,7 @@ void __detach_mounts(struct dentry *dentry)
6267 + goto out_unlock;
6268 +
6269 + lock_mount_hash();
6270 ++ event++;
6271 + while (!hlist_empty(&mp->m_list)) {
6272 + mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
6273 + if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
6274 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
6275 +index b2c8b31b2be7..aadb4af4a0fe 100644
6276 +--- a/fs/nfs/dir.c
6277 ++++ b/fs/nfs/dir.c
6278 +@@ -1542,9 +1542,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
6279 + err = PTR_ERR(inode);
6280 + trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
6281 + put_nfs_open_context(ctx);
6282 ++ d_drop(dentry);
6283 + switch (err) {
6284 + case -ENOENT:
6285 +- d_drop(dentry);
6286 + d_add(dentry, NULL);
6287 + nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
6288 + break;
6289 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6290 +index 84706204cc33..eef16ec0638a 100644
6291 +--- a/fs/nfs/nfs4proc.c
6292 ++++ b/fs/nfs/nfs4proc.c
6293 +@@ -2715,12 +2715,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
6294 + call_close |= is_wronly;
6295 + else if (is_wronly)
6296 + calldata->arg.fmode |= FMODE_WRITE;
6297 ++ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
6298 ++ call_close |= is_rdwr;
6299 + } else if (is_rdwr)
6300 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
6301 +
6302 +- if (calldata->arg.fmode == 0)
6303 +- call_close |= is_rdwr;
6304 +-
6305 + if (!nfs4_valid_open_stateid(state))
6306 + call_close = 0;
6307 + spin_unlock(&state->owner->so_lock);
6308 +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
6309 +index 1705c78ee2d8..19c1bcf70e3e 100644
6310 +--- a/fs/nfs/pnfs_nfs.c
6311 ++++ b/fs/nfs/pnfs_nfs.c
6312 +@@ -124,11 +124,12 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
6313 + if (ret) {
6314 + cinfo->ds->nwritten -= ret;
6315 + cinfo->ds->ncommitting += ret;
6316 +- bucket->clseg = bucket->wlseg;
6317 +- if (list_empty(src))
6318 ++ if (bucket->clseg == NULL)
6319 ++ bucket->clseg = pnfs_get_lseg(bucket->wlseg);
6320 ++ if (list_empty(src)) {
6321 ++ pnfs_put_lseg_locked(bucket->wlseg);
6322 + bucket->wlseg = NULL;
6323 +- else
6324 +- pnfs_get_lseg(bucket->clseg);
6325 ++ }
6326 + }
6327 + return ret;
6328 + }
6329 +@@ -182,19 +183,23 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
6330 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
6331 + struct pnfs_commit_bucket *bucket;
6332 + struct pnfs_layout_segment *freeme;
6333 ++ LIST_HEAD(pages);
6334 + int i;
6335 +
6336 ++ spin_lock(cinfo->lock);
6337 + for (i = idx; i < fl_cinfo->nbuckets; i++) {
6338 + bucket = &fl_cinfo->buckets[i];
6339 + if (list_empty(&bucket->committing))
6340 + continue;
6341 +- nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
6342 +- spin_lock(cinfo->lock);
6343 + freeme = bucket->clseg;
6344 + bucket->clseg = NULL;
6345 ++ list_splice_init(&bucket->committing, &pages);
6346 + spin_unlock(cinfo->lock);
6347 ++ nfs_retry_commit(&pages, freeme, cinfo, i);
6348 + pnfs_put_lseg(freeme);
6349 ++ spin_lock(cinfo->lock);
6350 + }
6351 ++ spin_unlock(cinfo->lock);
6352 + }
6353 +
6354 + static unsigned int
6355 +@@ -216,10 +221,6 @@ pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
6356 + if (!data)
6357 + break;
6358 + data->ds_commit_index = i;
6359 +- spin_lock(cinfo->lock);
6360 +- data->lseg = bucket->clseg;
6361 +- bucket->clseg = NULL;
6362 +- spin_unlock(cinfo->lock);
6363 + list_add(&data->pages, list);
6364 + nreq++;
6365 + }
6366 +@@ -229,6 +230,47 @@ pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
6367 + return nreq;
6368 + }
6369 +
6370 ++static inline
6371 ++void pnfs_fetch_commit_bucket_list(struct list_head *pages,
6372 ++ struct nfs_commit_data *data,
6373 ++ struct nfs_commit_info *cinfo)
6374 ++{
6375 ++ struct pnfs_commit_bucket *bucket;
6376 ++
6377 ++ bucket = &cinfo->ds->buckets[data->ds_commit_index];
6378 ++ spin_lock(cinfo->lock);
6379 ++ list_splice_init(pages, &bucket->committing);
6380 ++ data->lseg = bucket->clseg;
6381 ++ bucket->clseg = NULL;
6382 ++ spin_unlock(cinfo->lock);
6383 ++
6384 ++}
6385 ++
6386 ++/* Helper function for pnfs_generic_commit_pagelist to catch an empty
6387 ++ * page list. This can happen when two commits race.
6388 ++ *
6389 ++ * This must be called instead of nfs_init_commit - call one or the other, but
6390 ++ * not both!
6391 ++ */
6392 ++static bool
6393 ++pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
6394 ++ struct nfs_commit_data *data,
6395 ++ struct nfs_commit_info *cinfo)
6396 ++{
6397 ++ if (list_empty(pages)) {
6398 ++ if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
6399 ++ wake_up_atomic_t(&cinfo->mds->rpcs_out);
6400 ++ /* don't call nfs_commitdata_release - it tries to put
6401 ++ * the open_context which is not acquired until nfs_init_commit
6402 ++ * which has not been called on @data */
6403 ++ WARN_ON_ONCE(data->context);
6404 ++ nfs_commit_free(data);
6405 ++ return true;
6406 ++ }
6407 ++
6408 ++ return false;
6409 ++}
6410 ++
6411 + /* This follows nfs_commit_list pretty closely */
6412 + int
6413 + pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
6414 +@@ -243,7 +285,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
6415 + if (!list_empty(mds_pages)) {
6416 + data = nfs_commitdata_alloc();
6417 + if (data != NULL) {
6418 +- data->lseg = NULL;
6419 ++ data->ds_commit_index = -1;
6420 + list_add(&data->pages, &list);
6421 + nreq++;
6422 + } else {
6423 +@@ -265,19 +307,27 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
6424 +
6425 + list_for_each_entry_safe(data, tmp, &list, pages) {
6426 + list_del_init(&data->pages);
6427 +- if (!data->lseg) {
6428 ++ if (data->ds_commit_index < 0) {
6429 ++ /* another commit raced with us */
6430 ++ if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
6431 ++ data, cinfo))
6432 ++ continue;
6433 ++
6434 + nfs_init_commit(data, mds_pages, NULL, cinfo);
6435 + nfs_initiate_commit(NFS_CLIENT(inode), data,
6436 + NFS_PROTO(data->inode),
6437 + data->mds_ops, how, 0);
6438 + } else {
6439 +- struct pnfs_commit_bucket *buckets;
6440 ++ LIST_HEAD(pages);
6441 ++
6442 ++ pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
6443 ++
6444 ++ /* another commit raced with us */
6445 ++ if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
6446 ++ data, cinfo))
6447 ++ continue;
6448 +
6449 +- buckets = cinfo->ds->buckets;
6450 +- nfs_init_commit(data,
6451 +- &buckets[data->ds_commit_index].committing,
6452 +- data->lseg,
6453 +- cinfo);
6454 ++ nfs_init_commit(data, &pages, data->lseg, cinfo);
6455 + initiate_commit(data, how);
6456 + }
6457 + }
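
The invariant the new helper enforces is worth spelling out: for each nfs_commit_data, exactly one of pnfs_generic_commit_cancel_empty_pagelist() or nfs_init_commit() may run. nfs_init_commit() is what takes the open_context reference that nfs_commitdata_release() would later drop, so releasing an uninitialized data would touch a reference that was never taken; hence the helper frees with nfs_commit_free() instead. The fixed loop body follows this shape (condensed from the hunk above):

  LIST_HEAD(pages);

  pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
  /* Another commit may have drained the bucket first. */
  if (pnfs_generic_commit_cancel_empty_pagelist(&pages, data, cinfo))
  	continue;
  nfs_init_commit(data, &pages, data->lseg, cinfo);
  initiate_commit(data, how);
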
6458 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
6459 +index d9851a6a2813..f98cd9adbc0d 100644
6460 +--- a/fs/nfs/write.c
6461 ++++ b/fs/nfs/write.c
6462 +@@ -1653,6 +1653,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
6463 + {
6464 + struct nfs_commit_data *data;
6465 +
6466 ++ /* another commit raced with us */
6467 ++ if (list_empty(head))
6468 ++ return 0;
6469 ++
6470 + data = nfs_commitdata_alloc();
6471 +
6472 + if (!data)
6473 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
6474 +index 5694cfb7a47b..29c4bff1e6e1 100644
6475 +--- a/fs/nfsd/nfs4callback.c
6476 ++++ b/fs/nfsd/nfs4callback.c
6477 +@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
6478 + }
6479 + }
6480 +
6481 +-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
6482 +-{
6483 +- struct rpc_xprt *xprt;
6484 +-
6485 +- if (args->protocol != XPRT_TRANSPORT_BC_TCP)
6486 +- return rpc_create(args);
6487 +-
6488 +- xprt = args->bc_xprt->xpt_bc_xprt;
6489 +- if (xprt) {
6490 +- xprt_get(xprt);
6491 +- return rpc_create_xprt(args, xprt);
6492 +- }
6493 +-
6494 +- return rpc_create(args);
6495 +-}
6496 +-
6497 + static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
6498 + {
6499 + int maxtime = max_cb_time(clp->net);
6500 +@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
6501 + args.authflavor = ses->se_cb_sec.flavor;
6502 + }
6503 + /* Create RPC client */
6504 +- client = create_backchannel_client(&args);
6505 ++ client = rpc_create(&args);
6506 + if (IS_ERR(client)) {
6507 + dprintk("NFSD: couldn't create callback client: %ld\n",
6508 + PTR_ERR(client));
6509 +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
6510 +index 69bd801afb53..37e49cb2ac4c 100644
6511 +--- a/fs/nilfs2/the_nilfs.c
6512 ++++ b/fs/nilfs2/the_nilfs.c
6513 +@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
6514 + if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
6515 + return 0;
6516 + bytes = le16_to_cpu(sbp->s_bytes);
6517 +- if (bytes > BLOCK_SIZE)
6518 ++ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
6519 + return 0;
6520 + crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
6521 + sumoff);
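
s_bytes comes straight off the disk, so it must be bounded on both sides before it feeds any size arithmetic: the superblock must at least cover the checksummed region plus the 4-byte CRC field (sumoff + 4), and it may not exceed the block. With unsigned types a too-small length does not go negative in later subtractions, it wraps. A userspace illustration of the failure mode the lower bound prevents (simplified; the exact consumer of the remaining length lives outside this hunk):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
  	uint32_t sumoff = 40, bytes = 20;	/* crafted, too-small s_bytes */

  	/* wraps to 4294967272 instead of going negative */
  	printf("%u\n", bytes - sumoff - 4);
  	return 0;
  }
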
6522 +diff --git a/fs/pipe.c b/fs/pipe.c
6523 +index 8865f7963700..5916c19dbb02 100644
6524 +--- a/fs/pipe.c
6525 ++++ b/fs/pipe.c
6526 +@@ -38,6 +38,12 @@ unsigned int pipe_max_size = 1048576;
6527 + */
6528 + unsigned int pipe_min_size = PAGE_SIZE;
6529 +
6530 ++/* Maximum allocatable pages per user. Hard limit is unset by default, soft
6531 ++ * matches default values.
6532 ++ */
6533 ++unsigned long pipe_user_pages_hard;
6534 ++unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
6535 ++
6536 + /*
6537 + * We use a start+len construction, which provides full use of the
6538 + * allocated memory.
6539 +@@ -584,20 +590,49 @@ pipe_fasync(int fd, struct file *filp, int on)
6540 + return retval;
6541 + }
6542 +
6543 ++static void account_pipe_buffers(struct pipe_inode_info *pipe,
6544 ++ unsigned long old, unsigned long new)
6545 ++{
6546 ++ atomic_long_add(new - old, &pipe->user->pipe_bufs);
6547 ++}
6548 ++
6549 ++static bool too_many_pipe_buffers_soft(struct user_struct *user)
6550 ++{
6551 ++ return pipe_user_pages_soft &&
6552 ++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
6553 ++}
6554 ++
6555 ++static bool too_many_pipe_buffers_hard(struct user_struct *user)
6556 ++{
6557 ++ return pipe_user_pages_hard &&
6558 ++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
6559 ++}
6560 ++
6561 + struct pipe_inode_info *alloc_pipe_info(void)
6562 + {
6563 + struct pipe_inode_info *pipe;
6564 +
6565 + pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
6566 + if (pipe) {
6567 +- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
6568 ++ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
6569 ++ struct user_struct *user = get_current_user();
6570 ++
6571 ++ if (!too_many_pipe_buffers_hard(user)) {
6572 ++ if (too_many_pipe_buffers_soft(user))
6573 ++ pipe_bufs = 1;
6574 ++ pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
6575 ++ }
6576 ++
6577 + if (pipe->bufs) {
6578 + init_waitqueue_head(&pipe->wait);
6579 + pipe->r_counter = pipe->w_counter = 1;
6580 +- pipe->buffers = PIPE_DEF_BUFFERS;
6581 ++ pipe->buffers = pipe_bufs;
6582 ++ pipe->user = user;
6583 ++ account_pipe_buffers(pipe, 0, pipe_bufs);
6584 + mutex_init(&pipe->mutex);
6585 + return pipe;
6586 + }
6587 ++ free_uid(user);
6588 + kfree(pipe);
6589 + }
6590 +
6591 +@@ -608,6 +643,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
6592 + {
6593 + int i;
6594 +
6595 ++ account_pipe_buffers(pipe, pipe->buffers, 0);
6596 ++ free_uid(pipe->user);
6597 + for (i = 0; i < pipe->buffers; i++) {
6598 + struct pipe_buffer *buf = pipe->bufs + i;
6599 + if (buf->ops)
6600 +@@ -996,6 +1033,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
6601 + memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
6602 + }
6603 +
6604 ++ account_pipe_buffers(pipe, pipe->buffers, nr_pages);
6605 + pipe->curbuf = 0;
6606 + kfree(pipe->bufs);
6607 + pipe->bufs = bufs;
6608 +@@ -1067,6 +1105,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
6609 + if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
6610 + ret = -EPERM;
6611 + goto out;
6612 ++ } else if ((too_many_pipe_buffers_hard(pipe->user) ||
6613 ++ too_many_pipe_buffers_soft(pipe->user)) &&
6614 ++ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
6615 ++ ret = -EPERM;
6616 ++ goto out;
6617 + }
6618 + ret = pipe_set_size(pipe, nr_pages);
6619 + break;
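
The semantics of the two new knobs, as implemented above: exceeding pipe-user-pages-hard makes pipe creation fail outright, exceeding pipe-user-pages-soft degrades new pipes to a single buffer, and F_SETPIPE_SZ refuses growth for users over either limit unless they hold CAP_SYS_RESOURCE or CAP_SYS_ADMIN. A small unprivileged userspace probe of that last case (illustrative; assumes the sysctls have been lowered far enough that the limit actually bites):

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	int fds[2];

  	if (pipe(fds))
  		return 1;
  	/* EPERM here once this user's pipe pages exceed the limits */
  	if (fcntl(fds[0], F_SETPIPE_SZ, 1 << 20) < 0)
  		perror("F_SETPIPE_SZ");
  	close(fds[0]);
  	close(fds[1]);
  	return 0;
  }
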
6620 +diff --git a/fs/proc/base.c b/fs/proc/base.c
6621 +index 68d51ed1666f..239dca3fb676 100644
6622 +--- a/fs/proc/base.c
6623 ++++ b/fs/proc/base.c
6624 +@@ -759,7 +759,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
6625 + int ret = 0;
6626 + struct mm_struct *mm = file->private_data;
6627 +
6628 +- if (!mm)
6629 ++ /* Ensure the process spawned far enough to have an environment. */
6630 ++ if (!mm || !mm->env_end)
6631 + return 0;
6632 +
6633 + page = (char *)__get_free_page(GFP_TEMPORARY);
6634 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
6635 +index 35efc103c39c..75e9b2db14ab 100644
6636 +--- a/fs/ubifs/file.c
6637 ++++ b/fs/ubifs/file.c
6638 +@@ -53,6 +53,7 @@
6639 + #include <linux/mount.h>
6640 + #include <linux/namei.h>
6641 + #include <linux/slab.h>
6642 ++#include <linux/migrate.h>
6643 +
6644 + static int read_block(struct inode *inode, void *addr, unsigned int block,
6645 + struct ubifs_data_node *dn)
6646 +@@ -1420,6 +1421,26 @@ static int ubifs_set_page_dirty(struct page *page)
6647 + return ret;
6648 + }
6649 +
6650 ++#ifdef CONFIG_MIGRATION
6651 ++static int ubifs_migrate_page(struct address_space *mapping,
6652 ++ struct page *newpage, struct page *page, enum migrate_mode mode)
6653 ++{
6654 ++ int rc;
6655 ++
6656 ++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
6657 ++ if (rc != MIGRATEPAGE_SUCCESS)
6658 ++ return rc;
6659 ++
6660 ++ if (PagePrivate(page)) {
6661 ++ ClearPagePrivate(page);
6662 ++ SetPagePrivate(newpage);
6663 ++ }
6664 ++
6665 ++ migrate_page_copy(newpage, page);
6666 ++ return MIGRATEPAGE_SUCCESS;
6667 ++}
6668 ++#endif
6669 ++
6670 + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
6671 + {
6672 + /*
6673 +@@ -1556,6 +1577,9 @@ const struct address_space_operations ubifs_file_address_operations = {
6674 + .write_end = ubifs_write_end,
6675 + .invalidatepage = ubifs_invalidatepage,
6676 + .set_page_dirty = ubifs_set_page_dirty,
6677 ++#ifdef CONFIG_MIGRATION
6678 ++ .migratepage = ubifs_migrate_page,
6679 ++#endif
6680 + .releasepage = ubifs_releasepage,
6681 + };
6682 +
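
This is the stock recipe for a filesystem ->migratepage: move the radix-tree mapping, transfer the page-private flag by hand (UBIFS uses PagePrivate as a plain marker with nothing attached to page->private), then copy contents and flags. The EXPORT_SYMBOL hunks for migrate_page_move_mapping() and migrate_page_copy() further down exist precisely so this still links with UBIFS built as a module. Filesystems that keep no private page state can skip the custom hook entirely, as shmem does (sketch, with a hypothetical aops table):

  #ifdef CONFIG_MIGRATION
  static const struct address_space_operations example_aops = {
  	/* generic helper from mm/migrate.c: no private state to move */
  	.migratepage	= migrate_page,
  };
  #endif
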
6683 +diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
6684 +index 516162be1398..e1b1c8278294 100644
6685 +--- a/fs/xfs/libxfs/xfs_alloc.c
6686 ++++ b/fs/xfs/libxfs/xfs_alloc.c
6687 +@@ -519,6 +519,7 @@ xfs_agfl_write_verify(
6688 + }
6689 +
6690 + const struct xfs_buf_ops xfs_agfl_buf_ops = {
6691 ++ .name = "xfs_agfl",
6692 + .verify_read = xfs_agfl_read_verify,
6693 + .verify_write = xfs_agfl_write_verify,
6694 + };
6695 +@@ -2276,6 +2277,7 @@ xfs_agf_write_verify(
6696 + }
6697 +
6698 + const struct xfs_buf_ops xfs_agf_buf_ops = {
6699 ++ .name = "xfs_agf",
6700 + .verify_read = xfs_agf_read_verify,
6701 + .verify_write = xfs_agf_write_verify,
6702 + };
6703 +diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
6704 +index 59d521c09a17..13629ad8a60c 100644
6705 +--- a/fs/xfs/libxfs/xfs_alloc_btree.c
6706 ++++ b/fs/xfs/libxfs/xfs_alloc_btree.c
6707 +@@ -379,6 +379,7 @@ xfs_allocbt_write_verify(
6708 + }
6709 +
6710 + const struct xfs_buf_ops xfs_allocbt_buf_ops = {
6711 ++ .name = "xfs_allocbt",
6712 + .verify_read = xfs_allocbt_read_verify,
6713 + .verify_write = xfs_allocbt_write_verify,
6714 + };
6715 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
6716 +index e9d401ce93bb..0532561a6010 100644
6717 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c
6718 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
6719 +@@ -325,6 +325,7 @@ xfs_attr3_leaf_read_verify(
6720 + }
6721 +
6722 + const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
6723 ++ .name = "xfs_attr3_leaf",
6724 + .verify_read = xfs_attr3_leaf_read_verify,
6725 + .verify_write = xfs_attr3_leaf_write_verify,
6726 + };
6727 +diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
6728 +index dd714037c322..c3db53d1bdb3 100644
6729 +--- a/fs/xfs/libxfs/xfs_attr_remote.c
6730 ++++ b/fs/xfs/libxfs/xfs_attr_remote.c
6731 +@@ -201,6 +201,7 @@ xfs_attr3_rmt_write_verify(
6732 + }
6733 +
6734 + const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
6735 ++ .name = "xfs_attr3_rmt",
6736 + .verify_read = xfs_attr3_rmt_read_verify,
6737 + .verify_write = xfs_attr3_rmt_write_verify,
6738 + };
6739 +diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
6740 +index 2c44c8e50782..225f2a8c0436 100644
6741 +--- a/fs/xfs/libxfs/xfs_bmap_btree.c
6742 ++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
6743 +@@ -719,6 +719,7 @@ xfs_bmbt_write_verify(
6744 + }
6745 +
6746 + const struct xfs_buf_ops xfs_bmbt_buf_ops = {
6747 ++ .name = "xfs_bmbt",
6748 + .verify_read = xfs_bmbt_read_verify,
6749 + .verify_write = xfs_bmbt_write_verify,
6750 + };
6751 +diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
6752 +index 2385f8cd08ab..5d1827056efb 100644
6753 +--- a/fs/xfs/libxfs/xfs_da_btree.c
6754 ++++ b/fs/xfs/libxfs/xfs_da_btree.c
6755 +@@ -241,6 +241,7 @@ xfs_da3_node_read_verify(
6756 + }
6757 +
6758 + const struct xfs_buf_ops xfs_da3_node_buf_ops = {
6759 ++ .name = "xfs_da3_node",
6760 + .verify_read = xfs_da3_node_read_verify,
6761 + .verify_write = xfs_da3_node_write_verify,
6762 + };
6763 +diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
6764 +index 9354e190b82e..a02ee011c8da 100644
6765 +--- a/fs/xfs/libxfs/xfs_dir2_block.c
6766 ++++ b/fs/xfs/libxfs/xfs_dir2_block.c
6767 +@@ -120,6 +120,7 @@ xfs_dir3_block_write_verify(
6768 + }
6769 +
6770 + const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
6771 ++ .name = "xfs_dir3_block",
6772 + .verify_read = xfs_dir3_block_read_verify,
6773 + .verify_write = xfs_dir3_block_write_verify,
6774 + };
6775 +diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
6776 +index 534bbf283d6b..e020a2c3a73f 100644
6777 +--- a/fs/xfs/libxfs/xfs_dir2_data.c
6778 ++++ b/fs/xfs/libxfs/xfs_dir2_data.c
6779 +@@ -302,11 +302,13 @@ xfs_dir3_data_write_verify(
6780 + }
6781 +
6782 + const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
6783 ++ .name = "xfs_dir3_data",
6784 + .verify_read = xfs_dir3_data_read_verify,
6785 + .verify_write = xfs_dir3_data_write_verify,
6786 + };
6787 +
6788 + static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
6789 ++ .name = "xfs_dir3_data_reada",
6790 + .verify_read = xfs_dir3_data_reada_verify,
6791 + .verify_write = xfs_dir3_data_write_verify,
6792 + };
6793 +diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
6794 +index 106119955400..eb66ae07428a 100644
6795 +--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
6796 ++++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
6797 +@@ -242,11 +242,13 @@ xfs_dir3_leafn_write_verify(
6798 + }
6799 +
6800 + const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
6801 ++ .name = "xfs_dir3_leaf1",
6802 + .verify_read = xfs_dir3_leaf1_read_verify,
6803 + .verify_write = xfs_dir3_leaf1_write_verify,
6804 + };
6805 +
6806 + const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
6807 ++ .name = "xfs_dir3_leafn",
6808 + .verify_read = xfs_dir3_leafn_read_verify,
6809 + .verify_write = xfs_dir3_leafn_write_verify,
6810 + };
6811 +diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
6812 +index 06bb4218b362..f6e591edbb98 100644
6813 +--- a/fs/xfs/libxfs/xfs_dir2_node.c
6814 ++++ b/fs/xfs/libxfs/xfs_dir2_node.c
6815 +@@ -147,6 +147,7 @@ xfs_dir3_free_write_verify(
6816 + }
6817 +
6818 + const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
6819 ++ .name = "xfs_dir3_free",
6820 + .verify_read = xfs_dir3_free_read_verify,
6821 + .verify_write = xfs_dir3_free_write_verify,
6822 + };
6823 +diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
6824 +index 48aff071591d..f48c3040c9ce 100644
6825 +--- a/fs/xfs/libxfs/xfs_dquot_buf.c
6826 ++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
6827 +@@ -301,6 +301,7 @@ xfs_dquot_buf_write_verify(
6828 + }
6829 +
6830 + const struct xfs_buf_ops xfs_dquot_buf_ops = {
6831 ++ .name = "xfs_dquot",
6832 + .verify_read = xfs_dquot_buf_read_verify,
6833 + .verify_write = xfs_dquot_buf_write_verify,
6834 + };
6835 +diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
6836 +index 1c9e75521250..fe20c2670f6c 100644
6837 +--- a/fs/xfs/libxfs/xfs_ialloc.c
6838 ++++ b/fs/xfs/libxfs/xfs_ialloc.c
6839 +@@ -2117,6 +2117,7 @@ xfs_agi_write_verify(
6840 + }
6841 +
6842 + const struct xfs_buf_ops xfs_agi_buf_ops = {
6843 ++ .name = "xfs_agi",
6844 + .verify_read = xfs_agi_read_verify,
6845 + .verify_write = xfs_agi_write_verify,
6846 + };
6847 +diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
6848 +index 964c465ca69c..216a6f0997f6 100644
6849 +--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
6850 ++++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
6851 +@@ -295,6 +295,7 @@ xfs_inobt_write_verify(
6852 + }
6853 +
6854 + const struct xfs_buf_ops xfs_inobt_buf_ops = {
6855 ++ .name = "xfs_inobt",
6856 + .verify_read = xfs_inobt_read_verify,
6857 + .verify_write = xfs_inobt_write_verify,
6858 + };
6859 +diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
6860 +index 7da6d0b2c2ed..a217176fde65 100644
6861 +--- a/fs/xfs/libxfs/xfs_inode_buf.c
6862 ++++ b/fs/xfs/libxfs/xfs_inode_buf.c
6863 +@@ -138,11 +138,13 @@ xfs_inode_buf_write_verify(
6864 + }
6865 +
6866 + const struct xfs_buf_ops xfs_inode_buf_ops = {
6867 ++ .name = "xfs_inode",
6868 + .verify_read = xfs_inode_buf_read_verify,
6869 + .verify_write = xfs_inode_buf_write_verify,
6870 + };
6871 +
6872 + const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
6873 + .name = "xfs_inode_ra",
6874 + .verify_read = xfs_inode_buf_readahead_verify,
6875 + .verify_write = xfs_inode_buf_write_verify,
6876 + };
6877 +diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
6878 +index dc4bfc5d88fc..535bd843f2f4 100644
6879 +--- a/fs/xfs/libxfs/xfs_sb.c
6880 ++++ b/fs/xfs/libxfs/xfs_sb.c
6881 +@@ -637,11 +637,13 @@ xfs_sb_write_verify(
6882 + }
6883 +
6884 + const struct xfs_buf_ops xfs_sb_buf_ops = {
6885 ++ .name = "xfs_sb",
6886 + .verify_read = xfs_sb_read_verify,
6887 + .verify_write = xfs_sb_write_verify,
6888 + };
6889 +
6890 + const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
6891 ++ .name = "xfs_sb_quiet",
6892 + .verify_read = xfs_sb_quiet_read_verify,
6893 + .verify_write = xfs_sb_write_verify,
6894 + };
6895 +diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
6896 +index e7e26bd6468f..4caff91ced51 100644
6897 +--- a/fs/xfs/libxfs/xfs_symlink_remote.c
6898 ++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
6899 +@@ -164,6 +164,7 @@ xfs_symlink_write_verify(
6900 + }
6901 +
6902 + const struct xfs_buf_ops xfs_symlink_buf_ops = {
6903 ++ .name = "xfs_symlink",
6904 + .verify_read = xfs_symlink_read_verify,
6905 + .verify_write = xfs_symlink_write_verify,
6906 + };
6907 +diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
6908 +index 75ff5d5a7d2e..110cb85e04f3 100644
6909 +--- a/fs/xfs/xfs_buf.h
6910 ++++ b/fs/xfs/xfs_buf.h
6911 +@@ -131,6 +131,7 @@ struct xfs_buf_map {
6912 + struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
6913 +
6914 + struct xfs_buf_ops {
6915 ++ char *name;
6916 + void (*verify_read)(struct xfs_buf *);
6917 + void (*verify_write)(struct xfs_buf *);
6918 + };
6919 +diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
6920 +index 338e50bbfd1e..63db1cc2091a 100644
6921 +--- a/fs/xfs/xfs_error.c
6922 ++++ b/fs/xfs/xfs_error.c
6923 +@@ -164,9 +164,9 @@ xfs_verifier_error(
6924 + {
6925 + struct xfs_mount *mp = bp->b_target->bt_mount;
6926 +
6927 +- xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx",
6928 ++ xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
6929 + bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
6930 +- __return_address, bp->b_bn);
6931 ++ __return_address, bp->b_ops->name, bp->b_bn);
6932 +
6933 + xfs_alert(mp, "Unmount and run xfs_repair");
6934 +
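
The pattern across all of these xfs hunks is mechanical: every xfs_buf_ops gains a .name so that xfs_verifier_error() can say which verifier rejected a buffer rather than only where. With the format string above, an AGF CRC failure now reads roughly "Metadata CRC error detected at <caller>, xfs_agf block 0x..." (values illustrative) instead of leaving the block type to guesswork. Any ops table added later follows the same shape (sketch; identifiers hypothetical):

  const struct xfs_buf_ops xfs_example_buf_ops = {
  	.name		= "xfs_example",
  	.verify_read	= xfs_example_read_verify,
  	.verify_write	= xfs_example_write_verify,
  };
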
6935 +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
6936 +index 51cc1deb7af3..30c8971bc615 100644
6937 +--- a/include/linux/device-mapper.h
6938 ++++ b/include/linux/device-mapper.h
6939 +@@ -127,6 +127,8 @@ struct dm_dev {
6940 + char name[16];
6941 + };
6942 +
6943 ++dev_t dm_get_dev_t(const char *path);
6944 ++
6945 + /*
6946 + * Constructors should call these functions to ensure destination devices
6947 + * are opened/closed correctly.
6948 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6949 +index 05b9a694e213..6c86c7edafa7 100644
6950 +--- a/include/linux/netdevice.h
6951 ++++ b/include/linux/netdevice.h
6952 +@@ -265,6 +265,7 @@ struct header_ops {
6953 + void (*cache_update)(struct hh_cache *hh,
6954 + const struct net_device *dev,
6955 + const unsigned char *haddr);
6956 ++ bool (*validate)(const char *ll_header, unsigned int len);
6957 + };
6958 +
6959 + /* These flag bits are private to the generic network queueing
6960 +@@ -1372,7 +1373,7 @@ enum netdev_priv_flags {
6961 + * @dma: DMA channel
6962 + * @mtu: Interface MTU value
6963 + * @type: Interface hardware type
6964 +- * @hard_header_len: Hardware header length
6965 ++ * @hard_header_len: Maximum hardware header length.
6966 + *
6967 + * @needed_headroom: Extra headroom the hardware may need, but not in all
6968 + * cases can this be guaranteed
6969 +@@ -2416,6 +2417,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
6970 + return dev->header_ops->parse(skb, haddr);
6971 + }
6972 +
6973 ++/* ll_header must have at least hard_header_len allocated */
6974 ++static inline bool dev_validate_header(const struct net_device *dev,
6975 ++ char *ll_header, int len)
6976 ++{
6977 ++ if (likely(len >= dev->hard_header_len))
6978 ++ return true;
6979 ++
6980 ++ if (capable(CAP_SYS_RAWIO)) {
6981 ++ memset(ll_header + len, 0, dev->hard_header_len - len);
6982 ++ return true;
6983 ++ }
6984 ++
6985 ++ if (dev->header_ops && dev->header_ops->validate)
6986 ++ return dev->header_ops->validate(ll_header, len);
6987 ++
6988 ++ return false;
6989 ++}
6990 ++
6991 + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
6992 + int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
6993 + static inline int unregister_gifconf(unsigned int family)
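
dev_validate_header() closes a long-standing packet-socket hole: a short user-supplied link-layer header used to be handed to drivers that assume at least hard_header_len readable bytes. The new rule: full-length headers always pass, CAP_SYS_RAWIO callers get the remainder zero-padded, and devices with genuinely variable-length headers can opt in via the new ->validate hook. A sketch of such a hook for a hypothetical device whose header only has a fixed minimum preamble (all example_* names and EXAMPLE_MIN_HLEN are assumptions):

  /* Accept any header at least EXAMPLE_MIN_HLEN bytes long. */
  static bool example_validate(const char *ll_header, unsigned int len)
  {
  	return len >= EXAMPLE_MIN_HLEN;
  }

  static const struct header_ops example_header_ops = {
  	.create		= example_create,
  	.validate	= example_validate,
  };
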
6994 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
6995 +index a3e215bb0241..7741efa43b35 100644
6996 +--- a/include/linux/netfilter/x_tables.h
6997 ++++ b/include/linux/netfilter/x_tables.h
6998 +@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
6999 + int xt_register_matches(struct xt_match *match, unsigned int n);
7000 + void xt_unregister_matches(struct xt_match *match, unsigned int n);
7001 +
7002 ++int xt_check_entry_offsets(const void *base, const char *elems,
7003 ++ unsigned int target_offset,
7004 ++ unsigned int next_offset);
7005 ++
7006 + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
7007 + bool inv_proto);
7008 + int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
7009 + bool inv_proto);
7010 +
7011 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
7012 ++ struct xt_counters_info *info, bool compat);
7013 ++
7014 + struct xt_table *xt_register_table(struct net *net,
7015 + const struct xt_table *table,
7016 + struct xt_table_info *bootstrap,
7017 +@@ -421,7 +428,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
7018 + int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
7019 +
7020 + int xt_compat_match_offset(const struct xt_match *match);
7021 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
7022 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
7023 + unsigned int *size);
7024 + int xt_compat_match_to_user(const struct xt_entry_match *m,
7025 + void __user **dstptr, unsigned int *size);
7026 +@@ -431,6 +438,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
7027 + unsigned int *size);
7028 + int xt_compat_target_to_user(const struct xt_entry_target *t,
7029 + void __user **dstptr, unsigned int *size);
7030 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
7031 ++ unsigned int target_offset,
7032 ++ unsigned int next_offset);
7033 +
7034 + #endif /* CONFIG_COMPAT */
7035 + #endif /* _X_TABLES_H */
7036 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
7037 +index eb8b8ac6df3c..24f5470d3944 100644
7038 +--- a/include/linux/pipe_fs_i.h
7039 ++++ b/include/linux/pipe_fs_i.h
7040 +@@ -42,6 +42,7 @@ struct pipe_buffer {
7041 + * @fasync_readers: reader side fasync
7042 + * @fasync_writers: writer side fasync
7043 + * @bufs: the circular array of pipe buffers
7044 ++ * @user: the user who created this pipe
7045 + **/
7046 + struct pipe_inode_info {
7047 + struct mutex mutex;
7048 +@@ -57,6 +58,7 @@ struct pipe_inode_info {
7049 + struct fasync_struct *fasync_readers;
7050 + struct fasync_struct *fasync_writers;
7051 + struct pipe_buffer *bufs;
7052 ++ struct user_struct *user;
7053 + };
7054 +
7055 + /*
7056 +@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info *);
7057 + void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
7058 +
7059 + extern unsigned int pipe_max_size, pipe_min_size;
7060 ++extern unsigned long pipe_user_pages_hard;
7061 ++extern unsigned long pipe_user_pages_soft;
7062 + int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
7063 +
7064 +
7065 +diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
7066 +index 5e0bc779e6c5..33f88b4479e4 100644
7067 +--- a/include/linux/platform_data/asoc-s3c.h
7068 ++++ b/include/linux/platform_data/asoc-s3c.h
7069 +@@ -39,6 +39,10 @@ struct samsung_i2s {
7070 + */
7071 + struct s3c_audio_pdata {
7072 + int (*cfg_gpio)(struct platform_device *);
7073 ++ void *dma_playback;
7074 ++ void *dma_capture;
7075 ++ void *dma_play_sec;
7076 ++ void *dma_capture_mic;
7077 + union {
7078 + struct samsung_i2s i2s;
7079 + } type;
7080 +diff --git a/include/linux/sched.h b/include/linux/sched.h
7081 +index 9128b4e9f541..9e39deaeddd6 100644
7082 +--- a/include/linux/sched.h
7083 ++++ b/include/linux/sched.h
7084 +@@ -803,6 +803,7 @@ struct user_struct {
7085 + #endif
7086 + unsigned long locked_shm; /* How many pages of mlocked shm ? */
7087 + unsigned long unix_inflight; /* How many files in flight in unix sockets */
7088 ++ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
7089 +
7090 + #ifdef CONFIG_KEYS
7091 + struct key *uid_keyring; /* UID specific keyring */
7092 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
7093 +index 6633b0cd3fb9..ca2e26a486ee 100644
7094 +--- a/include/linux/skbuff.h
7095 ++++ b/include/linux/skbuff.h
7096 +@@ -1781,6 +1781,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
7097 + skb->tail += len;
7098 + }
7099 +
7100 ++/**
7101 ++ * skb_tailroom_reserve - adjust reserved_tailroom
7102 ++ * @skb: buffer to alter
7103 ++ * @mtu: maximum amount of headlen permitted
7104 ++ * @needed_tailroom: minimum amount of reserved_tailroom
7105 ++ *
7106 ++ * Set reserved_tailroom so that headlen can be as large as possible but
7107 ++ * not larger than mtu and tailroom cannot be smaller than
7108 ++ * needed_tailroom.
7109 ++ * The required headroom should already have been reserved before using
7110 ++ * this function.
7111 ++ */
7112 ++static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
7113 ++ unsigned int needed_tailroom)
7114 ++{
7115 ++ SKB_LINEAR_ASSERT(skb);
7116 ++ if (mtu < skb_tailroom(skb) - needed_tailroom)
7117 ++ /* use at most mtu */
7118 ++ skb->reserved_tailroom = skb_tailroom(skb) - mtu;
7119 ++ else
7120 ++ /* use up to all available space */
7121 ++ skb->reserved_tailroom = needed_tailroom;
7122 ++}
7123 ++
7124 + #define ENCAP_TYPE_ETHER 0
7125 + #define ENCAP_TYPE_IPPROTO 1
7126 +
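
The clamp in skb_tailroom_reserve() is easier to see with numbers. A plain-integer mirror of the helper (userspace sketch, same arithmetic):

  #include <stdio.h>

  /* Same branch structure as skb_tailroom_reserve(), minus the skb. */
  static unsigned int reserve(unsigned int tailroom, unsigned int mtu,
  			    unsigned int needed_tailroom)
  {
  	if (mtu < tailroom - needed_tailroom)
  		return tailroom - mtu;	/* headlen capped at mtu */
  	return needed_tailroom;		/* headlen = all remaining space */
  }

  int main(void)
  {
  	printf("%u\n", reserve(4096, 1500, 128));	/* 2596 */
  	printf("%u\n", reserve(1400, 1500, 128));	/* 128  */
  	return 0;
  }

In the first case headlen is capped at the 1500-byte mtu; in the second, all 1272 bytes above the reserved 128 remain usable.
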
7127 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
7128 +index 598ba80ec30c..ee29cb43470f 100644
7129 +--- a/include/linux/sunrpc/clnt.h
7130 ++++ b/include/linux/sunrpc/clnt.h
7131 +@@ -134,8 +134,6 @@ struct rpc_create_args {
7132 + #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
7133 +
7134 + struct rpc_clnt *rpc_create(struct rpc_create_args *args);
7135 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
7136 +- struct rpc_xprt *xprt);
7137 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
7138 + const struct rpc_program *, u32);
7139 + void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
7140 +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
7141 +index 966889a20ea3..e479033bd782 100644
7142 +--- a/include/linux/usb/ehci_def.h
7143 ++++ b/include/linux/usb/ehci_def.h
7144 +@@ -180,11 +180,11 @@ struct ehci_regs {
7145 + * PORTSCx
7146 + */
7147 + /* HOSTPC: offset 0x84 */
7148 +- u32 hostpc[1]; /* HOSTPC extension */
7149 ++ u32 hostpc[0]; /* HOSTPC extension */
7150 + #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
7151 + #define HOSTPC_PSPD (3<<25) /* Port speed detection */
7152 +
7153 +- u32 reserved5[16];
7154 ++ u32 reserved5[17];
7155 +
7156 + /* USBMODE_EX: offset 0xc8 */
7157 + u32 usbmode_ex; /* USB Device mode extension */
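
The ehci_def.h change is layout-neutral: 1*4 + 16*4 and 0*4 + 17*4 both put usbmode_ex 0x44 bytes past HOSTPC, i.e. at offset 0xc8 as the comment requires. Declaring hostpc with zero length (a GNU C extension the kernel relies on) reflects that controllers expose one HOSTPC register per port, so the array is meant to be indexed, not sized. A compile-and-run check of the arithmetic on a simplified mock of the struct tail:

  #include <stdio.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Mock of the tail of struct ehci_regs; hostpc sits at 0x84. */
  struct tail {
  	uint32_t hostpc[0];	/* GNU C zero-length array */
  	uint32_t reserved5[17];
  	uint32_t usbmode_ex;
  };

  int main(void)
  {
  	/* prints 0x44; 0x84 + 0x44 == 0xc8, matching the register map */
  	printf("0x%zx\n", offsetof(struct tail, usbmode_ex));
  	return 0;
  }
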
7158 +diff --git a/include/net/bonding.h b/include/net/bonding.h
7159 +index 78ed135e9dea..5cba8f3c3fe4 100644
7160 +--- a/include/net/bonding.h
7161 ++++ b/include/net/bonding.h
7162 +@@ -211,6 +211,7 @@ struct bonding {
7163 + * ALB mode (6) - to sync the use and modifications of its hash table
7164 + */
7165 + spinlock_t mode_lock;
7166 ++ spinlock_t stats_lock;
7167 + u8 send_peer_notif;
7168 + u8 igmp_retrans;
7169 + #ifdef CONFIG_PROC_FS
7170 +diff --git a/include/net/codel.h b/include/net/codel.h
7171 +index 1e18005f7f65..0ee76108e741 100644
7172 +--- a/include/net/codel.h
7173 ++++ b/include/net/codel.h
7174 +@@ -160,11 +160,13 @@ struct codel_vars {
7175 + * struct codel_stats - contains codel shared variables and stats
7176 + * @maxpacket: largest packet we've seen so far
7177 + * @drop_count: temp count of dropped packets in dequeue()
7178 ++ * @drop_len: bytes of dropped packets in dequeue()
7179 + * ecn_mark: number of packets we ECN marked instead of dropping
7180 + */
7181 + struct codel_stats {
7182 + u32 maxpacket;
7183 + u32 drop_count;
7184 ++ u32 drop_len;
7185 + u32 ecn_mark;
7186 + };
7187 +
7188 +@@ -301,6 +303,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
7189 + vars->rec_inv_sqrt);
7190 + goto end;
7191 + }
7192 ++ stats->drop_len += qdisc_pkt_len(skb);
7193 + qdisc_drop(skb, sch);
7194 + stats->drop_count++;
7195 + skb = dequeue_func(vars, sch);
7196 +@@ -323,6 +326,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
7197 + if (params->ecn && INET_ECN_set_ce(skb)) {
7198 + stats->ecn_mark++;
7199 + } else {
7200 ++ stats->drop_len += qdisc_pkt_len(skb);
7201 + qdisc_drop(skb, sch);
7202 + stats->drop_count++;
7203 +
7204 +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
7205 +index 4e3731ee4eac..cd8594528d4b 100644
7206 +--- a/include/net/ip_vs.h
7207 ++++ b/include/net/ip_vs.h
7208 +@@ -1584,6 +1584,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
7209 + }
7210 + #endif /* CONFIG_IP_VS_NFCT */
7211 +
7212 ++/* Really using conntrack? */
7213 ++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
7214 ++ struct sk_buff *skb)
7215 ++{
7216 ++#ifdef CONFIG_IP_VS_NFCT
7217 ++ enum ip_conntrack_info ctinfo;
7218 ++ struct nf_conn *ct;
7219 ++
7220 ++ if (!(cp->flags & IP_VS_CONN_F_NFCT))
7221 ++ return false;
7222 ++ ct = nf_ct_get(skb, &ctinfo);
7223 ++ if (ct && !nf_ct_is_untracked(ct))
7224 ++ return true;
7225 ++#endif
7226 ++ return false;
7227 ++}
7228 ++
7229 + static inline int
7230 + ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
7231 + {
7232 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
7233 +index 080b657ef8fb..530bdca19803 100644
7234 +--- a/include/net/sch_generic.h
7235 ++++ b/include/net/sch_generic.h
7236 +@@ -395,7 +395,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
7237 + struct Qdisc *qdisc);
7238 + void qdisc_reset(struct Qdisc *qdisc);
7239 + void qdisc_destroy(struct Qdisc *qdisc);
7240 +-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
7241 ++void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
7242 ++ unsigned int len);
7243 + struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
7244 + const struct Qdisc_ops *ops);
7245 + struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
7246 +@@ -691,6 +692,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
7247 + sch->qstats.backlog = 0;
7248 + }
7249 +
7250 ++static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
7251 ++ struct Qdisc **pold)
7252 ++{
7253 ++ struct Qdisc *old;
7254 ++
7255 ++ sch_tree_lock(sch);
7256 ++ old = *pold;
7257 ++ *pold = new;
7258 ++ if (old != NULL) {
7259 ++ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
7260 ++ qdisc_reset(old);
7261 ++ }
7262 ++ sch_tree_unlock(sch);
7263 ++
7264 ++ return old;
7265 ++}
7266 ++
7267 + static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
7268 + struct sk_buff_head *list)
7269 + {
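
qdisc_tree_reduce_backlog() and the qdisc_replace() helper change the contract for classful qdiscs: when a child is swapped out, the parents must be debited for both the packets and the bytes that disappear, which is why codel above grows a drop_len counter and why the scheduler code elsewhere in this patch is converted to the new helper. With qdisc_replace(), a typical ->graft implementation shrinks to (sketch; the example_* names are hypothetical):

  static int example_graft(struct Qdisc *sch, unsigned long arg,
  			 struct Qdisc *new, struct Qdisc **old)
  {
  	struct example_sched_data *q = qdisc_priv(sch);

  	if (new == NULL)
  		new = &noop_qdisc;
  	/* locks the tree, debits qlen/backlog, resets the old child */
  	*old = qdisc_replace(sch, new, &q->qdisc);
  	return 0;
  }
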
7270 +diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
7271 +index df705908480a..2f48d648e051 100644
7272 +--- a/include/sound/hda_regmap.h
7273 ++++ b/include/sound/hda_regmap.h
7274 +@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
7275 + unsigned int verb);
7276 + int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
7277 + unsigned int *val);
7278 ++int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
7279 ++ unsigned int reg, unsigned int *val);
7280 + int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
7281 + unsigned int val);
7282 + int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
7283 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
7284 +index 6582410a71c7..efd143dcedf1 100644
7285 +--- a/kernel/bpf/verifier.c
7286 ++++ b/kernel/bpf/verifier.c
7287 +@@ -1227,6 +1227,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
7288 + }
7289 +
7290 + if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
7291 ++ BPF_SIZE(insn->code) == BPF_DW ||
7292 + (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
7293 + verbose("BPF_LD_ABS uses reserved fields\n");
7294 + return -EINVAL;
7295 +@@ -1864,7 +1865,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
7296 + if (IS_ERR(map)) {
7297 + verbose("fd %d is not pointing to valid bpf_map\n",
7298 + insn->imm);
7299 +- fdput(f);
7300 + return PTR_ERR(map);
7301 + }
7302 +
7303 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
7304 +index 359da3abb004..3abce1e0f910 100644
7305 +--- a/kernel/cgroup.c
7306 ++++ b/kernel/cgroup.c
7307 +@@ -4378,14 +4378,15 @@ static void css_free_work_fn(struct work_struct *work)
7308 +
7309 + if (ss) {
7310 + /* css free path */
7311 ++ struct cgroup_subsys_state *parent = css->parent;
7312 + int id = css->id;
7313 +
7314 +- if (css->parent)
7315 +- css_put(css->parent);
7316 +-
7317 + ss->css_free(css);
7318 + cgroup_idr_remove(&ss->css_idr, id);
7319 + cgroup_put(cgrp);
7320 ++
7321 ++ if (parent)
7322 ++ css_put(parent);
7323 + } else {
7324 + /* cgroup free path */
7325 + atomic_dec(&cgrp->root->nr_cgrps);
7326 +@@ -4478,6 +4479,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
7327 + memset(css, 0, sizeof(*css));
7328 + css->cgroup = cgrp;
7329 + css->ss = ss;
7330 ++ css->id = -1;
7331 + INIT_LIST_HEAD(&css->sibling);
7332 + INIT_LIST_HEAD(&css->children);
7333 + css->serial_nr = css_serial_nr_next++;
7334 +@@ -4563,7 +4565,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
7335 +
7336 + err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
7337 + if (err < 0)
7338 +- goto err_free_percpu_ref;
7339 ++ goto err_free_css;
7340 + css->id = err;
7341 +
7342 + if (visible) {
7343 +@@ -4595,9 +4597,6 @@ err_list_del:
7344 + list_del_rcu(&css->sibling);
7345 + cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
7346 + err_free_id:
7347 +- cgroup_idr_remove(&ss->css_idr, css->id);
7348 +-err_free_percpu_ref:
7349 +- percpu_ref_exit(&css->refcnt);
7350 + err_free_css:
7351 + call_rcu(&css->rcu_head, css_free_rcu_fn);
7352 + return err;
7353 +diff --git a/kernel/futex.c b/kernel/futex.c
7354 +index 46b168e19c98..2214b70f1910 100644
7355 +--- a/kernel/futex.c
7356 ++++ b/kernel/futex.c
7357 +@@ -1381,8 +1381,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
7358 + if (likely(&hb1->chain != &hb2->chain)) {
7359 + plist_del(&q->list, &hb1->chain);
7360 + hb_waiters_dec(hb1);
7361 +- plist_add(&q->list, &hb2->chain);
7362 + hb_waiters_inc(hb2);
7363 ++ plist_add(&q->list, &hb2->chain);
7364 + q->lock_ptr = &hb2->lock;
7365 + }
7366 + get_futex_key_refs(key2);
7367 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
7368 +index 3b0f4c09ab92..98e607121d09 100644
7369 +--- a/kernel/sched/core.c
7370 ++++ b/kernel/sched/core.c
7371 +@@ -4568,14 +4568,16 @@ void show_state_filter(unsigned long state_filter)
7372 + /*
7373 + * reset the NMI-timeout, listing all files on a slow
7374 + * console might take a lot of time:
7375 ++ * Also, reset softlockup watchdogs on all CPUs, because
7376 ++ * another CPU might be blocked waiting for us to process
7377 ++ * an IPI.
7378 + */
7379 + touch_nmi_watchdog();
7380 ++ touch_all_softlockup_watchdogs();
7381 + if (!state_filter || (p->state & state_filter))
7382 + sched_show_task(p);
7383 + }
7384 +
7385 +- touch_all_softlockup_watchdogs();
7386 +-
7387 + #ifdef CONFIG_SCHED_DEBUG
7388 + sysrq_sched_debug_show();
7389 + #endif
7390 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
7391 +index c3eee4c6d6c1..7d4900404c94 100644
7392 +--- a/kernel/sysctl.c
7393 ++++ b/kernel/sysctl.c
7394 +@@ -1695,6 +1695,20 @@ static struct ctl_table fs_table[] = {
7395 + .proc_handler = &pipe_proc_fn,
7396 + .extra1 = &pipe_min_size,
7397 + },
7398 ++ {
7399 ++ .procname = "pipe-user-pages-hard",
7400 ++ .data = &pipe_user_pages_hard,
7401 ++ .maxlen = sizeof(pipe_user_pages_hard),
7402 ++ .mode = 0644,
7403 ++ .proc_handler = proc_doulongvec_minmax,
7404 ++ },
7405 ++ {
7406 ++ .procname = "pipe-user-pages-soft",
7407 ++ .data = &pipe_user_pages_soft,
7408 ++ .maxlen = sizeof(pipe_user_pages_soft),
7409 ++ .mode = 0644,
7410 ++ .proc_handler = proc_doulongvec_minmax,
7411 ++ },
7412 + { }
7413 + };
7414 +
7415 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
7416 +index 6d6c0411cbe8..fc0c74f18288 100644
7417 +--- a/kernel/trace/trace_printk.c
7418 ++++ b/kernel/trace/trace_printk.c
7419 +@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
7420 + static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
7421 + {
7422 + struct trace_bprintk_fmt *pos;
7423 ++
7424 ++ if (!fmt)
7425 ++ return ERR_PTR(-EINVAL);
7426 ++
7427 + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
7428 + if (!strcmp(pos->fmt, fmt))
7429 + return pos;
7430 +@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
7431 + for (iter = start; iter < end; iter++) {
7432 + struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
7433 + if (tb_fmt) {
7434 +- *iter = tb_fmt->fmt;
7435 ++ if (!IS_ERR(tb_fmt))
7436 ++ *iter = tb_fmt->fmt;
7437 + continue;
7438 + }
7439 +
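
lookup_format() now distinguishes three outcomes instead of two, using the kernel's ERR_PTR convention: NULL still means "not tracked yet", a real pointer means "found", and an encoded error means "do not even try" (here, a NULL format string from a module). The caller's shape, condensed from hold_module_trace_bprintk_format() above:

  struct trace_bprintk_fmt *tb_fmt = lookup_format(fmt);

  if (tb_fmt) {
  	if (!IS_ERR(tb_fmt))
  		*iter = tb_fmt->fmt;	/* found: reuse the stored copy */
  	continue;			/* found or invalid: nothing to add */
  }
  /* NULL: fall through and allocate a new entry */
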
7440 +diff --git a/mm/compaction.c b/mm/compaction.c
7441 +index 3dcf93cd622b..32c719a4bc3d 100644
7442 +--- a/mm/compaction.c
7443 ++++ b/mm/compaction.c
7444 +@@ -431,6 +431,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
7445 +
7446 + if (!valid_page)
7447 + valid_page = page;
7448 ++
7449 ++ /*
7450 ++ * For compound pages such as THP and hugetlbfs, we can save
7451 ++ * potentially a lot of iterations if we skip them at once.
7452 ++ * The check is racy, but we can consider only valid values
7453 ++ * and the only danger is skipping too much.
7454 ++ */
7455 ++ if (PageCompound(page)) {
7456 ++ unsigned int comp_order = compound_order(page);
7457 ++
7458 ++ if (likely(comp_order < MAX_ORDER)) {
7459 ++ blockpfn += (1UL << comp_order) - 1;
7460 ++ cursor += (1UL << comp_order) - 1;
7461 ++ }
7462 ++
7463 ++ goto isolate_fail;
7464 ++ }
7465 ++
7466 + if (!PageBuddy(page))
7467 + goto isolate_fail;
7468 +
7469 +@@ -462,25 +480,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
7470 +
7471 + /* Found a free page, break it into order-0 pages */
7472 + isolated = split_free_page(page);
7473 ++ if (!isolated)
7474 ++ break;
7475 ++
7476 + total_isolated += isolated;
7477 ++ cc->nr_freepages += isolated;
7478 + for (i = 0; i < isolated; i++) {
7479 + list_add(&page->lru, freelist);
7480 + page++;
7481 + }
7482 +-
7483 +- /* If a page was split, advance to the end of it */
7484 +- if (isolated) {
7485 +- cc->nr_freepages += isolated;
7486 +- if (!strict &&
7487 +- cc->nr_migratepages <= cc->nr_freepages) {
7488 +- blockpfn += isolated;
7489 +- break;
7490 +- }
7491 +-
7492 +- blockpfn += isolated - 1;
7493 +- cursor += isolated - 1;
7494 +- continue;
7495 ++ if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
7496 ++ blockpfn += isolated;
7497 ++ break;
7498 + }
7499 ++ /* Advance to the end of split page */
7500 ++ blockpfn += isolated - 1;
7501 ++ cursor += isolated - 1;
7502 ++ continue;
7503 +
7504 + isolate_fail:
7505 + if (strict)
7506 +@@ -490,6 +506,16 @@ isolate_fail:
7507 +
7508 + }
7509 +
7510 ++ if (locked)
7511 ++ spin_unlock_irqrestore(&cc->zone->lock, flags);
7512 ++
7513 ++ /*
7514 ++ * There is a tiny chance that we have read bogus compound_order(),
7515 ++ * so be careful to not go outside of the pageblock.
7516 ++ */
7517 ++ if (unlikely(blockpfn > end_pfn))
7518 ++ blockpfn = end_pfn;
7519 ++
7520 + trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
7521 + nr_scanned, total_isolated);
7522 +
7523 +@@ -504,9 +530,6 @@ isolate_fail:
7524 + if (strict && blockpfn < end_pfn)
7525 + total_isolated = 0;
7526 +
7527 +- if (locked)
7528 +- spin_unlock_irqrestore(&cc->zone->lock, flags);
7529 +-
7530 + /* Update the pageblock-skip if the whole pageblock was scanned */
7531 + if (blockpfn == end_pfn)
7532 + update_pageblock_skip(cc, valid_page, total_isolated, false);
7533 +@@ -930,6 +953,7 @@ static void isolate_freepages(struct compact_control *cc)
7534 + block_end_pfn = block_start_pfn,
7535 + block_start_pfn -= pageblock_nr_pages,
7536 + isolate_start_pfn = block_start_pfn) {
7537 ++ unsigned long isolated;
7538 +
7539 + /*
7540 + * This can iterate a massively long zone without finding any
7541 +@@ -954,8 +978,12 @@ static void isolate_freepages(struct compact_control *cc)
7542 + continue;
7543 +
7544 + /* Found a block suitable for isolating free pages from. */
7545 +- isolate_freepages_block(cc, &isolate_start_pfn,
7546 +- block_end_pfn, freelist, false);
7547 ++ isolated = isolate_freepages_block(cc, &isolate_start_pfn,
7548 ++ block_end_pfn, freelist, false);
7549 ++ /* If isolation failed early, do not continue needlessly */
7550 ++ if (!isolated && isolate_start_pfn < block_end_pfn &&
7551 ++ cc->nr_migratepages > cc->nr_freepages)
7552 ++ break;
7553 +
7554 + /*
7555 + * Remember where the free scanner should restart next time,
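
Two things combine in the compaction hunks above: a compound page is skipped
in one step by advancing both blockpfn and cursor by (1UL << comp_order) - 1
(the for-loop increment supplies the final +1), and because compound_order()
was read without the zone lock, blockpfn is clamped to end_pfn once the loop
ends. A small sketch of that arithmetic, with illustrative values:

	#include <stdio.h>

	#define MAX_ORDER 11

	int main(void)
	{
		unsigned long blockpfn, end_pfn = 512, scanned = 0;
		unsigned long compound_head_pfn = 0;	/* a THP at pfn 0 */
		unsigned int comp_order = 9;		/* 1 << 9 = 512 pages */

		for (blockpfn = 0; blockpfn < end_pfn; blockpfn++) {
			if (blockpfn == compound_head_pfn &&
			    comp_order < MAX_ORDER) {
				/* skip the tail pages at once; the loop's
				 * own increment adds the last +1 */
				blockpfn += (1UL << comp_order) - 1;
				continue;
			}
			scanned++;	/* per-page work would happen here */
		}

		/* a racily-read order could overshoot the block; clamp */
		if (blockpfn > end_pfn)
			blockpfn = end_pfn;

		printf("stopped at pfn %lu, scanned %lu pages one by one\n",
		       blockpfn, scanned);
		return 0;
	}
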
7556 +diff --git a/mm/migrate.c b/mm/migrate.c
7557 +index fe71f91c7b27..2599977221aa 100644
7558 +--- a/mm/migrate.c
7559 ++++ b/mm/migrate.c
7560 +@@ -389,6 +389,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
7561 +
7562 + return MIGRATEPAGE_SUCCESS;
7563 + }
7564 ++EXPORT_SYMBOL(migrate_page_move_mapping);
7565 +
7566 + /*
7567 + * The expected number of remaining references is the same as that
7568 +@@ -549,6 +550,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
7569 + if (PageWriteback(newpage))
7570 + end_page_writeback(newpage);
7571 + }
7572 ++EXPORT_SYMBOL(migrate_page_copy);
7573 +
7574 + /************************************************************
7575 + * Migration functions
7576 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7577 +index 551923097bbc..f6f6831cec52 100644
7578 +--- a/mm/page_alloc.c
7579 ++++ b/mm/page_alloc.c
7580 +@@ -5926,7 +5926,7 @@ int __meminit init_per_zone_wmark_min(void)
7581 + setup_per_zone_inactive_ratio();
7582 + return 0;
7583 + }
7584 +-module_init(init_per_zone_wmark_min)
7585 ++core_initcall(init_per_zone_wmark_min)
7586 +
7587 + /*
7588 + * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
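
The one-line page_alloc change only alters *when* init_per_zone_wmark_min()
runs: for built-in code module_init() maps to device_initcall() (level 6),
while core_initcall() is level 1, so the watermarks are now set up before
most other boot-time initialization can allocate memory. A toy model of the
level ordering (the kernel drives this from linker-section arrays rather
than a table like this):

	#include <stdio.h>

	typedef void (*initcall_t)(void);

	static void init_watermarks(void)
	{
		puts("level 1: init_per_zone_wmark_min");
	}

	static void init_some_driver(void)
	{
		puts("level 6: a typical device_initcall");
	}

	/* levels run in ascending order; unset slots stay NULL */
	static initcall_t levels[8][4] = {
		[1] = { init_watermarks },	/* core_initcall() */
		[6] = { init_some_driver },	/* built-in module_init() */
	};

	int main(void)
	{
		for (int lvl = 0; lvl < 8; lvl++)
			for (int i = 0; i < 4 && levels[lvl][i]; i++)
				levels[lvl][i]();
		return 0;
	}
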
7589 +diff --git a/mm/page_isolation.c b/mm/page_isolation.c
7590 +index 303c908790ef..4b640c20f5c5 100644
7591 +--- a/mm/page_isolation.c
7592 ++++ b/mm/page_isolation.c
7593 +@@ -300,11 +300,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
7594 + * now as a simple work-around, we use the next node for destination.
7595 + */
7596 + if (PageHuge(page)) {
7597 +- nodemask_t src = nodemask_of_node(page_to_nid(page));
7598 +- nodemask_t dst;
7599 +- nodes_complement(dst, src);
7600 ++ int node = next_online_node(page_to_nid(page));
7601 ++ if (node == MAX_NUMNODES)
7602 ++ node = first_online_node;
7603 + return alloc_huge_page_node(page_hstate(compound_head(page)),
7604 +- next_node(page_to_nid(page), dst));
7605 ++ node);
7606 + }
7607 +
7608 + if (PageHighMem(page))
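
The hugepage destination above is picked with a plain wrap-around walk: try
the next online node after the source, and fall back to the first online
node when the walk runs off the end. A userspace model of that selection
(the node topology is invented for the example):

	#include <stdio.h>

	#define MAX_NUMNODES 4

	static int online[MAX_NUMNODES] = { 1, 0, 1, 1 };

	static int next_online_node(int nid)
	{
		for (nid++; nid < MAX_NUMNODES; nid++)
			if (online[nid])
				return nid;
		return MAX_NUMNODES;	/* ran off the end */
	}

	static int first_online_node(void)
	{
		return next_online_node(-1);
	}

	int main(void)
	{
		int node = next_online_node(3);	/* source: last online node */

		if (node == MAX_NUMNODES)
			node = first_online_node();	/* wrap to node 0 */
		printf("destination node %d\n", node);
		return 0;
	}
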
7609 +diff --git a/mm/percpu.c b/mm/percpu.c
7610 +index 2dd74487a0af..b97617587620 100644
7611 +--- a/mm/percpu.c
7612 ++++ b/mm/percpu.c
7613 +@@ -110,7 +110,7 @@ struct pcpu_chunk {
7614 + int map_used; /* # of map entries used before the sentry */
7615 + int map_alloc; /* # of map entries allocated */
7616 + int *map; /* allocation map */
7617 +- struct work_struct map_extend_work;/* async ->map[] extension */
7618 ++ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
7619 +
7620 + void *data; /* chunk data */
7621 + int first_free; /* no free below this */
7622 +@@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
7623 + static int pcpu_reserved_chunk_limit;
7624 +
7625 + static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
7626 +-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
7627 ++static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
7628 +
7629 + static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
7630 +
7631 ++/* chunks which need their map areas extended, protected by pcpu_lock */
7632 ++static LIST_HEAD(pcpu_map_extend_chunks);
7633 ++
7634 + /*
7635 + * The number of empty populated pages, protected by pcpu_lock. The
7636 + * reserved chunk doesn't contribute to the count.
7637 +@@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
7638 + {
7639 + int margin, new_alloc;
7640 +
7641 ++ lockdep_assert_held(&pcpu_lock);
7642 ++
7643 + if (is_atomic) {
7644 + margin = 3;
7645 +
7646 + if (chunk->map_alloc <
7647 +- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
7648 +- pcpu_async_enabled)
7649 +- schedule_work(&chunk->map_extend_work);
7650 ++ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
7651 ++ if (list_empty(&chunk->map_extend_list)) {
7652 ++ list_add_tail(&chunk->map_extend_list,
7653 ++ &pcpu_map_extend_chunks);
7654 ++ pcpu_schedule_balance_work();
7655 ++ }
7656 ++ }
7657 + } else {
7658 + margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
7659 + }
7660 +@@ -437,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
7661 + size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
7662 + unsigned long flags;
7663 +
7664 ++ lockdep_assert_held(&pcpu_alloc_mutex);
7665 ++
7666 + new = pcpu_mem_zalloc(new_size);
7667 + if (!new)
7668 + return -ENOMEM;
7669 +@@ -469,20 +480,6 @@ out_unlock:
7670 + return 0;
7671 + }
7672 +
7673 +-static void pcpu_map_extend_workfn(struct work_struct *work)
7674 +-{
7675 +- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
7676 +- map_extend_work);
7677 +- int new_alloc;
7678 +-
7679 +- spin_lock_irq(&pcpu_lock);
7680 +- new_alloc = pcpu_need_to_extend(chunk, false);
7681 +- spin_unlock_irq(&pcpu_lock);
7682 +-
7683 +- if (new_alloc)
7684 +- pcpu_extend_area_map(chunk, new_alloc);
7685 +-}
7686 +-
7687 + /**
7688 + * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
7689 + * @chunk: chunk the candidate area belongs to
7690 +@@ -742,7 +739,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
7691 + chunk->map_used = 1;
7692 +
7693 + INIT_LIST_HEAD(&chunk->list);
7694 +- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
7695 ++ INIT_LIST_HEAD(&chunk->map_extend_list);
7696 + chunk->free_size = pcpu_unit_size;
7697 + chunk->contig_hint = pcpu_unit_size;
7698 +
7699 +@@ -897,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
7700 + return NULL;
7701 + }
7702 +
7703 ++ if (!is_atomic)
7704 ++ mutex_lock(&pcpu_alloc_mutex);
7705 ++
7706 + spin_lock_irqsave(&pcpu_lock, flags);
7707 +
7708 + /* serve reserved allocations from the reserved chunk if available */
7709 +@@ -969,12 +969,9 @@ restart:
7710 + if (is_atomic)
7711 + goto fail;
7712 +
7713 +- mutex_lock(&pcpu_alloc_mutex);
7714 +-
7715 + if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
7716 + chunk = pcpu_create_chunk();
7717 + if (!chunk) {
7718 +- mutex_unlock(&pcpu_alloc_mutex);
7719 + err = "failed to allocate new chunk";
7720 + goto fail;
7721 + }
7722 +@@ -985,7 +982,6 @@ restart:
7723 + spin_lock_irqsave(&pcpu_lock, flags);
7724 + }
7725 +
7726 +- mutex_unlock(&pcpu_alloc_mutex);
7727 + goto restart;
7728 +
7729 + area_found:
7730 +@@ -995,8 +991,6 @@ area_found:
7731 + if (!is_atomic) {
7732 + int page_start, page_end, rs, re;
7733 +
7734 +- mutex_lock(&pcpu_alloc_mutex);
7735 +-
7736 + page_start = PFN_DOWN(off);
7737 + page_end = PFN_UP(off + size);
7738 +
7739 +@@ -1007,7 +1001,6 @@ area_found:
7740 +
7741 + spin_lock_irqsave(&pcpu_lock, flags);
7742 + if (ret) {
7743 +- mutex_unlock(&pcpu_alloc_mutex);
7744 + pcpu_free_area(chunk, off, &occ_pages);
7745 + err = "failed to populate";
7746 + goto fail_unlock;
7747 +@@ -1047,6 +1040,8 @@ fail:
7748 + /* see the flag handling in pcpu_balance_workfn() */
7749 + pcpu_atomic_alloc_failed = true;
7750 + pcpu_schedule_balance_work();
7751 ++ } else {
7752 ++ mutex_unlock(&pcpu_alloc_mutex);
7753 + }
7754 + return NULL;
7755 + }
7756 +@@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
7757 + if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
7758 + continue;
7759 +
7760 ++ list_del_init(&chunk->map_extend_list);
7761 + list_move(&chunk->list, &to_free);
7762 + }
7763 +
7764 +@@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
7765 + pcpu_destroy_chunk(chunk);
7766 + }
7767 +
7768 ++ /* service chunks which requested async area map extension */
7769 ++ do {
7770 ++ int new_alloc = 0;
7771 ++
7772 ++ spin_lock_irq(&pcpu_lock);
7773 ++
7774 ++ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
7775 ++ struct pcpu_chunk, map_extend_list);
7776 ++ if (chunk) {
7777 ++ list_del_init(&chunk->map_extend_list);
7778 ++ new_alloc = pcpu_need_to_extend(chunk, false);
7779 ++ }
7780 ++
7781 ++ spin_unlock_irq(&pcpu_lock);
7782 ++
7783 ++ if (new_alloc)
7784 ++ pcpu_extend_area_map(chunk, new_alloc);
7785 ++ } while (chunk);
7786 ++
7787 + /*
7788 + * Ensure there are certain number of free populated pages for
7789 + * atomic allocs. Fill up from the most packed so that atomic
7790 +@@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
7791 + */
7792 + schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
7793 + INIT_LIST_HEAD(&schunk->list);
7794 +- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
7795 ++ INIT_LIST_HEAD(&schunk->map_extend_list);
7796 + schunk->base_addr = base_addr;
7797 + schunk->map = smap;
7798 + schunk->map_alloc = ARRAY_SIZE(smap);
7799 +@@ -1676,7 +1691,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
7800 + if (dyn_size) {
7801 + dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
7802 + INIT_LIST_HEAD(&dchunk->list);
7803 +- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
7804 ++ INIT_LIST_HEAD(&dchunk->map_extend_list);
7805 + dchunk->base_addr = base_addr;
7806 + dchunk->map = dmap;
7807 + dchunk->map_alloc = ARRAY_SIZE(dmap);
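
The percpu rework above replaces one work_struct per chunk with a single
shared list, pcpu_map_extend_chunks, drained by the existing balance worker.
Because list_del_init() leaves the node self-linked, list_empty() on the
embedded node doubles as an "already queued?" test, so a chunk can never be
queued twice. A self-contained sketch of that queueing idiom with a toy
intrusive list:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev; n->next = h;
		h->prev->next = n; h->prev = n;
	}

	static void list_del_init(struct list_head *n)
	{
		n->prev->next = n->next; n->next->prev = n->prev;
		INIT_LIST_HEAD(n);
	}

	struct chunk { int id; struct list_head map_extend_list; };

	static struct list_head pending;   /* models pcpu_map_extend_chunks */

	static void request_extend(struct chunk *c)
	{
		/* empty node == not queued, so double-queueing is impossible */
		if (list_empty(&c->map_extend_list))
			list_add_tail(&c->map_extend_list, &pending);
	}

	int main(void)
	{
		struct chunk a = { .id = 1 };

		INIT_LIST_HEAD(&pending);
		INIT_LIST_HEAD(&a.map_extend_list);

		request_extend(&a);
		request_extend(&a);		/* no-op: already queued */

		while (!list_empty(&pending)) {	/* models the balance worker */
			struct chunk *c = (struct chunk *)((char *)pending.next -
				offsetof(struct chunk, map_extend_list));

			list_del_init(&c->map_extend_list);
			printf("extending map of chunk %d\n", c->id);
		}
		return 0;
	}
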
7808 +diff --git a/mm/shmem.c b/mm/shmem.c
7809 +index 47d536e59fc0..46511ad90bc5 100644
7810 +--- a/mm/shmem.c
7811 ++++ b/mm/shmem.c
7812 +@@ -2137,9 +2137,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
7813 + NULL);
7814 + if (error) {
7815 + /* Remove the !PageUptodate pages we added */
7816 +- shmem_undo_range(inode,
7817 +- (loff_t)start << PAGE_CACHE_SHIFT,
7818 +- (loff_t)index << PAGE_CACHE_SHIFT, true);
7819 ++ if (index > start) {
7820 ++ shmem_undo_range(inode,
7821 ++ (loff_t)start << PAGE_CACHE_SHIFT,
7822 ++ ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
7823 ++ }
7824 + goto undone;
7825 + }
7826 +
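
The shmem fix is an inclusive-end off-by-one: shmem_undo_range() takes an
inclusive byte offset, so undoing the pages [start, index) must pass
((loff_t)index << PAGE_CACHE_SHIFT) - 1, and nothing at all when no page
was added (index == start). A tiny model of the corrected call:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	/* 'end' is an inclusive byte offset, as in shmem_undo_range() */
	static void undo_range(long long start, long long end)
	{
		printf("undo bytes [%lld, %lld]\n", start, end);
	}

	int main(void)
	{
		unsigned long start = 3, index = 5;  /* pages 3 and 4 added */

		if (index > start)	/* nothing added -> nothing to undo */
			undo_range((long long)start << PAGE_SHIFT,
				   ((long long)index << PAGE_SHIFT) - 1);
		return 0;		/* prints: undo bytes [12288, 20479] */
	}
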
7827 +diff --git a/mm/swap.c b/mm/swap.c
7828 +index a7251a8ed532..b523f0a4cbfb 100644
7829 +--- a/mm/swap.c
7830 ++++ b/mm/swap.c
7831 +@@ -483,7 +483,7 @@ void rotate_reclaimable_page(struct page *page)
7832 + page_cache_get(page);
7833 + local_irq_save(flags);
7834 + pvec = this_cpu_ptr(&lru_rotate_pvecs);
7835 +- if (!pagevec_add(pvec, page))
7836 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
7837 + pagevec_move_tail(pvec);
7838 + local_irq_restore(flags);
7839 + }
7840 +@@ -539,7 +539,7 @@ void activate_page(struct page *page)
7841 + struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
7842 +
7843 + page_cache_get(page);
7844 +- if (!pagevec_add(pvec, page))
7845 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
7846 + pagevec_lru_move_fn(pvec, __activate_page, NULL);
7847 + put_cpu_var(activate_page_pvecs);
7848 + }
7849 +@@ -631,9 +631,8 @@ static void __lru_cache_add(struct page *page)
7850 + struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
7851 +
7852 + page_cache_get(page);
7853 +- if (!pagevec_space(pvec))
7854 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
7855 + __pagevec_lru_add(pvec);
7856 +- pagevec_add(pvec, page);
7857 + put_cpu_var(lru_add_pvec);
7858 + }
7859 +
7860 +@@ -846,7 +845,7 @@ void deactivate_file_page(struct page *page)
7861 + if (likely(get_page_unless_zero(page))) {
7862 + struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
7863 +
7864 +- if (!pagevec_add(pvec, page))
7865 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
7866 + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
7867 + put_cpu_var(lru_deactivate_file_pvecs);
7868 + }
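
All four swap.c hunks apply the same rule: flush the per-CPU pagevec as soon
as it fills up *or* the page just added is compound, so a huge page never
lingers in a per-CPU vector. A compact model of the add-then-maybe-drain
pattern, where pagevec_add() returns the space remaining (0 when now full):

	#include <stdio.h>

	#define PAGEVEC_SIZE 14

	struct page { int compound; };
	struct pagevec { int nr; struct page *pages[PAGEVEC_SIZE]; };

	/* add the page, then report how many slots are left */
	static int pagevec_add(struct pagevec *pv, struct page *pg)
	{
		pv->pages[pv->nr++] = pg;
		return PAGEVEC_SIZE - pv->nr;
	}

	static void drain(struct pagevec *pv)
	{
		printf("draining %d page(s)\n", pv->nr);
		pv->nr = 0;
	}

	int main(void)
	{
		struct pagevec pv = { 0 };
		struct page thp = { .compound = 1 };

		/* drain when full -- or immediately for a compound page */
		if (!pagevec_add(&pv, &thp) || thp.compound)
			drain(&pv);
		return 0;
	}
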
7869 +diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
7870 +index 7c646bb2c6f7..98d206b22653 100644
7871 +--- a/net/ax25/ax25_ip.c
7872 ++++ b/net/ax25/ax25_ip.c
7873 +@@ -229,8 +229,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
7874 + }
7875 + #endif
7876 +
7877 ++static bool ax25_validate_header(const char *header, unsigned int len)
7878 ++{
7879 ++ ax25_digi digi;
7880 ++
7881 ++ if (!len)
7882 ++ return false;
7883 ++
7884 ++ if (header[0])
7885 ++ return true;
7886 ++
7887 ++ return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
7888 ++ NULL);
7889 ++}
7890 ++
7891 + const struct header_ops ax25_header_ops = {
7892 + .create = ax25_hard_header,
7893 ++ .validate = ax25_validate_header,
7894 + };
7895 +
7896 + EXPORT_SYMBOL(ax25_header_ops);
7897 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
7898 +index da83982bf974..f77dafc114a6 100644
7899 +--- a/net/batman-adv/routing.c
7900 ++++ b/net/batman-adv/routing.c
7901 +@@ -88,6 +88,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
7902 + neigh_node = NULL;
7903 +
7904 + spin_lock_bh(&orig_node->neigh_list_lock);
7905 ++ /* curr_router used earlier may not be the current orig_ifinfo->router
7906 ++ * anymore because it was dereferenced outside of the neigh_list_lock
7907 ++ * protected region. After the new best neighbor has replaced the current
7908 ++ * best neighbor, the reference counter needs to decrease. Consequently,
7909 ++ * the code needs to ensure the curr_router variable contains a pointer
7910 ++ * to the replaced best neighbor.
7911 ++ */
7912 ++ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
7913 ++
7914 + rcu_assign_pointer(orig_ifinfo->router, neigh_node);
7915 + spin_unlock_bh(&orig_node->neigh_list_lock);
7916 + batadv_orig_ifinfo_free_ref(orig_ifinfo);
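
The routing fix re-reads the router pointer under the lock (via
rcu_dereference_protected()) immediately before publishing the replacement
with rcu_assign_pointer(), so the reference that gets dropped belongs to the
neighbor actually replaced, not to a stale copy read before the lock was
taken. A plain-pointer sketch of that replace-then-put shape, with the RCU
and locking primitives reduced to comments:

	#include <stdio.h>

	struct neigh { int refcount; int id; };

	static struct neigh *router;	/* models orig_ifinfo->router */

	static void neigh_put(struct neigh *n)
	{
		if (n && --n->refcount == 0)
			printf("freeing neigh %d\n", n->id);
	}

	static void update_route(struct neigh *new_router)
	{
		/* spin_lock_bh(&neigh_list_lock); */
		struct neigh *curr = router;	/* rcu_dereference_protected() */

		router = new_router;		/* rcu_assign_pointer() */
		/* spin_unlock_bh(&neigh_list_lock); */

		neigh_put(curr);	/* drop the ref of the *replaced* one */
	}

	int main(void)
	{
		struct neigh old = { .refcount = 1, .id = 1 };

		router = &old;
		update_route(NULL);	/* frees neigh 1 exactly once */
		return 0;
	}
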
7917 +diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
7918 +index 3d64ed20c393..6004c2de7b2a 100644
7919 +--- a/net/batman-adv/send.c
7920 ++++ b/net/batman-adv/send.c
7921 +@@ -611,6 +611,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
7922 +
7923 + if (pending) {
7924 + hlist_del(&forw_packet->list);
7925 ++ if (!forw_packet->own)
7926 ++ atomic_inc(&bat_priv->bcast_queue_left);
7927 ++
7928 + batadv_forw_packet_free(forw_packet);
7929 + }
7930 + }
7931 +@@ -638,6 +641,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
7932 +
7933 + if (pending) {
7934 + hlist_del(&forw_packet->list);
7935 ++ if (!forw_packet->own)
7936 ++ atomic_inc(&bat_priv->batman_queue_left);
7937 ++
7938 + batadv_forw_packet_free(forw_packet);
7939 + }
7940 + }
7941 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
7942 +index a0b1b861b968..b38a8aa3cce8 100644
7943 +--- a/net/batman-adv/soft-interface.c
7944 ++++ b/net/batman-adv/soft-interface.c
7945 +@@ -377,11 +377,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
7946 + */
7947 + nf_reset(skb);
7948 +
7949 ++ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
7950 ++ goto dropped;
7951 ++
7952 + vid = batadv_get_vid(skb, 0);
7953 + ethhdr = eth_hdr(skb);
7954 +
7955 + switch (ntohs(ethhdr->h_proto)) {
7956 + case ETH_P_8021Q:
7957 ++ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
7958 ++ goto dropped;
7959 ++
7960 + vhdr = (struct vlan_ethhdr *)skb->data;
7961 +
7962 + if (vhdr->h_vlan_encapsulated_proto != ethertype)
7963 +@@ -393,8 +399,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
7964 + }
7965 +
7966 + /* skb->dev & skb->pkt_type are set here */
7967 +- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
7968 +- goto dropped;
7969 + skb->protocol = eth_type_trans(skb, soft_iface);
7970 +
7971 + /* should not be necessary anymore as we use skb_pull_rcsum()
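
The soft-interface change moves length validation in front of every header
access: pskb_may_pull(skb, ETH_HLEN) must succeed before eth_hdr() is read,
and pskb_may_pull(skb, VLAN_ETH_HLEN) before the VLAN header is touched. A
minimal model of that check-before-parse ordering on a runt frame:

	#include <stdbool.h>
	#include <stdio.h>

	#define ETH_HLEN	14
	#define VLAN_ETH_HLEN	18

	/* stand-in for pskb_may_pull(): succeed only when at least 'len'
	 * bytes of packet data actually exist */
	static bool may_pull(unsigned int pkt_len, unsigned int len)
	{
		return pkt_len >= len;
	}

	int main(void)
	{
		unsigned int pkt_len = 16;	/* a runt 802.1Q frame */

		if (!may_pull(pkt_len, ETH_HLEN))
			return 1;	/* dropped before eth_hdr() is read */

		/* the Ethernet header may be parsed now; a VLAN tag needs
		 * another, longer check before vhdr is dereferenced */
		if (!may_pull(pkt_len, VLAN_ETH_HLEN)) {
			printf("dropped: too short for a VLAN header\n");
			return 1;
		}
		return 0;
	}
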
7972 +diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
7973 +index 8d423bc649b9..f876f707fd9e 100644
7974 +--- a/net/bridge/br_ioctl.c
7975 ++++ b/net/bridge/br_ioctl.c
7976 +@@ -21,18 +21,19 @@
7977 + #include <asm/uaccess.h>
7978 + #include "br_private.h"
7979 +
7980 +-/* called with RTNL */
7981 + static int get_bridge_ifindices(struct net *net, int *indices, int num)
7982 + {
7983 + struct net_device *dev;
7984 + int i = 0;
7985 +
7986 +- for_each_netdev(net, dev) {
7987 ++ rcu_read_lock();
7988 ++ for_each_netdev_rcu(net, dev) {
7989 + if (i >= num)
7990 + break;
7991 + if (dev->priv_flags & IFF_EBRIDGE)
7992 + indices[i++] = dev->ifindex;
7993 + }
7994 ++ rcu_read_unlock();
7995 +
7996 + return i;
7997 + }
7998 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
7999 +index fe95cb704aaa..e9c4b51525de 100644
8000 +--- a/net/core/rtnetlink.c
8001 ++++ b/net/core/rtnetlink.c
8002 +@@ -884,7 +884,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
8003 + + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
8004 + + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
8005 + + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
8006 +- + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
8007 ++ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
8008 ++ + nla_total_size(IFNAMSIZ); /* IFLA_PHYS_PORT_NAME */
8009 ++
8010 + }
8011 +
8012 + static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
8013 +@@ -1070,14 +1072,16 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
8014 + goto nla_put_failure;
8015 +
8016 + if (1) {
8017 +- struct rtnl_link_ifmap map = {
8018 +- .mem_start = dev->mem_start,
8019 +- .mem_end = dev->mem_end,
8020 +- .base_addr = dev->base_addr,
8021 +- .irq = dev->irq,
8022 +- .dma = dev->dma,
8023 +- .port = dev->if_port,
8024 +- };
8025 ++ struct rtnl_link_ifmap map;
8026 ++
8027 ++ memset(&map, 0, sizeof(map));
8028 ++ map.mem_start = dev->mem_start;
8029 ++ map.mem_end = dev->mem_end;
8030 ++ map.base_addr = dev->base_addr;
8031 ++ map.irq = dev->irq;
8032 ++ map.dma = dev->dma;
8033 ++ map.port = dev->if_port;
8034 ++
8035 + if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
8036 + goto nla_put_failure;
8037 + }
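
The rtnetlink hunk swaps a designated initializer for memset() plus member
assignments. C only guarantees that the *named members* of a partially
initialized aggregate are zeroed; padding bytes are unspecified and, on a
kernel stack, may hold stale data that nla_put() would copy out to
userspace. memset() zeroes every byte, padding included. Illustration:

	#include <stdio.h>
	#include <string.h>

	/* same member shapes as struct rtnl_link_ifmap: three u64s, a u16
	 * and two u8s leave 4 trailing padding bytes on common ABIs */
	struct ifmap_like {
		unsigned long long mem_start;
		unsigned long long mem_end;
		unsigned long long base_addr;
		unsigned short     irq;
		unsigned char      dma;
		unsigned char      port;
	};

	int main(void)
	{
		struct ifmap_like map;

		/* `struct ifmap_like map = { .irq = 11 };` would zero the
		 * named members but leave the padding bytes unspecified;
		 * memset() clears padding too */
		memset(&map, 0, sizeof(map));
		map.irq = 11;

		printf("%zu bytes, every byte initialized\n", sizeof(map));
		return 0;
	}
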
8038 +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
8039 +index 03227ffd19ce..76d3bf70c31a 100644
8040 +--- a/net/decnet/dn_route.c
8041 ++++ b/net/decnet/dn_route.c
8042 +@@ -1036,10 +1036,13 @@ source_ok:
8043 + if (!fld.daddr) {
8044 + fld.daddr = fld.saddr;
8045 +
8046 +- err = -EADDRNOTAVAIL;
8047 + if (dev_out)
8048 + dev_put(dev_out);
8049 ++ err = -EINVAL;
8050 + dev_out = init_net.loopback_dev;
8051 ++ if (!dev_out->dn_ptr)
8052 ++ goto out;
8053 ++ err = -EADDRNOTAVAIL;
8054 + dev_hold(dev_out);
8055 + if (!fld.daddr) {
8056 + fld.daddr =
8057 +@@ -1112,6 +1115,8 @@ source_ok:
8058 + if (dev_out == NULL)
8059 + goto out;
8060 + dn_db = rcu_dereference_raw(dev_out->dn_ptr);
8061 ++ if (!dn_db)
8062 ++ goto e_inval;
8063 + /* Possible improvement - check all devices for local addr */
8064 + if (dn_dev_islocal(dev_out, fld.daddr)) {
8065 + dev_put(dev_out);
8066 +@@ -1153,6 +1158,8 @@ select_source:
8067 + dev_put(dev_out);
8068 + dev_out = init_net.loopback_dev;
8069 + dev_hold(dev_out);
8070 ++ if (!dev_out->dn_ptr)
8071 ++ goto e_inval;
8072 + fld.flowidn_oif = dev_out->ifindex;
8073 + if (res.fi)
8074 + dn_fib_info_put(res.fi);
8075 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
8076 +index 280d46f947ea..a57056d87a43 100644
8077 +--- a/net/ipv4/devinet.c
8078 ++++ b/net/ipv4/devinet.c
8079 +@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
8080 +
8081 + ASSERT_RTNL();
8082 +
8083 ++ if (in_dev->dead)
8084 ++ goto no_promotions;
8085 ++
8086 + /* 1. Deleting primary ifaddr forces deletion all secondaries
8087 + * unless alias promotion is set
8088 + **/
8089 +@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
8090 + fib_del_ifaddr(ifa, ifa1);
8091 + }
8092 +
8093 ++no_promotions:
8094 + /* 2. Unlink it */
8095 +
8096 + *ifap = ifa1->ifa_next;
8097 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
8098 +index 872494e6e6eb..513b6aabc5b7 100644
8099 +--- a/net/ipv4/fib_frontend.c
8100 ++++ b/net/ipv4/fib_frontend.c
8101 +@@ -861,7 +861,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
8102 + if (ifa->ifa_flags & IFA_F_SECONDARY) {
8103 + prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
8104 + if (!prim) {
8105 +- pr_warn("%s: bug: prim == NULL\n", __func__);
8106 ++ /* if the device has been deleted, we don't perform
8107 ++ * address promotion
8108 ++ */
8109 ++ if (!in_dev->dead)
8110 ++ pr_warn("%s: bug: prim == NULL\n", __func__);
8111 + return;
8112 + }
8113 + if (iprim && iprim != prim) {
8114 +@@ -876,6 +880,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
8115 + subnet = 1;
8116 + }
8117 +
8118 ++ if (in_dev->dead)
8119 ++ goto no_promotions;
8120 ++
8121 + /* Deletion is more complicated than add.
8122 + * We should take care of not to delete too much :-)
8123 + *
8124 +@@ -951,6 +958,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
8125 + }
8126 + }
8127 +
8128 ++no_promotions:
8129 + if (!(ok & BRD_OK))
8130 + fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
8131 + if (subnet && ifa->ifa_prefixlen < 31) {
8132 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
8133 +index a3a697f5ffba..218abf9fb1ed 100644
8134 +--- a/net/ipv4/igmp.c
8135 ++++ b/net/ipv4/igmp.c
8136 +@@ -353,9 +353,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
8137 + skb_dst_set(skb, &rt->dst);
8138 + skb->dev = dev;
8139 +
8140 +- skb->reserved_tailroom = skb_end_offset(skb) -
8141 +- min(mtu, skb_end_offset(skb));
8142 + skb_reserve(skb, hlen);
8143 ++ skb_tailroom_reserve(skb, mtu, tlen);
8144 +
8145 + skb_reset_network_header(skb);
8146 + pip = ip_hdr(skb);
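
The igmp fix replaces a hand-rolled reserved_tailroom computation (done
before skb_reserve() and ignoring the space the trailer needs) with
skb_tailroom_reserve(). A userspace model of what that helper computes --
cap the usable data area at mtu while always keeping needed_tailroom bytes
free; the in-kernel helper operates on the skb itself and this is only an
approximation of its logic:

	#include <stdio.h>

	static unsigned int tailroom_reserve(unsigned int tailroom,
					     unsigned int mtu,
					     unsigned int needed_tailroom)
	{
		if (mtu < tailroom - needed_tailroom)
			return tailroom - mtu;	/* use at most mtu bytes */
		return needed_tailroom;		/* use all but the trailer */
	}

	int main(void)
	{
		/* 2048 bytes of tailroom after skb_reserve(), a 1500-byte
		 * mtu, 4 bytes kept free for the trailer (illustrative) */
		printf("reserved_tailroom = %u\n",
		       tailroom_reserve(2048, 1500, 4));	/* 548 */
		return 0;
	}
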
8147 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
8148 +index 626d9e56a6bd..35080a708b59 100644
8149 +--- a/net/ipv4/ip_tunnel.c
8150 ++++ b/net/ipv4/ip_tunnel.c
8151 +@@ -652,6 +652,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
8152 + inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
8153 + connected = (tunnel->parms.iph.daddr != 0);
8154 +
8155 ++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
8156 ++
8157 + dst = tnl_params->daddr;
8158 + if (dst == 0) {
8159 + /* NBMA tunnel */
8160 +@@ -749,7 +751,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
8161 + tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
8162 + tunnel->err_count--;
8163 +
8164 +- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
8165 + dst_link_failure(skb);
8166 + } else
8167 + tunnel->err_count = 0;
8168 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
8169 +index a61200754f4b..2953ee9e5fa0 100644
8170 +--- a/net/ipv4/netfilter/arp_tables.c
8171 ++++ b/net/ipv4/netfilter/arp_tables.c
8172 +@@ -354,11 +354,24 @@ unsigned int arpt_do_table(struct sk_buff *skb,
8173 + }
8174 +
8175 + /* All zeroes == unconditional rule. */
8176 +-static inline bool unconditional(const struct arpt_arp *arp)
8177 ++static inline bool unconditional(const struct arpt_entry *e)
8178 + {
8179 + static const struct arpt_arp uncond;
8180 +
8181 +- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
8182 ++ return e->target_offset == sizeof(struct arpt_entry) &&
8183 ++ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
8184 ++}
8185 ++
8186 ++static bool find_jump_target(const struct xt_table_info *t,
8187 ++ const struct arpt_entry *target)
8188 ++{
8189 ++ struct arpt_entry *iter;
8190 ++
8191 ++ xt_entry_foreach(iter, t->entries, t->size) {
8192 ++ if (iter == target)
8193 ++ return true;
8194 ++ }
8195 ++ return false;
8196 + }
8197 +
8198 + /* Figures out from what hook each rule can be called: returns 0 if
8199 +@@ -397,11 +410,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
8200 + |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
8201 +
8202 + /* Unconditional return/END. */
8203 +- if ((e->target_offset == sizeof(struct arpt_entry) &&
8204 ++ if ((unconditional(e) &&
8205 + (strcmp(t->target.u.user.name,
8206 + XT_STANDARD_TARGET) == 0) &&
8207 +- t->verdict < 0 && unconditional(&e->arp)) ||
8208 +- visited) {
8209 ++ t->verdict < 0) || visited) {
8210 + unsigned int oldpos, size;
8211 +
8212 + if ((strcmp(t->target.u.user.name,
8213 +@@ -434,6 +446,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
8214 + size = e->next_offset;
8215 + e = (struct arpt_entry *)
8216 + (entry0 + pos + size);
8217 ++ if (pos + size >= newinfo->size)
8218 ++ return 0;
8219 + e->counters.pcnt = pos;
8220 + pos += size;
8221 + } else {
8222 +@@ -453,9 +467,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
8223 + /* This a jump; chase it. */
8224 + duprintf("Jump rule %u -> %u\n",
8225 + pos, newpos);
8226 ++ e = (struct arpt_entry *)
8227 ++ (entry0 + newpos);
8228 ++ if (!find_jump_target(newinfo, e))
8229 ++ return 0;
8230 + } else {
8231 + /* ... this is a fallthru */
8232 + newpos = pos + e->next_offset;
8233 ++ if (newpos >= newinfo->size)
8234 ++ return 0;
8235 + }
8236 + e = (struct arpt_entry *)
8237 + (entry0 + newpos);
8238 +@@ -469,25 +489,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
8239 + return 1;
8240 + }
8241 +
8242 +-static inline int check_entry(const struct arpt_entry *e, const char *name)
8243 +-{
8244 +- const struct xt_entry_target *t;
8245 +-
8246 +- if (!arp_checkentry(&e->arp)) {
8247 +- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
8248 +- return -EINVAL;
8249 +- }
8250 +-
8251 +- if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
8252 +- return -EINVAL;
8253 +-
8254 +- t = arpt_get_target_c(e);
8255 +- if (e->target_offset + t->u.target_size > e->next_offset)
8256 +- return -EINVAL;
8257 +-
8258 +- return 0;
8259 +-}
8260 +-
8261 + static inline int check_target(struct arpt_entry *e, const char *name)
8262 + {
8263 + struct xt_entry_target *t = arpt_get_target(e);
8264 +@@ -517,10 +518,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
8265 + struct xt_target *target;
8266 + int ret;
8267 +
8268 +- ret = check_entry(e, name);
8269 +- if (ret)
8270 +- return ret;
8271 +-
8272 + t = arpt_get_target(e);
8273 + target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
8274 + t->u.user.revision);
8275 +@@ -546,7 +543,7 @@ static bool check_underflow(const struct arpt_entry *e)
8276 + const struct xt_entry_target *t;
8277 + unsigned int verdict;
8278 +
8279 +- if (!unconditional(&e->arp))
8280 ++ if (!unconditional(e))
8281 + return false;
8282 + t = arpt_get_target_c(e);
8283 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
8284 +@@ -565,9 +562,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
8285 + unsigned int valid_hooks)
8286 + {
8287 + unsigned int h;
8288 ++ int err;
8289 +
8290 + if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
8291 +- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
8292 ++ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
8293 ++ (unsigned char *)e + e->next_offset > limit) {
8294 + duprintf("Bad offset %p\n", e);
8295 + return -EINVAL;
8296 + }
8297 +@@ -579,6 +578,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
8298 + return -EINVAL;
8299 + }
8300 +
8301 ++ if (!arp_checkentry(&e->arp))
8302 ++ return -EINVAL;
8303 ++
8304 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
8305 ++ e->next_offset);
8306 ++ if (err)
8307 ++ return err;
8308 ++
8309 + /* Check hooks & underflows */
8310 + for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
8311 + if (!(valid_hooks & (1 << h)))
8312 +@@ -587,9 +594,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
8313 + newinfo->hook_entry[h] = hook_entries[h];
8314 + if ((unsigned char *)e - base == underflows[h]) {
8315 + if (!check_underflow(e)) {
8316 +- pr_err("Underflows must be unconditional and "
8317 +- "use the STANDARD target with "
8318 +- "ACCEPT/DROP\n");
8319 ++ pr_debug("Underflows must be unconditional and "
8320 ++ "use the STANDARD target with "
8321 ++ "ACCEPT/DROP\n");
8322 + return -EINVAL;
8323 + }
8324 + newinfo->underflow[h] = underflows[h];
8325 +@@ -679,10 +686,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
8326 + }
8327 + }
8328 +
8329 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
8330 +- duprintf("Looping hook\n");
8331 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
8332 + return -ELOOP;
8333 +- }
8334 +
8335 + /* Finally, each sanity check must pass */
8336 + i = 0;
8337 +@@ -1118,56 +1123,18 @@ static int do_add_counters(struct net *net, const void __user *user,
8338 + unsigned int i, curcpu;
8339 + struct xt_counters_info tmp;
8340 + struct xt_counters *paddc;
8341 +- unsigned int num_counters;
8342 +- const char *name;
8343 +- int size;
8344 +- void *ptmp;
8345 + struct xt_table *t;
8346 + const struct xt_table_info *private;
8347 + int ret = 0;
8348 + void *loc_cpu_entry;
8349 + struct arpt_entry *iter;
8350 + unsigned int addend;
8351 +-#ifdef CONFIG_COMPAT
8352 +- struct compat_xt_counters_info compat_tmp;
8353 +
8354 +- if (compat) {
8355 +- ptmp = &compat_tmp;
8356 +- size = sizeof(struct compat_xt_counters_info);
8357 +- } else
8358 +-#endif
8359 +- {
8360 +- ptmp = &tmp;
8361 +- size = sizeof(struct xt_counters_info);
8362 +- }
8363 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
8364 ++ if (IS_ERR(paddc))
8365 ++ return PTR_ERR(paddc);
8366 +
8367 +- if (copy_from_user(ptmp, user, size) != 0)
8368 +- return -EFAULT;
8369 +-
8370 +-#ifdef CONFIG_COMPAT
8371 +- if (compat) {
8372 +- num_counters = compat_tmp.num_counters;
8373 +- name = compat_tmp.name;
8374 +- } else
8375 +-#endif
8376 +- {
8377 +- num_counters = tmp.num_counters;
8378 +- name = tmp.name;
8379 +- }
8380 +-
8381 +- if (len != size + num_counters * sizeof(struct xt_counters))
8382 +- return -EINVAL;
8383 +-
8384 +- paddc = vmalloc(len - size);
8385 +- if (!paddc)
8386 +- return -ENOMEM;
8387 +-
8388 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
8389 +- ret = -EFAULT;
8390 +- goto free;
8391 +- }
8392 +-
8393 +- t = xt_find_table_lock(net, NFPROTO_ARP, name);
8394 ++ t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
8395 + if (IS_ERR_OR_NULL(t)) {
8396 + ret = t ? PTR_ERR(t) : -ENOENT;
8397 + goto free;
8398 +@@ -1175,7 +1142,7 @@ static int do_add_counters(struct net *net, const void __user *user,
8399 +
8400 + local_bh_disable();
8401 + private = t->private;
8402 +- if (private->number != num_counters) {
8403 ++ if (private->number != tmp.num_counters) {
8404 + ret = -EINVAL;
8405 + goto unlock_up_free;
8406 + }
8407 +@@ -1201,6 +1168,18 @@ static int do_add_counters(struct net *net, const void __user *user,
8408 + }
8409 +
8410 + #ifdef CONFIG_COMPAT
8411 ++struct compat_arpt_replace {
8412 ++ char name[XT_TABLE_MAXNAMELEN];
8413 ++ u32 valid_hooks;
8414 ++ u32 num_entries;
8415 ++ u32 size;
8416 ++ u32 hook_entry[NF_ARP_NUMHOOKS];
8417 ++ u32 underflow[NF_ARP_NUMHOOKS];
8418 ++ u32 num_counters;
8419 ++ compat_uptr_t counters;
8420 ++ struct compat_arpt_entry entries[0];
8421 ++};
8422 ++
8423 + static inline void compat_release_entry(struct compat_arpt_entry *e)
8424 + {
8425 + struct xt_entry_target *t;
8426 +@@ -1209,24 +1188,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
8427 + module_put(t->u.kernel.target->me);
8428 + }
8429 +
8430 +-static inline int
8431 ++static int
8432 + check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
8433 + struct xt_table_info *newinfo,
8434 + unsigned int *size,
8435 + const unsigned char *base,
8436 +- const unsigned char *limit,
8437 +- const unsigned int *hook_entries,
8438 +- const unsigned int *underflows,
8439 +- const char *name)
8440 ++ const unsigned char *limit)
8441 + {
8442 + struct xt_entry_target *t;
8443 + struct xt_target *target;
8444 + unsigned int entry_offset;
8445 +- int ret, off, h;
8446 ++ int ret, off;
8447 +
8448 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
8449 + if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
8450 +- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
8451 ++ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
8452 ++ (unsigned char *)e + e->next_offset > limit) {
8453 + duprintf("Bad offset %p, limit = %p\n", e, limit);
8454 + return -EINVAL;
8455 + }
8456 +@@ -1238,8 +1215,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
8457 + return -EINVAL;
8458 + }
8459 +
8460 +- /* For purposes of check_entry casting the compat entry is fine */
8461 +- ret = check_entry((struct arpt_entry *)e, name);
8462 ++ if (!arp_checkentry(&e->arp))
8463 ++ return -EINVAL;
8464 ++
8465 ++ ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
8466 ++ e->next_offset);
8467 + if (ret)
8468 + return ret;
8469 +
8470 +@@ -1263,17 +1243,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
8471 + if (ret)
8472 + goto release_target;
8473 +
8474 +- /* Check hooks & underflows */
8475 +- for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
8476 +- if ((unsigned char *)e - base == hook_entries[h])
8477 +- newinfo->hook_entry[h] = hook_entries[h];
8478 +- if ((unsigned char *)e - base == underflows[h])
8479 +- newinfo->underflow[h] = underflows[h];
8480 +- }
8481 +-
8482 +- /* Clear counters and comefrom */
8483 +- memset(&e->counters, 0, sizeof(e->counters));
8484 +- e->comefrom = 0;
8485 + return 0;
8486 +
8487 + release_target:
8488 +@@ -1282,18 +1251,17 @@ out:
8489 + return ret;
8490 + }
8491 +
8492 +-static int
8493 ++static void
8494 + compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
8495 +- unsigned int *size, const char *name,
8496 ++ unsigned int *size,
8497 + struct xt_table_info *newinfo, unsigned char *base)
8498 + {
8499 + struct xt_entry_target *t;
8500 + struct xt_target *target;
8501 + struct arpt_entry *de;
8502 + unsigned int origsize;
8503 +- int ret, h;
8504 ++ int h;
8505 +
8506 +- ret = 0;
8507 + origsize = *size;
8508 + de = (struct arpt_entry *)*dstptr;
8509 + memcpy(de, e, sizeof(struct arpt_entry));
8510 +@@ -1314,144 +1282,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
8511 + if ((unsigned char *)de - base < newinfo->underflow[h])
8512 + newinfo->underflow[h] -= origsize - *size;
8513 + }
8514 +- return ret;
8515 + }
8516 +
8517 +-static int translate_compat_table(const char *name,
8518 +- unsigned int valid_hooks,
8519 +- struct xt_table_info **pinfo,
8520 ++static int translate_compat_table(struct xt_table_info **pinfo,
8521 + void **pentry0,
8522 +- unsigned int total_size,
8523 +- unsigned int number,
8524 +- unsigned int *hook_entries,
8525 +- unsigned int *underflows)
8526 ++ const struct compat_arpt_replace *compatr)
8527 + {
8528 + unsigned int i, j;
8529 + struct xt_table_info *newinfo, *info;
8530 + void *pos, *entry0, *entry1;
8531 + struct compat_arpt_entry *iter0;
8532 +- struct arpt_entry *iter1;
8533 ++ struct arpt_replace repl;
8534 + unsigned int size;
8535 + int ret = 0;
8536 +
8537 + info = *pinfo;
8538 + entry0 = *pentry0;
8539 +- size = total_size;
8540 +- info->number = number;
8541 +-
8542 +- /* Init all hooks to impossible value. */
8543 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
8544 +- info->hook_entry[i] = 0xFFFFFFFF;
8545 +- info->underflow[i] = 0xFFFFFFFF;
8546 +- }
8547 ++ size = compatr->size;
8548 ++ info->number = compatr->num_entries;
8549 +
8550 + duprintf("translate_compat_table: size %u\n", info->size);
8551 + j = 0;
8552 + xt_compat_lock(NFPROTO_ARP);
8553 +- xt_compat_init_offsets(NFPROTO_ARP, number);
8554 ++ xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
8555 + /* Walk through entries, checking offsets. */
8556 +- xt_entry_foreach(iter0, entry0, total_size) {
8557 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
8558 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
8559 + entry0,
8560 +- entry0 + total_size,
8561 +- hook_entries,
8562 +- underflows,
8563 +- name);
8564 ++ entry0 + compatr->size);
8565 + if (ret != 0)
8566 + goto out_unlock;
8567 + ++j;
8568 + }
8569 +
8570 + ret = -EINVAL;
8571 +- if (j != number) {
8572 ++ if (j != compatr->num_entries) {
8573 + duprintf("translate_compat_table: %u not %u entries\n",
8574 +- j, number);
8575 ++ j, compatr->num_entries);
8576 + goto out_unlock;
8577 + }
8578 +
8579 +- /* Check hooks all assigned */
8580 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
8581 +- /* Only hooks which are valid */
8582 +- if (!(valid_hooks & (1 << i)))
8583 +- continue;
8584 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
8585 +- duprintf("Invalid hook entry %u %u\n",
8586 +- i, hook_entries[i]);
8587 +- goto out_unlock;
8588 +- }
8589 +- if (info->underflow[i] == 0xFFFFFFFF) {
8590 +- duprintf("Invalid underflow %u %u\n",
8591 +- i, underflows[i]);
8592 +- goto out_unlock;
8593 +- }
8594 +- }
8595 +-
8596 + ret = -ENOMEM;
8597 + newinfo = xt_alloc_table_info(size);
8598 + if (!newinfo)
8599 + goto out_unlock;
8600 +
8601 +- newinfo->number = number;
8602 ++ newinfo->number = compatr->num_entries;
8603 + for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
8604 + newinfo->hook_entry[i] = info->hook_entry[i];
8605 + newinfo->underflow[i] = info->underflow[i];
8606 + }
8607 + entry1 = newinfo->entries[raw_smp_processor_id()];
8608 + pos = entry1;
8609 +- size = total_size;
8610 +- xt_entry_foreach(iter0, entry0, total_size) {
8611 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
8612 +- name, newinfo, entry1);
8613 +- if (ret != 0)
8614 +- break;
8615 +- }
8616 ++ size = compatr->size;
8617 ++ xt_entry_foreach(iter0, entry0, compatr->size)
8618 ++ compat_copy_entry_from_user(iter0, &pos, &size,
8619 ++ newinfo, entry1);
8620 ++
8621 ++ /* all module references in entry0 are now gone */
8622 ++
8623 + xt_compat_flush_offsets(NFPROTO_ARP);
8624 + xt_compat_unlock(NFPROTO_ARP);
8625 +- if (ret)
8626 +- goto free_newinfo;
8627 +
8628 +- ret = -ELOOP;
8629 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
8630 +- goto free_newinfo;
8631 ++ memcpy(&repl, compatr, sizeof(*compatr));
8632 +
8633 +- i = 0;
8634 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
8635 +- ret = check_target(iter1, name);
8636 +- if (ret != 0)
8637 +- break;
8638 +- ++i;
8639 +- if (strcmp(arpt_get_target(iter1)->u.user.name,
8640 +- XT_ERROR_TARGET) == 0)
8641 +- ++newinfo->stacksize;
8642 +- }
8643 +- if (ret) {
8644 +- /*
8645 +- * The first i matches need cleanup_entry (calls ->destroy)
8646 +- * because they had called ->check already. The other j-i
8647 +- * entries need only release.
8648 +- */
8649 +- int skip = i;
8650 +- j -= i;
8651 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
8652 +- if (skip-- > 0)
8653 +- continue;
8654 +- if (j-- == 0)
8655 +- break;
8656 +- compat_release_entry(iter0);
8657 +- }
8658 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
8659 +- if (i-- == 0)
8660 +- break;
8661 +- cleanup_entry(iter1);
8662 +- }
8663 +- xt_free_table_info(newinfo);
8664 +- return ret;
8665 ++ for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
8666 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
8667 ++ repl.underflow[i] = newinfo->underflow[i];
8668 + }
8669 +
8670 +- /* And one copy for every other CPU */
8671 +- for_each_possible_cpu(i)
8672 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
8673 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
8674 ++ repl.num_counters = 0;
8675 ++ repl.counters = NULL;
8676 ++ repl.size = newinfo->size;
8677 ++ ret = translate_table(newinfo, entry1, &repl);
8678 ++ if (ret)
8679 ++ goto free_newinfo;
8680 +
8681 + *pinfo = newinfo;
8682 + *pentry0 = entry1;
8683 +@@ -1460,31 +1365,18 @@ static int translate_compat_table(const char *name,
8684 +
8685 + free_newinfo:
8686 + xt_free_table_info(newinfo);
8687 +-out:
8688 +- xt_entry_foreach(iter0, entry0, total_size) {
8689 ++ return ret;
8690 ++out_unlock:
8691 ++ xt_compat_flush_offsets(NFPROTO_ARP);
8692 ++ xt_compat_unlock(NFPROTO_ARP);
8693 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
8694 + if (j-- == 0)
8695 + break;
8696 + compat_release_entry(iter0);
8697 + }
8698 + return ret;
8699 +-out_unlock:
8700 +- xt_compat_flush_offsets(NFPROTO_ARP);
8701 +- xt_compat_unlock(NFPROTO_ARP);
8702 +- goto out;
8703 + }
8704 +
8705 +-struct compat_arpt_replace {
8706 +- char name[XT_TABLE_MAXNAMELEN];
8707 +- u32 valid_hooks;
8708 +- u32 num_entries;
8709 +- u32 size;
8710 +- u32 hook_entry[NF_ARP_NUMHOOKS];
8711 +- u32 underflow[NF_ARP_NUMHOOKS];
8712 +- u32 num_counters;
8713 +- compat_uptr_t counters;
8714 +- struct compat_arpt_entry entries[0];
8715 +-};
8716 +-
8717 + static int compat_do_replace(struct net *net, void __user *user,
8718 + unsigned int len)
8719 + {
8720 +@@ -1518,10 +1410,7 @@ static int compat_do_replace(struct net *net, void __user *user,
8721 + goto free_newinfo;
8722 + }
8723 +
8724 +- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
8725 +- &newinfo, &loc_cpu_entry, tmp.size,
8726 +- tmp.num_entries, tmp.hook_entry,
8727 +- tmp.underflow);
8728 ++ ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
8729 + if (ret != 0)
8730 + goto free_newinfo;
8731 +
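
Two arp_tables hardening points above are worth spelling out: unconditional()
now also requires target_offset == sizeof(struct arpt_entry), so a rule with
matches attached can no longer pass as "unconditional", and every
user-supplied jump is accepted only if find_jump_target() reaches it on an
exact entry boundary by the same next_offset walk the evaluator performs. A
self-contained model of that boundary check:

	#include <stdbool.h>
	#include <stdio.h>

	struct entry { unsigned short next_offset; };

	/* valid only if 'target' is the start of some entry reached by
	 * hopping next_offset from the beginning of the blob */
	static bool find_jump_target(const char *base, unsigned int size,
				     const char *target)
	{
		const char *p = base;

		while (p < base + size) {
			const struct entry *e = (const struct entry *)p;

			if (p == target)
				return true;
			p += e->next_offset;	/* assumed non-zero here */
		}
		return false;
	}

	int main(void)
	{
		struct entry rules[2] = {
			{ .next_offset = sizeof(struct entry) },
			{ .next_offset = sizeof(struct entry) },
		};
		const char *base = (const char *)rules;

		/* jump to the start of rule 1: accepted */
		printf("%d\n", find_jump_target(base, sizeof(rules),
						(const char *)&rules[1]));
		/* jump into the middle of rule 0: rejected */
		printf("%d\n", find_jump_target(base, sizeof(rules), base + 1));
		return 0;
	}
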
8732 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
8733 +index 2d0e265fef6e..3bcf28bf1525 100644
8734 +--- a/net/ipv4/netfilter/ip_tables.c
8735 ++++ b/net/ipv4/netfilter/ip_tables.c
8736 +@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
8737 +
8738 + /* All zeroes == unconditional rule. */
8739 + /* Mildly perf critical (only if packet tracing is on) */
8740 +-static inline bool unconditional(const struct ipt_ip *ip)
8741 ++static inline bool unconditional(const struct ipt_entry *e)
8742 + {
8743 + static const struct ipt_ip uncond;
8744 +
8745 +- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
8746 ++ return e->target_offset == sizeof(struct ipt_entry) &&
8747 ++ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
8748 + #undef FWINV
8749 + }
8750 +
8751 +@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
8752 + } else if (s == e) {
8753 + (*rulenum)++;
8754 +
8755 +- if (s->target_offset == sizeof(struct ipt_entry) &&
8756 ++ if (unconditional(s) &&
8757 + strcmp(t->target.u.kernel.target->name,
8758 + XT_STANDARD_TARGET) == 0 &&
8759 +- t->verdict < 0 &&
8760 +- unconditional(&s->ip)) {
8761 ++ t->verdict < 0) {
8762 + /* Tail of chains: STANDARD target (return/policy) */
8763 + *comment = *chainname == hookname
8764 + ? comments[NF_IP_TRACE_COMMENT_POLICY]
8765 +@@ -438,6 +438,18 @@ ipt_do_table(struct sk_buff *skb,
8766 + #endif
8767 + }
8768 +
8769 ++static bool find_jump_target(const struct xt_table_info *t,
8770 ++ const struct ipt_entry *target)
8771 ++{
8772 ++ struct ipt_entry *iter;
8773 ++
8774 ++ xt_entry_foreach(iter, t->entries, t->size) {
8775 ++ if (iter == target)
8776 ++ return true;
8777 ++ }
8778 ++ return false;
8779 ++}
8780 ++
8781 + /* Figures out from what hook each rule can be called: returns 0 if
8782 + there are loops. Puts hook bitmask in comefrom. */
8783 + static int
8784 +@@ -471,11 +483,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
8785 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
8786 +
8787 + /* Unconditional return/END. */
8788 +- if ((e->target_offset == sizeof(struct ipt_entry) &&
8789 ++ if ((unconditional(e) &&
8790 + (strcmp(t->target.u.user.name,
8791 + XT_STANDARD_TARGET) == 0) &&
8792 +- t->verdict < 0 && unconditional(&e->ip)) ||
8793 +- visited) {
8794 ++ t->verdict < 0) || visited) {
8795 + unsigned int oldpos, size;
8796 +
8797 + if ((strcmp(t->target.u.user.name,
8798 +@@ -516,6 +527,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
8799 + size = e->next_offset;
8800 + e = (struct ipt_entry *)
8801 + (entry0 + pos + size);
8802 ++ if (pos + size >= newinfo->size)
8803 ++ return 0;
8804 + e->counters.pcnt = pos;
8805 + pos += size;
8806 + } else {
8807 +@@ -534,9 +547,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
8808 + /* This a jump; chase it. */
8809 + duprintf("Jump rule %u -> %u\n",
8810 + pos, newpos);
8811 ++ e = (struct ipt_entry *)
8812 ++ (entry0 + newpos);
8813 ++ if (!find_jump_target(newinfo, e))
8814 ++ return 0;
8815 + } else {
8816 + /* ... this is a fallthru */
8817 + newpos = pos + e->next_offset;
8818 ++ if (newpos >= newinfo->size)
8819 ++ return 0;
8820 + }
8821 + e = (struct ipt_entry *)
8822 + (entry0 + newpos);
8823 +@@ -564,27 +583,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
8824 + }
8825 +
8826 + static int
8827 +-check_entry(const struct ipt_entry *e, const char *name)
8828 +-{
8829 +- const struct xt_entry_target *t;
8830 +-
8831 +- if (!ip_checkentry(&e->ip)) {
8832 +- duprintf("ip check failed %p %s.\n", e, name);
8833 +- return -EINVAL;
8834 +- }
8835 +-
8836 +- if (e->target_offset + sizeof(struct xt_entry_target) >
8837 +- e->next_offset)
8838 +- return -EINVAL;
8839 +-
8840 +- t = ipt_get_target_c(e);
8841 +- if (e->target_offset + t->u.target_size > e->next_offset)
8842 +- return -EINVAL;
8843 +-
8844 +- return 0;
8845 +-}
8846 +-
8847 +-static int
8848 + check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
8849 + {
8850 + const struct ipt_ip *ip = par->entryinfo;
8851 +@@ -661,10 +659,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
8852 + struct xt_mtchk_param mtpar;
8853 + struct xt_entry_match *ematch;
8854 +
8855 +- ret = check_entry(e, name);
8856 +- if (ret)
8857 +- return ret;
8858 +-
8859 + j = 0;
8860 + mtpar.net = net;
8861 + mtpar.table = name;
8862 +@@ -708,7 +702,7 @@ static bool check_underflow(const struct ipt_entry *e)
8863 + const struct xt_entry_target *t;
8864 + unsigned int verdict;
8865 +
8866 +- if (!unconditional(&e->ip))
8867 ++ if (!unconditional(e))
8868 + return false;
8869 + t = ipt_get_target_c(e);
8870 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
8871 +@@ -728,9 +722,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
8872 + unsigned int valid_hooks)
8873 + {
8874 + unsigned int h;
8875 ++ int err;
8876 +
8877 + if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
8878 +- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
8879 ++ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
8880 ++ (unsigned char *)e + e->next_offset > limit) {
8881 + duprintf("Bad offset %p\n", e);
8882 + return -EINVAL;
8883 + }
8884 +@@ -742,6 +738,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
8885 + return -EINVAL;
8886 + }
8887 +
8888 ++ if (!ip_checkentry(&e->ip))
8889 ++ return -EINVAL;
8890 ++
8891 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
8892 ++ e->next_offset);
8893 ++ if (err)
8894 ++ return err;
8895 ++
8896 + /* Check hooks & underflows */
8897 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
8898 + if (!(valid_hooks & (1 << h)))
8899 +@@ -750,9 +754,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
8900 + newinfo->hook_entry[h] = hook_entries[h];
8901 + if ((unsigned char *)e - base == underflows[h]) {
8902 + if (!check_underflow(e)) {
8903 +- pr_err("Underflows must be unconditional and "
8904 +- "use the STANDARD target with "
8905 +- "ACCEPT/DROP\n");
8906 ++ pr_debug("Underflows must be unconditional and "
8907 ++ "use the STANDARD target with "
8908 ++ "ACCEPT/DROP\n");
8909 + return -EINVAL;
8910 + }
8911 + newinfo->underflow[h] = underflows[h];
8912 +@@ -1306,56 +1310,18 @@ do_add_counters(struct net *net, const void __user *user,
8913 + unsigned int i, curcpu;
8914 + struct xt_counters_info tmp;
8915 + struct xt_counters *paddc;
8916 +- unsigned int num_counters;
8917 +- const char *name;
8918 +- int size;
8919 +- void *ptmp;
8920 + struct xt_table *t;
8921 + const struct xt_table_info *private;
8922 + int ret = 0;
8923 + void *loc_cpu_entry;
8924 + struct ipt_entry *iter;
8925 + unsigned int addend;
8926 +-#ifdef CONFIG_COMPAT
8927 +- struct compat_xt_counters_info compat_tmp;
8928 +-
8929 +- if (compat) {
8930 +- ptmp = &compat_tmp;
8931 +- size = sizeof(struct compat_xt_counters_info);
8932 +- } else
8933 +-#endif
8934 +- {
8935 +- ptmp = &tmp;
8936 +- size = sizeof(struct xt_counters_info);
8937 +- }
8938 +-
8939 +- if (copy_from_user(ptmp, user, size) != 0)
8940 +- return -EFAULT;
8941 +-
8942 +-#ifdef CONFIG_COMPAT
8943 +- if (compat) {
8944 +- num_counters = compat_tmp.num_counters;
8945 +- name = compat_tmp.name;
8946 +- } else
8947 +-#endif
8948 +- {
8949 +- num_counters = tmp.num_counters;
8950 +- name = tmp.name;
8951 +- }
8952 +
8953 +- if (len != size + num_counters * sizeof(struct xt_counters))
8954 +- return -EINVAL;
8955 +-
8956 +- paddc = vmalloc(len - size);
8957 +- if (!paddc)
8958 +- return -ENOMEM;
8959 +-
8960 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
8961 +- ret = -EFAULT;
8962 +- goto free;
8963 +- }
8964 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
8965 ++ if (IS_ERR(paddc))
8966 ++ return PTR_ERR(paddc);
8967 +
8968 +- t = xt_find_table_lock(net, AF_INET, name);
8969 ++ t = xt_find_table_lock(net, AF_INET, tmp.name);
8970 + if (IS_ERR_OR_NULL(t)) {
8971 + ret = t ? PTR_ERR(t) : -ENOENT;
8972 + goto free;
8973 +@@ -1363,7 +1329,7 @@ do_add_counters(struct net *net, const void __user *user,
8974 +
8975 + local_bh_disable();
8976 + private = t->private;
8977 +- if (private->number != num_counters) {
8978 ++ if (private->number != tmp.num_counters) {
8979 + ret = -EINVAL;
8980 + goto unlock_up_free;
8981 + }
8982 +@@ -1442,7 +1408,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
8983 +
8984 + static int
8985 + compat_find_calc_match(struct xt_entry_match *m,
8986 +- const char *name,
8987 + const struct ipt_ip *ip,
8988 + unsigned int hookmask,
8989 + int *size)
8990 +@@ -1478,21 +1443,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
8991 + struct xt_table_info *newinfo,
8992 + unsigned int *size,
8993 + const unsigned char *base,
8994 +- const unsigned char *limit,
8995 +- const unsigned int *hook_entries,
8996 +- const unsigned int *underflows,
8997 +- const char *name)
8998 ++ const unsigned char *limit)
8999 + {
9000 + struct xt_entry_match *ematch;
9001 + struct xt_entry_target *t;
9002 + struct xt_target *target;
9003 + unsigned int entry_offset;
9004 + unsigned int j;
9005 +- int ret, off, h;
9006 ++ int ret, off;
9007 +
9008 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
9009 + if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
9010 +- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
9011 ++ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
9012 ++ (unsigned char *)e + e->next_offset > limit) {
9013 + duprintf("Bad offset %p, limit = %p\n", e, limit);
9014 + return -EINVAL;
9015 + }
9016 +@@ -1504,8 +1467,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
9017 + return -EINVAL;
9018 + }
9019 +
9020 +- /* For purposes of check_entry casting the compat entry is fine */
9021 +- ret = check_entry((struct ipt_entry *)e, name);
9022 ++ if (!ip_checkentry(&e->ip))
9023 ++ return -EINVAL;
9024 ++
9025 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
9026 ++ e->target_offset, e->next_offset);
9027 + if (ret)
9028 + return ret;
9029 +
9030 +@@ -1513,8 +1479,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
9031 + entry_offset = (void *)e - (void *)base;
9032 + j = 0;
9033 + xt_ematch_foreach(ematch, e) {
9034 +- ret = compat_find_calc_match(ematch, name,
9035 +- &e->ip, e->comefrom, &off);
9036 ++ ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
9037 ++ &off);
9038 + if (ret != 0)
9039 + goto release_matches;
9040 + ++j;
9041 +@@ -1537,17 +1503,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
9042 + if (ret)
9043 + goto out;
9044 +
9045 +- /* Check hooks & underflows */
9046 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
9047 +- if ((unsigned char *)e - base == hook_entries[h])
9048 +- newinfo->hook_entry[h] = hook_entries[h];
9049 +- if ((unsigned char *)e - base == underflows[h])
9050 +- newinfo->underflow[h] = underflows[h];
9051 +- }
9052 +-
9053 +- /* Clear counters and comefrom */
9054 +- memset(&e->counters, 0, sizeof(e->counters));
9055 +- e->comefrom = 0;
9056 + return 0;
9057 +
9058 + out:
9059 +@@ -1561,19 +1516,18 @@ release_matches:
9060 + return ret;
9061 + }
9062 +
9063 +-static int
9064 ++static void
9065 + compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
9066 +- unsigned int *size, const char *name,
9067 ++ unsigned int *size,
9068 + struct xt_table_info *newinfo, unsigned char *base)
9069 + {
9070 + struct xt_entry_target *t;
9071 + struct xt_target *target;
9072 + struct ipt_entry *de;
9073 + unsigned int origsize;
9074 +- int ret, h;
9075 ++ int h;
9076 + struct xt_entry_match *ematch;
9077 +
9078 +- ret = 0;
9079 + origsize = *size;
9080 + de = (struct ipt_entry *)*dstptr;
9081 + memcpy(de, e, sizeof(struct ipt_entry));
9082 +@@ -1582,198 +1536,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
9083 + *dstptr += sizeof(struct ipt_entry);
9084 + *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
9085 +
9086 +- xt_ematch_foreach(ematch, e) {
9087 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
9088 +- if (ret != 0)
9089 +- return ret;
9090 +- }
9091 ++ xt_ematch_foreach(ematch, e)
9092 ++ xt_compat_match_from_user(ematch, dstptr, size);
9093 ++
9094 + de->target_offset = e->target_offset - (origsize - *size);
9095 + t = compat_ipt_get_target(e);
9096 + target = t->u.kernel.target;
9097 + xt_compat_target_from_user(t, dstptr, size);
9098 +
9099 + de->next_offset = e->next_offset - (origsize - *size);
9100 ++
9101 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
9102 + if ((unsigned char *)de - base < newinfo->hook_entry[h])
9103 + newinfo->hook_entry[h] -= origsize - *size;
9104 + if ((unsigned char *)de - base < newinfo->underflow[h])
9105 + newinfo->underflow[h] -= origsize - *size;
9106 + }
9107 +- return ret;
9108 +-}
9109 +-
9110 +-static int
9111 +-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
9112 +-{
9113 +- struct xt_entry_match *ematch;
9114 +- struct xt_mtchk_param mtpar;
9115 +- unsigned int j;
9116 +- int ret = 0;
9117 +-
9118 +- j = 0;
9119 +- mtpar.net = net;
9120 +- mtpar.table = name;
9121 +- mtpar.entryinfo = &e->ip;
9122 +- mtpar.hook_mask = e->comefrom;
9123 +- mtpar.family = NFPROTO_IPV4;
9124 +- xt_ematch_foreach(ematch, e) {
9125 +- ret = check_match(ematch, &mtpar);
9126 +- if (ret != 0)
9127 +- goto cleanup_matches;
9128 +- ++j;
9129 +- }
9130 +-
9131 +- ret = check_target(e, net, name);
9132 +- if (ret)
9133 +- goto cleanup_matches;
9134 +- return 0;
9135 +-
9136 +- cleanup_matches:
9137 +- xt_ematch_foreach(ematch, e) {
9138 +- if (j-- == 0)
9139 +- break;
9140 +- cleanup_match(ematch, net);
9141 +- }
9142 +- return ret;
9143 + }
9144 +
9145 + static int
9146 + translate_compat_table(struct net *net,
9147 +- const char *name,
9148 +- unsigned int valid_hooks,
9149 + struct xt_table_info **pinfo,
9150 + void **pentry0,
9151 +- unsigned int total_size,
9152 +- unsigned int number,
9153 +- unsigned int *hook_entries,
9154 +- unsigned int *underflows)
9155 ++ const struct compat_ipt_replace *compatr)
9156 + {
9157 + unsigned int i, j;
9158 + struct xt_table_info *newinfo, *info;
9159 + void *pos, *entry0, *entry1;
9160 + struct compat_ipt_entry *iter0;
9161 +- struct ipt_entry *iter1;
9162 ++ struct ipt_replace repl;
9163 + unsigned int size;
9164 + int ret;
9165 +
9166 + info = *pinfo;
9167 + entry0 = *pentry0;
9168 +- size = total_size;
9169 +- info->number = number;
9170 +-
9171 +- /* Init all hooks to impossible value. */
9172 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
9173 +- info->hook_entry[i] = 0xFFFFFFFF;
9174 +- info->underflow[i] = 0xFFFFFFFF;
9175 +- }
9176 ++ size = compatr->size;
9177 ++ info->number = compatr->num_entries;
9178 +
9179 + duprintf("translate_compat_table: size %u\n", info->size);
9180 + j = 0;
9181 + xt_compat_lock(AF_INET);
9182 +- xt_compat_init_offsets(AF_INET, number);
9183 ++ xt_compat_init_offsets(AF_INET, compatr->num_entries);
9184 + /* Walk through entries, checking offsets. */
9185 +- xt_entry_foreach(iter0, entry0, total_size) {
9186 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
9187 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
9188 + entry0,
9189 +- entry0 + total_size,
9190 +- hook_entries,
9191 +- underflows,
9192 +- name);
9193 ++ entry0 + compatr->size);
9194 + if (ret != 0)
9195 + goto out_unlock;
9196 + ++j;
9197 + }
9198 +
9199 + ret = -EINVAL;
9200 +- if (j != number) {
9201 ++ if (j != compatr->num_entries) {
9202 + duprintf("translate_compat_table: %u not %u entries\n",
9203 +- j, number);
9204 ++ j, compatr->num_entries);
9205 + goto out_unlock;
9206 + }
9207 +
9208 +- /* Check hooks all assigned */
9209 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
9210 +- /* Only hooks which are valid */
9211 +- if (!(valid_hooks & (1 << i)))
9212 +- continue;
9213 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
9214 +- duprintf("Invalid hook entry %u %u\n",
9215 +- i, hook_entries[i]);
9216 +- goto out_unlock;
9217 +- }
9218 +- if (info->underflow[i] == 0xFFFFFFFF) {
9219 +- duprintf("Invalid underflow %u %u\n",
9220 +- i, underflows[i]);
9221 +- goto out_unlock;
9222 +- }
9223 +- }
9224 +-
9225 + ret = -ENOMEM;
9226 + newinfo = xt_alloc_table_info(size);
9227 + if (!newinfo)
9228 + goto out_unlock;
9229 +
9230 +- newinfo->number = number;
9231 ++ newinfo->number = compatr->num_entries;
9232 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
9233 +- newinfo->hook_entry[i] = info->hook_entry[i];
9234 +- newinfo->underflow[i] = info->underflow[i];
9235 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
9236 ++ newinfo->underflow[i] = compatr->underflow[i];
9237 + }
9238 + entry1 = newinfo->entries[raw_smp_processor_id()];
9239 + pos = entry1;
9240 +- size = total_size;
9241 +- xt_entry_foreach(iter0, entry0, total_size) {
9242 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
9243 +- name, newinfo, entry1);
9244 +- if (ret != 0)
9245 +- break;
9246 +- }
9247 ++ size = compatr->size;
9248 ++ xt_entry_foreach(iter0, entry0, compatr->size)
9249 ++ compat_copy_entry_from_user(iter0, &pos, &size,
9250 ++ newinfo, entry1);
9251 ++
9252 ++ /* all module references in entry0 are now gone.
9253 ++ * entry1/newinfo contains a 64bit ruleset that looks exactly as
9254 ++ * generated by 64bit userspace.
9255 ++ *
9256 ++ * Call standard translate_table() to validate all hook_entries,
9257 ++ * underflows, check for loops, etc.
9258 ++ */
9259 + xt_compat_flush_offsets(AF_INET);
9260 + xt_compat_unlock(AF_INET);
9261 +- if (ret)
9262 +- goto free_newinfo;
9263 +
9264 +- ret = -ELOOP;
9265 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
9266 +- goto free_newinfo;
9267 ++ memcpy(&repl, compatr, sizeof(*compatr));
9268 +
9269 +- i = 0;
9270 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
9271 +- ret = compat_check_entry(iter1, net, name);
9272 +- if (ret != 0)
9273 +- break;
9274 +- ++i;
9275 +- if (strcmp(ipt_get_target(iter1)->u.user.name,
9276 +- XT_ERROR_TARGET) == 0)
9277 +- ++newinfo->stacksize;
9278 +- }
9279 +- if (ret) {
9280 +- /*
9281 +- * The first i matches need cleanup_entry (calls ->destroy)
9282 +- * because they had called ->check already. The other j-i
9283 +- * entries need only release.
9284 +- */
9285 +- int skip = i;
9286 +- j -= i;
9287 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
9288 +- if (skip-- > 0)
9289 +- continue;
9290 +- if (j-- == 0)
9291 +- break;
9292 +- compat_release_entry(iter0);
9293 +- }
9294 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
9295 +- if (i-- == 0)
9296 +- break;
9297 +- cleanup_entry(iter1, net);
9298 +- }
9299 +- xt_free_table_info(newinfo);
9300 +- return ret;
9301 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
9302 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
9303 ++ repl.underflow[i] = newinfo->underflow[i];
9304 + }
9305 +
9306 +- /* And one copy for every other CPU */
9307 +- for_each_possible_cpu(i)
9308 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
9309 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
9310 ++ repl.num_counters = 0;
9311 ++ repl.counters = NULL;
9312 ++ repl.size = newinfo->size;
9313 ++ ret = translate_table(net, newinfo, entry1, &repl);
9314 ++ if (ret)
9315 ++ goto free_newinfo;
9316 +
9317 + *pinfo = newinfo;
9318 + *pentry0 = entry1;
9319 +@@ -1782,17 +1642,16 @@ translate_compat_table(struct net *net,
9320 +
9321 + free_newinfo:
9322 + xt_free_table_info(newinfo);
9323 +-out:
9324 +- xt_entry_foreach(iter0, entry0, total_size) {
9325 ++ return ret;
9326 ++out_unlock:
9327 ++ xt_compat_flush_offsets(AF_INET);
9328 ++ xt_compat_unlock(AF_INET);
9329 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
9330 + if (j-- == 0)
9331 + break;
9332 + compat_release_entry(iter0);
9333 + }
9334 + return ret;
9335 +-out_unlock:
9336 +- xt_compat_flush_offsets(AF_INET);
9337 +- xt_compat_unlock(AF_INET);
9338 +- goto out;
9339 + }
9340 +
9341 + static int
9342 +@@ -1829,10 +1688,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
9343 + goto free_newinfo;
9344 + }
9345 +
9346 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
9347 +- &newinfo, &loc_cpu_entry, tmp.size,
9348 +- tmp.num_entries, tmp.hook_entry,
9349 +- tmp.underflow);
9350 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
9351 + if (ret != 0)
9352 + goto free_newinfo;
9353 +
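The restructured translate_compat_table() above rests on one idea: convert the 32-bit blob into native layout first, then let the standard translate_table() re-validate everything (offsets, hook entries, underflows, loops) on the 64-bit copy instead of duplicating those checks in the compat path. A minimal userspace model of the offset walk such a validator performs — hypothetical struct and function names, not the kernel's:

#include <stddef.h>

/* Toy rule header: next_offset is the total size of this rule. */
struct entry {
        unsigned short target_offset;
        unsigned short next_offset;
};

/* Walk a user-supplied blob, enforcing the invariants the patch
 * relies on: the fixed header must fit before the limit, and
 * next_offset must keep the whole rule inside the blob, or a
 * crafted ruleset could steer the walk out of bounds. */
static int validate_blob(const unsigned char *base, size_t size)
{
        const unsigned char *limit = base + size;
        const unsigned char *p = base;

        while (p < limit) {
                const struct entry *e = (const struct entry *)p;

                if ((size_t)(limit - p) < sizeof(*e))
                        return -1;      /* header would overrun the blob */
                if (e->next_offset < sizeof(*e))
                        return -1;      /* zero-sized rule: endless loop */
                if ((size_t)(limit - p) < e->next_offset)
                        return -1;      /* rule body would overrun the blob */
                p += e->next_offset;
        }
        return 0;
}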
9354 +diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
9355 +index c6eb42100e9a..ea91058b5f6f 100644
9356 +--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
9357 ++++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
9358 +@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
9359 + unsigned long event,
9360 + void *ptr)
9361 + {
9362 +- struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
9363 ++ struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
9364 + struct netdev_notifier_info info;
9365 +
9366 +- netdev_notifier_info_init(&info, dev);
9367 ++ /* The masq_dev_notifier will catch the case of the device going
9368 ++ * down. So if the inetdev is dead and being destroyed we have
9369 ++ * no work to do. Otherwise this is an individual address removal
9370 ++ * and we have to perform the flush.
9371 ++ */
9372 ++ if (idev->dead)
9373 ++ return NOTIFY_DONE;
9374 ++
9375 ++ netdev_notifier_info_init(&info, idev->dev);
9376 + return masq_device_event(this, event, &info);
9377 + }
9378 +
9379 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
9380 +index 1d3cdb4d4ebc..eb1d9839a257 100644
9381 +--- a/net/ipv4/route.c
9382 ++++ b/net/ipv4/route.c
9383 +@@ -1976,6 +1976,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
9384 + */
9385 + if (fi && res->prefixlen < 4)
9386 + fi = NULL;
9387 ++ } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
9388 ++ (orig_oif != dev_out->ifindex)) {
9389 ++ /* For local routes that require a particular output interface
9390 ++ * we do not want to cache the result. Caching the result
9391 ++ * causes incorrect behaviour when there are multiple source
9392 ++ * addresses on the interface, the end result being that if the
9393 ++ * intended recipient is waiting on that interface for the
9394 ++ * packet, he won't receive it because it will be delivered on
9395 ++ * the loopback interface and the IP_PKTINFO ipi_ifindex will
9396 ++ * be set to the loopback interface as well.
9397 ++ */
9398 ++ fi = NULL;
9399 + }
9400 +
9401 + fnhe = NULL;
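The cache-avoidance above is observable from userspace: with IP_PKTINFO enabled, ipi_ifindex reports the interface a datagram actually arrived on, which is exactly the value that went wrong when a cached local route delivered via loopback. A small standalone receiver (the port number is arbitrary):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int on = 1;
        char buf[1500], cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct sockaddr_in sa = { .sin_family = AF_INET,
                                  .sin_port = htons(5000) };
        struct iovec iov = { buf, sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = cbuf,
                              .msg_controllen = sizeof(cbuf) };
        struct cmsghdr *c;

        setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
        bind(fd, (struct sockaddr *)&sa, sizeof(sa));
        if (recvmsg(fd, &msg, 0) < 0)
                return 1;

        for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
                if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_PKTINFO) {
                        struct in_pktinfo pi;

                        memcpy(&pi, CMSG_DATA(c), sizeof(pi));
                        printf("datagram arrived on ifindex %d\n",
                               pi.ipi_ifindex);
                }
        return 0;
}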
9402 +diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
9403 +index a51d63a43e33..9c840c5c6047 100644
9404 +--- a/net/ipv4/tcp_metrics.c
9405 ++++ b/net/ipv4/tcp_metrics.c
9406 +@@ -566,7 +566,7 @@ reset:
9407 + */
9408 + if (crtt > tp->srtt_us) {
9409 + /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
9410 +- crtt /= 8 * USEC_PER_MSEC;
9411 ++ crtt /= 8 * USEC_PER_SEC / HZ;
9412 + inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
9413 + } else if (tp->srtt_us == 0) {
9414 + /* RFC6298: 5.7 We've failed to get a valid RTT sample from
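The substance of this one-liner is units: cached RTT values (like srtt_us) are microseconds left-shifted by 3, while icsk_rto is measured in jiffies. Dividing by 8 * USEC_PER_MSEC yields milliseconds, which only equals jiffies when HZ=1000; dividing by 8 * USEC_PER_SEC / HZ (microseconds per jiffy, times 8) is correct for any HZ. A worked example:

#include <stdio.h>

#define USEC_PER_MSEC 1000UL
#define USEC_PER_SEC  1000000UL

int main(void)
{
        unsigned long hz = 100;              /* many kernels run HZ=100/250 */
        unsigned long crtt = 200000UL << 3;  /* cached 200 ms RTT, usec << 3 */

        unsigned long buggy = crtt / (8 * USEC_PER_MSEC);     /* 200 "jiffies" */
        unsigned long fixed = crtt / (8 * USEC_PER_SEC / hz); /*  20 jiffies   */

        /* at HZ=100 one jiffy is 10 ms, so the buggy RTO is 10x too long */
        printf("buggy: %lu ms   fixed: %lu ms\n",
               buggy * (1000 / hz), fixed * (1000 / hz));     /* 2000 vs 200 */
        return 0;
}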
9415 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
9416 +index 1ea4322c3b0c..ae66c8426ad0 100644
9417 +--- a/net/ipv4/tcp_output.c
9418 ++++ b/net/ipv4/tcp_output.c
9419 +@@ -2627,8 +2627,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
9420 + */
9421 + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
9422 + skb_headroom(skb) >= 0xFFFF)) {
9423 +- struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
9424 +- GFP_ATOMIC);
9425 ++ struct sk_buff *nskb;
9426 ++
9427 ++ skb_mstamp_get(&skb->skb_mstamp);
9428 ++ nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
9429 + err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
9430 + -ENOBUFS;
9431 + } else {
9432 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
9433 +index a390174b96de..031752efe1ab 100644
9434 +--- a/net/ipv4/udp.c
9435 ++++ b/net/ipv4/udp.c
9436 +@@ -1979,10 +1979,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
9437 + if (!in_dev)
9438 + return;
9439 +
9440 +- ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
9441 +- iph->protocol);
9442 +- if (!ours)
9443 +- return;
9444 ++ /* we are supposed to accept bcast packets */
9445 ++ if (skb->pkt_type == PACKET_MULTICAST) {
9446 ++ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
9447 ++ iph->protocol);
9448 ++ if (!ours)
9449 ++ return;
9450 ++ }
9451 ++
9452 + sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
9453 + uh->source, iph->saddr, dif);
9454 + } else if (skb->pkt_type == PACKET_HOST) {
9455 +diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
9456 +index 6bb98cc193c9..7b534ac04056 100644
9457 +--- a/net/ipv4/udp_tunnel.c
9458 ++++ b/net/ipv4/udp_tunnel.c
9459 +@@ -90,6 +90,8 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
9460 + uh->source = src_port;
9461 + uh->len = htons(skb->len);
9462 +
9463 ++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
9464 ++
9465 + udp_set_csum(nocheck, skb, src, dst, skb->len);
9466 +
9467 + return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
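The memset added here (and repeated below for ip6_gre and the ip4-in-ip6 tunnel path) exists because IPCB() is merely one view of the 48-byte skb->cb scratch area, which the previous protocol layer may have filled with its own layout; without the clear, the IPv4 output path can misread stale bytes as IP options. A toy model of the aliasing, with stand-in struct names rather than the kernel's:

#include <string.h>

struct sk_buff { char cb[48]; };                 /* shared scratch area */
struct inet_skb_parm { char opt[40]; };          /* stand-in IPv4 view  */

#define IPCB(skb) ((struct inet_skb_parm *)(skb)->cb)

/* On entry to IPv4 output from a tunnel, wipe whatever the prior
 * layer left behind so it cannot be parsed as IPv4 options. */
static void enter_ipv4_output(struct sk_buff *skb)
{
        memset(&IPCB(skb)->opt, 0, sizeof(IPCB(skb)->opt));
}

int main(void)
{
        struct sk_buff skb;

        enter_ipv4_output(&skb);
        return 0;
}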
9468 +diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
9469 +index 5c5d23e59da5..9508a20fbf61 100644
9470 +--- a/net/ipv6/exthdrs_core.c
9471 ++++ b/net/ipv6/exthdrs_core.c
9472 +@@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
9473 + *fragoff = _frag_off;
9474 + return hp->nexthdr;
9475 + }
9476 +- return -ENOENT;
9477 ++ if (!found)
9478 ++ return -ENOENT;
9479 ++ if (fragoff)
9480 ++ *fragoff = _frag_off;
9481 ++ break;
9482 + }
9483 + hdrlen = 8;
9484 + } else if (nexthdr == NEXTHDR_AUTH) {
9485 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
9486 +index 76be7d311cc4..b1311da5d7b8 100644
9487 +--- a/net/ipv6/ip6_gre.c
9488 ++++ b/net/ipv6/ip6_gre.c
9489 +@@ -783,6 +783,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
9490 + __u32 mtu;
9491 + int err;
9492 +
9493 ++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
9494 ++
9495 + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
9496 + encap_limit = t->parms.encap_limit;
9497 +
9498 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
9499 +index 5cafd92c2312..c7c2c33aa4af 100644
9500 +--- a/net/ipv6/ip6_tunnel.c
9501 ++++ b/net/ipv6/ip6_tunnel.c
9502 +@@ -284,12 +284,12 @@ static int ip6_tnl_create2(struct net_device *dev)
9503 +
9504 + t = netdev_priv(dev);
9505 +
9506 ++ dev->rtnl_link_ops = &ip6_link_ops;
9507 + err = register_netdevice(dev);
9508 + if (err < 0)
9509 + goto out;
9510 +
9511 + strcpy(t->parms.name, dev->name);
9512 +- dev->rtnl_link_ops = &ip6_link_ops;
9513 +
9514 + dev_hold(dev);
9515 + ip6_tnl_link(ip6n, t);
9516 +@@ -1124,6 +1124,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
9517 + u8 tproto;
9518 + int err;
9519 +
9520 ++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
9521 ++
9522 + tproto = ACCESS_ONCE(t->parms.proto);
9523 + if (tproto != IPPROTO_IPIP && tproto != 0)
9524 + return -1;
9525 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
9526 +index 41e3b5ee8d0b..9a63110b6548 100644
9527 +--- a/net/ipv6/mcast.c
9528 ++++ b/net/ipv6/mcast.c
9529 +@@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
9530 + return NULL;
9531 +
9532 + skb->priority = TC_PRIO_CONTROL;
9533 +- skb->reserved_tailroom = skb_end_offset(skb) -
9534 +- min(mtu, skb_end_offset(skb));
9535 + skb_reserve(skb, hlen);
9536 ++ skb_tailroom_reserve(skb, mtu, tlen);
9537 +
9538 + if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
9539 + /* <draft-ietf-magma-mld-source-05.txt>:
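The new helper folds tlen (the extra tailroom the driver asked for) into the reservation, which the replaced open-coded expression ignored. A hedged reconstruction of what skb_tailroom_reserve() does, inferred from this call site rather than copied from the kernel source:

/* Stand-in skb, just enough to express the arithmetic. */
struct sk_buff { unsigned int end, tail, reserved_tailroom; };

static unsigned int skb_tailroom(const struct sk_buff *skb)
{
        return skb->end - skb->tail;
}

/* Cap usable tailroom at mtu while always keeping 'needed' bytes
 * (tlen at the call site) in reserve for the driver's trailer. */
static void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
                                 unsigned int needed)
{
        if (mtu < skb_tailroom(skb) - needed)
                skb->reserved_tailroom = skb_tailroom(skb) - mtu;
        else
                skb->reserved_tailroom = needed;
}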
9540 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
9541 +index 62f5b0d0bc9b..5254d76dfce8 100644
9542 +--- a/net/ipv6/netfilter/ip6_tables.c
9543 ++++ b/net/ipv6/netfilter/ip6_tables.c
9544 +@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
9545 +
9546 + /* All zeroes == unconditional rule. */
9547 + /* Mildly perf critical (only if packet tracing is on) */
9548 +-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
9549 ++static inline bool unconditional(const struct ip6t_entry *e)
9550 + {
9551 + static const struct ip6t_ip6 uncond;
9552 +
9553 +- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
9554 ++ return e->target_offset == sizeof(struct ip6t_entry) &&
9555 ++ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
9556 + }
9557 +
9558 + static inline const struct xt_entry_target *
9559 +@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
9560 + } else if (s == e) {
9561 + (*rulenum)++;
9562 +
9563 +- if (s->target_offset == sizeof(struct ip6t_entry) &&
9564 ++ if (unconditional(s) &&
9565 + strcmp(t->target.u.kernel.target->name,
9566 + XT_STANDARD_TARGET) == 0 &&
9567 +- t->verdict < 0 &&
9568 +- unconditional(&s->ipv6)) {
9569 ++ t->verdict < 0) {
9570 + /* Tail of chains: STANDARD target (return/policy) */
9571 + *comment = *chainname == hookname
9572 + ? comments[NF_IP6_TRACE_COMMENT_POLICY]
9573 +@@ -451,6 +451,18 @@ ip6t_do_table(struct sk_buff *skb,
9574 + #endif
9575 + }
9576 +
9577 ++static bool find_jump_target(const struct xt_table_info *t,
9578 ++ const struct ip6t_entry *target)
9579 ++{
9580 ++ struct ip6t_entry *iter;
9581 ++
9582 ++ xt_entry_foreach(iter, t->entries, t->size) {
9583 ++ if (iter == target)
9584 ++ return true;
9585 ++ }
9586 ++ return false;
9587 ++}
9588 ++
9589 + /* Figures out from what hook each rule can be called: returns 0 if
9590 + there are loops. Puts hook bitmask in comefrom. */
9591 + static int
9592 +@@ -484,11 +496,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
9593 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
9594 +
9595 + /* Unconditional return/END. */
9596 +- if ((e->target_offset == sizeof(struct ip6t_entry) &&
9597 ++ if ((unconditional(e) &&
9598 + (strcmp(t->target.u.user.name,
9599 + XT_STANDARD_TARGET) == 0) &&
9600 +- t->verdict < 0 &&
9601 +- unconditional(&e->ipv6)) || visited) {
9602 ++ t->verdict < 0) || visited) {
9603 + unsigned int oldpos, size;
9604 +
9605 + if ((strcmp(t->target.u.user.name,
9606 +@@ -529,6 +540,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
9607 + size = e->next_offset;
9608 + e = (struct ip6t_entry *)
9609 + (entry0 + pos + size);
9610 ++ if (pos + size >= newinfo->size)
9611 ++ return 0;
9612 + e->counters.pcnt = pos;
9613 + pos += size;
9614 + } else {
9615 +@@ -547,9 +560,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
9616 + /* This a jump; chase it. */
9617 + duprintf("Jump rule %u -> %u\n",
9618 + pos, newpos);
9619 ++ e = (struct ip6t_entry *)
9620 ++ (entry0 + newpos);
9621 ++ if (!find_jump_target(newinfo, e))
9622 ++ return 0;
9623 + } else {
9624 + /* ... this is a fallthru */
9625 + newpos = pos + e->next_offset;
9626 ++ if (newpos >= newinfo->size)
9627 ++ return 0;
9628 + }
9629 + e = (struct ip6t_entry *)
9630 + (entry0 + newpos);
9631 +@@ -576,27 +595,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
9632 + module_put(par.match->me);
9633 + }
9634 +
9635 +-static int
9636 +-check_entry(const struct ip6t_entry *e, const char *name)
9637 +-{
9638 +- const struct xt_entry_target *t;
9639 +-
9640 +- if (!ip6_checkentry(&e->ipv6)) {
9641 +- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
9642 +- return -EINVAL;
9643 +- }
9644 +-
9645 +- if (e->target_offset + sizeof(struct xt_entry_target) >
9646 +- e->next_offset)
9647 +- return -EINVAL;
9648 +-
9649 +- t = ip6t_get_target_c(e);
9650 +- if (e->target_offset + t->u.target_size > e->next_offset)
9651 +- return -EINVAL;
9652 +-
9653 +- return 0;
9654 +-}
9655 +-
9656 + static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
9657 + {
9658 + const struct ip6t_ip6 *ipv6 = par->entryinfo;
9659 +@@ -675,10 +673,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
9660 + struct xt_mtchk_param mtpar;
9661 + struct xt_entry_match *ematch;
9662 +
9663 +- ret = check_entry(e, name);
9664 +- if (ret)
9665 +- return ret;
9666 +-
9667 + j = 0;
9668 + mtpar.net = net;
9669 + mtpar.table = name;
9670 +@@ -722,7 +716,7 @@ static bool check_underflow(const struct ip6t_entry *e)
9671 + const struct xt_entry_target *t;
9672 + unsigned int verdict;
9673 +
9674 +- if (!unconditional(&e->ipv6))
9675 ++ if (!unconditional(e))
9676 + return false;
9677 + t = ip6t_get_target_c(e);
9678 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
9679 +@@ -742,9 +736,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
9680 + unsigned int valid_hooks)
9681 + {
9682 + unsigned int h;
9683 ++ int err;
9684 +
9685 + if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
9686 +- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
9687 ++ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
9688 ++ (unsigned char *)e + e->next_offset > limit) {
9689 + duprintf("Bad offset %p\n", e);
9690 + return -EINVAL;
9691 + }
9692 +@@ -756,6 +752,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
9693 + return -EINVAL;
9694 + }
9695 +
9696 ++ if (!ip6_checkentry(&e->ipv6))
9697 ++ return -EINVAL;
9698 ++
9699 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
9700 ++ e->next_offset);
9701 ++ if (err)
9702 ++ return err;
9703 ++
9704 + /* Check hooks & underflows */
9705 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
9706 + if (!(valid_hooks & (1 << h)))
9707 +@@ -764,9 +768,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
9708 + newinfo->hook_entry[h] = hook_entries[h];
9709 + if ((unsigned char *)e - base == underflows[h]) {
9710 + if (!check_underflow(e)) {
9711 +- pr_err("Underflows must be unconditional and "
9712 +- "use the STANDARD target with "
9713 +- "ACCEPT/DROP\n");
9714 ++ pr_debug("Underflows must be unconditional and "
9715 ++ "use the STANDARD target with "
9716 ++ "ACCEPT/DROP\n");
9717 + return -EINVAL;
9718 + }
9719 + newinfo->underflow[h] = underflows[h];
9720 +@@ -1319,56 +1323,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
9721 + unsigned int i, curcpu;
9722 + struct xt_counters_info tmp;
9723 + struct xt_counters *paddc;
9724 +- unsigned int num_counters;
9725 +- char *name;
9726 +- int size;
9727 +- void *ptmp;
9728 + struct xt_table *t;
9729 + const struct xt_table_info *private;
9730 + int ret = 0;
9731 + const void *loc_cpu_entry;
9732 + struct ip6t_entry *iter;
9733 + unsigned int addend;
9734 +-#ifdef CONFIG_COMPAT
9735 +- struct compat_xt_counters_info compat_tmp;
9736 +-
9737 +- if (compat) {
9738 +- ptmp = &compat_tmp;
9739 +- size = sizeof(struct compat_xt_counters_info);
9740 +- } else
9741 +-#endif
9742 +- {
9743 +- ptmp = &tmp;
9744 +- size = sizeof(struct xt_counters_info);
9745 +- }
9746 +-
9747 +- if (copy_from_user(ptmp, user, size) != 0)
9748 +- return -EFAULT;
9749 +
9750 +-#ifdef CONFIG_COMPAT
9751 +- if (compat) {
9752 +- num_counters = compat_tmp.num_counters;
9753 +- name = compat_tmp.name;
9754 +- } else
9755 +-#endif
9756 +- {
9757 +- num_counters = tmp.num_counters;
9758 +- name = tmp.name;
9759 +- }
9760 +-
9761 +- if (len != size + num_counters * sizeof(struct xt_counters))
9762 +- return -EINVAL;
9763 +-
9764 +- paddc = vmalloc(len - size);
9765 +- if (!paddc)
9766 +- return -ENOMEM;
9767 +-
9768 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
9769 +- ret = -EFAULT;
9770 +- goto free;
9771 +- }
9772 +-
9773 +- t = xt_find_table_lock(net, AF_INET6, name);
9774 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
9775 ++ if (IS_ERR(paddc))
9776 ++ return PTR_ERR(paddc);
9777 ++ t = xt_find_table_lock(net, AF_INET6, tmp.name);
9778 + if (IS_ERR_OR_NULL(t)) {
9779 + ret = t ? PTR_ERR(t) : -ENOENT;
9780 + goto free;
9781 +@@ -1377,7 +1342,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
9782 +
9783 + local_bh_disable();
9784 + private = t->private;
9785 +- if (private->number != num_counters) {
9786 ++ if (private->number != tmp.num_counters) {
9787 + ret = -EINVAL;
9788 + goto unlock_up_free;
9789 + }
9790 +@@ -1457,7 +1422,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
9791 +
9792 + static int
9793 + compat_find_calc_match(struct xt_entry_match *m,
9794 +- const char *name,
9795 + const struct ip6t_ip6 *ipv6,
9796 + unsigned int hookmask,
9797 + int *size)
9798 +@@ -1493,21 +1457,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
9799 + struct xt_table_info *newinfo,
9800 + unsigned int *size,
9801 + const unsigned char *base,
9802 +- const unsigned char *limit,
9803 +- const unsigned int *hook_entries,
9804 +- const unsigned int *underflows,
9805 +- const char *name)
9806 ++ const unsigned char *limit)
9807 + {
9808 + struct xt_entry_match *ematch;
9809 + struct xt_entry_target *t;
9810 + struct xt_target *target;
9811 + unsigned int entry_offset;
9812 + unsigned int j;
9813 +- int ret, off, h;
9814 ++ int ret, off;
9815 +
9816 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
9817 + if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
9818 +- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
9819 ++ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
9820 ++ (unsigned char *)e + e->next_offset > limit) {
9821 + duprintf("Bad offset %p, limit = %p\n", e, limit);
9822 + return -EINVAL;
9823 + }
9824 +@@ -1519,8 +1481,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
9825 + return -EINVAL;
9826 + }
9827 +
9828 +- /* For purposes of check_entry casting the compat entry is fine */
9829 +- ret = check_entry((struct ip6t_entry *)e, name);
9830 ++ if (!ip6_checkentry(&e->ipv6))
9831 ++ return -EINVAL;
9832 ++
9833 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
9834 ++ e->target_offset, e->next_offset);
9835 + if (ret)
9836 + return ret;
9837 +
9838 +@@ -1528,8 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
9839 + entry_offset = (void *)e - (void *)base;
9840 + j = 0;
9841 + xt_ematch_foreach(ematch, e) {
9842 +- ret = compat_find_calc_match(ematch, name,
9843 +- &e->ipv6, e->comefrom, &off);
9844 ++ ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
9845 ++ &off);
9846 + if (ret != 0)
9847 + goto release_matches;
9848 + ++j;
9849 +@@ -1552,17 +1517,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
9850 + if (ret)
9851 + goto out;
9852 +
9853 +- /* Check hooks & underflows */
9854 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
9855 +- if ((unsigned char *)e - base == hook_entries[h])
9856 +- newinfo->hook_entry[h] = hook_entries[h];
9857 +- if ((unsigned char *)e - base == underflows[h])
9858 +- newinfo->underflow[h] = underflows[h];
9859 +- }
9860 +-
9861 +- /* Clear counters and comefrom */
9862 +- memset(&e->counters, 0, sizeof(e->counters));
9863 +- e->comefrom = 0;
9864 + return 0;
9865 +
9866 + out:
9867 +@@ -1576,18 +1530,17 @@ release_matches:
9868 + return ret;
9869 + }
9870 +
9871 +-static int
9872 ++static void
9873 + compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
9874 +- unsigned int *size, const char *name,
9875 ++ unsigned int *size,
9876 + struct xt_table_info *newinfo, unsigned char *base)
9877 + {
9878 + struct xt_entry_target *t;
9879 + struct ip6t_entry *de;
9880 + unsigned int origsize;
9881 +- int ret, h;
9882 ++ int h;
9883 + struct xt_entry_match *ematch;
9884 +
9885 +- ret = 0;
9886 + origsize = *size;
9887 + de = (struct ip6t_entry *)*dstptr;
9888 + memcpy(de, e, sizeof(struct ip6t_entry));
9889 +@@ -1596,11 +1549,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
9890 + *dstptr += sizeof(struct ip6t_entry);
9891 + *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
9892 +
9893 +- xt_ematch_foreach(ematch, e) {
9894 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
9895 +- if (ret != 0)
9896 +- return ret;
9897 +- }
9898 ++ xt_ematch_foreach(ematch, e)
9899 ++ xt_compat_match_from_user(ematch, dstptr, size);
9900 ++
9901 + de->target_offset = e->target_offset - (origsize - *size);
9902 + t = compat_ip6t_get_target(e);
9903 + xt_compat_target_from_user(t, dstptr, size);
9904 +@@ -1612,181 +1563,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
9905 + if ((unsigned char *)de - base < newinfo->underflow[h])
9906 + newinfo->underflow[h] -= origsize - *size;
9907 + }
9908 +- return ret;
9909 +-}
9910 +-
9911 +-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
9912 +- const char *name)
9913 +-{
9914 +- unsigned int j;
9915 +- int ret = 0;
9916 +- struct xt_mtchk_param mtpar;
9917 +- struct xt_entry_match *ematch;
9918 +-
9919 +- j = 0;
9920 +- mtpar.net = net;
9921 +- mtpar.table = name;
9922 +- mtpar.entryinfo = &e->ipv6;
9923 +- mtpar.hook_mask = e->comefrom;
9924 +- mtpar.family = NFPROTO_IPV6;
9925 +- xt_ematch_foreach(ematch, e) {
9926 +- ret = check_match(ematch, &mtpar);
9927 +- if (ret != 0)
9928 +- goto cleanup_matches;
9929 +- ++j;
9930 +- }
9931 +-
9932 +- ret = check_target(e, net, name);
9933 +- if (ret)
9934 +- goto cleanup_matches;
9935 +- return 0;
9936 +-
9937 +- cleanup_matches:
9938 +- xt_ematch_foreach(ematch, e) {
9939 +- if (j-- == 0)
9940 +- break;
9941 +- cleanup_match(ematch, net);
9942 +- }
9943 +- return ret;
9944 + }
9945 +
9946 + static int
9947 + translate_compat_table(struct net *net,
9948 +- const char *name,
9949 +- unsigned int valid_hooks,
9950 + struct xt_table_info **pinfo,
9951 + void **pentry0,
9952 +- unsigned int total_size,
9953 +- unsigned int number,
9954 +- unsigned int *hook_entries,
9955 +- unsigned int *underflows)
9956 ++ const struct compat_ip6t_replace *compatr)
9957 + {
9958 + unsigned int i, j;
9959 + struct xt_table_info *newinfo, *info;
9960 + void *pos, *entry0, *entry1;
9961 + struct compat_ip6t_entry *iter0;
9962 +- struct ip6t_entry *iter1;
9963 ++ struct ip6t_replace repl;
9964 + unsigned int size;
9965 + int ret = 0;
9966 +
9967 + info = *pinfo;
9968 + entry0 = *pentry0;
9969 +- size = total_size;
9970 +- info->number = number;
9971 +-
9972 +- /* Init all hooks to impossible value. */
9973 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
9974 +- info->hook_entry[i] = 0xFFFFFFFF;
9975 +- info->underflow[i] = 0xFFFFFFFF;
9976 +- }
9977 ++ size = compatr->size;
9978 ++ info->number = compatr->num_entries;
9979 +
9980 + duprintf("translate_compat_table: size %u\n", info->size);
9981 + j = 0;
9982 + xt_compat_lock(AF_INET6);
9983 +- xt_compat_init_offsets(AF_INET6, number);
9984 ++ xt_compat_init_offsets(AF_INET6, compatr->num_entries);
9985 + /* Walk through entries, checking offsets. */
9986 +- xt_entry_foreach(iter0, entry0, total_size) {
9987 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
9988 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
9989 + entry0,
9990 +- entry0 + total_size,
9991 +- hook_entries,
9992 +- underflows,
9993 +- name);
9994 ++ entry0 + compatr->size);
9995 + if (ret != 0)
9996 + goto out_unlock;
9997 + ++j;
9998 + }
9999 +
10000 + ret = -EINVAL;
10001 +- if (j != number) {
10002 ++ if (j != compatr->num_entries) {
10003 + duprintf("translate_compat_table: %u not %u entries\n",
10004 +- j, number);
10005 ++ j, compatr->num_entries);
10006 + goto out_unlock;
10007 + }
10008 +
10009 +- /* Check hooks all assigned */
10010 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
10011 +- /* Only hooks which are valid */
10012 +- if (!(valid_hooks & (1 << i)))
10013 +- continue;
10014 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
10015 +- duprintf("Invalid hook entry %u %u\n",
10016 +- i, hook_entries[i]);
10017 +- goto out_unlock;
10018 +- }
10019 +- if (info->underflow[i] == 0xFFFFFFFF) {
10020 +- duprintf("Invalid underflow %u %u\n",
10021 +- i, underflows[i]);
10022 +- goto out_unlock;
10023 +- }
10024 +- }
10025 +-
10026 + ret = -ENOMEM;
10027 + newinfo = xt_alloc_table_info(size);
10028 + if (!newinfo)
10029 + goto out_unlock;
10030 +
10031 +- newinfo->number = number;
10032 ++ newinfo->number = compatr->num_entries;
10033 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
10034 +- newinfo->hook_entry[i] = info->hook_entry[i];
10035 +- newinfo->underflow[i] = info->underflow[i];
10036 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
10037 ++ newinfo->underflow[i] = compatr->underflow[i];
10038 + }
10039 + entry1 = newinfo->entries[raw_smp_processor_id()];
10040 + pos = entry1;
10041 +- size = total_size;
10042 +- xt_entry_foreach(iter0, entry0, total_size) {
10043 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
10044 +- name, newinfo, entry1);
10045 +- if (ret != 0)
10046 +- break;
10047 +- }
10048 ++ size = compatr->size;
10049 ++ xt_entry_foreach(iter0, entry0, compatr->size)
10050 ++ compat_copy_entry_from_user(iter0, &pos, &size,
10051 ++ newinfo, entry1);
10052 ++
10053 ++ /* all module references in entry0 are now gone. */
10054 + xt_compat_flush_offsets(AF_INET6);
10055 + xt_compat_unlock(AF_INET6);
10056 +- if (ret)
10057 +- goto free_newinfo;
10058 +
10059 +- ret = -ELOOP;
10060 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
10061 +- goto free_newinfo;
10062 ++ memcpy(&repl, compatr, sizeof(*compatr));
10063 +
10064 +- i = 0;
10065 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
10066 +- ret = compat_check_entry(iter1, net, name);
10067 +- if (ret != 0)
10068 +- break;
10069 +- ++i;
10070 +- if (strcmp(ip6t_get_target(iter1)->u.user.name,
10071 +- XT_ERROR_TARGET) == 0)
10072 +- ++newinfo->stacksize;
10073 +- }
10074 +- if (ret) {
10075 +- /*
10076 +- * The first i matches need cleanup_entry (calls ->destroy)
10077 +- * because they had called ->check already. The other j-i
10078 +- * entries need only release.
10079 +- */
10080 +- int skip = i;
10081 +- j -= i;
10082 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
10083 +- if (skip-- > 0)
10084 +- continue;
10085 +- if (j-- == 0)
10086 +- break;
10087 +- compat_release_entry(iter0);
10088 +- }
10089 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
10090 +- if (i-- == 0)
10091 +- break;
10092 +- cleanup_entry(iter1, net);
10093 +- }
10094 +- xt_free_table_info(newinfo);
10095 +- return ret;
10096 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
10097 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
10098 ++ repl.underflow[i] = newinfo->underflow[i];
10099 + }
10100 +
10101 +- /* And one copy for every other CPU */
10102 +- for_each_possible_cpu(i)
10103 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
10104 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
10105 ++ repl.num_counters = 0;
10106 ++ repl.counters = NULL;
10107 ++ repl.size = newinfo->size;
10108 ++ ret = translate_table(net, newinfo, entry1, &repl);
10109 ++ if (ret)
10110 ++ goto free_newinfo;
10111 +
10112 + *pinfo = newinfo;
10113 + *pentry0 = entry1;
10114 +@@ -1795,17 +1647,16 @@ translate_compat_table(struct net *net,
10115 +
10116 + free_newinfo:
10117 + xt_free_table_info(newinfo);
10118 +-out:
10119 +- xt_entry_foreach(iter0, entry0, total_size) {
10120 ++ return ret;
10121 ++out_unlock:
10122 ++ xt_compat_flush_offsets(AF_INET6);
10123 ++ xt_compat_unlock(AF_INET6);
10124 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
10125 + if (j-- == 0)
10126 + break;
10127 + compat_release_entry(iter0);
10128 + }
10129 + return ret;
10130 +-out_unlock:
10131 +- xt_compat_flush_offsets(AF_INET6);
10132 +- xt_compat_unlock(AF_INET6);
10133 +- goto out;
10134 + }
10135 +
10136 + static int
10137 +@@ -1842,10 +1693,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
10138 + goto free_newinfo;
10139 + }
10140 +
10141 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
10142 +- &newinfo, &loc_cpu_entry, tmp.size,
10143 +- tmp.num_entries, tmp.hook_entry,
10144 +- tmp.underflow);
10145 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
10146 + if (ret != 0)
10147 + goto free_newinfo;
10148 +
10149 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
10150 +index c1147acbc8c4..ac6c40d08ac5 100644
10151 +--- a/net/ipv6/tcp_ipv6.c
10152 ++++ b/net/ipv6/tcp_ipv6.c
10153 +@@ -1691,7 +1691,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
10154 + destp = ntohs(inet->inet_dport);
10155 + srcp = ntohs(inet->inet_sport);
10156 +
10157 +- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
10158 ++ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
10159 ++ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
10160 ++ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
10161 + timer_active = 1;
10162 + timer_expires = icsk->icsk_timeout;
10163 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
10164 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
10165 +index 7333f3575fc5..1173557ea551 100644
10166 +--- a/net/ipv6/udp.c
10167 ++++ b/net/ipv6/udp.c
10168 +@@ -834,8 +834,8 @@ start_lookup:
10169 + flush_stack(stack, count, skb, count - 1);
10170 + } else {
10171 + if (!inner_flushed)
10172 +- UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
10173 +- proto == IPPROTO_UDPLITE);
10174 ++ UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
10175 ++ proto == IPPROTO_UDPLITE);
10176 + consume_skb(skb);
10177 + }
10178 + return 0;
10179 +@@ -913,11 +913,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
10180 + ret = udpv6_queue_rcv_skb(sk, skb);
10181 + sock_put(sk);
10182 +
10183 +- /* a return value > 0 means to resubmit the input, but
10184 +- * it wants the return to be -protocol, or 0
10185 +- */
10186 ++ /* a return value > 0 means to resubmit the input */
10187 + if (ret > 0)
10188 +- return -ret;
10189 ++ return ret;
10190 +
10191 + return 0;
10192 + }
10193 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
10194 +index 79649937ec71..44ee0683b14b 100644
10195 +--- a/net/l2tp/l2tp_ip.c
10196 ++++ b/net/l2tp/l2tp_ip.c
10197 +@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
10198 + struct l2tp_tunnel *tunnel = NULL;
10199 + int length;
10200 +
10201 +- /* Point to L2TP header */
10202 +- optr = ptr = skb->data;
10203 +-
10204 + if (!pskb_may_pull(skb, 4))
10205 + goto discard;
10206 +
10207 ++ /* Point to L2TP header */
10208 ++ optr = ptr = skb->data;
10209 + session_id = ntohl(*((__be32 *) ptr));
10210 + ptr += 4;
10211 +
10212 +@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
10213 + if (!pskb_may_pull(skb, length))
10214 + goto discard;
10215 +
10216 ++ /* Point to L2TP header */
10217 ++ optr = ptr = skb->data;
10218 ++ ptr += 4;
10219 + pr_debug("%s: ip recv\n", tunnel->name);
10220 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
10221 + }
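This file and the IPv6 variant that follows enforce the same rule three times over: pskb_may_pull() may reallocate the packet head, so a pointer taken from skb->data before the call is stale afterwards and must be re-derived. A sketch of the safe ordering, with stub declarations so the pattern stands alone:

/* Stubs: pskb_may_pull() guarantees len linear bytes, possibly
 * moving skb->data to do so. */
struct sk_buff { unsigned char *data; unsigned int len; };
extern int pskb_may_pull(struct sk_buff *skb, unsigned int len);

static long l2tp_recv_sketch(struct sk_buff *skb)
{
        unsigned char *ptr;
        unsigned long session_id;

        if (!pskb_may_pull(skb, 4))
                return -1;

        ptr = skb->data;        /* derive only after the pull */
        session_id = (unsigned long)ptr[0] << 24 | ptr[1] << 16 |
                     ptr[2] << 8 | ptr[3];

        /* any later pskb_may_pull() invalidates ptr all over again */
        return (long)session_id;
}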
10222 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
10223 +index 0ce9da948ad7..36f8fa223a78 100644
10224 +--- a/net/l2tp/l2tp_ip6.c
10225 ++++ b/net/l2tp/l2tp_ip6.c
10226 +@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
10227 + struct l2tp_tunnel *tunnel = NULL;
10228 + int length;
10229 +
10230 +- /* Point to L2TP header */
10231 +- optr = ptr = skb->data;
10232 +-
10233 + if (!pskb_may_pull(skb, 4))
10234 + goto discard;
10235 +
10236 ++ /* Point to L2TP header */
10237 ++ optr = ptr = skb->data;
10238 + session_id = ntohl(*((__be32 *) ptr));
10239 + ptr += 4;
10240 +
10241 +@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
10242 + if (!pskb_may_pull(skb, length))
10243 + goto discard;
10244 +
10245 ++ /* Point to L2TP header */
10246 ++ optr = ptr = skb->data;
10247 ++ ptr += 4;
10248 + pr_debug("%s: ip recv\n", tunnel->name);
10249 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
10250 + }
10251 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
10252 +index 17a8dff06090..c58f242c00f1 100644
10253 +--- a/net/llc/af_llc.c
10254 ++++ b/net/llc/af_llc.c
10255 +@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
10256 + if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
10257 + struct llc_pktinfo info;
10258 +
10259 ++ memset(&info, 0, sizeof(info));
10260 + info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
10261 + llc_pdu_decode_dsap(skb, &info.lpi_sap);
10262 + llc_pdu_decode_da(skb, info.lpi_mac);
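The added memset closes an information leak: initializing a struct field by field leaves its padding bytes holding stale kernel stack contents, and put_cmsg() copies the entire struct to userspace. A runnable userspace illustration of the padding gap (hypothetical layout, not the real llc_pktinfo):

#include <stdio.h>
#include <string.h>

struct pktinfo {
        char sap;       /* typically followed by 3 padding bytes */
        int ifindex;
};

int main(void)
{
        struct pktinfo a, b;

        a.sap = 1;              /* field-by-field: padding undefined */
        a.ifindex = 2;

        memset(&b, 0, sizeof(b));       /* zeroes the padding too */
        b.sap = 1;
        b.ifindex = 2;

        printf("struct spans %zu bytes; only b is safe to copy out\n",
               sizeof(b));
        return 0;
}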
10263 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
10264 +index 41adfc898a18..6f1f3bdddea2 100644
10265 +--- a/net/mac80211/ibss.c
10266 ++++ b/net/mac80211/ibss.c
10267 +@@ -7,6 +7,7 @@
10268 + * Copyright 2007, Michael Wu <flamingice@××××××××.net>
10269 + * Copyright 2009, Johannes Berg <johannes@××××××××××××.net>
10270 + * Copyright 2013-2014 Intel Mobile Communications GmbH
10271 ++ * Copyright(c) 2016 Intel Deutschland GmbH
10272 + *
10273 + * This program is free software; you can redistribute it and/or modify
10274 + * it under the terms of the GNU General Public License version 2 as
10275 +@@ -1479,14 +1480,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
10276 +
10277 + sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
10278 +
10279 +- num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
10280 +- &ifibss->chandef,
10281 +- channels,
10282 +- ARRAY_SIZE(channels));
10283 + scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
10284 +- ieee80211_request_ibss_scan(sdata, ifibss->ssid,
10285 +- ifibss->ssid_len, channels, num,
10286 +- scan_width);
10287 ++
10288 ++ if (ifibss->fixed_channel) {
10289 ++ num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
10290 ++ &ifibss->chandef,
10291 ++ channels,
10292 ++ ARRAY_SIZE(channels));
10293 ++ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
10294 ++ ifibss->ssid_len, channels,
10295 ++ num, scan_width);
10296 ++ } else {
10297 ++ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
10298 ++ ifibss->ssid_len, NULL,
10299 ++ 0, scan_width);
10300 ++ }
10301 + } else {
10302 + int interval = IEEE80211_SCAN_INTERVAL;
10303 +
10304 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
10305 +index 84cef600c573..6e89ab8eac44 100644
10306 +--- a/net/mac80211/iface.c
10307 ++++ b/net/mac80211/iface.c
10308 +@@ -980,7 +980,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
10309 + if (sdata->vif.txq) {
10310 + struct txq_info *txqi = to_txq_info(sdata->vif.txq);
10311 +
10312 ++ spin_lock_bh(&txqi->queue.lock);
10313 + ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
10314 ++ spin_unlock_bh(&txqi->queue.lock);
10315 ++
10316 + atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
10317 + }
10318 +
10319 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
10320 +index d4b08d87537c..3073164a6fcf 100644
10321 +--- a/net/mac80211/rx.c
10322 ++++ b/net/mac80211/rx.c
10323 +@@ -2227,7 +2227,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
10324 + struct ieee80211_sub_if_data *sdata = rx->sdata;
10325 + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
10326 + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
10327 +- u16 q, hdrlen;
10328 ++ u16 ac, q, hdrlen;
10329 +
10330 + hdr = (struct ieee80211_hdr *) skb->data;
10331 + hdrlen = ieee80211_hdrlen(hdr->frame_control);
10332 +@@ -2297,7 +2297,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
10333 + ether_addr_equal(sdata->vif.addr, hdr->addr3))
10334 + return RX_CONTINUE;
10335 +
10336 +- q = ieee80211_select_queue_80211(sdata, skb, hdr);
10337 ++ ac = ieee80211_select_queue_80211(sdata, skb, hdr);
10338 ++ q = sdata->vif.hw_queue[ac];
10339 + if (ieee80211_queue_stopped(&local->hw, q)) {
10340 + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
10341 + return RX_DROP_MONITOR;
10342 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
10343 +index a7027190f298..bcdbda289d75 100644
10344 +--- a/net/mac80211/sta_info.c
10345 ++++ b/net/mac80211/sta_info.c
10346 +@@ -472,11 +472,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
10347 + {
10348 + struct ieee80211_local *local = sta->local;
10349 + struct ieee80211_sub_if_data *sdata = sta->sdata;
10350 +- struct station_info sinfo;
10351 ++ struct station_info *sinfo;
10352 + int err = 0;
10353 +
10354 + lockdep_assert_held(&local->sta_mtx);
10355 +
10356 ++ sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
10357 ++ if (!sinfo) {
10358 ++ err = -ENOMEM;
10359 ++ goto out_err;
10360 ++ }
10361 ++
10362 + /* check if STA exists already */
10363 + if (sta_info_get_bss(sdata, sta->sta.addr)) {
10364 + err = -EEXIST;
10365 +@@ -510,10 +516,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
10366 + ieee80211_sta_debugfs_add(sta);
10367 + rate_control_add_sta_debugfs(sta);
10368 +
10369 +- memset(&sinfo, 0, sizeof(sinfo));
10370 +- sinfo.filled = 0;
10371 +- sinfo.generation = local->sta_generation;
10372 +- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
10373 ++ sinfo->generation = local->sta_generation;
10374 ++ cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
10375 ++ kfree(sinfo);
10376 +
10377 + sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
10378 +
10379 +@@ -876,7 +881,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
10380 + {
10381 + struct ieee80211_local *local = sta->local;
10382 + struct ieee80211_sub_if_data *sdata = sta->sdata;
10383 +- struct station_info sinfo = {};
10384 ++ struct station_info *sinfo;
10385 + int ret;
10386 +
10387 + /*
10388 +@@ -914,8 +919,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
10389 +
10390 + sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
10391 +
10392 +- sta_set_sinfo(sta, &sinfo);
10393 +- cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
10394 ++ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
10395 ++ if (sinfo)
10396 ++ sta_set_sinfo(sta, sinfo);
10397 ++ cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
10398 ++ kfree(sinfo);
10399 +
10400 + rate_control_remove_sta_debugfs(sta);
10401 + ieee80211_sta_debugfs_remove(sta);
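Both hunks move struct station_info off the stack: the struct has grown to several hundred bytes while kernel stacks are only a few kilobytes deep. Note the asymmetry the patch keeps — the insert path fails with -ENOMEM, while the destroy path carries on and reports with a NULL sinfo if the allocation fails. A stand-in sketch of the insert-side pattern:

#include <stdlib.h>

struct station_info { char payload[600]; };  /* stand-in: too big for the stack */

static void report_new_station(const struct station_info *sinfo)
{
        (void)sinfo;            /* would call up into cfg80211 here */
}

static int notify_new_station(void)
{
        /* calloc() plays the role of kzalloc(..., GFP_KERNEL) */
        struct station_info *sinfo = calloc(1, sizeof(*sinfo));

        if (!sinfo)
                return -1;
        report_new_station(sinfo);
        free(sinfo);
        return 0;
}

int main(void)
{
        return notify_new_station();
}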
10402 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
10403 +index 38fbc194b9cb..a26bd6532829 100644
10404 +--- a/net/netfilter/ipvs/ip_vs_core.c
10405 ++++ b/net/netfilter/ipvs/ip_vs_core.c
10406 +@@ -1689,15 +1689,34 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
10407 + cp = pp->conn_in_get(af, skb, &iph, 0);
10408 +
10409 + conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
10410 +- if (conn_reuse_mode && !iph.fragoffs &&
10411 +- is_new_conn(skb, &iph) && cp &&
10412 +- ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
10413 +- unlikely(!atomic_read(&cp->dest->weight))) ||
10414 +- unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
10415 +- if (!atomic_read(&cp->n_control))
10416 +- ip_vs_conn_expire_now(cp);
10417 +- __ip_vs_conn_put(cp);
10418 +- cp = NULL;
10419 ++ if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
10420 ++ bool uses_ct = false, resched = false;
10421 ++
10422 ++ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
10423 ++ unlikely(!atomic_read(&cp->dest->weight))) {
10424 ++ resched = true;
10425 ++ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
10426 ++ } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
10427 ++ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
10428 ++ if (!atomic_read(&cp->n_control)) {
10429 ++ resched = true;
10430 ++ } else {
10431 ++ /* Do not reschedule controlling connection
10432 ++ * that uses conntrack while it is still
10433 ++ * referenced by controlled connection(s).
10434 ++ */
10435 ++ resched = !uses_ct;
10436 ++ }
10437 ++ }
10438 ++
10439 ++ if (resched) {
10440 ++ if (!atomic_read(&cp->n_control))
10441 ++ ip_vs_conn_expire_now(cp);
10442 ++ __ip_vs_conn_put(cp);
10443 ++ if (uses_ct)
10444 ++ return NF_DROP;
10445 ++ cp = NULL;
10446 ++ }
10447 + }
10448 +
10449 + if (unlikely(!cp) && !iph.fragoffs) {
10450 +diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
10451 +index bed5f7042529..bb318e4623a3 100644
10452 +--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
10453 ++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
10454 +@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
10455 + dptr = skb->data + dataoff;
10456 + datalen = skb->len - dataoff;
10457 +
10458 +- if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
10459 ++ if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
10460 + return -EINVAL;
10461 +
10462 + /* N.B: pe_data is only set on success,
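The bug here is a double offset: dptr is already skb->data + dataoff, so passing dataoff to get_callid() again made the search start 2*dataoff into the packet, past the beginning of the SIP payload. A runnable toy that shows the miss (find_callid() stands in for the real header search):

#include <stdio.h>
#include <string.h>

static const char *find_callid(const char *start, size_t off)
{
        return strstr(start + off, "Call-ID:");
}

int main(void)
{
        const char pkt[] = "UDPHDR..Call-ID: abc";  /* 8-byte fake header */
        size_t dataoff = 8;
        const char *dptr = pkt + dataoff;           /* already at the payload */

        printf("buggy: %s\n", find_callid(dptr, dataoff) ? "found" : "miss");
        printf("fixed: %s\n", find_callid(dptr, 0) ? "found" : "miss");
        return 0;
}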
10463 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
10464 +index 51a459c3c649..4b850c639ac5 100644
10465 +--- a/net/netfilter/x_tables.c
10466 ++++ b/net/netfilter/x_tables.c
10467 +@@ -418,6 +418,47 @@ int xt_check_match(struct xt_mtchk_param *par,
10468 + }
10469 + EXPORT_SYMBOL_GPL(xt_check_match);
10470 +
10471 ++/** xt_check_entry_match - check that matches end before start of target
10472 ++ *
10473 ++ * @match: beginning of xt_entry_match
10474 ++ * @target: beginning of this rules target (alleged end of matches)
10475 ++ * @target: beginning of this rule's target (alleged end of matches)
10476 ++ *
10477 ++ * Validates that all matches add up to the beginning of the target,
10478 ++ * and that each match covers at least the base structure size.
10479 ++ *
10480 ++ * Return: 0 on success, negative errno on failure.
10481 ++ */
10482 ++static int xt_check_entry_match(const char *match, const char *target,
10483 ++ const size_t alignment)
10484 ++{
10485 ++ const struct xt_entry_match *pos;
10486 ++ int length = target - match;
10487 ++
10488 ++ if (length == 0) /* no matches */
10489 ++ return 0;
10490 ++
10491 ++ pos = (struct xt_entry_match *)match;
10492 ++ do {
10493 ++ if ((unsigned long)pos % alignment)
10494 ++ return -EINVAL;
10495 ++
10496 ++ if (length < (int)sizeof(struct xt_entry_match))
10497 ++ return -EINVAL;
10498 ++
10499 ++ if (pos->u.match_size < sizeof(struct xt_entry_match))
10500 ++ return -EINVAL;
10501 ++
10502 ++ if (pos->u.match_size > length)
10503 ++ return -EINVAL;
10504 ++
10505 ++ length -= pos->u.match_size;
10506 ++ pos = ((void *)((char *)(pos) + (pos)->u.match_size));
10507 ++ } while (length > 0);
10508 ++
10509 ++ return 0;
10510 ++}
10511 ++
10512 + #ifdef CONFIG_COMPAT
10513 + int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
10514 + {
10515 +@@ -487,13 +528,14 @@ int xt_compat_match_offset(const struct xt_match *match)
10516 + }
10517 + EXPORT_SYMBOL_GPL(xt_compat_match_offset);
10518 +
10519 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
10520 +- unsigned int *size)
10521 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
10522 ++ unsigned int *size)
10523 + {
10524 + const struct xt_match *match = m->u.kernel.match;
10525 + struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
10526 + int pad, off = xt_compat_match_offset(match);
10527 + u_int16_t msize = cm->u.user.match_size;
10528 ++ char name[sizeof(m->u.user.name)];
10529 +
10530 + m = *dstptr;
10531 + memcpy(m, cm, sizeof(*cm));
10532 +@@ -507,10 +549,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
10533 +
10534 + msize += off;
10535 + m->u.user.match_size = msize;
10536 ++ strlcpy(name, match->name, sizeof(name));
10537 ++ module_put(match->me);
10538 ++ strncpy(m->u.user.name, name, sizeof(m->u.user.name));
10539 +
10540 + *size += off;
10541 + *dstptr += msize;
10542 +- return 0;
10543 + }
10544 + EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
10545 +
10546 +@@ -541,8 +585,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
10547 + return 0;
10548 + }
10549 + EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
10550 ++
10551 ++/* non-compat version may have padding after verdict */
10552 ++struct compat_xt_standard_target {
10553 ++ struct compat_xt_entry_target t;
10554 ++ compat_uint_t verdict;
10555 ++};
10556 ++
10557 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
10558 ++ unsigned int target_offset,
10559 ++ unsigned int next_offset)
10560 ++{
10561 ++ long size_of_base_struct = elems - (const char *)base;
10562 ++ const struct compat_xt_entry_target *t;
10563 ++ const char *e = base;
10564 ++
10565 ++ if (target_offset < size_of_base_struct)
10566 ++ return -EINVAL;
10567 ++
10568 ++ if (target_offset + sizeof(*t) > next_offset)
10569 ++ return -EINVAL;
10570 ++
10571 ++ t = (void *)(e + target_offset);
10572 ++ if (t->u.target_size < sizeof(*t))
10573 ++ return -EINVAL;
10574 ++
10575 ++ if (target_offset + t->u.target_size > next_offset)
10576 ++ return -EINVAL;
10577 ++
10578 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
10579 ++ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
10580 ++ return -EINVAL;
10581 ++
10582 ++ /* compat_xt_entry match has less strict alignment requirements,
10583 ++ * otherwise they are identical. In case of padding differences
10584 ++ * we would need to add a compat version of xt_check_entry_match.
10585 ++ */
10586 ++ BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
10587 ++
10588 ++ return xt_check_entry_match(elems, base + target_offset,
10589 ++ __alignof__(struct compat_xt_entry_match));
10590 ++}
10591 ++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
10592 + #endif /* CONFIG_COMPAT */
10593 +
10594 ++/**
10595 ++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
10596 ++ *
10597 ++ * @base: pointer to arp/ip/ip6t_entry
10598 ++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
10599 ++ * @target_offset: the arp/ip/ip6_t->target_offset
10600 ++ * @next_offset: the arp/ip/ip6_t->next_offset
10601 ++ *
10602 ++ * validates that target_offset and next_offset are sane and that all
10603 ++ * match sizes (if any) align with the target offset.
10604 ++ *
10605 ++ * This function does not validate the targets or matches themselves, it
10606 ++ * only tests that all the offsets and sizes are correct, that all
10607 ++ * match structures are aligned, and that the last structure ends where
10608 ++ * the target structure begins.
10609 ++ *
10610 ++ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
10611 ++ *
10612 ++ * The arp/ip/ip6t_entry structure @base must have passed following tests:
10613 ++ * - it must point to a valid memory location
10614 ++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
10615 ++ * length.
10616 ++ *
10617 ++ * A well-formed entry looks like this:
10618 ++ *
10619 ++ * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
10620 ++ * e->elems[]-----' | |
10621 ++ * matchsize | |
10622 ++ * matchsize | |
10623 ++ * | |
10624 ++ * target_offset---------------------------------' |
10625 ++ * next_offset---------------------------------------------------'
10626 ++ *
10627 ++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
10628 ++ * This is where matches (if any) and the target reside.
10629 ++ * target_offset: beginning of target.
10630 ++ * next_offset: start of the next rule; also: size of this rule.
10631 ++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
10632 ++ *
10633 ++ * Every match stores its size, sum of sizes must not exceed target_offset.
10634 ++ *
10635 ++ * Return: 0 on success, negative errno on failure.
10636 ++ */
10637 ++int xt_check_entry_offsets(const void *base,
10638 ++ const char *elems,
10639 ++ unsigned int target_offset,
10640 ++ unsigned int next_offset)
10641 ++{
10642 ++ long size_of_base_struct = elems - (const char *)base;
10643 ++ const struct xt_entry_target *t;
10644 ++ const char *e = base;
10645 ++
10646 ++ /* target start is within the ip/ip6/arpt_entry struct */
10647 ++ if (target_offset < size_of_base_struct)
10648 ++ return -EINVAL;
10649 ++
10650 ++ if (target_offset + sizeof(*t) > next_offset)
10651 ++ return -EINVAL;
10652 ++
10653 ++ t = (void *)(e + target_offset);
10654 ++ if (t->u.target_size < sizeof(*t))
10655 ++ return -EINVAL;
10656 ++
10657 ++ if (target_offset + t->u.target_size > next_offset)
10658 ++ return -EINVAL;
10659 ++
10660 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
10661 ++ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
10662 ++ return -EINVAL;
10663 ++
10664 ++ return xt_check_entry_match(elems, base + target_offset,
10665 ++ __alignof__(struct xt_entry_match));
10666 ++}
10667 ++EXPORT_SYMBOL(xt_check_entry_offsets);
10668 ++
10669 + int xt_check_target(struct xt_tgchk_param *par,
10670 + unsigned int size, u_int8_t proto, bool inv_proto)
10671 + {
10672 +@@ -593,6 +754,80 @@ int xt_check_target(struct xt_tgchk_param *par,
10673 + }
10674 + EXPORT_SYMBOL_GPL(xt_check_target);
10675 +
10676 ++/**
10677 ++ * xt_copy_counters_from_user - copy counters and metadata from userspace
10678 ++ *
10679 ++ * @user: src pointer to userspace memory
10680 ++ * @len: alleged size of userspace memory
10681 ++ * @info: where to store the xt_counters_info metadata
10682 ++ * @compat: true if the setsockopt call is made by a 32bit task on a 64bit kernel
10683 ++ *
10684 ++ * Copies counter metadata from @user and stores it in @info.
10685 ++ *
10686 ++ * vmallocs memory to hold the counters, then copies the counter data
10687 ++ * from @user to the new memory and returns a pointer to it.
10688 ++ *
10689 ++ * If @compat is true, @info gets converted automatically to the 64bit
10690 ++ * representation.
10691 ++ *
10692 ++ * The metadata associated with the counters is stored in @info.
10693 ++ *
10694 ++ * Return: returns a pointer that the caller has to test via IS_ERR().
10695 ++ * If IS_ERR() is false, the caller has to vfree the pointer.
10696 ++ */
10697 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
10698 ++ struct xt_counters_info *info, bool compat)
10699 ++{
10700 ++ void *mem;
10701 ++ u64 size;
10702 ++
10703 ++#ifdef CONFIG_COMPAT
10704 ++ if (compat) {
10705 ++ /* structures only differ in size due to alignment */
10706 ++ struct compat_xt_counters_info compat_tmp;
10707 ++
10708 ++ if (len <= sizeof(compat_tmp))
10709 ++ return ERR_PTR(-EINVAL);
10710 ++
10711 ++ len -= sizeof(compat_tmp);
10712 ++ if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
10713 ++ return ERR_PTR(-EFAULT);
10714 ++
10715 ++ strlcpy(info->name, compat_tmp.name, sizeof(info->name));
10716 ++ info->num_counters = compat_tmp.num_counters;
10717 ++ user += sizeof(compat_tmp);
10718 ++ } else
10719 ++#endif
10720 ++ {
10721 ++ if (len <= sizeof(*info))
10722 ++ return ERR_PTR(-EINVAL);
10723 ++
10724 ++ len -= sizeof(*info);
10725 ++ if (copy_from_user(info, user, sizeof(*info)) != 0)
10726 ++ return ERR_PTR(-EFAULT);
10727 ++
10728 ++ info->name[sizeof(info->name) - 1] = '\0';
10729 ++ user += sizeof(*info);
10730 ++ }
10731 ++
10732 ++ size = sizeof(struct xt_counters);
10733 ++ size *= info->num_counters;
10734 ++
10735 ++ if (size != (u64)len)
10736 ++ return ERR_PTR(-EINVAL);
10737 ++
10738 ++ mem = vmalloc(len);
10739 ++ if (!mem)
10740 ++ return ERR_PTR(-ENOMEM);
10741 ++
10742 ++ if (copy_from_user(mem, user, len) == 0)
10743 ++ return mem;
10744 ++
10745 ++ vfree(mem);
10746 ++ return ERR_PTR(-EFAULT);
10747 ++}
10748 ++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
10749 ++
10750 + #ifdef CONFIG_COMPAT
10751 + int xt_compat_target_offset(const struct xt_target *target)
10752 + {
10753 +@@ -608,6 +843,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
10754 + struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
10755 + int pad, off = xt_compat_target_offset(target);
10756 + u_int16_t tsize = ct->u.user.target_size;
10757 ++ char name[sizeof(t->u.user.name)];
10758 +
10759 + t = *dstptr;
10760 + memcpy(t, ct, sizeof(*ct));
10761 +@@ -621,6 +857,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
10762 +
10763 + tsize += off;
10764 + t->u.user.target_size = tsize;
10765 ++ strlcpy(name, target->name, sizeof(name));
10766 ++ module_put(target->me);
10767 ++ strncpy(t->u.user.name, name, sizeof(t->u.user.name));
10768 +
10769 + *size += off;
10770 + *dstptr += tsize;
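xt_copy_counters_from_user() above signals failure through the returned pointer itself, as its kerneldoc says, and the ip6_tables caller earlier in this patch tests it with IS_ERR()/PTR_ERR(). The kernel encodes negative errnos in the last page of the address space; a userspace model that stays close to the kernel originals:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *p = ERR_PTR(-22);                 /* -EINVAL */

        if (IS_ERR(p))
                printf("error %ld\n", PTR_ERR(p));      /* prints -22 */
        return 0;
}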
10771 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
10772 +index 0c29986ecd87..dbc32b19c574 100644
10773 +--- a/net/netlink/af_netlink.c
10774 ++++ b/net/netlink/af_netlink.c
10775 +@@ -2683,6 +2683,7 @@ static int netlink_dump(struct sock *sk)
10776 + struct netlink_callback *cb;
10777 + struct sk_buff *skb = NULL;
10778 + struct nlmsghdr *nlh;
10779 ++ struct module *module;
10780 + int len, err = -ENOBUFS;
10781 + int alloc_min_size;
10782 + int alloc_size;
10783 +@@ -2762,9 +2763,11 @@ static int netlink_dump(struct sock *sk)
10784 + cb->done(cb);
10785 +
10786 + nlk->cb_running = false;
10787 ++ module = cb->module;
10788 ++ skb = cb->skb;
10789 + mutex_unlock(nlk->cb_mutex);
10790 +- module_put(cb->module);
10791 +- consume_skb(cb->skb);
10792 ++ module_put(module);
10793 ++ consume_skb(skb);
10794 + return 0;
10795 +
10796 + errout_skb:
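The netlink_dump fix above is a classic unlock-ordering bug: once nlk->cb_mutex is dropped, the callback slot may be reused, so cb->module and cb->skb must be snapshotted into locals while the mutex is still held. A self-contained userspace sketch of the same pattern (pthread names and the put_ref() stand-in are illustrative):

        #include <pthread.h>

        struct cb { void *module; void *skb; };

        /* stand-in for module_put()/consume_skb() */
        static void put_ref(void *p) { (void)p; }

        static void dump_done(struct cb *cb, pthread_mutex_t *lock)
        {
                void *module, *skb;

                pthread_mutex_lock(lock);
                module = cb->module;    /* snapshot while cb is still ours */
                skb = cb->skb;
                pthread_mutex_unlock(lock);
                /* from here on, cb may belong to someone else;
                 * touch only the local copies */
                put_ref(module);
                put_ref(skb);
        }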
10797 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
10798 +index b491c1c296fe..9920f7502f6d 100644
10799 +--- a/net/openvswitch/actions.c
10800 ++++ b/net/openvswitch/actions.c
10801 +@@ -441,7 +441,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
10802 + mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
10803 +
10804 + if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
10805 +- set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
10806 ++ set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
10807 + true);
10808 + memcpy(&flow_key->ipv6.addr.src, masked,
10809 + sizeof(flow_key->ipv6.addr.src));
10810 +@@ -463,7 +463,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
10811 + NULL, &flags)
10812 + != NEXTHDR_ROUTING);
10813 +
10814 +- set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
10815 ++ set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
10816 + recalc_csum);
10817 + memcpy(&flow_key->ipv6.addr.dst, masked,
10818 + sizeof(flow_key->ipv6.addr.dst));
10819 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
10820 +index ebc39e66d704..a3654d929814 100644
10821 +--- a/net/packet/af_packet.c
10822 ++++ b/net/packet/af_packet.c
10823 +@@ -1699,6 +1699,10 @@ retry:
10824 + goto retry;
10825 + }
10826 +
10827 ++ if (!dev_validate_header(dev, skb->data, len)) {
10828 ++ err = -EINVAL;
10829 ++ goto out_unlock;
10830 ++ }
10831 + if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
10832 + !packet_extra_vlan_len_allowed(dev, skb)) {
10833 + err = -EMSGSIZE;
10834 +@@ -2109,18 +2113,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
10835 + sock_wfree(skb);
10836 + }
10837 +
10838 +-static bool ll_header_truncated(const struct net_device *dev, int len)
10839 +-{
10840 +- /* net device doesn't like empty head */
10841 +- if (unlikely(len <= dev->hard_header_len)) {
10842 +- net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
10843 +- current->comm, len, dev->hard_header_len);
10844 +- return true;
10845 +- }
10846 +-
10847 +- return false;
10848 +-}
10849 +-
10850 + static void tpacket_set_protocol(const struct net_device *dev,
10851 + struct sk_buff *skb)
10852 + {
10853 +@@ -2203,19 +2195,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
10854 + if (unlikely(err < 0))
10855 + return -EINVAL;
10856 + } else if (dev->hard_header_len) {
10857 +- if (ll_header_truncated(dev, tp_len))
10858 +- return -EINVAL;
10859 ++ int hdrlen = min_t(int, dev->hard_header_len, tp_len);
10860 +
10861 + skb_push(skb, dev->hard_header_len);
10862 +- err = skb_store_bits(skb, 0, data,
10863 +- dev->hard_header_len);
10864 ++ err = skb_store_bits(skb, 0, data, hdrlen);
10865 + if (unlikely(err))
10866 + return err;
10867 ++ if (!dev_validate_header(dev, skb->data, hdrlen))
10868 ++ return -EINVAL;
10869 + if (!skb->protocol)
10870 + tpacket_set_protocol(dev, skb);
10871 +
10872 +- data += dev->hard_header_len;
10873 +- to_write -= dev->hard_header_len;
10874 ++ data += hdrlen;
10875 ++ to_write -= hdrlen;
10876 + }
10877 +
10878 + offset = offset_in_page(data);
10879 +@@ -2538,9 +2530,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
10880 + offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
10881 + if (unlikely(offset < 0))
10882 + goto out_free;
10883 +- } else {
10884 +- if (ll_header_truncated(dev, len))
10885 +- goto out_free;
10886 + }
10887 +
10888 + /* Returns -EFAULT on error */
10889 +@@ -2548,6 +2537,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
10890 + if (err)
10891 + goto out_free;
10892 +
10893 ++ if (sock->type == SOCK_RAW &&
10894 ++ !dev_validate_header(dev, skb->data, len)) {
10895 ++ err = -EINVAL;
10896 ++ goto out_free;
10897 ++ }
10898 ++
10899 + sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
10900 +
10901 + if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
10902 +@@ -3212,6 +3207,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
10903 + i->ifindex = mreq->mr_ifindex;
10904 + i->alen = mreq->mr_alen;
10905 + memcpy(i->addr, mreq->mr_address, i->alen);
10906 ++ memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
10907 + i->count = 1;
10908 + i->next = po->mclist;
10909 + po->mclist = i;
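The af_packet hunks above funnel user-supplied link-layer headers through dev_validate_header(), which this patch adds elsewhere (upstream it lives in include/linux/netdevice.h). Its behaviour, roughly — a sketch reconstructed from how these call sites use it, not the authoritative definition:

        static bool dev_validate_header_sketch(const struct net_device *dev,
                                               char *ll_header, int len)
        {
                if (likely(len >= dev->hard_header_len))
                        return true;            /* full hard header supplied */
                if (capable(CAP_SYS_RAWIO)) {
                        /* privileged callers may send short headers;
                         * zero-fill the rest so no stale bytes leak */
                        memset(ll_header + len, 0,
                               dev->hard_header_len - len);
                        return true;
                }
                if (dev->header_ops && dev->header_ops->validate)
                        return dev->header_ops->validate(ll_header, len);
                return false;
        }

This replaces the removed ll_header_truncated(), which only compared lengths and could not cope with devices that use variable-length link-layer headers.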
10910 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
10911 +index 68c599a5e1d1..c244a49ae4ac 100644
10912 +--- a/net/sched/sch_api.c
10913 ++++ b/net/sched/sch_api.c
10914 +@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
10915 + return 0;
10916 + }
10917 +
10918 +-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
10919 ++void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
10920 ++ unsigned int len)
10921 + {
10922 + const struct Qdisc_class_ops *cops;
10923 + unsigned long cl;
10924 + u32 parentid;
10925 + int drops;
10926 +
10927 +- if (n == 0)
10928 ++ if (n == 0 && len == 0)
10929 + return;
10930 + drops = max_t(int, n, 0);
10931 + rcu_read_lock();
10932 +@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
10933 + cops->put(sch, cl);
10934 + }
10935 + sch->q.qlen -= n;
10936 ++ sch->qstats.backlog -= len;
10937 + __qdisc_qstats_drop(sch, drops);
10938 + }
10939 + rcu_read_unlock();
10940 + }
10941 +-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
10942 ++EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
10943 +
10944 + static void notify_and_destroy(struct net *net, struct sk_buff *skb,
10945 + struct nlmsghdr *n, u32 clid,
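Every sch_* hunk that follows swaps an open-coded graft sequence for qdisc_replace(). Judging from the code those hunks delete, the new helper (added to include/net/sch_generic.h by another part of this patch) amounts to:

        static inline struct Qdisc *qdisc_replace(struct Qdisc *sch,
                                                  struct Qdisc *new,
                                                  struct Qdisc **pold)
        {
                struct Qdisc *old;

                sch_tree_lock(sch);
                old = *pold;
                *pold = new;
                if (old != NULL) {
                        /* purge the old queue and propagate both the packet
                         * count and the byte backlog up the qdisc tree */
                        qdisc_tree_reduce_backlog(old, old->q.qlen,
                                                  old->qstats.backlog);
                        qdisc_reset(old);
                }
                sch_tree_unlock(sch);
                return old;
        }

The point of the whole series is the second argument: qdisc_tree_decrease_qlen() only adjusted q.qlen, leaving qstats.backlog stale in every parent.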
10946 +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
10947 +index beeb75f80fdb..f6e7a60012b1 100644
10948 +--- a/net/sched/sch_cbq.c
10949 ++++ b/net/sched/sch_cbq.c
10950 +@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10951 + new->reshape_fail = cbq_reshape_fail;
10952 + #endif
10953 + }
10954 +- sch_tree_lock(sch);
10955 +- *old = cl->q;
10956 +- cl->q = new;
10957 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
10958 +- qdisc_reset(*old);
10959 +- sch_tree_unlock(sch);
10960 +
10961 ++ *old = qdisc_replace(sch, new, &cl->q);
10962 + return 0;
10963 + }
10964 +
10965 +@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
10966 + {
10967 + struct cbq_sched_data *q = qdisc_priv(sch);
10968 + struct cbq_class *cl = (struct cbq_class *)arg;
10969 +- unsigned int qlen;
10970 ++ unsigned int qlen, backlog;
10971 +
10972 + if (cl->filters || cl->children || cl == &q->link)
10973 + return -EBUSY;
10974 +@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
10975 + sch_tree_lock(sch);
10976 +
10977 + qlen = cl->q->q.qlen;
10978 ++ backlog = cl->q->qstats.backlog;
10979 + qdisc_reset(cl->q);
10980 +- qdisc_tree_decrease_qlen(cl->q, qlen);
10981 ++ qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
10982 +
10983 + if (cl->next_alive)
10984 + cbq_deactivate_class(cl);
10985 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
10986 +index c009eb9045ce..3f6437db9b0f 100644
10987 +--- a/net/sched/sch_choke.c
10988 ++++ b/net/sched/sch_choke.c
10989 +@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
10990 + choke_zap_tail_holes(q);
10991 +
10992 + qdisc_qstats_backlog_dec(sch, skb);
10993 ++ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
10994 + qdisc_drop(skb, sch);
10995 +- qdisc_tree_decrease_qlen(sch, 1);
10996 + --sch->q.qlen;
10997 + }
10998 +
10999 +@@ -449,6 +449,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
11000 + old = q->tab;
11001 + if (old) {
11002 + unsigned int oqlen = sch->q.qlen, tail = 0;
11003 ++ unsigned dropped = 0;
11004 +
11005 + while (q->head != q->tail) {
11006 + struct sk_buff *skb = q->tab[q->head];
11007 +@@ -460,11 +461,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
11008 + ntab[tail++] = skb;
11009 + continue;
11010 + }
11011 ++ dropped += qdisc_pkt_len(skb);
11012 + qdisc_qstats_backlog_dec(sch, skb);
11013 + --sch->q.qlen;
11014 + qdisc_drop(skb, sch);
11015 + }
11016 +- qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
11017 ++ qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
11018 + q->head = 0;
11019 + q->tail = tail;
11020 + }
11021 +diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
11022 +index 7a0bdb16ac92..9a9068d00833 100644
11023 +--- a/net/sched/sch_codel.c
11024 ++++ b/net/sched/sch_codel.c
11025 +@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
11026 +
11027 + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
11028 +
11029 +- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
11030 ++ /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
11031 + * or HTB crashes. Defer it for next round.
11032 + */
11033 + if (q->stats.drop_count && sch->q.qlen) {
11034 +- qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
11035 ++ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
11036 + q->stats.drop_count = 0;
11037 ++ q->stats.drop_len = 0;
11038 + }
11039 + if (skb)
11040 + qdisc_bstats_update(sch, skb);
11041 +@@ -115,7 +116,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
11042 + {
11043 + struct codel_sched_data *q = qdisc_priv(sch);
11044 + struct nlattr *tb[TCA_CODEL_MAX + 1];
11045 +- unsigned int qlen;
11046 ++ unsigned int qlen, dropped = 0;
11047 + int err;
11048 +
11049 + if (!opt)
11050 +@@ -149,10 +150,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
11051 + while (sch->q.qlen > sch->limit) {
11052 + struct sk_buff *skb = __skb_dequeue(&sch->q);
11053 +
11054 ++ dropped += qdisc_pkt_len(skb);
11055 + qdisc_qstats_backlog_dec(sch, skb);
11056 + qdisc_drop(skb, sch);
11057 + }
11058 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
11059 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
11060 +
11061 + sch_tree_unlock(sch);
11062 + return 0;
11063 +diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
11064 +index 338706092c27..e599803caa1e 100644
11065 +--- a/net/sched/sch_drr.c
11066 ++++ b/net/sched/sch_drr.c
11067 +@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
11068 + static void drr_purge_queue(struct drr_class *cl)
11069 + {
11070 + unsigned int len = cl->qdisc->q.qlen;
11071 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
11072 +
11073 + qdisc_reset(cl->qdisc);
11074 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
11075 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
11076 + }
11077 +
11078 + static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
11079 +@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
11080 + new = &noop_qdisc;
11081 + }
11082 +
11083 +- sch_tree_lock(sch);
11084 +- drr_purge_queue(cl);
11085 +- *old = cl->qdisc;
11086 +- cl->qdisc = new;
11087 +- sch_tree_unlock(sch);
11088 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
11089 + return 0;
11090 + }
11091 +
11092 +diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
11093 +index 66700a6116aa..7288dda2a7fb 100644
11094 +--- a/net/sched/sch_dsmark.c
11095 ++++ b/net/sched/sch_dsmark.c
11096 +@@ -67,13 +67,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
11097 + new = &noop_qdisc;
11098 + }
11099 +
11100 +- sch_tree_lock(sch);
11101 +- *old = p->q;
11102 +- p->q = new;
11103 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11104 +- qdisc_reset(*old);
11105 +- sch_tree_unlock(sch);
11106 +-
11107 ++ *old = qdisc_replace(sch, new, &p->q);
11108 + return 0;
11109 + }
11110 +
11111 +@@ -262,6 +256,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11112 + return err;
11113 + }
11114 +
11115 ++ qdisc_qstats_backlog_inc(sch, skb);
11116 + sch->q.qlen++;
11117 +
11118 + return NET_XMIT_SUCCESS;
11119 +@@ -284,6 +279,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
11120 + return NULL;
11121 +
11122 + qdisc_bstats_update(sch, skb);
11123 ++ qdisc_qstats_backlog_dec(sch, skb);
11124 + sch->q.qlen--;
11125 +
11126 + index = skb->tc_index & (p->indices - 1);
11127 +@@ -399,6 +395,7 @@ static void dsmark_reset(struct Qdisc *sch)
11128 +
11129 + pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
11130 + qdisc_reset(p->q);
11131 ++ sch->qstats.backlog = 0;
11132 + sch->q.qlen = 0;
11133 + }
11134 +
11135 +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
11136 +index f377702d4b91..4816778566d3 100644
11137 +--- a/net/sched/sch_fq.c
11138 ++++ b/net/sched/sch_fq.c
11139 +@@ -659,6 +659,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
11140 + struct fq_sched_data *q = qdisc_priv(sch);
11141 + struct nlattr *tb[TCA_FQ_MAX + 1];
11142 + int err, drop_count = 0;
11143 ++ unsigned drop_len = 0;
11144 + u32 fq_log;
11145 +
11146 + if (!opt)
11147 +@@ -733,10 +734,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
11148 +
11149 + if (!skb)
11150 + break;
11151 ++ drop_len += qdisc_pkt_len(skb);
11152 + kfree_skb(skb);
11153 + drop_count++;
11154 + }
11155 +- qdisc_tree_decrease_qlen(sch, drop_count);
11156 ++ qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
11157 +
11158 + sch_tree_unlock(sch);
11159 + return err;
11160 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
11161 +index 9291598b5aad..96971c7ab228 100644
11162 +--- a/net/sched/sch_fq_codel.c
11163 ++++ b/net/sched/sch_fq_codel.c
11164 +@@ -173,7 +173,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
11165 + static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11166 + {
11167 + struct fq_codel_sched_data *q = qdisc_priv(sch);
11168 +- unsigned int idx;
11169 ++ unsigned int idx, prev_backlog;
11170 + struct fq_codel_flow *flow;
11171 + int uninitialized_var(ret);
11172 +
11173 +@@ -201,6 +201,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11174 + if (++sch->q.qlen <= sch->limit)
11175 + return NET_XMIT_SUCCESS;
11176 +
11177 ++ prev_backlog = sch->qstats.backlog;
11178 + q->drop_overlimit++;
11179 + /* Return Congestion Notification only if we dropped a packet
11180 + * from this flow.
11181 +@@ -209,7 +210,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11182 + return NET_XMIT_CN;
11183 +
11184 + /* As we dropped a packet, better let upper stack know this */
11185 +- qdisc_tree_decrease_qlen(sch, 1);
11186 ++ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
11187 + return NET_XMIT_SUCCESS;
11188 + }
11189 +
11190 +@@ -239,6 +240,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
11191 + struct fq_codel_flow *flow;
11192 + struct list_head *head;
11193 + u32 prev_drop_count, prev_ecn_mark;
11194 ++ unsigned int prev_backlog;
11195 +
11196 + begin:
11197 + head = &q->new_flows;
11198 +@@ -257,6 +259,7 @@ begin:
11199 +
11200 + prev_drop_count = q->cstats.drop_count;
11201 + prev_ecn_mark = q->cstats.ecn_mark;
11202 ++ prev_backlog = sch->qstats.backlog;
11203 +
11204 + skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
11205 + dequeue);
11206 +@@ -274,12 +277,14 @@ begin:
11207 + }
11208 + qdisc_bstats_update(sch, skb);
11209 + flow->deficit -= qdisc_pkt_len(skb);
11210 +- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
11211 ++ /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
11212 + * or HTB crashes. Defer it for next round.
11213 + */
11214 + if (q->cstats.drop_count && sch->q.qlen) {
11215 +- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
11216 ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
11217 ++ q->cstats.drop_len);
11218 + q->cstats.drop_count = 0;
11219 ++ q->cstats.drop_len = 0;
11220 + }
11221 + return skb;
11222 + }
11223 +@@ -347,11 +352,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
11224 + while (sch->q.qlen > sch->limit) {
11225 + struct sk_buff *skb = fq_codel_dequeue(sch);
11226 +
11227 ++ q->cstats.drop_len += qdisc_pkt_len(skb);
11228 + kfree_skb(skb);
11229 + q->cstats.drop_count++;
11230 + }
11231 +- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
11232 ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
11233 + q->cstats.drop_count = 0;
11234 ++ q->cstats.drop_len = 0;
11235 +
11236 + sch_tree_unlock(sch);
11237 + return 0;
11238 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
11239 +index 3c6f6b774ba6..9821e6d641bb 100644
11240 +--- a/net/sched/sch_generic.c
11241 ++++ b/net/sched/sch_generic.c
11242 +@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
11243 + if (validate)
11244 + skb = validate_xmit_skb_list(skb, dev);
11245 +
11246 +- if (skb) {
11247 ++ if (likely(skb)) {
11248 + HARD_TX_LOCK(dev, txq, smp_processor_id());
11249 + if (!netif_xmit_frozen_or_stopped(txq))
11250 + skb = dev_hard_start_xmit(skb, dev, txq, &ret);
11251 +
11252 + HARD_TX_UNLOCK(dev, txq);
11253 ++ } else {
11254 ++ spin_lock(root_lock);
11255 ++ return qdisc_qlen(q);
11256 + }
11257 + spin_lock(root_lock);
11258 +
11259 +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
11260 +index e6c7416d0332..d3e21dac8b40 100644
11261 +--- a/net/sched/sch_hfsc.c
11262 ++++ b/net/sched/sch_hfsc.c
11263 +@@ -895,9 +895,10 @@ static void
11264 + hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
11265 + {
11266 + unsigned int len = cl->qdisc->q.qlen;
11267 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
11268 +
11269 + qdisc_reset(cl->qdisc);
11270 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
11271 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
11272 + }
11273 +
11274 + static void
11275 +@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11276 + new = &noop_qdisc;
11277 + }
11278 +
11279 +- sch_tree_lock(sch);
11280 +- hfsc_purge_queue(sch, cl);
11281 +- *old = cl->qdisc;
11282 +- cl->qdisc = new;
11283 +- sch_tree_unlock(sch);
11284 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
11285 + return 0;
11286 + }
11287 +
11288 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
11289 +index 15d3aabfe250..792c6f330f77 100644
11290 +--- a/net/sched/sch_hhf.c
11291 ++++ b/net/sched/sch_hhf.c
11292 +@@ -390,6 +390,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11293 + struct hhf_sched_data *q = qdisc_priv(sch);
11294 + enum wdrr_bucket_idx idx;
11295 + struct wdrr_bucket *bucket;
11296 ++ unsigned int prev_backlog;
11297 +
11298 + idx = hhf_classify(skb, sch);
11299 +
11300 +@@ -417,6 +418,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11301 + if (++sch->q.qlen <= sch->limit)
11302 + return NET_XMIT_SUCCESS;
11303 +
11304 ++ prev_backlog = sch->qstats.backlog;
11305 + q->drop_overlimit++;
11306 + /* Return Congestion Notification only if we dropped a packet from this
11307 + * bucket.
11308 +@@ -425,7 +427,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11309 + return NET_XMIT_CN;
11310 +
11311 + /* As we dropped a packet, better let upper stack know this. */
11312 +- qdisc_tree_decrease_qlen(sch, 1);
11313 ++ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
11314 + return NET_XMIT_SUCCESS;
11315 + }
11316 +
11317 +@@ -535,7 +537,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
11318 + {
11319 + struct hhf_sched_data *q = qdisc_priv(sch);
11320 + struct nlattr *tb[TCA_HHF_MAX + 1];
11321 +- unsigned int qlen;
11322 ++ unsigned int qlen, prev_backlog;
11323 + int err;
11324 + u64 non_hh_quantum;
11325 + u32 new_quantum = q->quantum;
11326 +@@ -585,12 +587,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
11327 + }
11328 +
11329 + qlen = sch->q.qlen;
11330 ++ prev_backlog = sch->qstats.backlog;
11331 + while (sch->q.qlen > sch->limit) {
11332 + struct sk_buff *skb = hhf_dequeue(sch);
11333 +
11334 + kfree_skb(skb);
11335 + }
11336 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
11337 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
11338 ++ prev_backlog - sch->qstats.backlog);
11339 +
11340 + sch_tree_unlock(sch);
11341 + return 0;
11342 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
11343 +index f1acb0f60dc3..ccff00640713 100644
11344 +--- a/net/sched/sch_htb.c
11345 ++++ b/net/sched/sch_htb.c
11346 +@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11347 + htb_activate(q, cl);
11348 + }
11349 +
11350 ++ qdisc_qstats_backlog_inc(sch, skb);
11351 + sch->q.qlen++;
11352 + return NET_XMIT_SUCCESS;
11353 + }
11354 +@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
11355 + ok:
11356 + qdisc_bstats_update(sch, skb);
11357 + qdisc_unthrottled(sch);
11358 ++ qdisc_qstats_backlog_dec(sch, skb);
11359 + sch->q.qlen--;
11360 + return skb;
11361 + }
11362 +@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
11363 + unsigned int len;
11364 + if (cl->un.leaf.q->ops->drop &&
11365 + (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
11366 ++ sch->qstats.backlog -= len;
11367 + sch->q.qlen--;
11368 + if (!cl->un.leaf.q->q.qlen)
11369 + htb_deactivate(q, cl);
11370 +@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
11371 + }
11372 + cl->prio_activity = 0;
11373 + cl->cmode = HTB_CAN_SEND;
11374 +-
11375 + }
11376 + }
11377 + qdisc_watchdog_cancel(&q->watchdog);
11378 + __skb_queue_purge(&q->direct_queue);
11379 + sch->q.qlen = 0;
11380 ++ sch->qstats.backlog = 0;
11381 + memset(q->hlevel, 0, sizeof(q->hlevel));
11382 + memset(q->row_mask, 0, sizeof(q->row_mask));
11383 + for (i = 0; i < TC_HTB_NUMPRIO; i++)
11384 +@@ -1165,14 +1168,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11385 + cl->common.classid)) == NULL)
11386 + return -ENOBUFS;
11387 +
11388 +- sch_tree_lock(sch);
11389 +- *old = cl->un.leaf.q;
11390 +- cl->un.leaf.q = new;
11391 +- if (*old != NULL) {
11392 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11393 +- qdisc_reset(*old);
11394 +- }
11395 +- sch_tree_unlock(sch);
11396 ++ *old = qdisc_replace(sch, new, &cl->un.leaf.q);
11397 + return 0;
11398 + }
11399 +
11400 +@@ -1274,7 +1270,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
11401 + {
11402 + struct htb_sched *q = qdisc_priv(sch);
11403 + struct htb_class *cl = (struct htb_class *)arg;
11404 +- unsigned int qlen;
11405 + struct Qdisc *new_q = NULL;
11406 + int last_child = 0;
11407 +
11408 +@@ -1294,9 +1289,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
11409 + sch_tree_lock(sch);
11410 +
11411 + if (!cl->level) {
11412 +- qlen = cl->un.leaf.q->q.qlen;
11413 ++ unsigned int qlen = cl->un.leaf.q->q.qlen;
11414 ++ unsigned int backlog = cl->un.leaf.q->qstats.backlog;
11415 ++
11416 + qdisc_reset(cl->un.leaf.q);
11417 +- qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
11418 ++ qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
11419 + }
11420 +
11421 + /* delete from hash and active; remainder in destroy_class */
11422 +@@ -1430,10 +1427,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
11423 + sch_tree_lock(sch);
11424 + if (parent && !parent->level) {
11425 + unsigned int qlen = parent->un.leaf.q->q.qlen;
11426 ++ unsigned int backlog = parent->un.leaf.q->qstats.backlog;
11427 +
11428 + /* turn parent into inner node */
11429 + qdisc_reset(parent->un.leaf.q);
11430 +- qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
11431 ++ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
11432 + qdisc_destroy(parent->un.leaf.q);
11433 + if (parent->prio_activity)
11434 + htb_deactivate(q, parent);
11435 +diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
11436 +index 42dd218871e0..23437d62a8db 100644
11437 +--- a/net/sched/sch_multiq.c
11438 ++++ b/net/sched/sch_multiq.c
11439 +@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
11440 + if (q->queues[i] != &noop_qdisc) {
11441 + struct Qdisc *child = q->queues[i];
11442 + q->queues[i] = &noop_qdisc;
11443 +- qdisc_tree_decrease_qlen(child, child->q.qlen);
11444 ++ qdisc_tree_reduce_backlog(child, child->q.qlen,
11445 ++ child->qstats.backlog);
11446 + qdisc_destroy(child);
11447 + }
11448 + }
11449 +@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
11450 + q->queues[i] = child;
11451 +
11452 + if (old != &noop_qdisc) {
11453 +- qdisc_tree_decrease_qlen(old,
11454 +- old->q.qlen);
11455 ++ qdisc_tree_reduce_backlog(old,
11456 ++ old->q.qlen,
11457 ++ old->qstats.backlog);
11458 + qdisc_destroy(old);
11459 + }
11460 + sch_tree_unlock(sch);
11461 +@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11462 + if (new == NULL)
11463 + new = &noop_qdisc;
11464 +
11465 +- sch_tree_lock(sch);
11466 +- *old = q->queues[band];
11467 +- q->queues[band] = new;
11468 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11469 +- qdisc_reset(*old);
11470 +- sch_tree_unlock(sch);
11471 +-
11472 ++ *old = qdisc_replace(sch, new, &q->queues[band]);
11473 + return 0;
11474 + }
11475 +
11476 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
11477 +index 956ead2cab9a..80124c1edbba 100644
11478 +--- a/net/sched/sch_netem.c
11479 ++++ b/net/sched/sch_netem.c
11480 +@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
11481 + sch->q.qlen++;
11482 + }
11483 +
11484 ++/* netem can't properly corrupt a megapacket (like we get from GSO), so
11485 ++ * when we statistically choose to corrupt one, we instead segment it, returning
11486 ++ * the first packet to be corrupted, and re-enqueue the remaining frames.
11487 ++ */
11488 ++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
11489 ++{
11490 ++ struct sk_buff *segs;
11491 ++ netdev_features_t features = netif_skb_features(skb);
11492 ++
11493 ++ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
11494 ++
11495 ++ if (IS_ERR_OR_NULL(segs)) {
11496 ++ qdisc_reshape_fail(skb, sch);
11497 ++ return NULL;
11498 ++ }
11499 ++ consume_skb(skb);
11500 ++ return segs;
11501 ++}
11502 ++
11503 + /*
11504 + * Insert one skb into qdisc.
11505 + * Note: parent depends on return value to account for queue length.
11506 +@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11507 + /* We don't fill cb now as skb_unshare() may invalidate it */
11508 + struct netem_skb_cb *cb;
11509 + struct sk_buff *skb2;
11510 ++ struct sk_buff *segs = NULL;
11511 ++ unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
11512 ++ int nb = 0;
11513 + int count = 1;
11514 ++ int rc = NET_XMIT_SUCCESS;
11515 +
11516 + /* Random duplication */
11517 + if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
11518 +@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11519 + * do it now in software before we mangle it.
11520 + */
11521 + if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
11522 ++ if (skb_is_gso(skb)) {
11523 ++ segs = netem_segment(skb, sch);
11524 ++ if (!segs)
11525 ++ return NET_XMIT_DROP;
11526 ++ } else {
11527 ++ segs = skb;
11528 ++ }
11529 ++
11530 ++ skb = segs;
11531 ++ segs = segs->next;
11532 ++
11533 + if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
11534 + (skb->ip_summed == CHECKSUM_PARTIAL &&
11535 +- skb_checksum_help(skb)))
11536 +- return qdisc_drop(skb, sch);
11537 ++ skb_checksum_help(skb))) {
11538 ++ rc = qdisc_drop(skb, sch);
11539 ++ goto finish_segs;
11540 ++ }
11541 +
11542 + skb->data[prandom_u32() % skb_headlen(skb)] ^=
11543 + 1<<(prandom_u32() % 8);
11544 +@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11545 + sch->qstats.requeues++;
11546 + }
11547 +
11548 ++finish_segs:
11549 ++ if (segs) {
11550 ++ while (segs) {
11551 ++ skb2 = segs->next;
11552 ++ segs->next = NULL;
11553 ++ qdisc_skb_cb(segs)->pkt_len = segs->len;
11554 ++ last_len = segs->len;
11555 ++ rc = qdisc_enqueue(segs, sch);
11556 ++ if (rc != NET_XMIT_SUCCESS) {
11557 ++ if (net_xmit_drop_count(rc))
11558 ++ qdisc_qstats_drop(sch);
11559 ++ } else {
11560 ++ nb++;
11561 ++ len += last_len;
11562 ++ }
11563 ++ segs = skb2;
11564 ++ }
11565 ++ sch->q.qlen += nb;
11566 ++ if (nb > 1)
11567 ++ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
11568 ++ }
11569 + return NET_XMIT_SUCCESS;
11570 + }
11571 +
11572 +@@ -598,7 +655,8 @@ deliver:
11573 + if (unlikely(err != NET_XMIT_SUCCESS)) {
11574 + if (net_xmit_drop_count(err)) {
11575 + qdisc_qstats_drop(sch);
11576 +- qdisc_tree_decrease_qlen(sch, 1);
11577 ++ qdisc_tree_reduce_backlog(sch, 1,
11578 ++ qdisc_pkt_len(skb));
11579 + }
11580 + }
11581 + goto tfifo_dequeue;
11582 +@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11583 + {
11584 + struct netem_sched_data *q = qdisc_priv(sch);
11585 +
11586 +- sch_tree_lock(sch);
11587 +- *old = q->qdisc;
11588 +- q->qdisc = new;
11589 +- if (*old) {
11590 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11591 +- qdisc_reset(*old);
11592 +- }
11593 +- sch_tree_unlock(sch);
11594 +-
11595 ++ *old = qdisc_replace(sch, new, &q->qdisc);
11596 + return 0;
11597 + }
11598 +
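The finish_segs accounting in netem_enqueue above is easiest to see with numbers. Suppose one GSO skb with qdisc_pkt_len() == 4440 bytes (prev_len) is corrupted and split into nb = 3 segments that enqueue successfully with a combined len of 4482 bytes (segment headers are duplicated, so len can exceed prev_len). The parents already accounted one packet of 4440 bytes, so netem calls

        qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
        /* here: qdisc_tree_reduce_backlog(sch, -2, -42) */

i.e. it "reduces" by negative amounts, growing every ancestor's qlen by 2 and backlog by 42 so the tree matches what was actually queued.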
11599 +diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
11600 +index b783a446d884..71ae3b9629f9 100644
11601 +--- a/net/sched/sch_pie.c
11602 ++++ b/net/sched/sch_pie.c
11603 +@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
11604 + {
11605 + struct pie_sched_data *q = qdisc_priv(sch);
11606 + struct nlattr *tb[TCA_PIE_MAX + 1];
11607 +- unsigned int qlen;
11608 ++ unsigned int qlen, dropped = 0;
11609 + int err;
11610 +
11611 + if (!opt)
11612 +@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
11613 + while (sch->q.qlen > sch->limit) {
11614 + struct sk_buff *skb = __skb_dequeue(&sch->q);
11615 +
11616 ++ dropped += qdisc_pkt_len(skb);
11617 + qdisc_qstats_backlog_dec(sch, skb);
11618 + qdisc_drop(skb, sch);
11619 + }
11620 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
11621 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
11622 +
11623 + sch_tree_unlock(sch);
11624 + return 0;
11625 +diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
11626 +index 8e5cd34aaa74..e671b1a4e815 100644
11627 +--- a/net/sched/sch_prio.c
11628 ++++ b/net/sched/sch_prio.c
11629 +@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
11630 + struct Qdisc *child = q->queues[i];
11631 + q->queues[i] = &noop_qdisc;
11632 + if (child != &noop_qdisc) {
11633 +- qdisc_tree_decrease_qlen(child, child->q.qlen);
11634 ++ qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
11635 + qdisc_destroy(child);
11636 + }
11637 + }
11638 +@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
11639 + q->queues[i] = child;
11640 +
11641 + if (old != &noop_qdisc) {
11642 +- qdisc_tree_decrease_qlen(old,
11643 +- old->q.qlen);
11644 ++ qdisc_tree_reduce_backlog(old,
11645 ++ old->q.qlen,
11646 ++ old->qstats.backlog);
11647 + qdisc_destroy(old);
11648 + }
11649 + sch_tree_unlock(sch);
11650 +@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11651 + if (new == NULL)
11652 + new = &noop_qdisc;
11653 +
11654 +- sch_tree_lock(sch);
11655 +- *old = q->queues[band];
11656 +- q->queues[band] = new;
11657 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11658 +- qdisc_reset(*old);
11659 +- sch_tree_unlock(sch);
11660 +-
11661 ++ *old = qdisc_replace(sch, new, &q->queues[band]);
11662 + return 0;
11663 + }
11664 +
11665 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
11666 +index 3ec7e88a43ca..e2b8fd47008b 100644
11667 +--- a/net/sched/sch_qfq.c
11668 ++++ b/net/sched/sch_qfq.c
11669 +@@ -221,9 +221,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
11670 + static void qfq_purge_queue(struct qfq_class *cl)
11671 + {
11672 + unsigned int len = cl->qdisc->q.qlen;
11673 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
11674 +
11675 + qdisc_reset(cl->qdisc);
11676 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
11677 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
11678 + }
11679 +
11680 + static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
11681 +@@ -619,11 +620,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
11682 + new = &noop_qdisc;
11683 + }
11684 +
11685 +- sch_tree_lock(sch);
11686 +- qfq_purge_queue(cl);
11687 +- *old = cl->qdisc;
11688 +- cl->qdisc = new;
11689 +- sch_tree_unlock(sch);
11690 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
11691 + return 0;
11692 + }
11693 +
11694 +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
11695 +index 6c0534cc7758..8c0508c0e287 100644
11696 +--- a/net/sched/sch_red.c
11697 ++++ b/net/sched/sch_red.c
11698 +@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
11699 + q->flags = ctl->flags;
11700 + q->limit = ctl->limit;
11701 + if (child) {
11702 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
11703 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
11704 ++ q->qdisc->qstats.backlog);
11705 + qdisc_destroy(q->qdisc);
11706 + q->qdisc = child;
11707 + }
11708 +@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11709 + if (new == NULL)
11710 + new = &noop_qdisc;
11711 +
11712 +- sch_tree_lock(sch);
11713 +- *old = q->qdisc;
11714 +- q->qdisc = new;
11715 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11716 +- qdisc_reset(*old);
11717 +- sch_tree_unlock(sch);
11718 ++ *old = qdisc_replace(sch, new, &q->qdisc);
11719 + return 0;
11720 + }
11721 +
11722 +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
11723 +index 5819dd82630d..e1d634e3c255 100644
11724 +--- a/net/sched/sch_sfb.c
11725 ++++ b/net/sched/sch_sfb.c
11726 +@@ -518,7 +518,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
11727 +
11728 + sch_tree_lock(sch);
11729 +
11730 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
11731 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
11732 ++ q->qdisc->qstats.backlog);
11733 + qdisc_destroy(q->qdisc);
11734 + q->qdisc = child;
11735 +
11736 +@@ -614,12 +615,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11737 + if (new == NULL)
11738 + new = &noop_qdisc;
11739 +
11740 +- sch_tree_lock(sch);
11741 +- *old = q->qdisc;
11742 +- q->qdisc = new;
11743 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11744 +- qdisc_reset(*old);
11745 +- sch_tree_unlock(sch);
11746 ++ *old = qdisc_replace(sch, new, &q->qdisc);
11747 + return 0;
11748 + }
11749 +
11750 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
11751 +index b877140beda5..4417fb25166f 100644
11752 +--- a/net/sched/sch_sfq.c
11753 ++++ b/net/sched/sch_sfq.c
11754 +@@ -369,7 +369,7 @@ static int
11755 + sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
11756 + {
11757 + struct sfq_sched_data *q = qdisc_priv(sch);
11758 +- unsigned int hash;
11759 ++ unsigned int hash, dropped;
11760 + sfq_index x, qlen;
11761 + struct sfq_slot *slot;
11762 + int uninitialized_var(ret);
11763 +@@ -484,7 +484,7 @@ enqueue:
11764 + return NET_XMIT_SUCCESS;
11765 +
11766 + qlen = slot->qlen;
11767 +- sfq_drop(sch);
11768 ++ dropped = sfq_drop(sch);
11769 + /* Return Congestion Notification only if we dropped a packet
11770 + * from this flow.
11771 + */
11772 +@@ -492,7 +492,7 @@ enqueue:
11773 + return NET_XMIT_CN;
11774 +
11775 + /* As we dropped a packet, better let upper stack know this */
11776 +- qdisc_tree_decrease_qlen(sch, 1);
11777 ++ qdisc_tree_reduce_backlog(sch, 1, dropped);
11778 + return NET_XMIT_SUCCESS;
11779 + }
11780 +
11781 +@@ -560,6 +560,7 @@ static void sfq_rehash(struct Qdisc *sch)
11782 + struct sfq_slot *slot;
11783 + struct sk_buff_head list;
11784 + int dropped = 0;
11785 ++ unsigned int drop_len = 0;
11786 +
11787 + __skb_queue_head_init(&list);
11788 +
11789 +@@ -588,6 +589,7 @@ static void sfq_rehash(struct Qdisc *sch)
11790 + if (x >= SFQ_MAX_FLOWS) {
11791 + drop:
11792 + qdisc_qstats_backlog_dec(sch, skb);
11793 ++ drop_len += qdisc_pkt_len(skb);
11794 + kfree_skb(skb);
11795 + dropped++;
11796 + continue;
11797 +@@ -617,7 +619,7 @@ drop:
11798 + }
11799 + }
11800 + sch->q.qlen -= dropped;
11801 +- qdisc_tree_decrease_qlen(sch, dropped);
11802 ++ qdisc_tree_reduce_backlog(sch, dropped, drop_len);
11803 + }
11804 +
11805 + static void sfq_perturbation(unsigned long arg)
11806 +@@ -641,7 +643,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
11807 + struct sfq_sched_data *q = qdisc_priv(sch);
11808 + struct tc_sfq_qopt *ctl = nla_data(opt);
11809 + struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
11810 +- unsigned int qlen;
11811 ++ unsigned int qlen, dropped = 0;
11812 + struct red_parms *p = NULL;
11813 +
11814 + if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
11815 +@@ -690,8 +692,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
11816 +
11817 + qlen = sch->q.qlen;
11818 + while (sch->q.qlen > q->limit)
11819 +- sfq_drop(sch);
11820 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
11821 ++ dropped += sfq_drop(sch);
11822 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
11823 +
11824 + del_timer(&q->perturb_timer);
11825 + if (q->perturb_period) {
11826 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
11827 +index a4afde14e865..c2fbde742f37 100644
11828 +--- a/net/sched/sch_tbf.c
11829 ++++ b/net/sched/sch_tbf.c
11830 +@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
11831 + struct tbf_sched_data *q = qdisc_priv(sch);
11832 + struct sk_buff *segs, *nskb;
11833 + netdev_features_t features = netif_skb_features(skb);
11834 ++ unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
11835 + int ret, nb;
11836 +
11837 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
11838 +@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
11839 + nskb = segs->next;
11840 + segs->next = NULL;
11841 + qdisc_skb_cb(segs)->pkt_len = segs->len;
11842 ++ len += segs->len;
11843 + ret = qdisc_enqueue(segs, q->qdisc);
11844 + if (ret != NET_XMIT_SUCCESS) {
11845 + if (net_xmit_drop_count(ret))
11846 +@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
11847 + }
11848 + sch->q.qlen += nb;
11849 + if (nb > 1)
11850 +- qdisc_tree_decrease_qlen(sch, 1 - nb);
11851 ++ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
11852 + consume_skb(skb);
11853 + return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
11854 + }
11855 +@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
11856 +
11857 + sch_tree_lock(sch);
11858 + if (child) {
11859 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
11860 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
11861 ++ q->qdisc->qstats.backlog);
11862 + qdisc_destroy(q->qdisc);
11863 + q->qdisc = child;
11864 + }
11865 +@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
11866 + if (new == NULL)
11867 + new = &noop_qdisc;
11868 +
11869 +- sch_tree_lock(sch);
11870 +- *old = q->qdisc;
11871 +- q->qdisc = new;
11872 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
11873 +- qdisc_reset(*old);
11874 +- sch_tree_unlock(sch);
11875 +-
11876 ++ *old = qdisc_replace(sch, new, &q->qdisc);
11877 + return 0;
11878 + }
11879 +
11880 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
11881 +index 3267a5cbb3e8..18361cbfc882 100644
11882 +--- a/net/sctp/ipv6.c
11883 ++++ b/net/sctp/ipv6.c
11884 +@@ -519,6 +519,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
11885 + }
11886 + return 0;
11887 + }
11888 ++ if (addr1->v6.sin6_port != addr2->v6.sin6_port)
11889 ++ return 0;
11890 + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
11891 + return 0;
11892 + /* If this is a linklocal address, compare the scope_id. */
11893 +diff --git a/net/socket.c b/net/socket.c
11894 +index dcbfa868e398..e66e4f357506 100644
11895 +--- a/net/socket.c
11896 ++++ b/net/socket.c
11897 +@@ -2247,31 +2247,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
11898 + break;
11899 + }
11900 +
11901 +-out_put:
11902 +- fput_light(sock->file, fput_needed);
11903 +-
11904 + if (err == 0)
11905 +- return datagrams;
11906 ++ goto out_put;
11907 +
11908 +- if (datagrams != 0) {
11909 ++ if (datagrams == 0) {
11910 ++ datagrams = err;
11911 ++ goto out_put;
11912 ++ }
11913 ++
11914 ++ /*
11915 ++ * We may return fewer entries than requested (vlen) if the
11916 ++ * sock is non-blocking and there aren't enough datagrams...
11917 ++ */
11918 ++ if (err != -EAGAIN) {
11919 + /*
11920 +- * We may return less entries than requested (vlen) if the
11921 +- * sock is non block and there aren't enough datagrams...
11922 ++ * ... or if recvmsg returns an error after we
11923 ++ * received some datagrams, where we record the
11924 ++ * error to return on the next call or if the
11925 ++ * app asks about it using getsockopt(SO_ERROR).
11926 + */
11927 +- if (err != -EAGAIN) {
11928 +- /*
11929 +- * ... or if recvmsg returns an error after we
11930 +- * received some datagrams, where we record the
11931 +- * error to return on the next call or if the
11932 +- * app asks about it using getsockopt(SO_ERROR).
11933 +- */
11934 +- sock->sk->sk_err = -err;
11935 +- }
11936 +-
11937 +- return datagrams;
11938 ++ sock->sk->sk_err = -err;
11939 + }
11940 ++out_put:
11941 ++ fput_light(sock->file, fput_needed);
11942 +
11943 +- return err;
11944 ++ return datagrams;
11945 + }
11946 +
11947 + SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
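The __sys_recvmmsg rewrite above keeps the syscall's contract — if any datagrams arrived before an error, the count is returned and the error is parked in sk->sk_err for the next call — while fixing the unlock ordering: the old code did fput_light() first and then kept dereferencing sock->sk, a potential use after the final reference drop. From userspace the deferred error is visible roughly like this (illustrative sketch, not from the patch):

        #define _GNU_SOURCE
        #include <sys/socket.h>
        #include <errno.h>
        #include <stdio.h>

        #define VLEN 8

        static void drain(int fd, struct mmsghdr *msgs /* iovecs prefilled */)
        {
                int n = recvmmsg(fd, msgs, VLEN, MSG_DONTWAIT, NULL);

                if (n > 0)
                        /* n datagrams copied out; if an error followed them,
                         * the *next* receive call will report it */
                        printf("received %d datagrams\n", n);
                else if (n < 0 && errno != EAGAIN)
                        perror("recvmmsg"); /* deferred error surfaces here */
        }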
11948 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
11949 +index 8d79e70bd978..9ec709b9707c 100644
11950 +--- a/net/sunrpc/cache.c
11951 ++++ b/net/sunrpc/cache.c
11952 +@@ -1175,14 +1175,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
11953 + }
11954 +
11955 + crq->q.reader = 0;
11956 +- crq->item = cache_get(h);
11957 + crq->buf = buf;
11958 + crq->len = 0;
11959 + crq->readers = 0;
11960 + spin_lock(&queue_lock);
11961 +- if (test_bit(CACHE_PENDING, &h->flags))
11962 ++ if (test_bit(CACHE_PENDING, &h->flags)) {
11963 ++ crq->item = cache_get(h);
11964 + list_add_tail(&crq->q.list, &detail->queue);
11965 +- else
11966 ++ } else
11967 + /* Lost a race, no longer PENDING, so don't enqueue */
11968 + ret = -EAGAIN;
11969 + spin_unlock(&queue_lock);
11970 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
11971 +index e6ce1517367f..16e831dcfde0 100644
11972 +--- a/net/sunrpc/clnt.c
11973 ++++ b/net/sunrpc/clnt.c
11974 +@@ -442,7 +442,7 @@ out_no_rpciod:
11975 + return ERR_PTR(err);
11976 + }
11977 +
11978 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
11979 ++static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
11980 + struct rpc_xprt *xprt)
11981 + {
11982 + struct rpc_clnt *clnt = NULL;
11983 +@@ -474,7 +474,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
11984 +
11985 + return clnt;
11986 + }
11987 +-EXPORT_SYMBOL_GPL(rpc_create_xprt);
11988 +
11989 + /**
11990 + * rpc_create - create an RPC client and transport with one call
11991 +@@ -500,6 +499,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
11992 + };
11993 + char servername[48];
11994 +
11995 ++ if (args->bc_xprt) {
11996 ++ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
11997 ++ xprt = args->bc_xprt->xpt_bc_xprt;
11998 ++ if (xprt) {
11999 ++ xprt_get(xprt);
12000 ++ return rpc_create_xprt(args, xprt);
12001 ++ }
12002 ++ }
12003 ++
12004 + if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
12005 + xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
12006 + if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
12007 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
12008 +index ce9121e8e990..e5ec86dd8dc1 100644
12009 +--- a/net/tipc/netlink_compat.c
12010 ++++ b/net/tipc/netlink_compat.c
12011 +@@ -712,7 +712,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
12012 + goto out;
12013 +
12014 + tipc_tlv_sprintf(msg->rep, "%-10u %s",
12015 +- nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
12016 ++ nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
12017 + scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
12018 + out:
12019 + tipc_tlv_sprintf(msg->rep, "\n");
12020 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
12021 +index 20cc6df07157..d41d424b9913 100644
12022 +--- a/net/tipc/socket.c
12023 ++++ b/net/tipc/socket.c
12024 +@@ -2804,6 +2804,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
12025 + if (err)
12026 + return err;
12027 +
12028 ++ if (!attrs[TIPC_NLA_SOCK])
12029 ++ return -EINVAL;
12030 ++
12031 + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
12032 + attrs[TIPC_NLA_SOCK],
12033 + tipc_nl_sock_policy);
12034 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
12035 +index 535a642a1688..03da879008d7 100644
12036 +--- a/net/unix/af_unix.c
12037 ++++ b/net/unix/af_unix.c
12038 +@@ -935,32 +935,20 @@ fail:
12039 + return NULL;
12040 + }
12041 +
12042 +-static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
12043 ++static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
12044 ++ struct path *res)
12045 + {
12046 +- struct dentry *dentry;
12047 +- struct path path;
12048 +- int err = 0;
12049 +- /*
12050 +- * Get the parent directory, calculate the hash for last
12051 +- * component.
12052 +- */
12053 +- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
12054 +- err = PTR_ERR(dentry);
12055 +- if (IS_ERR(dentry))
12056 +- return err;
12057 ++ int err;
12058 +
12059 +- /*
12060 +- * All right, let's create it.
12061 +- */
12062 +- err = security_path_mknod(&path, dentry, mode, 0);
12063 ++ err = security_path_mknod(path, dentry, mode, 0);
12064 + if (!err) {
12065 +- err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
12066 ++ err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
12067 + if (!err) {
12068 +- res->mnt = mntget(path.mnt);
12069 ++ res->mnt = mntget(path->mnt);
12070 + res->dentry = dget(dentry);
12071 + }
12072 + }
12073 +- done_path_create(&path, dentry);
12074 ++
12075 + return err;
12076 + }
12077 +
12078 +@@ -971,10 +959,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
12079 + struct unix_sock *u = unix_sk(sk);
12080 + struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
12081 + char *sun_path = sunaddr->sun_path;
12082 +- int err;
12083 ++ int err, name_err;
12084 + unsigned int hash;
12085 + struct unix_address *addr;
12086 + struct hlist_head *list;
12087 ++ struct path path;
12088 ++ struct dentry *dentry;
12089 +
12090 + err = -EINVAL;
12091 + if (sunaddr->sun_family != AF_UNIX)
12092 +@@ -990,14 +980,34 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
12093 + goto out;
12094 + addr_len = err;
12095 +
12096 ++ name_err = 0;
12097 ++ dentry = NULL;
12098 ++ if (sun_path[0]) {
12099 ++ /* Get the parent directory, calculate the hash for the last
12100 ++ * component.
12101 ++ */
12102 ++ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
12103 ++
12104 ++ if (IS_ERR(dentry)) {
12105 ++ /* delay report until after 'already bound' check */
12106 ++ name_err = PTR_ERR(dentry);
12107 ++ dentry = NULL;
12108 ++ }
12109 ++ }
12110 ++
12111 + err = mutex_lock_interruptible(&u->readlock);
12112 + if (err)
12113 +- goto out;
12114 ++ goto out_path;
12115 +
12116 + err = -EINVAL;
12117 + if (u->addr)
12118 + goto out_up;
12119 +
12120 ++ if (name_err) {
12121 ++ err = name_err == -EEXIST ? -EADDRINUSE : name_err;
12122 ++ goto out_up;
12123 ++ }
12124 ++
12125 + err = -ENOMEM;
12126 + addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
12127 + if (!addr)
12128 +@@ -1008,11 +1018,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
12129 + addr->hash = hash ^ sk->sk_type;
12130 + atomic_set(&addr->refcnt, 1);
12131 +
12132 +- if (sun_path[0]) {
12133 +- struct path path;
12134 ++ if (dentry) {
12135 ++ struct path u_path;
12136 + umode_t mode = S_IFSOCK |
12137 + (SOCK_INODE(sock)->i_mode & ~current_umask());
12138 +- err = unix_mknod(sun_path, mode, &path);
12139 ++ err = unix_mknod(dentry, &path, mode, &u_path);
12140 + if (err) {
12141 + if (err == -EEXIST)
12142 + err = -EADDRINUSE;
12143 +@@ -1020,9 +1030,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
12144 + goto out_up;
12145 + }
12146 + addr->hash = UNIX_HASH_SIZE;
12147 +- hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
12148 ++ hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
12149 + spin_lock(&unix_table_lock);
12150 +- u->path = path;
12151 ++ u->path = u_path;
12152 + list = &unix_socket_table[hash];
12153 + } else {
12154 + spin_lock(&unix_table_lock);
12155 +@@ -1045,6 +1055,10 @@ out_unlock:
12156 + spin_unlock(&unix_table_lock);
12157 + out_up:
12158 + mutex_unlock(&u->readlock);
12159 ++out_path:
12160 ++ if (dentry)
12161 ++ done_path_create(&path, dentry);
12162 ++
12163 + out:
12164 + return err;
12165 + }
12166 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
12167 +index 2ec86e652a19..e1c69b216db3 100644
12168 +--- a/net/vmw_vsock/af_vsock.c
12169 ++++ b/net/vmw_vsock/af_vsock.c
12170 +@@ -1794,27 +1794,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
12171 + else if (sk->sk_shutdown & RCV_SHUTDOWN)
12172 + err = 0;
12173 +
12174 +- if (copied > 0) {
12175 +- /* We only do these additional bookkeeping/notification steps
12176 +- * if we actually copied something out of the queue pair
12177 +- * instead of just peeking ahead.
12178 +- */
12179 +-
12180 +- if (!(flags & MSG_PEEK)) {
12181 +- /* If the other side has shutdown for sending and there
12182 +- * is nothing more to read, then modify the socket
12183 +- * state.
12184 +- */
12185 +- if (vsk->peer_shutdown & SEND_SHUTDOWN) {
12186 +- if (vsock_stream_has_data(vsk) <= 0) {
12187 +- sk->sk_state = SS_UNCONNECTED;
12188 +- sock_set_flag(sk, SOCK_DONE);
12189 +- sk->sk_state_change(sk);
12190 +- }
12191 +- }
12192 +- }
12193 ++ if (copied > 0)
12194 + err = copied;
12195 +- }
12196 +
12197 + out_wait:
12198 + finish_wait(sk_sleep(sk), &wait);
12199 +diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
12200 +index 7ecd04c21360..997ff7b2509b 100644
12201 +--- a/net/x25/x25_facilities.c
12202 ++++ b/net/x25/x25_facilities.c
12203 +@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
12204 +
12205 + memset(&theirs, 0, sizeof(theirs));
12206 + memcpy(new, ours, sizeof(*new));
12207 ++ memset(dte, 0, sizeof(*dte));
12208 +
12209 + len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
12210 + if (len < 0)
12211 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
12212 +index b58286ecd156..cbaf52c837f4 100644
12213 +--- a/net/xfrm/xfrm_input.c
12214 ++++ b/net/xfrm/xfrm_input.c
12215 +@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
12216 + XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
12217 +
12218 + skb_dst_force(skb);
12219 ++ dev_hold(skb->dev);
12220 +
12221 + nexthdr = x->type->input(x, skb);
12222 +
12223 + if (nexthdr == -EINPROGRESS)
12224 + return 0;
12225 + resume:
12226 ++ dev_put(skb->dev);
12227 ++
12228 + spin_lock(&x->lock);
12229 + if (nexthdr <= 0) {
12230 + if (nexthdr == -EBADMSG) {
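The xfrm_input change above pins skb->dev across the asynchronous crypto stage: x->type->input() may return -EINPROGRESS and complete later on the resume path, by which time the device could otherwise have been unregistered. The shape of the pattern, reduced to its essentials (a sketch, not the patch itself):

        dev_hold(skb->dev);               /* pin before the async hand-off */
        nexthdr = x->type->input(x, skb);
        if (nexthdr == -EINPROGRESS)
                return 0;                 /* resume callback re-enters below */
        /* resume: */
        dev_put(skb->dev);                /* both paths reach here exactly once */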
12231 +diff --git a/security/keys/key.c b/security/keys/key.c
12232 +index aee2ec5a18fc..970b58ee3e20 100644
12233 +--- a/security/keys/key.c
12234 ++++ b/security/keys/key.c
12235 +@@ -578,7 +578,7 @@ int key_reject_and_link(struct key *key,
12236 +
12237 + mutex_unlock(&key_construction_mutex);
12238 +
12239 +- if (keyring)
12240 ++ if (keyring && link_ret == 0)
12241 + __key_link_end(keyring, &key->index_key, edit);
12242 +
12243 + /* wake up anyone waiting for a key to be constructed */
12244 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
12245 +index 7d45645f10ba..253a2da05cf0 100644
12246 +--- a/sound/core/pcm_lib.c
12247 ++++ b/sound/core/pcm_lib.c
12248 +@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
12249 + char name[16];
12250 + snd_pcm_debug_name(substream, name, sizeof(name));
12251 + pcm_err(substream->pcm,
12252 +- "BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
12253 ++ "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
12254 + name, pos, runtime->buffer_size,
12255 + runtime->period_size);
12256 + }
12257 +diff --git a/sound/core/timer.c b/sound/core/timer.c
12258 +index bf48e71f73cd..1782555fcaca 100644
12259 +--- a/sound/core/timer.c
12260 ++++ b/sound/core/timer.c
12261 +@@ -1051,8 +1051,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
12262 + njiff += timer->sticks - priv->correction;
12263 + priv->correction = 0;
12264 + }
12265 +- priv->last_expires = priv->tlist.expires = njiff;
12266 +- add_timer(&priv->tlist);
12267 ++ priv->last_expires = njiff;
12268 ++ mod_timer(&priv->tlist, njiff);
12269 + return 0;
12270 + }
12271 +
12272 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
12273 +index c5d5217a4180..4df5dc1a3765 100644
12274 +--- a/sound/drivers/dummy.c
12275 ++++ b/sound/drivers/dummy.c
12276 +@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
12277 +
12278 + static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
12279 + {
12280 ++ hrtimer_cancel(&dpcm->timer);
12281 + tasklet_kill(&dpcm->tasklet);
12282 + }
12283 +
12284 +diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
12285 +index 961ca32ee989..37f2dacd6691 100644
12286 +--- a/sound/hda/hdac_device.c
12287 ++++ b/sound/hda/hdac_device.c
12288 +@@ -261,13 +261,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
12289 + int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
12290 + int parm)
12291 + {
12292 +- int val;
12293 ++ unsigned int cmd, val;
12294 +
12295 +- if (codec->regmap)
12296 +- regcache_cache_bypass(codec->regmap, true);
12297 +- val = snd_hdac_read_parm(codec, nid, parm);
12298 +- if (codec->regmap)
12299 +- regcache_cache_bypass(codec->regmap, false);
12300 ++ cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
12301 ++ if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
12302 ++ return -1;
12303 + return val;
12304 + }
12305 + EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
12306 +diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
12307 +index b0ed870ffb88..f3eb78e47ced 100644
12308 +--- a/sound/hda/hdac_regmap.c
12309 ++++ b/sound/hda/hdac_regmap.c
12310 +@@ -411,7 +411,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
12311 + err = reg_raw_write(codec, reg, val);
12312 + if (err == -EAGAIN) {
12313 + err = snd_hdac_power_up_pm(codec);
12314 +- if (!err)
12315 ++ if (err >= 0)
12316 + err = reg_raw_write(codec, reg, val);
12317 + snd_hdac_power_down_pm(codec);
12318 + }
12319 +@@ -420,14 +420,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
12320 + EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
12321 +
12322 + static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
12323 +- unsigned int *val)
12324 ++ unsigned int *val, bool uncached)
12325 + {
12326 +- if (!codec->regmap)
12327 ++ if (uncached || !codec->regmap)
12328 + return hda_reg_read(codec, reg, val);
12329 + else
12330 + return regmap_read(codec->regmap, reg, val);
12331 + }
12332 +
12333 ++static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
12334 ++ unsigned int reg, unsigned int *val,
12335 ++ bool uncached)
12336 ++{
12337 ++ int err;
12338 ++
12339 ++ err = reg_raw_read(codec, reg, val, uncached);
12340 ++ if (err == -EAGAIN) {
12341 ++ err = snd_hdac_power_up_pm(codec);
12342 ++ if (err >= 0)
12343 ++ err = reg_raw_read(codec, reg, val, uncached);
12344 ++ snd_hdac_power_down_pm(codec);
12345 ++ }
12346 ++ return err;
12347 ++}
12348 ++
12349 + /**
12350 + * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
12351 + * @codec: the codec object
12352 +@@ -439,19 +455,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
12353 + int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
12354 + unsigned int *val)
12355 + {
12356 +- int err;
12357 +-
12358 +- err = reg_raw_read(codec, reg, val);
12359 +- if (err == -EAGAIN) {
12360 +- err = snd_hdac_power_up_pm(codec);
12361 +- if (!err)
12362 +- err = reg_raw_read(codec, reg, val);
12363 +- snd_hdac_power_down_pm(codec);
12364 +- }
12365 +- return err;
12366 ++ return __snd_hdac_regmap_read_raw(codec, reg, val, false);
12367 + }
12368 + EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
12369 +
12370 ++/* Works like snd_hdac_regmap_read_raw(), but bypasses the regmap cache
12371 ++ * and always reads directly via HDA verbs.
12372 ++ */
12373 ++int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
12374 ++ unsigned int reg, unsigned int *val)
12375 ++{
12376 ++ return __snd_hdac_regmap_read_raw(codec, reg, val, true);
12377 ++}
12378 ++
12379 + /**
12380 + * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
12381 + * @codec: the codec object
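Two changes meet in this file: the -EAGAIN retry logic is factored into __snd_hdac_regmap_read_raw() so the cached and uncached paths share it, and the success test on snd_hdac_power_up_pm() becomes err >= 0. The latter matters because the underlying runtime-PM get may return a positive value when the device was already active; the old !err test misread that as failure and skipped the retry. The pattern in isolation (a sketch, assuming those return conventions):

	err = reg_raw_read(codec, reg, val, uncached);
	if (err == -EAGAIN) {			/* codec was suspended */
		err = snd_hdac_power_up_pm(codec);
		if (err >= 0)			/* zero or positive: powered up */
			err = reg_raw_read(codec, reg, val, uncached);
		snd_hdac_power_down_pm(codec);
	}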
12382 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12383 +index a62872f7b41a..abf8d342f1f4 100644
12384 +--- a/sound/pci/hda/patch_realtek.c
12385 ++++ b/sound/pci/hda/patch_realtek.c
12386 +@@ -5504,6 +5504,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12387 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
12388 + SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
12389 + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
12390 ++ SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
12391 ++ SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
12392 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
12393 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
12394 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
12395 +@@ -5635,8 +5637,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
12396 + {0x15, 0x0221401f}, \
12397 + {0x1a, 0x411111f0}, \
12398 + {0x1b, 0x411111f0}, \
12399 +- {0x1d, 0x40700001}, \
12400 +- {0x1e, 0x411111f0}
12401 ++ {0x1d, 0x40700001}
12402 +
12403 + #define ALC298_STANDARD_PINS \
12404 + {0x18, 0x411111f0}, \
12405 +@@ -5920,35 +5921,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
12406 + {0x13, 0x411111f0},
12407 + {0x16, 0x01014020},
12408 + {0x18, 0x411111f0},
12409 +- {0x19, 0x01a19030}),
12410 ++ {0x19, 0x01a19030},
12411 ++ {0x1e, 0x411111f0}),
12412 + SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
12413 + ALC292_STANDARD_PINS,
12414 + {0x12, 0x90a60140},
12415 + {0x13, 0x411111f0},
12416 + {0x16, 0x01014020},
12417 + {0x18, 0x02a19031},
12418 +- {0x19, 0x01a1903e}),
12419 ++ {0x19, 0x01a1903e},
12420 ++ {0x1e, 0x411111f0}),
12421 + SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
12422 + ALC292_STANDARD_PINS,
12423 + {0x12, 0x90a60140},
12424 + {0x13, 0x411111f0},
12425 + {0x16, 0x411111f0},
12426 + {0x18, 0x411111f0},
12427 +- {0x19, 0x411111f0}),
12428 ++ {0x19, 0x411111f0},
12429 ++ {0x1e, 0x411111f0}),
12430 + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
12431 + ALC292_STANDARD_PINS,
12432 + {0x12, 0x40000000},
12433 + {0x13, 0x90a60140},
12434 + {0x16, 0x21014020},
12435 + {0x18, 0x411111f0},
12436 +- {0x19, 0x21a19030}),
12437 ++ {0x19, 0x21a19030},
12438 ++ {0x1e, 0x411111f0}),
12439 + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
12440 + ALC292_STANDARD_PINS,
12441 + {0x12, 0x40000000},
12442 + {0x13, 0x90a60140},
12443 + {0x16, 0x411111f0},
12444 + {0x18, 0x411111f0},
12445 +- {0x19, 0x411111f0}),
12446 ++ {0x19, 0x411111f0},
12447 ++ {0x1e, 0x411111f0}),
12448 ++ SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
12449 ++ ALC292_STANDARD_PINS,
12450 ++ {0x12, 0x40000000},
12451 ++ {0x13, 0x90a60140},
12452 ++ {0x16, 0x21014020},
12453 ++ {0x18, 0x411111f0},
12454 ++ {0x19, 0x21a19030},
12455 ++ {0x1e, 0x411111ff}),
12456 + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
12457 + ALC298_STANDARD_PINS,
12458 + {0x12, 0x90a60130},
12459 +diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
12460 +index f7549cc7ea85..d1f53abb60de 100644
12461 +--- a/sound/soc/codecs/ssm4567.c
12462 ++++ b/sound/soc/codecs/ssm4567.c
12463 +@@ -338,6 +338,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
12464 + regcache_cache_only(ssm4567->regmap, !enable);
12465 +
12466 + if (enable) {
12467 ++ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
12468 ++ 0x00);
12469 ++ if (ret)
12470 ++ return ret;
12471 ++
12472 + ret = regmap_update_bits(ssm4567->regmap,
12473 + SSM4567_REG_POWER_CTRL,
12474 + SSM4567_POWER_SPWDN, 0x00);
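Issuing a soft reset before clearing the power-down bit puts the amplifier back into its documented register defaults on every power-up, so the regmap cache re-enabled just above and the hardware start from the same state even when supply power was actually cut. The general idiom, with names shortened for illustration:

	ret = regmap_write(map, REG_SOFT_RESET, 0x00);	/* hw -> known defaults */
	if (ret)
		return ret;
	ret = regmap_update_bits(map, REG_POWER_CTRL, POWER_SPWDN, 0); /* wake */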
12475 +diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
12476 +index e4145509d63c..9c5219392460 100644
12477 +--- a/sound/soc/samsung/ac97.c
12478 ++++ b/sound/soc/samsung/ac97.c
12479 +@@ -324,7 +324,7 @@ static const struct snd_soc_component_driver s3c_ac97_component = {
12480 +
12481 + static int s3c_ac97_probe(struct platform_device *pdev)
12482 + {
12483 +- struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
12484 ++ struct resource *mem_res, *irq_res;
12485 + struct s3c_audio_pdata *ac97_pdata;
12486 + int ret;
12487 +
12488 +@@ -335,24 +335,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
12489 + }
12490 +
12491 + /* Check for availability of necessary resource */
12492 +- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
12493 +- if (!dmatx_res) {
12494 +- dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
12495 +- return -ENXIO;
12496 +- }
12497 +-
12498 +- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
12499 +- if (!dmarx_res) {
12500 +- dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
12501 +- return -ENXIO;
12502 +- }
12503 +-
12504 +- dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
12505 +- if (!dmamic_res) {
12506 +- dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
12507 +- return -ENXIO;
12508 +- }
12509 +-
12510 + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
12511 + if (!irq_res) {
12512 + dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
12513 +@@ -364,11 +346,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
12514 + if (IS_ERR(s3c_ac97.regs))
12515 + return PTR_ERR(s3c_ac97.regs);
12516 +
12517 +- s3c_ac97_pcm_out.channel = dmatx_res->start;
12518 ++ s3c_ac97_pcm_out.slave = ac97_pdata->dma_playback;
12519 + s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
12520 +- s3c_ac97_pcm_in.channel = dmarx_res->start;
12521 ++ s3c_ac97_pcm_in.slave = ac97_pdata->dma_capture;
12522 + s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
12523 +- s3c_ac97_mic_in.channel = dmamic_res->start;
12524 ++ s3c_ac97_mic_in.slave = ac97_pdata->dma_capture_mic;
12525 + s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
12526 +
12527 + init_completion(&s3c_ac97.done);
12528 +diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
12529 +index 0e85dcfec023..085ef30f5ca2 100644
12530 +--- a/sound/soc/samsung/dma.h
12531 ++++ b/sound/soc/samsung/dma.h
12532 +@@ -15,7 +15,7 @@
12533 + #include <sound/dmaengine_pcm.h>
12534 +
12535 + struct s3c_dma_params {
12536 +- int channel; /* Channel ID */
12537 ++ void *slave; /* DMA slave / filter data */
12538 + dma_addr_t dma_addr;
12539 + int dma_size; /* Size of the DMA transfer */
12540 + char *ch_name;
12541 +diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
12542 +index 506f5bf6d082..727008d57d14 100644
12543 +--- a/sound/soc/samsung/dmaengine.c
12544 ++++ b/sound/soc/samsung/dmaengine.c
12545 +@@ -50,14 +50,14 @@ void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
12546 +
12547 + if (playback) {
12548 + playback_data = &playback->dma_data;
12549 +- playback_data->filter_data = (void *)playback->channel;
12550 ++ playback_data->filter_data = playback->slave;
12551 + playback_data->chan_name = playback->ch_name;
12552 + playback_data->addr = playback->dma_addr;
12553 + playback_data->addr_width = playback->dma_size;
12554 + }
12555 + if (capture) {
12556 + capture_data = &capture->dma_data;
12557 +- capture_data->filter_data = (void *)capture->channel;
12558 ++ capture_data->filter_data = capture->slave;
12559 + capture_data->chan_name = capture->ch_name;
12560 + capture_data->addr = capture->dma_addr;
12561 + capture_data->addr_width = capture->dma_size;
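In this and the remaining Samsung hunks (i2s.c, pcm.c, spdif.c and the s3c*-i2s drivers below), the integer channel number that used to come from an IORESOURCE_DMA resource is replaced by an opaque slave cookie taken from s3c_audio_pdata. It ends up in snd_dmaengine_dai_dma_data.filter_data, where the platform's dmaengine filter callback matches it when a channel is requested. A sketch of how such a filter might consume it (hypothetical; the real filter lives in the platform DMA driver and its matching rule is controller-specific):

	static bool s3c_dma_filter(struct dma_chan *chan, void *filter_param)
	{
		/* filter_param is the 'slave' cookie set via platform data */
		return chan->private == filter_param;	/* illustrative match */
	}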
12562 +diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
12563 +index 5e8ccb0a7028..9456c78c9051 100644
12564 +--- a/sound/soc/samsung/i2s.c
12565 ++++ b/sound/soc/samsung/i2s.c
12566 +@@ -1260,27 +1260,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
12567 + pri_dai->lock = &pri_dai->spinlock;
12568 +
12569 + if (!np) {
12570 +- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
12571 +- if (!res) {
12572 +- dev_err(&pdev->dev,
12573 +- "Unable to get I2S-TX dma resource\n");
12574 +- return -ENXIO;
12575 +- }
12576 +- pri_dai->dma_playback.channel = res->start;
12577 +-
12578 +- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
12579 +- if (!res) {
12580 +- dev_err(&pdev->dev,
12581 +- "Unable to get I2S-RX dma resource\n");
12582 +- return -ENXIO;
12583 +- }
12584 +- pri_dai->dma_capture.channel = res->start;
12585 +-
12586 + if (i2s_pdata == NULL) {
12587 + dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
12588 + return -EINVAL;
12589 + }
12590 +
12591 ++ pri_dai->dma_playback.slave = i2s_pdata->dma_playback;
12592 ++ pri_dai->dma_capture.slave = i2s_pdata->dma_capture;
12593 ++
12594 + if (&i2s_pdata->type)
12595 + i2s_cfg = &i2s_pdata->type.i2s;
12596 +
12597 +@@ -1341,11 +1328,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
12598 + sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
12599 + sec_dai->dma_playback.ch_name = "tx-sec";
12600 +
12601 +- if (!np) {
12602 +- res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
12603 +- if (res)
12604 +- sec_dai->dma_playback.channel = res->start;
12605 +- }
12606 ++ if (!np)
12607 ++ sec_dai->dma_playback.slave = i2s_pdata->dma_play_sec;
12608 +
12609 + sec_dai->dma_playback.dma_size = 4;
12610 + sec_dai->addr = pri_dai->addr;
12611 +diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
12612 +index b320a9d3fbf8..c77f324e0bb8 100644
12613 +--- a/sound/soc/samsung/pcm.c
12614 ++++ b/sound/soc/samsung/pcm.c
12615 +@@ -486,7 +486,7 @@ static const struct snd_soc_component_driver s3c_pcm_component = {
12616 + static int s3c_pcm_dev_probe(struct platform_device *pdev)
12617 + {
12618 + struct s3c_pcm_info *pcm;
12619 +- struct resource *mem_res, *dmatx_res, *dmarx_res;
12620 ++ struct resource *mem_res;
12621 + struct s3c_audio_pdata *pcm_pdata;
12622 + int ret;
12623 +
12624 +@@ -499,18 +499,6 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
12625 + pcm_pdata = pdev->dev.platform_data;
12626 +
12627 + /* Check for availability of necessary resource */
12628 +- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
12629 +- if (!dmatx_res) {
12630 +- dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
12631 +- return -ENXIO;
12632 +- }
12633 +-
12634 +- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
12635 +- if (!dmarx_res) {
12636 +- dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
12637 +- return -ENXIO;
12638 +- }
12639 +-
12640 + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12641 + if (!mem_res) {
12642 + dev_err(&pdev->dev, "Unable to get register resource\n");
12643 +@@ -568,8 +556,10 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
12644 + s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
12645 + + S3C_PCM_TXFIFO;
12646 +
12647 +- s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
12648 +- s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;
12649 ++ if (pcm_pdata) {
12650 ++ s3c_pcm_stereo_in[pdev->id].slave = pcm_pdata->dma_capture;
12651 ++ s3c_pcm_stereo_out[pdev->id].slave = pcm_pdata->dma_playback;
12652 ++ }
12653 +
12654 + pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
12655 + pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
12656 +diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
12657 +index df65c5b494b1..b6ab3fc5789e 100644
12658 +--- a/sound/soc/samsung/s3c-i2s-v2.c
12659 ++++ b/sound/soc/samsung/s3c-i2s-v2.c
12660 +@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
12661 + #endif
12662 +
12663 + int s3c_i2sv2_register_component(struct device *dev, int id,
12664 +- struct snd_soc_component_driver *cmp_drv,
12665 ++ const struct snd_soc_component_driver *cmp_drv,
12666 + struct snd_soc_dai_driver *dai_drv)
12667 + {
12668 + struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
12669 +diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
12670 +index 90abab364b49..d0684145ed1f 100644
12671 +--- a/sound/soc/samsung/s3c-i2s-v2.h
12672 ++++ b/sound/soc/samsung/s3c-i2s-v2.h
12673 +@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
12674 + * soc core.
12675 + */
12676 + extern int s3c_i2sv2_register_component(struct device *dev, int id,
12677 +- struct snd_soc_component_driver *cmp_drv,
12678 ++ const struct snd_soc_component_driver *cmp_drv,
12679 + struct snd_soc_dai_driver *dai_drv);
12680 +
12681 + #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
12682 +diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
12683 +index 2b766d212ce0..77d27c85a32a 100644
12684 +--- a/sound/soc/samsung/s3c2412-i2s.c
12685 ++++ b/sound/soc/samsung/s3c2412-i2s.c
12686 +@@ -34,13 +34,13 @@
12687 + #include "s3c2412-i2s.h"
12688 +
12689 + static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
12690 +- .channel = DMACH_I2S_OUT,
12691 ++ .slave = (void *)(uintptr_t)DMACH_I2S_OUT,
12692 + .ch_name = "tx",
12693 + .dma_size = 4,
12694 + };
12695 +
12696 + static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
12697 +- .channel = DMACH_I2S_IN,
12698 ++ .slave = (void *)(uintptr_t)DMACH_I2S_IN,
12699 + .ch_name = "rx",
12700 + .dma_size = 4,
12701 + };
12702 +diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
12703 +index 5bf723689692..9da3a77ea2c7 100644
12704 +--- a/sound/soc/samsung/s3c24xx-i2s.c
12705 ++++ b/sound/soc/samsung/s3c24xx-i2s.c
12706 +@@ -32,13 +32,13 @@
12707 + #include "s3c24xx-i2s.h"
12708 +
12709 + static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
12710 +- .channel = DMACH_I2S_OUT,
12711 ++ .slave = (void *)(uintptr_t)DMACH_I2S_OUT,
12712 + .ch_name = "tx",
12713 + .dma_size = 2,
12714 + };
12715 +
12716 + static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
12717 +- .channel = DMACH_I2S_IN,
12718 ++ .slave = (void *)(uintptr_t)DMACH_I2S_IN,
12719 + .ch_name = "rx",
12720 + .dma_size = 2,
12721 + };
12722 +diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
12723 +index 36dbc0e96004..9dd7ee6d03ff 100644
12724 +--- a/sound/soc/samsung/spdif.c
12725 ++++ b/sound/soc/samsung/spdif.c
12726 +@@ -359,7 +359,7 @@ static const struct snd_soc_component_driver samsung_spdif_component = {
12727 + static int spdif_probe(struct platform_device *pdev)
12728 + {
12729 + struct s3c_audio_pdata *spdif_pdata;
12730 +- struct resource *mem_res, *dma_res;
12731 ++ struct resource *mem_res;
12732 + struct samsung_spdif_info *spdif;
12733 + int ret;
12734 +
12735 +@@ -367,12 +367,6 @@ static int spdif_probe(struct platform_device *pdev)
12736 +
12737 + dev_dbg(&pdev->dev, "Entered %s\n", __func__);
12738 +
12739 +- dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
12740 +- if (!dma_res) {
12741 +- dev_err(&pdev->dev, "Unable to get dma resource.\n");
12742 +- return -ENXIO;
12743 +- }
12744 +-
12745 + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12746 + if (!mem_res) {
12747 + dev_err(&pdev->dev, "Unable to get register resource.\n");
12748 +@@ -432,7 +426,7 @@ static int spdif_probe(struct platform_device *pdev)
12749 +
12750 + spdif_stereo_out.dma_size = 2;
12751 + spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
12752 +- spdif_stereo_out.channel = dma_res->start;
12753 ++ spdif_stereo_out.slave = spdif_pdata ? spdif_pdata->dma_playback : NULL;
12754 +
12755 + spdif->dma_playback = &spdif_stereo_out;
12756 +
12757 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
12758 +index 194fa7f60a38..e27df0d3898b 100644
12759 +--- a/sound/usb/quirks.c
12760 ++++ b/sound/usb/quirks.c
12761 +@@ -147,6 +147,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
12762 + usb_audio_err(chip, "cannot memdup\n");
12763 + return -ENOMEM;
12764 + }
12765 ++ INIT_LIST_HEAD(&fp->list);
12766 + if (fp->nr_rates > MAX_NR_RATES) {
12767 + kfree(fp);
12768 + return -EINVAL;
12769 +@@ -164,23 +165,18 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
12770 + stream = (fp->endpoint & USB_DIR_IN)
12771 + ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
12772 + err = snd_usb_add_audio_stream(chip, stream, fp);
12773 +- if (err < 0) {
12774 +- kfree(fp);
12775 +- kfree(rate_table);
12776 +- return err;
12777 +- }
12778 ++ if (err < 0)
12779 ++ goto error;
12780 + if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
12781 + fp->altset_idx >= iface->num_altsetting) {
12782 +- kfree(fp);
12783 +- kfree(rate_table);
12784 +- return -EINVAL;
12785 ++ err = -EINVAL;
12786 ++ goto error;
12787 + }
12788 + alts = &iface->altsetting[fp->altset_idx];
12789 + altsd = get_iface_desc(alts);
12790 + if (altsd->bNumEndpoints < 1) {
12791 +- kfree(fp);
12792 +- kfree(rate_table);
12793 +- return -EINVAL;
12794 ++ err = -EINVAL;
12795 ++ goto error;
12796 + }
12797 +
12798 + fp->protocol = altsd->bInterfaceProtocol;
12799 +@@ -193,6 +189,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
12800 + snd_usb_init_pitch(chip, fp->iface, alts, fp);
12801 + snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
12802 + return 0;
12803 ++
12804 ++ error:
12805 ++ list_del(&fp->list); /* unlink to avoid a double-free */
12806 ++ kfree(fp);
12807 ++ kfree(rate_table);
12808 ++ return err;
12809 + }
12810 +
12811 + static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
12812 +@@ -465,6 +467,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
12813 + fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
12814 + fp->datainterval = 0;
12815 + fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
12816 ++ INIT_LIST_HEAD(&fp->list);
12817 +
12818 + switch (fp->maxpacksize) {
12819 + case 0x120:
12820 +@@ -488,6 +491,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
12821 + ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
12822 + err = snd_usb_add_audio_stream(chip, stream, fp);
12823 + if (err < 0) {
12824 ++ list_del(&fp->list); /* unlink to avoid a double-free */
12825 + kfree(fp);
12826 + return err;
12827 + }
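Both quirk fixes enforce the same ownership rule: once snd_usb_add_audio_stream() links fp into a substream's fmt_list, the list owns fp and frees it when the chip instance is released, so an error path that kfree()s fp must unlink it first, and fp->list must be initialised so list_del() is safe even when the add failed before linking. In miniature (a sketch, not the driver code):

	INIT_LIST_HEAD(&fp->list);		/* makes list_del() below safe */
	err = snd_usb_add_audio_stream(chip, stream, fp); /* may link fp */
	if (err < 0)
		goto error;
	return 0;
 error:
	list_del(&fp->list);			/* harmless if never linked */
	kfree(fp);
	return err;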
12828 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
12829 +index 310a3822d2b7..25e8075f9ea3 100644
12830 +--- a/sound/usb/stream.c
12831 ++++ b/sound/usb/stream.c
12832 +@@ -315,7 +315,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
12833 + /*
12834 + * add this endpoint to the chip instance.
12835 + * if a stream with the same endpoint already exists, append to it.
12836 +- * if not, create a new pcm stream.
12837 ++ * if not, create a new pcm stream. note that fp is added to the substream
12838 ++ * fmt_list and will be freed on the chip instance release; do not free fp
12839 ++ * and do not remove it from the fmt_list, or a double-free results.
12840 + */
12841 + int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
12842 + int stream,
12843 +@@ -668,6 +670,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
12844 + * (fp->maxpacksize & 0x7ff);
12845 + fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
12846 + fp->clock = clock;
12847 ++ INIT_LIST_HEAD(&fp->list);
12848 +
12849 + /* some quirks for attributes here */
12850 +
12851 +@@ -716,6 +719,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
12852 + dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
12853 + err = snd_usb_add_audio_stream(chip, stream, fp);
12854 + if (err < 0) {
12855 ++ list_del(&fp->list); /* unlink to avoid a double-free */
12856 + kfree(fp->rate_table);
12857 + kfree(fp->chmap);
12858 + kfree(fp);
12859 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
12860 +index 04e150d83e7d..756ed9fdc9ad 100644
12861 +--- a/tools/perf/Documentation/perf-stat.txt
12862 ++++ b/tools/perf/Documentation/perf-stat.txt
12863 +@@ -62,6 +62,14 @@ OPTIONS
12864 + --scale::
12865 + scale/normalize counter values
12866 +
12867 ++-d::
12868 ++--detailed::
12869 ++ print more detailed statistics; can be specified up to 3 times
12870 ++
12871 ++ -d: detailed events, L1 and LLC data cache
12872 ++ -d -d: more detailed events, dTLB and iTLB events
12873 ++ -d -d -d: very detailed events, adding prefetch events
12874 ++
12875 + -r::
12876 + --repeat=<n>::
12877 + repeat command and print average + stddev (max: 100). 0 means forever.
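As a usage example (hedged, not taken from the patch): perf stat -d -d -- make -j4 would count the default events plus the L1/LLC data-cache set from the first -d and the dTLB/iTLB set from the second.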
12878 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
12879 +index ff866c4d2e2f..d18a59ab4ed5 100644
12880 +--- a/tools/perf/util/event.c
12881 ++++ b/tools/perf/util/event.c
12882 +@@ -251,7 +251,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
12883 + strcpy(execname, "");
12884 +
12885 + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
12886 +- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
12887 ++ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
12888 + &event->mmap2.start, &event->mmap2.len, prot,
12889 + &event->mmap2.pgoff, &event->mmap2.maj,
12890 + &event->mmap2.min,
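The format-string change fixes truncation of map names that contain spaces: %s stops at the first whitespace character, while %[^\n], preceded by a whitespace directive, consumes the rest of the line. A self-contained userspace illustration:

	#include <stdio.h>

	int main(void)
	{
		char name[256];

		/* with "%s" in place of "%[^\n]", name would be "/tmp/foo" */
		sscanf("0 /tmp/foo bar.so", "%*s %[^\n]", name);
		printf("%s\n", name);	/* prints "/tmp/foo bar.so" */
		return 0;
	}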
12891 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
12892 +index c2f87ff0061d..d93deb5ce4f2 100644
12893 +--- a/virt/kvm/kvm_main.c
12894 ++++ b/virt/kvm/kvm_main.c
12895 +@@ -2622,7 +2622,7 @@ static long kvm_vm_ioctl(struct file *filp,
12896 + if (copy_from_user(&routing, argp, sizeof(routing)))
12897 + goto out;
12898 + r = -EINVAL;
12899 +- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
12900 ++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
12901 + goto out;
12902 + if (routing.flags)
12903 + goto out;
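The final hunk corrects an off-by-one in a bounds check: routing.nr is a count of routing entries and KVM_MAX_IRQ_ROUTES is the maximum permitted count, not a last valid index, so a request for exactly KVM_MAX_IRQ_ROUTES entries is legal and was previously rejected. As a rule of thumb (sketch):

	if (count > MAX_COUNT)		/* counts: the boundary value is valid */
		return -EINVAL;
	if (index >= MAX_COUNT)		/* indexes: the boundary value is not */
		return -EINVAL;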