Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Wed, 13 Jul 2016 23:28:19
Message-Id: 1468452504.51038750b9ff426c91ecb93374c72e1b8438e3e7.mpagano@gentoo
1 commit: 51038750b9ff426c91ecb93374c72e1b8438e3e7
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jul 13 23:28:24 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jul 13 23:28:24 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=51038750
7
8 Linux patch 3.18.37
9
10 0000_README | 4 +
11 1036_linux-3.18.37.patch | 11191 +++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 11195 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index fcefa63..1863a89 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -187,6 +187,10 @@ Patch: 1035_linux-3.18.36.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.18.36
21
22 +Patch: 1036_linux-3.18.37.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.18.37
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1036_linux-3.18.37.patch b/1036_linux-3.18.37.patch
31 new file mode 100644
32 index 0000000..40419a5
33 --- /dev/null
34 +++ b/1036_linux-3.18.37.patch
35 @@ -0,0 +1,11191 @@
36 +diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
37 +index a0c85110a07e..689ab9b9953a 100644
38 +--- a/Documentation/scsi/scsi_eh.txt
39 ++++ b/Documentation/scsi/scsi_eh.txt
40 +@@ -263,19 +263,23 @@ scmd->allowed.
41 +
42 + 3. scmd recovered
43 + ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
44 +- - shost->host_failed--
45 + - clear scmd->eh_eflags
46 + - scsi_setup_cmd_retry()
47 + - move from local eh_work_q to local eh_done_q
48 + LOCKING: none
49 ++ CONCURRENCY: at most one thread per separate eh_work_q to
50 ++ keep queue manipulation lockless
51 +
52 + 4. EH completes
53 + ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
54 +- layer of failure.
55 ++ layer of failure. May be called concurrently but must have
56 ++ no more than one thread per separate eh_work_q to
57 ++ manipulate the queue locklessly
58 + - scmd is removed from eh_done_q and scmd->eh_entry is cleared
59 + - if retry is necessary, scmd is requeued using
60 + scsi_queue_insert()
61 + - otherwise, scsi_finish_command() is invoked for scmd
62 ++ - zero shost->host_failed
63 + LOCKING: queue or finish function performs appropriate locking
64 +
65 +
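The concurrency rules added above reduce to a single-owner discipline: as long as at most one thread ever touches a given eh_work_q, entries can be moved between the work and done lists without any lock. Below is a minimal user-space sketch of that invariant, in plain C with invented names; it is an illustration, not the kernel's actual SCSI EH code.

    /* Toy model of "at most one thread per eh_work_q": each worker owns
     * its queue outright, so moving entries between its work and done
     * lists needs no locking. All names here are invented.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct cmd { int id; struct cmd *next; };

    struct eh_queue {
        struct cmd *work_q;   /* commands awaiting recovery */
        struct cmd *done_q;   /* recovered commands */
    };

    static void *eh_thread(void *arg)
    {
        struct eh_queue *q = arg;

        while (q->work_q) {           /* sole owner: lockless moves */
            struct cmd *c = q->work_q;
            q->work_q = c->next;
            c->next = q->done_q;      /* "EH-finish": work_q -> done_q */
            q->done_q = c;
        }
        return NULL;
    }

    int main(void)
    {
        struct cmd a = { 1, NULL }, b = { 2, &a };
        struct eh_queue q = { &b, NULL };
        pthread_t t;

        pthread_create(&t, NULL, eh_thread, &q);  /* one thread per queue */
        pthread_join(&t, NULL);
        for (struct cmd *c = q.done_q; c; c = c->next)
            printf("cmd %d recovered\n", c->id);
        return 0;
    }

The point is that ownership, not locking, serializes the queue: adding a second thread per queue would immediately require a lock around both list operations, which is exactly what the documentation change forbids.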
66 +diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
67 +index 88152f214f48..302b5ed616a6 100644
68 +--- a/Documentation/sysctl/fs.txt
69 ++++ b/Documentation/sysctl/fs.txt
70 +@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
71 + - nr_open
72 + - overflowuid
73 + - overflowgid
74 ++- pipe-user-pages-hard
75 ++- pipe-user-pages-soft
76 + - protected_hardlinks
77 + - protected_symlinks
78 + - suid_dumpable
79 +@@ -159,6 +161,27 @@ The default is 65534.
80 +
81 + ==============================================================
82 +
83 ++pipe-user-pages-hard:
84 ++
85 ++Maximum total number of pages a non-privileged user may allocate for pipes.
86 ++Once this limit is reached, no new pipes may be allocated until usage goes
87 ++below the limit again. When set to 0, no limit is applied, which is the default
88 ++setting.
89 ++
90 ++==============================================================
91 ++
92 ++pipe-user-pages-soft:
93 ++
94 ++Maximum total number of pages a non-privileged user may allocate for pipes
95 ++before the pipe size gets limited to a single page. Once this limit is reached,
96 ++new pipes will be limited to a single page in size for this user in order to
97 ++limit total memory usage, and trying to increase them using fcntl() will be
98 ++denied until usage goes below the limit again. The default value allows
99 ++allocating up to 1024 pipes at their default size. When set to 0, no limit is
100 ++applied.
101 ++
102 ++==============================================================
103 ++
104 + protected_hardlinks:
105 +
106 + A long-standing class of security issues is the hardlink-based
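The fcntl() denial described above for pipe-user-pages-soft is observable from user space. A minimal sketch follows, assuming an unprivileged process and an arbitrary 1 MiB target size; once the caller is over the soft limit, F_SETPIPE_SZ fails with EPERM.

    /* Illustrative only: probes whether the kernel lets an unprivileged
     * user grow a pipe with F_SETPIPE_SZ, which is denied once the
     * user's total pipe pages exceed pipe-user-pages-soft.
     */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];

        if (pipe(fds)) {
            perror("pipe");  /* pipe() itself can fail past the hard limit */
            return 1;
        }
        /* Ask for a 1 MiB pipe buffer (arbitrary size for this sketch). */
        if (fcntl(fds[0], F_SETPIPE_SZ, 1 << 20) < 0 && errno == EPERM)
            printf("resize denied; soft limit likely exceeded\n");
        else
            printf("pipe buffer is now %d bytes\n",
                   fcntl(fds[0], F_GETPIPE_SZ));
        close(fds[0]);
        close(fds[1]);
        return 0;
    }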
107 +diff --git a/Makefile b/Makefile
108 +index 497f437efebf..e6953a43fe64 100644
109 +--- a/Makefile
110 ++++ b/Makefile
111 +@@ -1,6 +1,6 @@
112 + VERSION = 3
113 + PATCHLEVEL = 18
114 +-SUBLEVEL = 36
115 ++SUBLEVEL = 37
116 + EXTRAVERSION =
117 + NAME = Diseased Newt
118 +
119 +diff --git a/arch/arc/Makefile b/arch/arc/Makefile
120 +index 10bc3d4e8a44..dada919aba27 100644
121 +--- a/arch/arc/Makefile
122 ++++ b/arch/arc/Makefile
123 +@@ -34,7 +34,6 @@ cflags-$(atleast_gcc44) += -fsection-anchors
124 + cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
125 + cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
126 + cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
127 +-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
128 +
129 + # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
130 + ifeq ($(atleast_gcc48),y)
131 +diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
132 +index fb98769b6a98..3e349aefdb9e 100644
133 +--- a/arch/arc/kernel/stacktrace.c
134 ++++ b/arch/arc/kernel/stacktrace.c
135 +@@ -131,7 +131,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
136 + * prelogue is setup (callee regs saved and then fp set and not other
137 + * way around
138 + */
139 +- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
140 ++ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
141 + return 0;
142 +
143 + #endif
144 +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
145 +index f0279411847d..67a251a815f1 100644
146 +--- a/arch/arm/include/asm/pgtable-2level.h
147 ++++ b/arch/arm/include/asm/pgtable-2level.h
148 +@@ -163,6 +163,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
149 +
150 + #define pmd_large(pmd) (pmd_val(pmd) & 2)
151 + #define pmd_bad(pmd) (pmd_val(pmd) & 2)
152 ++#define pmd_present(pmd) (pmd_val(pmd))
153 +
154 + #define copy_pmd(pmdpd,pmdps) \
155 + do { \
156 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
157 +index a31ecdad4b59..b5ef8c7c6220 100644
158 +--- a/arch/arm/include/asm/pgtable-3level.h
159 ++++ b/arch/arm/include/asm/pgtable-3level.h
160 +@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
161 + : !!(pmd_val(pmd) & (val)))
162 + #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
163 +
164 ++#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
165 + #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
166 + #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
167 + static inline pte_t pte_mkspecial(pte_t pte)
168 +@@ -257,8 +258,11 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
169 + #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
170 + #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
171 +
172 +-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
173 +-#define pmd_mknotpresent(pmd) (__pmd(0))
174 ++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
175 ++static inline pmd_t pmd_mknotpresent(pmd_t pmd)
176 ++{
177 ++ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
178 ++}
179 +
180 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
181 + {
182 +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
183 +index 3b30062975b2..e42bbd9ec427 100644
184 +--- a/arch/arm/include/asm/pgtable.h
185 ++++ b/arch/arm/include/asm/pgtable.h
186 +@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
187 + #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
188 +
189 + #define pmd_none(pmd) (!pmd_val(pmd))
190 +-#define pmd_present(pmd) (pmd_val(pmd))
191 +
192 + static inline pte_t *pmd_page_vaddr(pmd_t pmd)
193 + {
194 +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
195 +index ed5834e8e2ac..e8193b987313 100644
196 +--- a/arch/arm/kvm/arm.c
197 ++++ b/arch/arm/kvm/arm.c
198 +@@ -251,6 +251,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
199 + kvm_mmu_free_memory_caches(vcpu);
200 + kvm_timer_vcpu_terminate(vcpu);
201 + kvm_vgic_vcpu_destroy(vcpu);
202 ++ kvm_vcpu_uninit(vcpu);
203 + kmem_cache_free(kvm_vcpu_cache, vcpu);
204 + }
205 +
206 +diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
207 +index e18709d3b95d..38e1bdcaf015 100644
208 +--- a/arch/arm/mach-omap2/cpuidle34xx.c
209 ++++ b/arch/arm/mach-omap2/cpuidle34xx.c
210 +@@ -34,6 +34,7 @@
211 + #include "pm.h"
212 + #include "control.h"
213 + #include "common.h"
214 ++#include "soc.h"
215 +
216 + /* Mach specific information to be recorded in the C-state driver_data */
217 + struct omap3_idle_statedata {
218 +@@ -322,6 +323,69 @@ static struct cpuidle_driver omap3_idle_driver = {
219 + .safe_state_index = 0,
220 + };
221 +
222 ++/*
223 ++ * Numbers based on measurements made in October 2009 for PM optimized kernel
224 ++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
225 ++ * and worst case latencies).
226 ++ */
227 ++static struct cpuidle_driver omap3430_idle_driver = {
228 ++ .name = "omap3430_idle",
229 ++ .owner = THIS_MODULE,
230 ++ .states = {
231 ++ {
232 ++ .enter = omap3_enter_idle_bm,
233 ++ .exit_latency = 110 + 162,
234 ++ .target_residency = 5,
235 ++ .name = "C1",
236 ++ .desc = "MPU ON + CORE ON",
237 ++ },
238 ++ {
239 ++ .enter = omap3_enter_idle_bm,
240 ++ .exit_latency = 106 + 180,
241 ++ .target_residency = 309,
242 ++ .name = "C2",
243 ++ .desc = "MPU ON + CORE ON",
244 ++ },
245 ++ {
246 ++ .enter = omap3_enter_idle_bm,
247 ++ .exit_latency = 107 + 410,
248 ++ .target_residency = 46057,
249 ++ .name = "C3",
250 ++ .desc = "MPU RET + CORE ON",
251 ++ },
252 ++ {
253 ++ .enter = omap3_enter_idle_bm,
254 ++ .exit_latency = 121 + 3374,
255 ++ .target_residency = 46057,
256 ++ .name = "C4",
257 ++ .desc = "MPU OFF + CORE ON",
258 ++ },
259 ++ {
260 ++ .enter = omap3_enter_idle_bm,
261 ++ .exit_latency = 855 + 1146,
262 ++ .target_residency = 46057,
263 ++ .name = "C5",
264 ++ .desc = "MPU RET + CORE RET",
265 ++ },
266 ++ {
267 ++ .enter = omap3_enter_idle_bm,
268 ++ .exit_latency = 7580 + 4134,
269 ++ .target_residency = 484329,
270 ++ .name = "C6",
271 ++ .desc = "MPU OFF + CORE RET",
272 ++ },
273 ++ {
274 ++ .enter = omap3_enter_idle_bm,
275 ++ .exit_latency = 7505 + 15274,
276 ++ .target_residency = 484329,
277 ++ .name = "C7",
278 ++ .desc = "MPU OFF + CORE OFF",
279 ++ },
280 ++ },
281 ++ .state_count = ARRAY_SIZE(omap3_idle_data),
282 ++ .safe_state_index = 0,
283 ++};
284 ++
285 + /* Public functions */
286 +
287 + /**
288 +@@ -340,5 +404,8 @@ int __init omap3_idle_init(void)
289 + if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
290 + return -ENODEV;
291 +
292 +- return cpuidle_register(&omap3_idle_driver, NULL);
293 ++ if (cpu_is_omap3430())
294 ++ return cpuidle_register(&omap3430_idle_driver, NULL);
295 ++ else
296 ++ return cpuidle_register(&omap3_idle_driver, NULL);
297 + }
298 +diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
299 +index ff780a8d8366..9a42736ef4ac 100644
300 +--- a/arch/arm/mach-s3c64xx/dev-audio.c
301 ++++ b/arch/arm/mach-s3c64xx/dev-audio.c
302 +@@ -54,12 +54,12 @@ static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
303 +
304 + static struct resource s3c64xx_iis0_resource[] = {
305 + [0] = DEFINE_RES_MEM(S3C64XX_PA_IIS0, SZ_256),
306 +- [1] = DEFINE_RES_DMA(DMACH_I2S0_OUT),
307 +- [2] = DEFINE_RES_DMA(DMACH_I2S0_IN),
308 + };
309 +
310 +-static struct s3c_audio_pdata i2sv3_pdata = {
311 ++static struct s3c_audio_pdata i2s0_pdata = {
312 + .cfg_gpio = s3c64xx_i2s_cfg_gpio,
313 ++ .dma_playback = DMACH_I2S0_OUT,
314 ++ .dma_capture = DMACH_I2S0_IN,
315 + };
316 +
317 + struct platform_device s3c64xx_device_iis0 = {
318 +@@ -68,15 +68,19 @@ struct platform_device s3c64xx_device_iis0 = {
319 + .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
320 + .resource = s3c64xx_iis0_resource,
321 + .dev = {
322 +- .platform_data = &i2sv3_pdata,
323 ++ .platform_data = &i2s0_pdata,
324 + },
325 + };
326 + EXPORT_SYMBOL(s3c64xx_device_iis0);
327 +
328 + static struct resource s3c64xx_iis1_resource[] = {
329 + [0] = DEFINE_RES_MEM(S3C64XX_PA_IIS1, SZ_256),
330 +- [1] = DEFINE_RES_DMA(DMACH_I2S1_OUT),
331 +- [2] = DEFINE_RES_DMA(DMACH_I2S1_IN),
332 ++};
333 ++
334 ++static struct s3c_audio_pdata i2s1_pdata = {
335 ++ .cfg_gpio = s3c64xx_i2s_cfg_gpio,
336 ++ .dma_playback = DMACH_I2S1_OUT,
337 ++ .dma_capture = DMACH_I2S1_IN,
338 + };
339 +
340 + struct platform_device s3c64xx_device_iis1 = {
341 +@@ -85,19 +89,19 @@ struct platform_device s3c64xx_device_iis1 = {
342 + .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
343 + .resource = s3c64xx_iis1_resource,
344 + .dev = {
345 +- .platform_data = &i2sv3_pdata,
346 ++ .platform_data = &i2s1_pdata,
347 + },
348 + };
349 + EXPORT_SYMBOL(s3c64xx_device_iis1);
350 +
351 + static struct resource s3c64xx_iisv4_resource[] = {
352 + [0] = DEFINE_RES_MEM(S3C64XX_PA_IISV4, SZ_256),
353 +- [1] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_TX),
354 +- [2] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_RX),
355 + };
356 +
357 + static struct s3c_audio_pdata i2sv4_pdata = {
358 + .cfg_gpio = s3c64xx_i2s_cfg_gpio,
359 ++ .dma_playback = DMACH_HSI_I2SV40_TX,
360 ++ .dma_capture = DMACH_HSI_I2SV40_RX,
361 + .type = {
362 + .i2s = {
363 + .quirks = QUIRK_PRI_6CHAN,
364 +@@ -142,12 +146,12 @@ static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
365 +
366 + static struct resource s3c64xx_pcm0_resource[] = {
367 + [0] = DEFINE_RES_MEM(S3C64XX_PA_PCM0, SZ_256),
368 +- [1] = DEFINE_RES_DMA(DMACH_PCM0_TX),
369 +- [2] = DEFINE_RES_DMA(DMACH_PCM0_RX),
370 + };
371 +
372 + static struct s3c_audio_pdata s3c_pcm0_pdata = {
373 + .cfg_gpio = s3c64xx_pcm_cfg_gpio,
374 ++ .dma_capture = DMACH_PCM0_RX,
375 ++ .dma_playback = DMACH_PCM0_TX,
376 + };
377 +
378 + struct platform_device s3c64xx_device_pcm0 = {
379 +@@ -163,12 +167,12 @@ EXPORT_SYMBOL(s3c64xx_device_pcm0);
380 +
381 + static struct resource s3c64xx_pcm1_resource[] = {
382 + [0] = DEFINE_RES_MEM(S3C64XX_PA_PCM1, SZ_256),
383 +- [1] = DEFINE_RES_DMA(DMACH_PCM1_TX),
384 +- [2] = DEFINE_RES_DMA(DMACH_PCM1_RX),
385 + };
386 +
387 + static struct s3c_audio_pdata s3c_pcm1_pdata = {
388 + .cfg_gpio = s3c64xx_pcm_cfg_gpio,
389 ++ .dma_playback = DMACH_PCM1_TX,
390 ++ .dma_capture = DMACH_PCM1_RX,
391 + };
392 +
393 + struct platform_device s3c64xx_device_pcm1 = {
394 +@@ -196,13 +200,14 @@ static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
395 +
396 + static struct resource s3c64xx_ac97_resource[] = {
397 + [0] = DEFINE_RES_MEM(S3C64XX_PA_AC97, SZ_256),
398 +- [1] = DEFINE_RES_DMA(DMACH_AC97_PCMOUT),
399 +- [2] = DEFINE_RES_DMA(DMACH_AC97_PCMIN),
400 +- [3] = DEFINE_RES_DMA(DMACH_AC97_MICIN),
401 +- [4] = DEFINE_RES_IRQ(IRQ_AC97),
402 ++ [1] = DEFINE_RES_IRQ(IRQ_AC97),
403 + };
404 +
405 +-static struct s3c_audio_pdata s3c_ac97_pdata;
406 ++static struct s3c_audio_pdata s3c_ac97_pdata = {
407 ++ .dma_playback = DMACH_AC97_PCMOUT,
408 ++ .dma_capture = DMACH_AC97_PCMIN,
409 ++ .dma_capture_mic = DMACH_AC97_MICIN,
410 ++};
411 +
412 + static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
413 +
414 +diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
415 +index 059b1fc85037..41a304803497 100644
416 +--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
417 ++++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
418 +@@ -14,38 +14,38 @@
419 + #define S3C64XX_DMA_CHAN(name) ((unsigned long)(name))
420 +
421 + /* DMA0/SDMA0 */
422 +-#define DMACH_UART0 S3C64XX_DMA_CHAN("uart0_tx")
423 +-#define DMACH_UART0_SRC2 S3C64XX_DMA_CHAN("uart0_rx")
424 +-#define DMACH_UART1 S3C64XX_DMA_CHAN("uart1_tx")
425 +-#define DMACH_UART1_SRC2 S3C64XX_DMA_CHAN("uart1_rx")
426 +-#define DMACH_UART2 S3C64XX_DMA_CHAN("uart2_tx")
427 +-#define DMACH_UART2_SRC2 S3C64XX_DMA_CHAN("uart2_rx")
428 +-#define DMACH_UART3 S3C64XX_DMA_CHAN("uart3_tx")
429 +-#define DMACH_UART3_SRC2 S3C64XX_DMA_CHAN("uart3_rx")
430 +-#define DMACH_PCM0_TX S3C64XX_DMA_CHAN("pcm0_tx")
431 +-#define DMACH_PCM0_RX S3C64XX_DMA_CHAN("pcm0_rx")
432 +-#define DMACH_I2S0_OUT S3C64XX_DMA_CHAN("i2s0_tx")
433 +-#define DMACH_I2S0_IN S3C64XX_DMA_CHAN("i2s0_rx")
434 ++#define DMACH_UART0 "uart0_tx"
435 ++#define DMACH_UART0_SRC2 "uart0_rx"
436 ++#define DMACH_UART1 "uart1_tx"
437 ++#define DMACH_UART1_SRC2 "uart1_rx"
438 ++#define DMACH_UART2 "uart2_tx"
439 ++#define DMACH_UART2_SRC2 "uart2_rx"
440 ++#define DMACH_UART3 "uart3_tx"
441 ++#define DMACH_UART3_SRC2 "uart3_rx"
442 ++#define DMACH_PCM0_TX "pcm0_tx"
443 ++#define DMACH_PCM0_RX "pcm0_rx"
444 ++#define DMACH_I2S0_OUT "i2s0_tx"
445 ++#define DMACH_I2S0_IN "i2s0_rx"
446 + #define DMACH_SPI0_TX S3C64XX_DMA_CHAN("spi0_tx")
447 + #define DMACH_SPI0_RX S3C64XX_DMA_CHAN("spi0_rx")
448 +-#define DMACH_HSI_I2SV40_TX S3C64XX_DMA_CHAN("i2s2_tx")
449 +-#define DMACH_HSI_I2SV40_RX S3C64XX_DMA_CHAN("i2s2_rx")
450 ++#define DMACH_HSI_I2SV40_TX "i2s2_tx"
451 ++#define DMACH_HSI_I2SV40_RX "i2s2_rx"
452 +
453 + /* DMA1/SDMA1 */
454 +-#define DMACH_PCM1_TX S3C64XX_DMA_CHAN("pcm1_tx")
455 +-#define DMACH_PCM1_RX S3C64XX_DMA_CHAN("pcm1_rx")
456 +-#define DMACH_I2S1_OUT S3C64XX_DMA_CHAN("i2s1_tx")
457 +-#define DMACH_I2S1_IN S3C64XX_DMA_CHAN("i2s1_rx")
458 ++#define DMACH_PCM1_TX "pcm1_tx"
459 ++#define DMACH_PCM1_RX "pcm1_rx"
460 ++#define DMACH_I2S1_OUT "i2s1_tx"
461 ++#define DMACH_I2S1_IN "i2s1_rx"
462 + #define DMACH_SPI1_TX S3C64XX_DMA_CHAN("spi1_tx")
463 + #define DMACH_SPI1_RX S3C64XX_DMA_CHAN("spi1_rx")
464 +-#define DMACH_AC97_PCMOUT S3C64XX_DMA_CHAN("ac97_out")
465 +-#define DMACH_AC97_PCMIN S3C64XX_DMA_CHAN("ac97_in")
466 +-#define DMACH_AC97_MICIN S3C64XX_DMA_CHAN("ac97_mic")
467 +-#define DMACH_PWM S3C64XX_DMA_CHAN("pwm")
468 +-#define DMACH_IRDA S3C64XX_DMA_CHAN("irda")
469 +-#define DMACH_EXTERNAL S3C64XX_DMA_CHAN("external")
470 +-#define DMACH_SECURITY_RX S3C64XX_DMA_CHAN("sec_rx")
471 +-#define DMACH_SECURITY_TX S3C64XX_DMA_CHAN("sec_tx")
472 ++#define DMACH_AC97_PCMOUT "ac97_out"
473 ++#define DMACH_AC97_PCMIN "ac97_in"
474 ++#define DMACH_AC97_MICIN "ac97_mic"
475 ++#define DMACH_PWM "pwm"
476 ++#define DMACH_IRDA "irda"
477 ++#define DMACH_EXTERNAL "external"
478 ++#define DMACH_SECURITY_RX "sec_rx"
479 ++#define DMACH_SECURITY_TX "sec_tx"
480 +
481 + enum dma_ch {
482 + DMACH_MAX = 32
483 +diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
484 +index 83c7d154bde0..8b67db8c1213 100644
485 +--- a/arch/arm/plat-samsung/devs.c
486 ++++ b/arch/arm/plat-samsung/devs.c
487 +@@ -65,6 +65,7 @@
488 + #include <linux/platform_data/usb-ohci-s3c2410.h>
489 + #include <plat/usb-phy.h>
490 + #include <plat/regs-spi.h>
491 ++#include <linux/platform_data/asoc-s3c.h>
492 + #include <linux/platform_data/spi-s3c64xx.h>
493 +
494 + static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
495 +@@ -74,9 +75,12 @@ static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
496 + static struct resource s3c_ac97_resource[] = {
497 + [0] = DEFINE_RES_MEM(S3C2440_PA_AC97, S3C2440_SZ_AC97),
498 + [1] = DEFINE_RES_IRQ(IRQ_S3C244X_AC97),
499 +- [2] = DEFINE_RES_DMA_NAMED(DMACH_PCM_OUT, "PCM out"),
500 +- [3] = DEFINE_RES_DMA_NAMED(DMACH_PCM_IN, "PCM in"),
501 +- [4] = DEFINE_RES_DMA_NAMED(DMACH_MIC_IN, "Mic in"),
502 ++};
503 ++
504 ++static struct s3c_audio_pdata s3c_ac97_pdata = {
505 ++ .dma_playback = (void *)DMACH_PCM_OUT,
506 ++ .dma_capture = (void *)DMACH_PCM_IN,
507 ++ .dma_capture_mic = (void *)DMACH_MIC_IN,
508 + };
509 +
510 + struct platform_device s3c_device_ac97 = {
511 +@@ -87,6 +91,7 @@ struct platform_device s3c_device_ac97 = {
512 + .dev = {
513 + .dma_mask = &samsung_device_dma_mask,
514 + .coherent_dma_mask = DMA_BIT_MASK(32),
515 ++ .platform_data = &s3c_ac97_pdata,
516 + }
517 + };
518 + #endif /* CONFIG_CPU_S3C2440 */
519 +diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
520 +index ce5836c14ec1..6f93c24ca801 100644
521 +--- a/arch/arm64/kernel/pci.c
522 ++++ b/arch/arm64/kernel/pci.c
523 +@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
524 +
525 + return 0;
526 + }
527 +-
528 +-
529 +-#ifdef CONFIG_PCI_DOMAINS_GENERIC
530 +-static bool dt_domain_found = false;
531 +-
532 +-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
533 +-{
534 +- int domain = of_get_pci_domain_nr(parent->of_node);
535 +-
536 +- if (domain >= 0) {
537 +- dt_domain_found = true;
538 +- } else if (dt_domain_found == true) {
539 +- dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
540 +- parent->of_node->full_name);
541 +- return;
542 +- } else {
543 +- domain = pci_get_new_domain_nr();
544 +- }
545 +-
546 +- bus->domain_nr = domain;
547 +-}
548 +-#endif
549 +diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
550 +index b6f14e8d2121..bfb8eb168f2d 100644
551 +--- a/arch/arm64/mm/flush.c
552 ++++ b/arch/arm64/mm/flush.c
553 +@@ -74,10 +74,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
554 + {
555 + struct page *page = pte_page(pte);
556 +
557 +- /* no flushing needed for anonymous pages */
558 +- if (!page_mapping(page))
559 +- return;
560 +-
561 + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
562 + __flush_dcache_area(page_address(page),
563 + PAGE_SIZE << compound_order(page));
564 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
565 +index 1616b56eadfe..b369199d9f39 100644
566 +--- a/arch/mips/include/asm/kvm_host.h
567 ++++ b/arch/mips/include/asm/kvm_host.h
568 +@@ -377,6 +377,7 @@ struct kvm_mips_tlb {
569 + #define KVM_MIPS_GUEST_TLB_SIZE 64
570 + struct kvm_vcpu_arch {
571 + void *host_ebase, *guest_ebase;
572 ++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
573 + unsigned long host_stack;
574 + unsigned long host_gp;
575 +
576 +diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
577 +index f1df4cb4a286..578ece1e4a99 100644
578 +--- a/arch/mips/include/asm/processor.h
579 ++++ b/arch/mips/include/asm/processor.h
580 +@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
581 + * User space process size: 2GB. This is hardcoded into a few places,
582 + * so don't change it unless you know what you are doing.
583 + */
584 +-#define TASK_SIZE 0x7fff8000UL
585 ++#define TASK_SIZE 0x80000000UL
586 + #endif
587 +
588 + #ifdef __KERNEL__
589 +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
590 +index f3b635f86c39..2168355591f5 100644
591 +--- a/arch/mips/kernel/setup.c
592 ++++ b/arch/mips/kernel/setup.c
593 +@@ -685,6 +685,9 @@ static void __init arch_mem_init(char **cmdline_p)
594 + for_each_memblock(reserved, reg)
595 + if (reg->size != 0)
596 + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
597 ++
598 ++ reserve_bootmem_region(__pa_symbol(&__nosave_begin),
599 ++ __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
600 + }
601 +
602 + static void __init resource_init(void)
603 +diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
604 +index 4ab4bdfad703..2143884709e4 100644
605 +--- a/arch/mips/kvm/interrupt.h
606 ++++ b/arch/mips/kvm/interrupt.h
607 +@@ -28,6 +28,7 @@
608 + #define MIPS_EXC_MAX 12
609 + /* XXXSL More to follow */
610 +
611 ++extern char __kvm_mips_vcpu_run_end[];
612 + extern char mips32_exception[], mips32_exceptionEnd[];
613 + extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
614 +
615 +diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
616 +index 4a68b176d6e4..21c257579a06 100644
617 +--- a/arch/mips/kvm/locore.S
618 ++++ b/arch/mips/kvm/locore.S
619 +@@ -231,6 +231,7 @@ FEXPORT(__kvm_mips_load_k0k1)
620 +
621 + /* Jump to guest */
622 + eret
623 ++EXPORT(__kvm_mips_vcpu_run_end)
624 +
625 + VECTOR(MIPSX(exception), unknown)
626 + /* Find out what mode we came from and jump to the proper handler. */
627 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
628 +index a53eaf50c224..26059bf34b1a 100644
629 +--- a/arch/mips/kvm/mips.c
630 ++++ b/arch/mips/kvm/mips.c
631 +@@ -306,6 +306,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
632 + memcpy(gebase + offset, mips32_GuestException,
633 + mips32_GuestExceptionEnd - mips32_GuestException);
634 +
635 ++#ifdef MODULE
636 ++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
637 ++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
638 ++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
639 ++ vcpu->arch.vcpu_run = gebase + offset;
640 ++#else
641 ++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
642 ++#endif
643 ++
644 + /* Invalidate the icache for these ranges */
645 + local_flush_icache_range((unsigned long)gebase,
646 + (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
647 +@@ -392,7 +401,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
648 + /* Disable hardware page table walking while in guest */
649 + htw_stop();
650 +
651 +- r = __kvm_mips_vcpu_run(run, vcpu);
652 ++ r = vcpu->arch.vcpu_run(run, vcpu);
653 +
654 + /* Re-enable HTW before enabling interrupts */
655 + htw_start();
656 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
657 +index 923cd2daba89..5d2ea3f90f72 100644
658 +--- a/arch/powerpc/kernel/process.c
659 ++++ b/arch/powerpc/kernel/process.c
660 +@@ -1224,6 +1224,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
661 + current->thread.regs = regs - 1;
662 + }
663 +
664 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
665 ++ /*
666 ++ * Clear any transactional state, we're exec()ing. The cause is
667 ++ * not important as there will never be a recheckpoint so it's not
668 ++ * user visible.
669 ++ */
670 ++ if (MSR_TM_SUSPENDED(mfmsr()))
671 ++ tm_reclaim_current(0);
672 ++#endif
673 ++
674 + memset(regs->gpr, 0, sizeof(regs->gpr));
675 + regs->ctr = 0;
676 + regs->link = 0;
677 +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
678 +index d2804fd65c4b..7a218ab7e941 100644
679 +--- a/arch/powerpc/kernel/prom.c
680 ++++ b/arch/powerpc/kernel/prom.c
681 +@@ -162,11 +162,12 @@ static struct ibm_pa_feature {
682 + {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
683 + {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
684 + /*
685 +- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
686 +- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
687 +- * which is 0 if the kernel doesn't support TM.
688 ++ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
689 ++ * we don't want to turn on TM here, so we use the *_COMP versions
690 ++ * which are 0 if the kernel doesn't support TM.
691 + */
692 +- {CPU_FTR_TM_COMP, 0, 0, 0, 22, 0, 0},
693 ++ {CPU_FTR_TM_COMP, 0, 0,
694 ++ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
695 + };
696 +
697 + static void __init scan_features(unsigned long node, const unsigned char *ftrs,
698 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
699 +index 39badb9ca0b3..f5ec05984364 100644
700 +--- a/arch/s390/kernel/ipl.c
701 ++++ b/arch/s390/kernel/ipl.c
702 +@@ -2102,13 +2102,6 @@ void s390_reset_system(void (*func)(void *), void *data)
703 + S390_lowcore.program_new_psw.addr =
704 + PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
705 +
706 +- /*
707 +- * Clear subchannel ID and number to signal new kernel that no CCW or
708 +- * SCSI IPL has been done (for kexec and kdump)
709 +- */
710 +- S390_lowcore.subchannel_id = 0;
711 +- S390_lowcore.subchannel_nr = 0;
712 +-
713 + /* Store status at absolute zero */
714 + store_status();
715 +
716 +diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
717 +index 10e9dabc4c41..f0700cfeedd7 100644
718 +--- a/arch/sparc/include/asm/head_64.h
719 ++++ b/arch/sparc/include/asm/head_64.h
720 +@@ -15,6 +15,10 @@
721 +
722 + #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
723 +
724 ++#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
725 ++#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
726 ++#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
727 ++
728 + #define __CHEETAH_ID 0x003e0014
729 + #define __JALAPENO_ID 0x003e0016
730 + #define __SERRANO_ID 0x003e0022
731 +diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
732 +index 71b5a67522ab..781b9f1dbdc2 100644
733 +--- a/arch/sparc/include/asm/ttable.h
734 ++++ b/arch/sparc/include/asm/ttable.h
735 +@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
736 + restored; \
737 + nop; nop; nop; nop; nop; nop; \
738 + nop; nop; nop; nop; nop; \
739 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
740 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
741 ++ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
742 ++ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
743 + ba,a,pt %xcc, user_rtt_fill_fixup;
744 +
745 +
746 +@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
747 + restored; \
748 + nop; nop; nop; nop; nop; \
749 + nop; nop; nop; \
750 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
751 +- ba,a,pt %xcc, user_rtt_fill_fixup; \
752 ++ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
753 ++ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
754 + ba,a,pt %xcc, user_rtt_fill_fixup;
755 +
756 +
757 +diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
758 +index 7cf9c6ea3f1f..fdb13327fded 100644
759 +--- a/arch/sparc/kernel/Makefile
760 ++++ b/arch/sparc/kernel/Makefile
761 +@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
762 + CFLAGS_REMOVE_pcr.o := -pg
763 + endif
764 +
765 ++obj-$(CONFIG_SPARC64) += urtt_fill.o
766 + obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
767 + obj-$(CONFIG_SPARC32) += etrap_32.o
768 + obj-$(CONFIG_SPARC32) += rtrap_32.o
769 +diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
770 +index 4ee1ad420862..655628def68e 100644
771 +--- a/arch/sparc/kernel/cherrs.S
772 ++++ b/arch/sparc/kernel/cherrs.S
773 +@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
774 + subcc %g1, %g2, %g1 ! Next cacheline
775 + bge,pt %icc, 1b
776 + nop
777 +- ba,pt %xcc, dcpe_icpe_tl1_common
778 +- nop
779 ++ ba,a,pt %xcc, dcpe_icpe_tl1_common
780 +
781 + do_dcpe_tl1_fatal:
782 + sethi %hi(1f), %g7
783 +@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
784 + mov 0x2, %o0
785 + call cheetah_plus_parity_error
786 + add %sp, PTREGS_OFF, %o1
787 +- ba,pt %xcc, rtrap
788 +- nop
789 ++ ba,a,pt %xcc, rtrap
790 + .size do_dcpe_tl1,.-do_dcpe_tl1
791 +
792 + .globl do_icpe_tl1
793 +@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
794 + subcc %g1, %g2, %g1
795 + bge,pt %icc, 1b
796 + nop
797 +- ba,pt %xcc, dcpe_icpe_tl1_common
798 +- nop
799 ++ ba,a,pt %xcc, dcpe_icpe_tl1_common
800 +
801 + do_icpe_tl1_fatal:
802 + sethi %hi(1f), %g7
803 +@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
804 + mov 0x3, %o0
805 + call cheetah_plus_parity_error
806 + add %sp, PTREGS_OFF, %o1
807 +- ba,pt %xcc, rtrap
808 +- nop
809 ++ ba,a,pt %xcc, rtrap
810 + .size do_icpe_tl1,.-do_icpe_tl1
811 +
812 + .type dcpe_icpe_tl1_common,#function
813 +@@ -456,7 +452,7 @@ __cheetah_log_error:
814 + cmp %g2, 0x63
815 + be c_cee
816 + nop
817 +- ba,pt %xcc, c_deferred
818 ++ ba,a,pt %xcc, c_deferred
819 + .size __cheetah_log_error,.-__cheetah_log_error
820 +
821 + /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
822 +diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
823 +index 33c02b15f478..a83707c83be8 100644
824 +--- a/arch/sparc/kernel/entry.S
825 ++++ b/arch/sparc/kernel/entry.S
826 +@@ -948,7 +948,24 @@ linux_syscall_trace:
827 + cmp %o0, 0
828 + bne 3f
829 + mov -ENOSYS, %o0
830 ++
831 ++ /* Syscall tracing can modify the registers. */
832 ++ ld [%sp + STACKFRAME_SZ + PT_G1], %g1
833 ++ sethi %hi(sys_call_table), %l7
834 ++ ld [%sp + STACKFRAME_SZ + PT_I0], %i0
835 ++ or %l7, %lo(sys_call_table), %l7
836 ++ ld [%sp + STACKFRAME_SZ + PT_I1], %i1
837 ++ ld [%sp + STACKFRAME_SZ + PT_I2], %i2
838 ++ ld [%sp + STACKFRAME_SZ + PT_I3], %i3
839 ++ ld [%sp + STACKFRAME_SZ + PT_I4], %i4
840 ++ ld [%sp + STACKFRAME_SZ + PT_I5], %i5
841 ++ cmp %g1, NR_syscalls
842 ++ bgeu 3f
843 ++ mov -ENOSYS, %o0
844 ++
845 ++ sll %g1, 2, %l4
846 + mov %i0, %o0
847 ++ ld [%l7 + %l4], %l7
848 + mov %i1, %o1
849 + mov %i2, %o2
850 + mov %i3, %o3
851 +diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
852 +index a6864826a4bd..336d2750fe78 100644
853 +--- a/arch/sparc/kernel/fpu_traps.S
854 ++++ b/arch/sparc/kernel/fpu_traps.S
855 +@@ -100,8 +100,8 @@ do_fpdis:
856 + fmuld %f0, %f2, %f26
857 + faddd %f0, %f2, %f28
858 + fmuld %f0, %f2, %f30
859 +- b,pt %xcc, fpdis_exit
860 +- nop
861 ++ ba,a,pt %xcc, fpdis_exit
862 ++
863 + 2: andcc %g5, FPRS_DU, %g0
864 + bne,pt %icc, 3f
865 + fzero %f32
866 +@@ -144,8 +144,8 @@ do_fpdis:
867 + fmuld %f32, %f34, %f58
868 + faddd %f32, %f34, %f60
869 + fmuld %f32, %f34, %f62
870 +- ba,pt %xcc, fpdis_exit
871 +- nop
872 ++ ba,a,pt %xcc, fpdis_exit
873 ++
874 + 3: mov SECONDARY_CONTEXT, %g3
875 + add %g6, TI_FPREGS, %g1
876 +
877 +@@ -197,8 +197,7 @@ fpdis_exit2:
878 + fp_other_bounce:
879 + call do_fpother
880 + add %sp, PTREGS_OFF, %o0
881 +- ba,pt %xcc, rtrap
882 +- nop
883 ++ ba,a,pt %xcc, rtrap
884 + .size fp_other_bounce,.-fp_other_bounce
885 +
886 + .align 32
887 +diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
888 +index 3d61fcae7ee3..8ff57630a486 100644
889 +--- a/arch/sparc/kernel/head_64.S
890 ++++ b/arch/sparc/kernel/head_64.S
891 +@@ -461,9 +461,8 @@ sun4v_chip_type:
892 + subcc %g3, 1, %g3
893 + bne,pt %xcc, 41b
894 + add %g1, 1, %g1
895 +- mov SUN4V_CHIP_SPARC64X, %g4
896 + ba,pt %xcc, 5f
897 +- nop
898 ++ mov SUN4V_CHIP_SPARC64X, %g4
899 +
900 + 49:
901 + mov SUN4V_CHIP_UNKNOWN, %g4
902 +@@ -548,8 +547,7 @@ sun4u_init:
903 + stxa %g0, [%g7] ASI_DMMU
904 + membar #Sync
905 +
906 +- ba,pt %xcc, sun4u_continue
907 +- nop
908 ++ ba,a,pt %xcc, sun4u_continue
909 +
910 + sun4v_init:
911 + /* Set ctx 0 */
912 +@@ -560,14 +558,12 @@ sun4v_init:
913 + mov SECONDARY_CONTEXT, %g7
914 + stxa %g0, [%g7] ASI_MMU
915 + membar #Sync
916 +- ba,pt %xcc, niagara_tlb_fixup
917 +- nop
918 ++ ba,a,pt %xcc, niagara_tlb_fixup
919 +
920 + sun4u_continue:
921 + BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
922 +
923 +- ba,pt %xcc, spitfire_tlb_fixup
924 +- nop
925 ++ ba,a,pt %xcc, spitfire_tlb_fixup
926 +
927 + niagara_tlb_fixup:
928 + mov 3, %g2 /* Set TLB type to hypervisor. */
929 +@@ -639,8 +635,7 @@ niagara_patch:
930 + call hypervisor_patch_cachetlbops
931 + nop
932 +
933 +- ba,pt %xcc, tlb_fixup_done
934 +- nop
935 ++ ba,a,pt %xcc, tlb_fixup_done
936 +
937 + cheetah_tlb_fixup:
938 + mov 2, %g2 /* Set TLB type to cheetah+. */
939 +@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
940 + call cheetah_patch_cachetlbops
941 + nop
942 +
943 +- ba,pt %xcc, tlb_fixup_done
944 +- nop
945 ++ ba,a,pt %xcc, tlb_fixup_done
946 +
947 + spitfire_tlb_fixup:
948 + /* Set TLB type to spitfire. */
949 +@@ -782,8 +776,7 @@ setup_trap_table:
950 + call %o1
951 + add %sp, (2047 + 128), %o0
952 +
953 +- ba,pt %xcc, 2f
954 +- nop
955 ++ ba,a,pt %xcc, 2f
956 +
957 + 1: sethi %hi(sparc64_ttable_tl0), %o0
958 + set prom_set_trap_table_name, %g2
959 +@@ -822,8 +815,7 @@ setup_trap_table:
960 +
961 + BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
962 +
963 +- ba,pt %xcc, 2f
964 +- nop
965 ++ ba,a,pt %xcc, 2f
966 +
967 + /* Disable STICK_INT interrupts. */
968 + 1:
969 +diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
970 +index 753b4f031bfb..34b4933900bf 100644
971 +--- a/arch/sparc/kernel/misctrap.S
972 ++++ b/arch/sparc/kernel/misctrap.S
973 +@@ -18,8 +18,7 @@ __do_privact:
974 + 109: or %g7, %lo(109b), %g7
975 + call do_privact
976 + add %sp, PTREGS_OFF, %o0
977 +- ba,pt %xcc, rtrap
978 +- nop
979 ++ ba,a,pt %xcc, rtrap
980 + .size __do_privact,.-__do_privact
981 +
982 + .type do_mna,#function
983 +@@ -46,8 +45,7 @@ do_mna:
984 + mov %l5, %o2
985 + call mem_address_unaligned
986 + add %sp, PTREGS_OFF, %o0
987 +- ba,pt %xcc, rtrap
988 +- nop
989 ++ ba,a,pt %xcc, rtrap
990 + .size do_mna,.-do_mna
991 +
992 + .type do_lddfmna,#function
993 +@@ -65,8 +63,7 @@ do_lddfmna:
994 + mov %l5, %o2
995 + call handle_lddfmna
996 + add %sp, PTREGS_OFF, %o0
997 +- ba,pt %xcc, rtrap
998 +- nop
999 ++ ba,a,pt %xcc, rtrap
1000 + .size do_lddfmna,.-do_lddfmna
1001 +
1002 + .type do_stdfmna,#function
1003 +@@ -84,8 +81,7 @@ do_stdfmna:
1004 + mov %l5, %o2
1005 + call handle_stdfmna
1006 + add %sp, PTREGS_OFF, %o0
1007 +- ba,pt %xcc, rtrap
1008 +- nop
1009 ++ ba,a,pt %xcc, rtrap
1010 + .size do_stdfmna,.-do_stdfmna
1011 +
1012 + .type breakpoint_trap,#function
1013 +diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
1014 +index b36365f49478..f9288bf12fea 100644
1015 +--- a/arch/sparc/kernel/pci.c
1016 ++++ b/arch/sparc/kernel/pci.c
1017 +@@ -995,6 +995,23 @@ void pcibios_set_master(struct pci_dev *dev)
1018 + /* No special bus mastering setup handling */
1019 + }
1020 +
1021 ++#ifdef CONFIG_PCI_IOV
1022 ++int pcibios_add_device(struct pci_dev *dev)
1023 ++{
1024 ++ struct pci_dev *pdev;
1025 ++
1026 ++ /* Add SR-IOV arch-specific initialization here.
1027 ++ * Copy dev_archdata from PF to VF
1028 ++ */
1029 ++ if (dev->is_virtfn) {
1030 ++ pdev = dev->physfn;
1031 ++ memcpy(&dev->dev.archdata, &pdev->dev.archdata,
1032 ++ sizeof(struct dev_archdata));
1033 ++ }
1034 ++ return 0;
1035 ++}
1036 ++#endif /* CONFIG_PCI_IOV */
1037 ++
1038 + static int __init pcibios_init(void)
1039 + {
1040 + pci_dfl_cache_line_size = 64 >> 2;
1041 +diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
1042 +index 39f0c662f4c8..8de386dc8150 100644
1043 +--- a/arch/sparc/kernel/rtrap_64.S
1044 ++++ b/arch/sparc/kernel/rtrap_64.S
1045 +@@ -14,10 +14,6 @@
1046 + #include <asm/visasm.h>
1047 + #include <asm/processor.h>
1048 +
1049 +-#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
1050 +-#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
1051 +-#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
1052 +-
1053 + #ifdef CONFIG_CONTEXT_TRACKING
1054 + # define SCHEDULE_USER schedule_user
1055 + #else
1056 +@@ -236,52 +232,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
1057 + wrpr %g1, %cwp
1058 + ba,a,pt %xcc, user_rtt_fill_64bit
1059 +
1060 +-user_rtt_fill_fixup:
1061 +- rdpr %cwp, %g1
1062 +- add %g1, 1, %g1
1063 +- wrpr %g1, 0x0, %cwp
1064 +-
1065 +- rdpr %wstate, %g2
1066 +- sll %g2, 3, %g2
1067 +- wrpr %g2, 0x0, %wstate
1068 +-
1069 +- /* We know %canrestore and %otherwin are both zero. */
1070 +-
1071 +- sethi %hi(sparc64_kern_pri_context), %g2
1072 +- ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
1073 +- mov PRIMARY_CONTEXT, %g1
1074 +-
1075 +-661: stxa %g2, [%g1] ASI_DMMU
1076 +- .section .sun4v_1insn_patch, "ax"
1077 +- .word 661b
1078 +- stxa %g2, [%g1] ASI_MMU
1079 +- .previous
1080 +-
1081 +- sethi %hi(KERNBASE), %g1
1082 +- flush %g1
1083 ++user_rtt_fill_fixup_dax:
1084 ++ ba,pt %xcc, user_rtt_fill_fixup_common
1085 ++ mov 1, %g3
1086 +
1087 +- or %g4, FAULT_CODE_WINFIXUP, %g4
1088 +- stb %g4, [%g6 + TI_FAULT_CODE]
1089 +- stx %g5, [%g6 + TI_FAULT_ADDR]
1090 ++user_rtt_fill_fixup_mna:
1091 ++ ba,pt %xcc, user_rtt_fill_fixup_common
1092 ++ mov 2, %g3
1093 +
1094 +- mov %g6, %l1
1095 +- wrpr %g0, 0x0, %tl
1096 +-
1097 +-661: nop
1098 +- .section .sun4v_1insn_patch, "ax"
1099 +- .word 661b
1100 +- SET_GL(0)
1101 +- .previous
1102 +-
1103 +- wrpr %g0, RTRAP_PSTATE, %pstate
1104 +-
1105 +- mov %l1, %g6
1106 +- ldx [%g6 + TI_TASK], %g4
1107 +- LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
1108 +- call do_sparc64_fault
1109 +- add %sp, PTREGS_OFF, %o0
1110 +- ba,pt %xcc, rtrap
1111 +- nop
1112 ++user_rtt_fill_fixup:
1113 ++ ba,pt %xcc, user_rtt_fill_fixup_common
1114 ++ clr %g3
1115 +
1116 + user_rtt_pre_restore:
1117 + add %g1, 1, %g1
1118 +diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
1119 +index 62deba7be1a9..94646266f0e4 100644
1120 +--- a/arch/sparc/kernel/signal32.c
1121 ++++ b/arch/sparc/kernel/signal32.c
1122 +@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
1123 + return 0;
1124 + }
1125 +
1126 ++/* Checks if the fp is valid. We always build signal frames which are
1127 ++ * 16-byte aligned, therefore we can always enforce that the restore
1128 ++ * frame has that property as well.
1129 ++ */
1130 ++static bool invalid_frame_pointer(void __user *fp, int fplen)
1131 ++{
1132 ++ if ((((unsigned long) fp) & 15) ||
1133 ++ ((unsigned long)fp) > 0x100000000ULL - fplen)
1134 ++ return true;
1135 ++ return false;
1136 ++}
1137 ++
1138 + void do_sigreturn32(struct pt_regs *regs)
1139 + {
1140 + struct signal_frame32 __user *sf;
1141 + compat_uptr_t fpu_save;
1142 + compat_uptr_t rwin_save;
1143 +- unsigned int psr;
1144 ++ unsigned int psr, ufp;
1145 + unsigned pc, npc;
1146 + sigset_t set;
1147 + compat_sigset_t seta;
1148 +@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
1149 + sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
1150 +
1151 + /* 1. Make sure we are not getting garbage from the user */
1152 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
1153 +- (((unsigned long) sf) & 3))
1154 ++ if (invalid_frame_pointer(sf, sizeof(*sf)))
1155 ++ goto segv;
1156 ++
1157 ++ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
1158 ++ goto segv;
1159 ++
1160 ++ if (ufp & 0x7)
1161 + goto segv;
1162 +
1163 +- if (get_user(pc, &sf->info.si_regs.pc) ||
1164 ++ if (__get_user(pc, &sf->info.si_regs.pc) ||
1165 + __get_user(npc, &sf->info.si_regs.npc))
1166 + goto segv;
1167 +
1168 +@@ -227,7 +244,7 @@ segv:
1169 + asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
1170 + {
1171 + struct rt_signal_frame32 __user *sf;
1172 +- unsigned int psr, pc, npc;
1173 ++ unsigned int psr, pc, npc, ufp;
1174 + compat_uptr_t fpu_save;
1175 + compat_uptr_t rwin_save;
1176 + sigset_t set;
1177 +@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
1178 + sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
1179 +
1180 + /* 1. Make sure we are not getting garbage from the user */
1181 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
1182 +- (((unsigned long) sf) & 3))
1183 ++ if (invalid_frame_pointer(sf, sizeof(*sf)))
1184 + goto segv;
1185 +
1186 +- if (get_user(pc, &sf->regs.pc) ||
1187 ++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1188 ++ goto segv;
1189 ++
1190 ++ if (ufp & 0x7)
1191 ++ goto segv;
1192 ++
1193 ++ if (__get_user(pc, &sf->regs.pc) ||
1194 + __get_user(npc, &sf->regs.npc))
1195 + goto segv;
1196 +
1197 +@@ -307,14 +329,6 @@ segv:
1198 + force_sig(SIGSEGV, current);
1199 + }
1200 +
1201 +-/* Checks if the fp is valid */
1202 +-static int invalid_frame_pointer(void __user *fp, int fplen)
1203 +-{
1204 +- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
1205 +- return 1;
1206 +- return 0;
1207 +-}
1208 +-
1209 + static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
1210 + {
1211 + unsigned long sp;
1212 +diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
1213 +index 9ee72fc8e0e4..8492291424ab 100644
1214 +--- a/arch/sparc/kernel/signal_32.c
1215 ++++ b/arch/sparc/kernel/signal_32.c
1216 +@@ -60,10 +60,22 @@ struct rt_signal_frame {
1217 + #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
1218 + #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
1219 +
1220 ++/* Checks if the fp is valid. We always build signal frames which are
1221 ++ * 16-byte aligned, therefore we can always enforce that the restore
1222 ++ * frame has that property as well.
1223 ++ */
1224 ++static inline bool invalid_frame_pointer(void __user *fp, int fplen)
1225 ++{
1226 ++ if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
1227 ++ return true;
1228 ++
1229 ++ return false;
1230 ++}
1231 ++
1232 + asmlinkage void do_sigreturn(struct pt_regs *regs)
1233 + {
1234 ++ unsigned long up_psr, pc, npc, ufp;
1235 + struct signal_frame __user *sf;
1236 +- unsigned long up_psr, pc, npc;
1237 + sigset_t set;
1238 + __siginfo_fpu_t __user *fpu_save;
1239 + __siginfo_rwin_t __user *rwin_save;
1240 +@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
1241 + sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
1242 +
1243 + /* 1. Make sure we are not getting garbage from the user */
1244 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
1245 ++ if (!invalid_frame_pointer(sf, sizeof(*sf)))
1246 ++ goto segv_and_exit;
1247 ++
1248 ++ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
1249 + goto segv_and_exit;
1250 +
1251 +- if (((unsigned long) sf) & 3)
1252 ++ if (ufp & 0x7)
1253 + goto segv_and_exit;
1254 +
1255 + err = __get_user(pc, &sf->info.si_regs.pc);
1256 +@@ -127,7 +142,7 @@ segv_and_exit:
1257 + asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
1258 + {
1259 + struct rt_signal_frame __user *sf;
1260 +- unsigned int psr, pc, npc;
1261 ++ unsigned int psr, pc, npc, ufp;
1262 + __siginfo_fpu_t __user *fpu_save;
1263 + __siginfo_rwin_t __user *rwin_save;
1264 + sigset_t set;
1265 +@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
1266 +
1267 + synchronize_user_stack();
1268 + sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
1269 +- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
1270 +- (((unsigned long) sf) & 0x03))
1271 ++ if (!invalid_frame_pointer(sf, sizeof(*sf)))
1272 ++ goto segv;
1273 ++
1274 ++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1275 ++ goto segv;
1276 ++
1277 ++ if (ufp & 0x7)
1278 + goto segv;
1279 +
1280 + err = __get_user(pc, &sf->regs.pc);
1281 +@@ -178,15 +198,6 @@ segv:
1282 + force_sig(SIGSEGV, current);
1283 + }
1284 +
1285 +-/* Checks if the fp is valid */
1286 +-static inline int invalid_frame_pointer(void __user *fp, int fplen)
1287 +-{
1288 +- if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
1289 +- return 1;
1290 +-
1291 +- return 0;
1292 +-}
1293 +-
1294 + static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
1295 + {
1296 + unsigned long sp = regs->u_regs[UREG_FP];
1297 +diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
1298 +index 1a6999868031..9acf9822cbbd 100644
1299 +--- a/arch/sparc/kernel/signal_64.c
1300 ++++ b/arch/sparc/kernel/signal_64.c
1301 +@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
1302 + unsigned char fenab;
1303 + int err;
1304 +
1305 +- flush_user_windows();
1306 ++ synchronize_user_stack();
1307 + if (get_thread_wsaved() ||
1308 + (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
1309 + (!__access_ok(ucp, sizeof(*ucp))))
1310 +@@ -234,6 +234,17 @@ do_sigsegv:
1311 + goto out;
1312 + }
1313 +
1314 ++/* Checks if the fp is valid. We always build rt signal frames which
1315 ++ * are 16-byte aligned, therefore we can always enforce that the
1316 ++ * restore frame has that property as well.
1317 ++ */
1318 ++static bool invalid_frame_pointer(void __user *fp)
1319 ++{
1320 ++ if (((unsigned long) fp) & 15)
1321 ++ return true;
1322 ++ return false;
1323 ++}
1324 ++
1325 + struct rt_signal_frame {
1326 + struct sparc_stackf ss;
1327 + siginfo_t info;
1328 +@@ -246,8 +257,8 @@ struct rt_signal_frame {
1329 +
1330 + void do_rt_sigreturn(struct pt_regs *regs)
1331 + {
1332 ++ unsigned long tpc, tnpc, tstate, ufp;
1333 + struct rt_signal_frame __user *sf;
1334 +- unsigned long tpc, tnpc, tstate;
1335 + __siginfo_fpu_t __user *fpu_save;
1336 + __siginfo_rwin_t __user *rwin_save;
1337 + sigset_t set;
1338 +@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
1339 + (regs->u_regs [UREG_FP] + STACK_BIAS);
1340 +
1341 + /* 1. Make sure we are not getting garbage from the user */
1342 +- if (((unsigned long) sf) & 3)
1343 ++ if (invalid_frame_pointer(sf))
1344 ++ goto segv;
1345 ++
1346 ++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1347 + goto segv;
1348 +
1349 +- err = get_user(tpc, &sf->regs.tpc);
1350 ++ if ((ufp + STACK_BIAS) & 0x7)
1351 ++ goto segv;
1352 ++
1353 ++ err = __get_user(tpc, &sf->regs.tpc);
1354 + err |= __get_user(tnpc, &sf->regs.tnpc);
1355 + if (test_thread_flag(TIF_32BIT)) {
1356 + tpc &= 0xffffffff;
1357 +@@ -308,14 +325,6 @@ segv:
1358 + force_sig(SIGSEGV, current);
1359 + }
1360 +
1361 +-/* Checks if the fp is valid */
1362 +-static int invalid_frame_pointer(void __user *fp)
1363 +-{
1364 +- if (((unsigned long) fp) & 15)
1365 +- return 1;
1366 +- return 0;
1367 +-}
1368 +-
1369 + static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
1370 + {
1371 + unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
1372 +diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
1373 +index 0f6eebe71e6c..e5fe8cef9a69 100644
1374 +--- a/arch/sparc/kernel/sigutil_32.c
1375 ++++ b/arch/sparc/kernel/sigutil_32.c
1376 +@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1377 + int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1378 + {
1379 + int err;
1380 ++
1381 ++ if (((unsigned long) fpu) & 3)
1382 ++ return -EFAULT;
1383 ++
1384 + #ifdef CONFIG_SMP
1385 + if (test_tsk_thread_flag(current, TIF_USEDFPU))
1386 + regs->psr &= ~PSR_EF;
1387 +@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
1388 + struct thread_info *t = current_thread_info();
1389 + int i, wsaved, err;
1390 +
1391 +- __get_user(wsaved, &rp->wsaved);
1392 ++ if (((unsigned long) rp) & 3)
1393 ++ return -EFAULT;
1394 ++
1395 ++ get_user(wsaved, &rp->wsaved);
1396 + if (wsaved > NSWINS)
1397 + return -EFAULT;
1398 +
1399 +diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
1400 +index 387834a9c56a..36aadcbeac69 100644
1401 +--- a/arch/sparc/kernel/sigutil_64.c
1402 ++++ b/arch/sparc/kernel/sigutil_64.c
1403 +@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1404 + unsigned long fprs;
1405 + int err;
1406 +
1407 +- err = __get_user(fprs, &fpu->si_fprs);
1408 ++ if (((unsigned long) fpu) & 7)
1409 ++ return -EFAULT;
1410 ++
1411 ++ err = get_user(fprs, &fpu->si_fprs);
1412 + fprs_write(0);
1413 + regs->tstate &= ~TSTATE_PEF;
1414 + if (fprs & FPRS_DL)
1415 +@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
1416 + struct thread_info *t = current_thread_info();
1417 + int i, wsaved, err;
1418 +
1419 +- __get_user(wsaved, &rp->wsaved);
1420 ++ if (((unsigned long) rp) & 7)
1421 ++ return -EFAULT;
1422 ++
1423 ++ get_user(wsaved, &rp->wsaved);
1424 + if (wsaved > NSWINS)
1425 + return -EFAULT;
1426 +
1427 +diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
1428 +index c357e40ffd01..4a73009f66a5 100644
1429 +--- a/arch/sparc/kernel/spiterrs.S
1430 ++++ b/arch/sparc/kernel/spiterrs.S
1431 +@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
1432 + ba,pt %xcc, etraptl1
1433 + rd %pc, %g7
1434 +
1435 +- ba,pt %xcc, 2f
1436 +- nop
1437 ++ ba,a,pt %xcc, 2f
1438 +
1439 + 1: ba,pt %xcc, etrap_irq
1440 + rd %pc, %g7
1441 +@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
1442 + mov %l5, %o2
1443 + call spitfire_access_error
1444 + add %sp, PTREGS_OFF, %o0
1445 +- ba,pt %xcc, rtrap
1446 +- nop
1447 ++ ba,a,pt %xcc, rtrap
1448 + .size __spitfire_access_error,.-__spitfire_access_error
1449 +
1450 + /* This is the trap handler entry point for ECC correctable
1451 +@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
1452 + mov %l5, %o2
1453 + call spitfire_data_access_exception_tl1
1454 + add %sp, PTREGS_OFF, %o0
1455 +- ba,pt %xcc, rtrap
1456 +- nop
1457 ++ ba,a,pt %xcc, rtrap
1458 + .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
1459 +
1460 + .type __spitfire_data_access_exception,#function
1461 +@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
1462 + mov %l5, %o2
1463 + call spitfire_data_access_exception
1464 + add %sp, PTREGS_OFF, %o0
1465 +- ba,pt %xcc, rtrap
1466 +- nop
1467 ++ ba,a,pt %xcc, rtrap
1468 + .size __spitfire_data_access_exception,.-__spitfire_data_access_exception
1469 +
1470 + .type __spitfire_insn_access_exception_tl1,#function
1471 +@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
1472 + mov %l5, %o2
1473 + call spitfire_insn_access_exception_tl1
1474 + add %sp, PTREGS_OFF, %o0
1475 +- ba,pt %xcc, rtrap
1476 +- nop
1477 ++ ba,a,pt %xcc, rtrap
1478 + .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
1479 +
1480 + .type __spitfire_insn_access_exception,#function
1481 +@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
1482 + mov %l5, %o2
1483 + call spitfire_insn_access_exception
1484 + add %sp, PTREGS_OFF, %o0
1485 +- ba,pt %xcc, rtrap
1486 +- nop
1487 ++ ba,a,pt %xcc, rtrap
1488 + .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
1489 +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
1490 +index 33a17e7b3ccd..6ec7531f27fc 100644
1491 +--- a/arch/sparc/kernel/syscalls.S
1492 ++++ b/arch/sparc/kernel/syscalls.S
1493 +@@ -148,7 +148,25 @@ linux_syscall_trace32:
1494 + add %sp, PTREGS_OFF, %o0
1495 + brnz,pn %o0, 3f
1496 + mov -ENOSYS, %o0
1497 ++
1498 ++ /* Syscall tracing can modify the registers. */
1499 ++ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
1500 ++ sethi %hi(sys_call_table32), %l7
1501 ++ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
1502 ++ or %l7, %lo(sys_call_table32), %l7
1503 ++ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
1504 ++ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
1505 ++ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
1506 ++ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
1507 ++ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
1508 ++
1509 ++ cmp %g1, NR_syscalls
1510 ++ bgeu,pn %xcc, 3f
1511 ++ mov -ENOSYS, %o0
1512 ++
1513 ++ sll %g1, 2, %l4
1514 + srl %i0, 0, %o0
1515 ++ lduw [%l7 + %l4], %l7
1516 + srl %i4, 0, %o4
1517 + srl %i1, 0, %o1
1518 + srl %i2, 0, %o2
1519 +@@ -160,7 +178,25 @@ linux_syscall_trace:
1520 + add %sp, PTREGS_OFF, %o0
1521 + brnz,pn %o0, 3f
1522 + mov -ENOSYS, %o0
1523 ++
1524 ++ /* Syscall tracing can modify the registers. */
1525 ++ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
1526 ++ sethi %hi(sys_call_table64), %l7
1527 ++ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
1528 ++ or %l7, %lo(sys_call_table64), %l7
1529 ++ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
1530 ++ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
1531 ++ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
1532 ++ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
1533 ++ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
1534 ++
1535 ++ cmp %g1, NR_syscalls
1536 ++ bgeu,pn %xcc, 3f
1537 ++ mov -ENOSYS, %o0
1538 ++
1539 ++ sll %g1, 2, %l4
1540 + mov %i0, %o0
1541 ++ lduw [%l7 + %l4], %l7
1542 + mov %i1, %o1
1543 + mov %i2, %o2
1544 + mov %i3, %o3
1545 +diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
1546 +new file mode 100644
1547 +index 000000000000..5604a2b051d4
1548 +--- /dev/null
1549 ++++ b/arch/sparc/kernel/urtt_fill.S
1550 +@@ -0,0 +1,98 @@
1551 ++#include <asm/thread_info.h>
1552 ++#include <asm/trap_block.h>
1553 ++#include <asm/spitfire.h>
1554 ++#include <asm/ptrace.h>
1555 ++#include <asm/head.h>
1556 ++
1557 ++ .text
1558 ++ .align 8
1559 ++ .globl user_rtt_fill_fixup_common
1560 ++user_rtt_fill_fixup_common:
1561 ++ rdpr %cwp, %g1
1562 ++ add %g1, 1, %g1
1563 ++ wrpr %g1, 0x0, %cwp
1564 ++
1565 ++ rdpr %wstate, %g2
1566 ++ sll %g2, 3, %g2
1567 ++ wrpr %g2, 0x0, %wstate
1568 ++
1569 ++ /* We know %canrestore and %otherwin are both zero. */
1570 ++
1571 ++ sethi %hi(sparc64_kern_pri_context), %g2
1572 ++ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
1573 ++ mov PRIMARY_CONTEXT, %g1
1574 ++
1575 ++661: stxa %g2, [%g1] ASI_DMMU
1576 ++ .section .sun4v_1insn_patch, "ax"
1577 ++ .word 661b
1578 ++ stxa %g2, [%g1] ASI_MMU
1579 ++ .previous
1580 ++
1581 ++ sethi %hi(KERNBASE), %g1
1582 ++ flush %g1
1583 ++
1584 ++ mov %g4, %l4
1585 ++ mov %g5, %l5
1586 ++ brnz,pn %g3, 1f
1587 ++ mov %g3, %l3
1588 ++
1589 ++ or %g4, FAULT_CODE_WINFIXUP, %g4
1590 ++ stb %g4, [%g6 + TI_FAULT_CODE]
1591 ++ stx %g5, [%g6 + TI_FAULT_ADDR]
1592 ++1:
1593 ++ mov %g6, %l1
1594 ++ wrpr %g0, 0x0, %tl
1595 ++
1596 ++661: nop
1597 ++ .section .sun4v_1insn_patch, "ax"
1598 ++ .word 661b
1599 ++ SET_GL(0)
1600 ++ .previous
1601 ++
1602 ++ wrpr %g0, RTRAP_PSTATE, %pstate
1603 ++
1604 ++ mov %l1, %g6
1605 ++ ldx [%g6 + TI_TASK], %g4
1606 ++ LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
1607 ++
1608 ++ brnz,pn %l3, 1f
1609 ++ nop
1610 ++
1611 ++ call do_sparc64_fault
1612 ++ add %sp, PTREGS_OFF, %o0
1613 ++ ba,pt %xcc, rtrap
1614 ++ nop
1615 ++
1616 ++1: cmp %g3, 2
1617 ++ bne,pn %xcc, 2f
1618 ++ nop
1619 ++
1620 ++ sethi %hi(tlb_type), %g1
1621 ++ lduw [%g1 + %lo(tlb_type)], %g1
1622 ++ cmp %g1, 3
1623 ++ bne,pt %icc, 1f
1624 ++ add %sp, PTREGS_OFF, %o0
1625 ++ mov %l4, %o2
1626 ++ call sun4v_do_mna
1627 ++ mov %l5, %o1
1628 ++ ba,a,pt %xcc, rtrap
1629 ++1: mov %l4, %o1
1630 ++ mov %l5, %o2
1631 ++ call mem_address_unaligned
1632 ++ nop
1633 ++ ba,a,pt %xcc, rtrap
1634 ++
1635 ++2: sethi %hi(tlb_type), %g1
1636 ++ mov %l4, %o1
1637 ++ lduw [%g1 + %lo(tlb_type)], %g1
1638 ++ mov %l5, %o2
1639 ++ cmp %g1, 3
1640 ++ bne,pt %icc, 1f
1641 ++ add %sp, PTREGS_OFF, %o0
1642 ++ call sun4v_data_access_exception
1643 ++ nop
1644 ++ ba,a,pt %xcc, rtrap
1645 ++
1646 ++1: call spitfire_data_access_exception
1647 ++ nop
1648 ++ ba,a,pt %xcc, rtrap
1649 +diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
1650 +index b7f0f3f3a909..c731e8023d3e 100644
1651 +--- a/arch/sparc/kernel/utrap.S
1652 ++++ b/arch/sparc/kernel/utrap.S
1653 +@@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */
1654 + mov %l4, %o1
1655 + call bad_trap
1656 + add %sp, PTREGS_OFF, %o0
1657 +- ba,pt %xcc, rtrap
1658 +- nop
1659 ++ ba,a,pt %xcc, rtrap
1660 +
1661 + invoke_utrap:
1662 + sllx %g3, 3, %g3
1663 +diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
1664 +index 09243057cb0b..7028b4dab903 100644
1665 +--- a/arch/sparc/kernel/vmlinux.lds.S
1666 ++++ b/arch/sparc/kernel/vmlinux.lds.S
1667 +@@ -33,6 +33,10 @@ ENTRY(_start)
1668 + jiffies = jiffies_64;
1669 + #endif
1670 +
1671 ++#ifdef CONFIG_SPARC64
1672 ++ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
1673 ++#endif
1674 ++
1675 + SECTIONS
1676 + {
1677 + #ifdef CONFIG_SPARC64
1678 +diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
1679 +index 1e67ce958369..855019a8590e 100644
1680 +--- a/arch/sparc/kernel/winfixup.S
1681 ++++ b/arch/sparc/kernel/winfixup.S
1682 +@@ -32,8 +32,7 @@ fill_fixup:
1683 + rd %pc, %g7
1684 + call do_sparc64_fault
1685 + add %sp, PTREGS_OFF, %o0
1686 +- ba,pt %xcc, rtrap
1687 +- nop
1688 ++ ba,a,pt %xcc, rtrap
1689 +
1690 + /* Be very careful about usage of the trap globals here.
1691 + * You cannot touch %g5 as that has the fault information.
1692 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1693 +index 2d91c62f7f5f..7dd57626da19 100644
1694 +--- a/arch/sparc/mm/init_64.c
1695 ++++ b/arch/sparc/mm/init_64.c
1696 +@@ -1300,10 +1300,18 @@ static int __init numa_parse_sun4u(void)
1697 +
1698 + static int __init bootmem_init_numa(void)
1699 + {
1700 ++ int i, j;
1701 + int err = -1;
1702 +
1703 + numadbg("bootmem_init_numa()\n");
1704 +
1705 ++ /* Some sane defaults for numa latency values */
1706 ++ for (i = 0; i < MAX_NUMNODES; i++) {
1707 ++ for (j = 0; j < MAX_NUMNODES; j++)
1708 ++ numa_latency[i][j] = (i == j) ?
1709 ++ LOCAL_DISTANCE : REMOTE_DISTANCE;
1710 ++ }
1711 ++
1712 + if (numa_enabled) {
1713 + if (tlb_type == hypervisor)
1714 + err = numa_parse_mdesc();
1715 +@@ -2730,9 +2738,10 @@ void hugetlb_setup(struct pt_regs *regs)
1716 + * the Data-TLB for huge pages.
1717 + */
1718 + if (tlb_type == cheetah_plus) {
1719 ++ bool need_context_reload = false;
1720 + unsigned long ctx;
1721 +
1722 +- spin_lock(&ctx_alloc_lock);
1723 ++ spin_lock_irq(&ctx_alloc_lock);
1724 + ctx = mm->context.sparc64_ctx_val;
1725 + ctx &= ~CTX_PGSZ_MASK;
1726 + ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
1727 +@@ -2751,9 +2760,12 @@ void hugetlb_setup(struct pt_regs *regs)
1728 + * also executing in this address space.
1729 + */
1730 + mm->context.sparc64_ctx_val = ctx;
1731 +- on_each_cpu(context_reload, mm, 0);
1732 ++ need_context_reload = true;
1733 + }
1734 +- spin_unlock(&ctx_alloc_lock);
1735 ++ spin_unlock_irq(&ctx_alloc_lock);
1736 ++
1737 ++ if (need_context_reload)
1738 ++ on_each_cpu(context_reload, mm, 0);
1739 + }
1740 + }
1741 + #endif
1742 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
1743 +index f2e281cf8c19..a78aa118afc2 100644
1744 +--- a/arch/x86/kernel/kprobes/core.c
1745 ++++ b/arch/x86/kernel/kprobes/core.c
1746 +@@ -931,7 +931,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
1747 + * normal page fault.
1748 + */
1749 + regs->ip = (unsigned long)cur->addr;
1750 ++ /*
1751 ++ * Trap flag (TF) has been set here because this fault
1752 ++ * happened where the single stepping will be done.
1753 ++ * So clear it by resetting the current kprobe:
1754 ++ */
1755 ++ regs->flags &= ~X86_EFLAGS_TF;
1756 ++
1757 ++ /*
1758 ++ * If the TF flag was set before the kprobe hit,
1759 ++ * don't touch it:
1760 ++ */
1761 + regs->flags |= kcb->kprobe_old_flags;
1762 ++
1763 + if (kcb->kprobe_status == KPROBE_REENTER)
1764 + restore_previous_kprobe(kcb);
1765 + else
1766 +diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
1767 +index 637ab34ed632..ddb2244b06a1 100644
1768 +--- a/arch/x86/mm/kmmio.c
1769 ++++ b/arch/x86/mm/kmmio.c
1770 +@@ -33,7 +33,7 @@
1771 + struct kmmio_fault_page {
1772 + struct list_head list;
1773 + struct kmmio_fault_page *release_next;
1774 +- unsigned long page; /* location of the fault page */
1775 ++ unsigned long addr; /* the requested address */
1776 + pteval_t old_presence; /* page presence prior to arming */
1777 + bool armed;
1778 +
1779 +@@ -70,9 +70,16 @@ unsigned int kmmio_count;
1780 + static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
1781 + static LIST_HEAD(kmmio_probes);
1782 +
1783 +-static struct list_head *kmmio_page_list(unsigned long page)
1784 ++static struct list_head *kmmio_page_list(unsigned long addr)
1785 + {
1786 +- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
1787 ++ unsigned int l;
1788 ++ pte_t *pte = lookup_address(addr, &l);
1789 ++
1790 ++ if (!pte)
1791 ++ return NULL;
1792 ++ addr &= page_level_mask(l);
1793 ++
1794 ++ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
1795 + }
1796 +
1797 + /* Accessed per-cpu */
1798 +@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
1799 + }
1800 +
1801 + /* You must be holding RCU read lock. */
1802 +-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
1803 ++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
1804 + {
1805 + struct list_head *head;
1806 + struct kmmio_fault_page *f;
1807 ++ unsigned int l;
1808 ++ pte_t *pte = lookup_address(addr, &l);
1809 +
1810 +- page &= PAGE_MASK;
1811 +- head = kmmio_page_list(page);
1812 ++ if (!pte)
1813 ++ return NULL;
1814 ++ addr &= page_level_mask(l);
1815 ++ head = kmmio_page_list(addr);
1816 + list_for_each_entry_rcu(f, head, list) {
1817 +- if (f->page == page)
1818 ++ if (f->addr == addr)
1819 + return f;
1820 + }
1821 + return NULL;
1822 +@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
1823 + static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
1824 + {
1825 + unsigned int level;
1826 +- pte_t *pte = lookup_address(f->page, &level);
1827 ++ pte_t *pte = lookup_address(f->addr, &level);
1828 +
1829 + if (!pte) {
1830 +- pr_err("no pte for page 0x%08lx\n", f->page);
1831 ++ pr_err("no pte for addr 0x%08lx\n", f->addr);
1832 + return -1;
1833 + }
1834 +
1835 +@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
1836 + return -1;
1837 + }
1838 +
1839 +- __flush_tlb_one(f->page);
1840 ++ __flush_tlb_one(f->addr);
1841 + return 0;
1842 + }
1843 +
1844 +@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
1845 + int ret;
1846 + WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
1847 + if (f->armed) {
1848 +- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
1849 +- f->page, f->count, !!f->old_presence);
1850 ++ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
1851 ++ f->addr, f->count, !!f->old_presence);
1852 + }
1853 + ret = clear_page_presence(f, true);
1854 +- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
1855 +- f->page);
1856 ++ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
1857 ++ f->addr);
1858 + f->armed = true;
1859 + return ret;
1860 + }
1861 +@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
1862 + {
1863 + int ret = clear_page_presence(f, false);
1864 + WARN_ONCE(ret < 0,
1865 +- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
1866 ++ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
1867 + f->armed = false;
1868 + }
1869 +
1870 +@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
1871 + struct kmmio_context *ctx;
1872 + struct kmmio_fault_page *faultpage;
1873 + int ret = 0; /* default to fault not handled */
1874 ++ unsigned long page_base = addr;
1875 ++ unsigned int l;
1876 ++ pte_t *pte = lookup_address(addr, &l);
1877 ++ if (!pte)
1878 ++ return -EINVAL;
1879 ++ page_base &= page_level_mask(l);
1880 +
1881 + /*
1882 + * Preemption is now disabled to prevent process switch during
1883 +@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
1884 + preempt_disable();
1885 + rcu_read_lock();
1886 +
1887 +- faultpage = get_kmmio_fault_page(addr);
1888 ++ faultpage = get_kmmio_fault_page(page_base);
1889 + if (!faultpage) {
1890 + /*
1891 + * Either this page fault is not caused by kmmio, or
1892 +@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
1893 +
1894 + ctx = &get_cpu_var(kmmio_ctx);
1895 + if (ctx->active) {
1896 +- if (addr == ctx->addr) {
1897 ++ if (page_base == ctx->addr) {
1898 + /*
1899 + * A second fault on the same page means some other
1900 + * condition needs handling by do_page_fault(), the
1901 +@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
1902 + ctx->active++;
1903 +
1904 + ctx->fpage = faultpage;
1905 +- ctx->probe = get_kmmio_probe(addr);
1906 ++ ctx->probe = get_kmmio_probe(page_base);
1907 + ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
1908 +- ctx->addr = addr;
1909 ++ ctx->addr = page_base;
1910 +
1911 + if (ctx->probe && ctx->probe->pre_handler)
1912 + ctx->probe->pre_handler(ctx->probe, regs, addr);
1913 +@@ -354,12 +371,11 @@ out:
1914 + }
1915 +
1916 + /* You must be holding kmmio_lock. */
1917 +-static int add_kmmio_fault_page(unsigned long page)
1918 ++static int add_kmmio_fault_page(unsigned long addr)
1919 + {
1920 + struct kmmio_fault_page *f;
1921 +
1922 +- page &= PAGE_MASK;
1923 +- f = get_kmmio_fault_page(page);
1924 ++ f = get_kmmio_fault_page(addr);
1925 + if (f) {
1926 + if (!f->count)
1927 + arm_kmmio_fault_page(f);
1928 +@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
1929 + return -1;
1930 +
1931 + f->count = 1;
1932 +- f->page = page;
1933 ++ f->addr = addr;
1934 +
1935 + if (arm_kmmio_fault_page(f)) {
1936 + kfree(f);
1937 + return -1;
1938 + }
1939 +
1940 +- list_add_rcu(&f->list, kmmio_page_list(f->page));
1941 ++ list_add_rcu(&f->list, kmmio_page_list(f->addr));
1942 +
1943 + return 0;
1944 + }
1945 +
1946 + /* You must be holding kmmio_lock. */
1947 +-static void release_kmmio_fault_page(unsigned long page,
1948 ++static void release_kmmio_fault_page(unsigned long addr,
1949 + struct kmmio_fault_page **release_list)
1950 + {
1951 + struct kmmio_fault_page *f;
1952 +
1953 +- page &= PAGE_MASK;
1954 +- f = get_kmmio_fault_page(page);
1955 ++ f = get_kmmio_fault_page(addr);
1956 + if (!f)
1957 + return;
1958 +
1959 +@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
1960 + int ret = 0;
1961 + unsigned long size = 0;
1962 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
1963 ++ unsigned int l;
1964 ++ pte_t *pte;
1965 +
1966 + spin_lock_irqsave(&kmmio_lock, flags);
1967 + if (get_kmmio_probe(p->addr)) {
1968 + ret = -EEXIST;
1969 + goto out;
1970 + }
1971 ++
1972 ++ pte = lookup_address(p->addr, &l);
1973 ++ if (!pte) {
1974 ++ ret = -EINVAL;
1975 ++ goto out;
1976 ++ }
1977 ++
1978 + kmmio_count++;
1979 + list_add_rcu(&p->list, &kmmio_probes);
1980 + while (size < size_lim) {
1981 + if (add_kmmio_fault_page(p->addr + size))
1982 + pr_err("Unable to set page fault.\n");
1983 +- size += PAGE_SIZE;
1984 ++ size += page_level_size(l);
1985 + }
1986 + out:
1987 + spin_unlock_irqrestore(&kmmio_lock, flags);
1988 +@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
1989 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
1990 + struct kmmio_fault_page *release_list = NULL;
1991 + struct kmmio_delayed_release *drelease;
1992 ++ unsigned int l;
1993 ++ pte_t *pte;
1994 ++
1995 ++ pte = lookup_address(p->addr, &l);
1996 ++ if (!pte)
1997 ++ return;
1998 +
1999 + spin_lock_irqsave(&kmmio_lock, flags);
2000 + while (size < size_lim) {
2001 + release_kmmio_fault_page(p->addr + size, &release_list);
2002 +- size += PAGE_SIZE;
2003 ++ size += page_level_size(l);
2004 + }
2005 + list_del_rcu(&p->list);
2006 + kmmio_count--;
2007 +diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
2008 +index 1d29376072da..841a04c1b258 100644
2009 +--- a/crypto/asymmetric_keys/pkcs7_trust.c
2010 ++++ b/crypto/asymmetric_keys/pkcs7_trust.c
2011 +@@ -174,6 +174,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
2012 + int cached_ret = -ENOKEY;
2013 + int ret;
2014 +
2015 ++ *_trusted = false;
2016 ++
2017 + for (p = pkcs7->certs; p; p = p->next)
2018 + p->seen = false;
2019 +
2020 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
2021 +index 4ec95b76f6a1..0550c76f4e6c 100644
2022 +--- a/drivers/ata/libata-eh.c
2023 ++++ b/drivers/ata/libata-eh.c
2024 +@@ -605,7 +605,7 @@ void ata_scsi_error(struct Scsi_Host *host)
2025 + ata_scsi_port_error_handler(host, ap);
2026 +
2027 + /* finish or retry handled scmd's and clean up */
2028 +- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
2029 ++ WARN_ON(!list_empty(&eh_work_q));
2030 +
2031 + DPRINTK("EXIT\n");
2032 + }
2033 +diff --git a/drivers/base/module.c b/drivers/base/module.c
2034 +index db930d3ee312..2a215780eda2 100644
2035 +--- a/drivers/base/module.c
2036 ++++ b/drivers/base/module.c
2037 +@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
2038 +
2039 + static void module_create_drivers_dir(struct module_kobject *mk)
2040 + {
2041 +- if (!mk || mk->drivers_dir)
2042 +- return;
2043 ++ static DEFINE_MUTEX(drivers_dir_mutex);
2044 +
2045 +- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
2046 ++ mutex_lock(&drivers_dir_mutex);
2047 ++ if (mk && !mk->drivers_dir)
2048 ++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
2049 ++ mutex_unlock(&drivers_dir_mutex);
2050 + }
2051 +
2052 + void module_add_driver(struct module *mod, struct device_driver *drv)
2053 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
2054 +index d7e0b9b806e9..93e542cb4911 100644
2055 +--- a/drivers/block/mtip32xx/mtip32xx.c
2056 ++++ b/drivers/block/mtip32xx/mtip32xx.c
2057 +@@ -2946,9 +2946,15 @@ static int mtip_service_thread(void *data)
2058 + * is in progress nor error handling is active
2059 + */
2060 + wait_event_interruptible(port->svc_wait, (port->flags) &&
2061 +- !(port->flags & MTIP_PF_PAUSE_IO));
2062 ++ (port->flags & MTIP_PF_SVC_THD_WORK));
2063 +
2064 +- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2065 ++ if (kthread_should_stop() ||
2066 ++ test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2067 ++ goto st_out;
2068 ++
2069 ++ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2070 ++ &dd->dd_flag)))
2071 ++ goto st_out;
2072 +
2073 + if (kthread_should_stop() ||
2074 + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2075 +@@ -2962,6 +2968,8 @@ static int mtip_service_thread(void *data)
2076 + &dd->dd_flag)))
2077 + goto st_out;
2078 +
2079 ++ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2080 ++
2081 + restart_eh:
2082 + /* Demux bits: start with error handling */
2083 + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
2084 +@@ -3004,10 +3012,8 @@ restart_eh:
2085 + }
2086 +
2087 + if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
2088 +- if (mtip_ftl_rebuild_poll(dd) < 0)
2089 +- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
2090 +- &dd->dd_flag);
2091 +- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2092 ++ if (mtip_ftl_rebuild_poll(dd) == 0)
2093 ++ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2094 + }
2095 + }
2096 +
2097 +@@ -3886,7 +3892,6 @@ static int mtip_block_initialize(struct driver_data *dd)
2098 +
2099 + mtip_hw_debugfs_init(dd);
2100 +
2101 +-skip_create_disk:
2102 + memset(&dd->tags, 0, sizeof(dd->tags));
2103 + dd->tags.ops = &mtip_mq_ops;
2104 + dd->tags.nr_hw_queues = 1;
2105 +@@ -3916,6 +3921,7 @@ skip_create_disk:
2106 + dd->disk->queue = dd->queue;
2107 + dd->queue->queuedata = dd;
2108 +
2109 ++skip_create_disk:
2110 + /* Initialize the protocol layer. */
2111 + wait_for_rebuild = mtip_hw_get_identify(dd);
2112 + if (wait_for_rebuild < 0) {
2113 +@@ -4077,7 +4083,8 @@ static int mtip_block_remove(struct driver_data *dd)
2114 + dd->bdev = NULL;
2115 + }
2116 + if (dd->disk) {
2117 +- del_gendisk(dd->disk);
2118 ++ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
2119 ++ del_gendisk(dd->disk);
2120 + if (dd->disk->queue) {
2121 + blk_cleanup_queue(dd->queue);
2122 + blk_mq_free_tag_set(&dd->tags);
2123 +@@ -4118,7 +4125,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
2124 + dev_info(&dd->pdev->dev,
2125 + "Shutting down %s ...\n", dd->disk->disk_name);
2126 +
2127 +- del_gendisk(dd->disk);
2128 ++ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
2129 ++ del_gendisk(dd->disk);
2130 + if (dd->disk->queue) {
2131 + blk_cleanup_queue(dd->queue);
2132 + blk_mq_free_tag_set(&dd->tags);
2133 +diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
2134 +index 76695265dffb..578ad36c9913 100644
2135 +--- a/drivers/block/mtip32xx/mtip32xx.h
2136 ++++ b/drivers/block/mtip32xx/mtip32xx.h
2137 +@@ -145,6 +145,11 @@ enum {
2138 + MTIP_PF_SR_CLEANUP_BIT = 7,
2139 + MTIP_PF_SVC_THD_STOP_BIT = 8,
2140 +
2141 ++ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
2142 ++ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
2143 ++ (1 << MTIP_PF_REBUILD_BIT) |
2144 ++ (1 << MTIP_PF_SVC_THD_STOP_BIT)),
2145 ++
2146 + /* below are bit numbers in 'dd_flag' defined in driver_data */
2147 + MTIP_DDF_SEC_LOCK_BIT = 0,
2148 + MTIP_DDF_REMOVE_PENDING_BIT = 1,
2149 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2150 +index a98c41f72c63..101232982c58 100644
2151 +--- a/drivers/block/nbd.c
2152 ++++ b/drivers/block/nbd.c
2153 +@@ -578,8 +578,8 @@ static void do_nbd_request(struct request_queue *q)
2154 + BUG_ON(nbd->magic != NBD_MAGIC);
2155 +
2156 + if (unlikely(!nbd->sock)) {
2157 +- dev_err(disk_to_dev(nbd->disk),
2158 +- "Attempted send on closed socket\n");
2159 ++ dev_err_ratelimited(disk_to_dev(nbd->disk),
2160 ++ "Attempted send on closed socket\n");
2161 + req->errors++;
2162 + nbd_end_request(req);
2163 + spin_lock_irq(q->queue_lock);
2164 +diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
2165 +index d48715b287e6..b0414702e61a 100644
2166 +--- a/drivers/block/paride/pd.c
2167 ++++ b/drivers/block/paride/pd.c
2168 +@@ -126,7 +126,7 @@
2169 + */
2170 + #include <linux/types.h>
2171 +
2172 +-static bool verbose = 0;
2173 ++static int verbose = 0;
2174 + static int major = PD_MAJOR;
2175 + static char *name = PD_NAME;
2176 + static int cluster = 64;
2177 +@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
2178 + static DEFINE_MUTEX(pd_mutex);
2179 + static DEFINE_SPINLOCK(pd_lock);
2180 +
2181 +-module_param(verbose, bool, 0);
2182 ++module_param(verbose, int, 0);
2183 + module_param(major, int, 0);
2184 + module_param(name, charp, 0);
2185 + module_param(cluster, int, 0);
2186 +diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
2187 +index 2596042eb987..ada45058e04d 100644
2188 +--- a/drivers/block/paride/pt.c
2189 ++++ b/drivers/block/paride/pt.c
2190 +@@ -117,7 +117,7 @@
2191 +
2192 + */
2193 +
2194 +-static bool verbose = 0;
2195 ++static int verbose = 0;
2196 + static int major = PT_MAJOR;
2197 + static char *name = PT_NAME;
2198 + static int disable = 0;
2199 +@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
2200 +
2201 + #include <asm/uaccess.h>
2202 +
2203 +-module_param(verbose, bool, 0);
2204 ++module_param(verbose, int, 0);
2205 + module_param(major, int, 0);
2206 + module_param(name, charp, 0);
2207 + module_param_array(drive0, int, NULL, 0);
2208 +diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
2209 +index 007534f7a2d7..9004a4e88800 100644
2210 +--- a/drivers/clk/qcom/gcc-msm8960.c
2211 ++++ b/drivers/clk/qcom/gcc-msm8960.c
2212 +@@ -2740,7 +2740,7 @@ static struct clk_rcg ce3_src = {
2213 + },
2214 + .freq_tbl = clk_tbl_ce3,
2215 + .clkr = {
2216 +- .enable_reg = 0x2c08,
2217 ++ .enable_reg = 0x36c0,
2218 + .enable_mask = BIT(7),
2219 + .hw.init = &(struct clk_init_data){
2220 + .name = "ce3_src",
2221 +@@ -2756,7 +2756,7 @@ static struct clk_branch ce3_core_clk = {
2222 + .halt_reg = 0x2fdc,
2223 + .halt_bit = 5,
2224 + .clkr = {
2225 +- .enable_reg = 0x36c4,
2226 ++ .enable_reg = 0x36cc,
2227 + .enable_mask = BIT(4),
2228 + .hw.init = &(struct clk_init_data){
2229 + .name = "ce3_core_clk",
2230 +diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
2231 +index 880a266f0143..a25261aaf793 100644
2232 +--- a/drivers/clk/rockchip/clk.c
2233 ++++ b/drivers/clk/rockchip/clk.c
2234 +@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2235 + if (gate_offset >= 0) {
2236 + gate = kzalloc(sizeof(*gate), GFP_KERNEL);
2237 + if (!gate)
2238 +- return ERR_PTR(-ENOMEM);
2239 ++ goto err_gate;
2240 +
2241 + gate->flags = gate_flags;
2242 + gate->reg = base + gate_offset;
2243 +@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2244 + if (div_width > 0) {
2245 + div = kzalloc(sizeof(*div), GFP_KERNEL);
2246 + if (!div)
2247 +- return ERR_PTR(-ENOMEM);
2248 ++ goto err_div;
2249 +
2250 + div->flags = div_flags;
2251 + div->reg = base + muxdiv_offset;
2252 +@@ -100,6 +100,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2253 + flags);
2254 +
2255 + return clk;
2256 ++err_div:
2257 ++ kfree(gate);
2258 ++err_gate:
2259 ++ kfree(mux);
2260 ++ return ERR_PTR(-ENOMEM);
2261 + }
2262 +
2263 + static struct clk *rockchip_clk_register_frac_branch(const char *name,
2264 +diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
2265 +index 5122ef25f595..e63c3ef9b5ec 100644
2266 +--- a/drivers/clk/versatile/clk-sp810.c
2267 ++++ b/drivers/clk/versatile/clk-sp810.c
2268 +@@ -141,6 +141,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
2269 + const char *parent_names[2];
2270 + char name[12];
2271 + struct clk_init_data init;
2272 ++ static int instance;
2273 + int i;
2274 +
2275 + if (!sp810) {
2276 +@@ -172,7 +173,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
2277 + init.num_parents = ARRAY_SIZE(parent_names);
2278 +
2279 + for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
2280 +- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
2281 ++ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
2282 +
2283 + sp810->timerclken[i].sp810 = sp810;
2284 + sp810->timerclken[i].channel = i;
2285 +@@ -184,5 +185,6 @@ void __init clk_sp810_of_setup(struct device_node *node)
2286 + }
2287 +
2288 + of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
2289 ++ instance++;
2290 + }
2291 + CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
2292 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
2293 +index 8e5e0187506f..3ff21c3e9ab2 100644
2294 +--- a/drivers/crypto/ux500/hash/hash_core.c
2295 ++++ b/drivers/crypto/ux500/hash/hash_core.c
2296 +@@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data,
2297 + &device_data->state);
2298 + memmove(req_ctx->state.buffer,
2299 + device_data->state.buffer,
2300 +- HASH_BLOCK_SIZE / sizeof(u32));
2301 ++ HASH_BLOCK_SIZE);
2302 + if (ret) {
2303 + dev_err(device_data->dev,
2304 + "%s: hash_resume_state() failed!\n",
2305 +@@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data,
2306 +
2307 + memmove(device_data->state.buffer,
2308 + req_ctx->state.buffer,
2309 +- HASH_BLOCK_SIZE / sizeof(u32));
2310 ++ HASH_BLOCK_SIZE);
2311 + if (ret) {
2312 + dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
2313 + __func__);
2314 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
2315 +index 8590099ac148..71e090c8c85e 100644
2316 +--- a/drivers/firmware/efi/efi.c
2317 ++++ b/drivers/firmware/efi/efi.c
2318 +@@ -154,6 +154,7 @@ static int generic_ops_register(void)
2319 + {
2320 + generic_ops.get_variable = efi.get_variable;
2321 + generic_ops.set_variable = efi.set_variable;
2322 ++ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
2323 + generic_ops.get_next_variable = efi.get_next_variable;
2324 + generic_ops.query_variable_store = efi_query_variable_store;
2325 +
2326 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2327 +index c2d76fed3abf..234e89c013dd 100644
2328 +--- a/drivers/gpu/drm/i915/intel_display.c
2329 ++++ b/drivers/gpu/drm/i915/intel_display.c
2330 +@@ -6526,12 +6526,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
2331 + {
2332 + struct drm_i915_private *dev_priv = dev->dev_private;
2333 + struct intel_encoder *encoder;
2334 ++ int i;
2335 + u32 val, final;
2336 + bool has_lvds = false;
2337 + bool has_cpu_edp = false;
2338 + bool has_panel = false;
2339 + bool has_ck505 = false;
2340 + bool can_ssc = false;
2341 ++ bool using_ssc_source = false;
2342 +
2343 + /* We need to take the global config into account */
2344 + for_each_intel_encoder(dev, encoder) {
2345 +@@ -6556,8 +6558,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
2346 + can_ssc = true;
2347 + }
2348 +
2349 +- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
2350 +- has_panel, has_lvds, has_ck505);
2351 ++ /* Check if any DPLLs are using the SSC source */
2352 ++ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2353 ++ u32 temp = I915_READ(PCH_DPLL(i));
2354 ++
2355 ++ if (!(temp & DPLL_VCO_ENABLE))
2356 ++ continue;
2357 ++
2358 ++ if ((temp & PLL_REF_INPUT_MASK) ==
2359 ++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
2360 ++ using_ssc_source = true;
2361 ++ break;
2362 ++ }
2363 ++ }
2364 ++
2365 ++ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
2366 ++ has_panel, has_lvds, has_ck505, using_ssc_source);
2367 +
2368 + /* Ironlake: try to setup display ref clock before DPLL
2369 + * enabling. This is only under driver's control after
2370 +@@ -6594,9 +6610,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
2371 + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2372 + } else
2373 + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2374 +- } else {
2375 +- final |= DREF_SSC_SOURCE_DISABLE;
2376 +- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2377 ++ } else if (using_ssc_source) {
2378 ++ final |= DREF_SSC_SOURCE_ENABLE;
2379 ++ final |= DREF_SSC1_ENABLE;
2380 + }
2381 +
2382 + if (final == val)
2383 +@@ -6642,7 +6658,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
2384 + POSTING_READ(PCH_DREF_CONTROL);
2385 + udelay(200);
2386 + } else {
2387 +- DRM_DEBUG_KMS("Disabling SSC entirely\n");
2388 ++ DRM_DEBUG_KMS("Disabling CPU source output\n");
2389 +
2390 + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2391 +
2392 +@@ -6653,16 +6669,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
2393 + POSTING_READ(PCH_DREF_CONTROL);
2394 + udelay(200);
2395 +
2396 +- /* Turn off the SSC source */
2397 +- val &= ~DREF_SSC_SOURCE_MASK;
2398 +- val |= DREF_SSC_SOURCE_DISABLE;
2399 ++ if (!using_ssc_source) {
2400 ++ DRM_DEBUG_KMS("Disabling SSC source\n");
2401 +
2402 +- /* Turn off SSC1 */
2403 +- val &= ~DREF_SSC1_ENABLE;
2404 ++ /* Turn off the SSC source */
2405 ++ val &= ~DREF_SSC_SOURCE_MASK;
2406 ++ val |= DREF_SSC_SOURCE_DISABLE;
2407 +
2408 +- I915_WRITE(PCH_DREF_CONTROL, val);
2409 +- POSTING_READ(PCH_DREF_CONTROL);
2410 +- udelay(200);
2411 ++ /* Turn off SSC1 */
2412 ++ val &= ~DREF_SSC1_ENABLE;
2413 ++
2414 ++ I915_WRITE(PCH_DREF_CONTROL, val);
2415 ++ POSTING_READ(PCH_DREF_CONTROL);
2416 ++ udelay(200);
2417 ++ }
2418 + }
2419 +
2420 + BUG_ON(val != final);
2421 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
2422 +index ff6358f8a9c6..993c9a0377da 100644
2423 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
2424 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
2425 +@@ -568,7 +568,8 @@ nouveau_fbcon_init(struct drm_device *dev)
2426 + if (ret)
2427 + goto fini;
2428 +
2429 +- fbcon->helper.fbdev->pixmap.buf_align = 4;
2430 ++ if (fbcon->helper.fbdev)
2431 ++ fbcon->helper.fbdev->pixmap.buf_align = 4;
2432 + return 0;
2433 +
2434 + fini:
2435 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
2436 +index ce8cab52285b..2f2e50a0feb4 100644
2437 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
2438 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
2439 +@@ -1730,6 +1730,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
2440 + static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
2441 + {
2442 + struct drm_device *dev = crtc->dev;
2443 ++ struct radeon_device *rdev = dev->dev_private;
2444 + struct drm_crtc *test_crtc;
2445 + struct radeon_crtc *test_radeon_crtc;
2446 +
2447 +@@ -1739,6 +1740,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
2448 + test_radeon_crtc = to_radeon_crtc(test_crtc);
2449 + if (test_radeon_crtc->encoder &&
2450 + ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
2451 ++ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
2452 ++ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
2453 ++ test_radeon_crtc->pll_id == ATOM_PPLL2)
2454 ++ continue;
2455 + /* for DP use the same PLL for all */
2456 + if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
2457 + return test_radeon_crtc->pll_id;
2458 +@@ -1760,6 +1765,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
2459 + {
2460 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
2461 + struct drm_device *dev = crtc->dev;
2462 ++ struct radeon_device *rdev = dev->dev_private;
2463 + struct drm_crtc *test_crtc;
2464 + struct radeon_crtc *test_radeon_crtc;
2465 + u32 adjusted_clock, test_adjusted_clock;
2466 +@@ -1775,6 +1781,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
2467 + test_radeon_crtc = to_radeon_crtc(test_crtc);
2468 + if (test_radeon_crtc->encoder &&
2469 + !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
2470 ++ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
2471 ++ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
2472 ++ test_radeon_crtc->pll_id == ATOM_PPLL2)
2473 ++ continue;
2474 + /* check if we are already driving this connector with another crtc */
2475 + if (test_radeon_crtc->connector == radeon_crtc->connector) {
2476 + /* if we are, return that pll */
2477 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2478 +index 5d54ab0fbe2b..6b99d3956baa 100644
2479 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2480 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2481 +@@ -599,6 +599,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
2482 + /*
2483 + * GPU helper functions.
2484 + */
2485 ++
2486 ++/**
2487 ++ * radeon_device_is_virtual - check if we are running in a virtual environment
2488 ++ *
2489 ++ * Check if the asic has been passed through to a VM (all asics).
2490 ++ * Used at driver startup.
2491 ++ * Returns true if virtual or false if not.
2492 ++ */
2493 ++static bool radeon_device_is_virtual(void)
2494 ++{
2495 ++#ifdef CONFIG_X86
2496 ++ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
2497 ++#else
2498 ++ return false;
2499 ++#endif
2500 ++}
2501 ++
2502 + /**
2503 + * radeon_card_posted - check if the hw has already been initialized
2504 + *
2505 +@@ -612,6 +629,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
2506 + {
2507 + uint32_t reg;
2508 +
2509 ++ /* for pass through, always force asic_init */
2510 ++ if (radeon_device_is_virtual())
2511 ++ return false;
2512 ++
2513 + /* required for EFI mode on macbook2,1 which uses an r5xx asic */
2514 + if (efi_enabled(EFI_BOOT) &&
2515 + (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
2516 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2517 +index 481e718086fc..c3530caa7ddd 100644
2518 +--- a/drivers/gpu/drm/radeon/si_dpm.c
2519 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
2520 +@@ -2922,6 +2922,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2521 + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2522 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2523 + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2524 ++ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
2525 + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2526 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
2527 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2528 +@@ -2956,6 +2957,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2529 + }
2530 + ++p;
2531 + }
2532 ++ /* limit mclk on all R7 370 parts for stability */
2533 ++ if (rdev->pdev->device == 0x6811 &&
2534 ++ rdev->pdev->revision == 0x81)
2535 ++ max_mclk = 120000;
2536 +
2537 + if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2538 + ni_dpm_vblank_too_short(rdev))
2539 +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
2540 +index 5fc16cecd3ba..cd8d183dcfe5 100644
2541 +--- a/drivers/gpu/drm/udl/udl_fb.c
2542 ++++ b/drivers/gpu/drm/udl/udl_fb.c
2543 +@@ -546,7 +546,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
2544 +
2545 + return ret;
2546 + out_gfree:
2547 +- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
2548 ++ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
2549 + out:
2550 + return ret;
2551 + }
2552 +diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
2553 +index 8044f5fb7c49..6de963b70eee 100644
2554 +--- a/drivers/gpu/drm/udl/udl_gem.c
2555 ++++ b/drivers/gpu/drm/udl/udl_gem.c
2556 +@@ -51,7 +51,7 @@ udl_gem_create(struct drm_file *file,
2557 + return ret;
2558 + }
2559 +
2560 +- drm_gem_object_unreference(&obj->base);
2561 ++ drm_gem_object_unreference_unlocked(&obj->base);
2562 + *handle_p = handle;
2563 + return 0;
2564 + }
2565 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2566 +index cb4bc0dadba5..19a3a12f3257 100644
2567 +--- a/drivers/hid/hid-core.c
2568 ++++ b/drivers/hid/hid-core.c
2569 +@@ -1834,6 +1834,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2570 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
2571 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
2572 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
2573 ++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
2574 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
2575 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
2576 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
2577 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
2578 +index 4e49462870ab..d0c8a1c1e1fe 100644
2579 +--- a/drivers/hid/hid-elo.c
2580 ++++ b/drivers/hid/hid-elo.c
2581 +@@ -259,7 +259,7 @@ static void elo_remove(struct hid_device *hdev)
2582 + struct elo_priv *priv = hid_get_drvdata(hdev);
2583 +
2584 + hid_hw_stop(hdev);
2585 +- flush_workqueue(wq);
2586 ++ cancel_delayed_work_sync(&priv->work);
2587 + kfree(priv);
2588 + }
2589 +
2590 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
2591 +index 2f1ddca6f2e0..700145b15088 100644
2592 +--- a/drivers/hid/usbhid/hiddev.c
2593 ++++ b/drivers/hid/usbhid/hiddev.c
2594 +@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
2595 + goto inval;
2596 + } else if (uref->usage_index >= field->report_count)
2597 + goto inval;
2598 +-
2599 +- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2600 +- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2601 +- uref->usage_index + uref_multi->num_values > field->report_count))
2602 +- goto inval;
2603 + }
2604 +
2605 ++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2606 ++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2607 ++ uref->usage_index + uref_multi->num_values > field->report_count))
2608 ++ goto inval;
2609 ++
2610 + switch (cmd) {
2611 + case HIDIOCGUSAGE:
2612 + uref->value = field->value[uref->usage_index];
2613 +diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
2614 +index f67d71ee8386..159f50d0ae39 100644
2615 +--- a/drivers/hwmon/max1111.c
2616 ++++ b/drivers/hwmon/max1111.c
2617 +@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
2618 +
2619 + int max1111_read_channel(int channel)
2620 + {
2621 ++ if (!the_max1111 || !the_max1111->spi)
2622 ++ return -ENODEV;
2623 ++
2624 + return max1111_read(&the_max1111->spi->dev, channel);
2625 + }
2626 + EXPORT_SYMBOL(max1111_read_channel);
2627 +@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
2628 + {
2629 + struct max1111_data *data = spi_get_drvdata(spi);
2630 +
2631 ++#ifdef CONFIG_SHARPSL_PM
2632 ++ the_max1111 = NULL;
2633 ++#endif
2634 + hwmon_device_unregister(data->hwmon_dev);
2635 + sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
2636 + sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
2637 +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
2638 +index 81e6263cd7da..91a6362bbef6 100644
2639 +--- a/drivers/i2c/busses/i2c-exynos5.c
2640 ++++ b/drivers/i2c/busses/i2c-exynos5.c
2641 +@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2642 + return -EIO;
2643 + }
2644 +
2645 +- clk_prepare_enable(i2c->clk);
2646 ++ ret = clk_enable(i2c->clk);
2647 ++ if (ret)
2648 ++ return ret;
2649 +
2650 + for (i = 0; i < num; i++, msgs++) {
2651 + stop = (i == num - 1);
2652 +@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2653 + }
2654 +
2655 + out:
2656 +- clk_disable_unprepare(i2c->clk);
2657 ++ clk_disable(i2c->clk);
2658 + return ret;
2659 + }
2660 +
2661 +@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2662 + return -ENOENT;
2663 + }
2664 +
2665 +- clk_prepare_enable(i2c->clk);
2666 ++ ret = clk_prepare_enable(i2c->clk);
2667 ++ if (ret)
2668 ++ return ret;
2669 +
2670 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2671 + i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
2672 +@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2673 +
2674 + platform_set_drvdata(pdev, i2c);
2675 +
2676 ++ clk_disable(i2c->clk);
2677 ++
2678 ++ return 0;
2679 ++
2680 + err_clk:
2681 + clk_disable_unprepare(i2c->clk);
2682 + return ret;
2683 +@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
2684 +
2685 + i2c_del_adapter(&i2c->adap);
2686 +
2687 ++ clk_unprepare(i2c->clk);
2688 ++
2689 + return 0;
2690 + }
2691 +
2692 +@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
2693 +
2694 + i2c->suspended = 1;
2695 +
2696 ++ clk_unprepare(i2c->clk);
2697 ++
2698 + return 0;
2699 + }
2700 +
2701 +@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2702 + struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
2703 + int ret = 0;
2704 +
2705 +- clk_prepare_enable(i2c->clk);
2706 ++ ret = clk_prepare_enable(i2c->clk);
2707 ++ if (ret)
2708 ++ return ret;
2709 +
2710 + ret = exynos5_hsi2c_clock_setup(i2c);
2711 + if (ret) {
2712 +@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2713 + }
2714 +
2715 + exynos5_i2c_init(i2c);
2716 +- clk_disable_unprepare(i2c->clk);
2717 ++ clk_disable(i2c->clk);
2718 + i2c->suspended = 0;
2719 +
2720 + return 0;
2721 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
2722 +index e65ee1947279..b33fa45a13cc 100644
2723 +--- a/drivers/infiniband/hw/mlx4/ah.c
2724 ++++ b/drivers/infiniband/hw/mlx4/ah.c
2725 +@@ -46,6 +46,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
2726 +
2727 + ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
2728 + ah->av.ib.g_slid = ah_attr->src_path_bits;
2729 ++ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
2730 + if (ah_attr->ah_flags & IB_AH_GRH) {
2731 + ah->av.ib.g_slid |= 0x80;
2732 + ah->av.ib.gid_index = ah_attr->grh.sgid_index;
2733 +@@ -63,7 +64,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
2734 + !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
2735 + --ah->av.ib.stat_rate;
2736 + }
2737 +- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
2738 +
2739 + return &ah->ibah;
2740 + }
2741 +diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
2742 +index c91e3d33aea9..88db9204bac2 100644
2743 +--- a/drivers/input/misc/pmic8xxx-pwrkey.c
2744 ++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
2745 +@@ -94,7 +94,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
2746 + if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
2747 + kpd_delay = 15625;
2748 +
2749 +- if (kpd_delay > 62500 || kpd_delay == 0) {
2750 ++ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
2751 ++ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
2752 + dev_err(&pdev->dev, "invalid power key trigger delay\n");
2753 + return -EINVAL;
2754 + }
2755 +@@ -124,8 +125,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
2756 + pwr->name = "pmic8xxx_pwrkey";
2757 + pwr->phys = "pmic8xxx_pwrkey/input0";
2758 +
2759 +- delay = (kpd_delay << 10) / USEC_PER_SEC;
2760 +- delay = 1 + ilog2(delay);
2761 ++ delay = (kpd_delay << 6) / USEC_PER_SEC;
2762 ++ delay = ilog2(delay);
2763 +
2764 + err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
2765 + if (err < 0) {
2766 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2767 +index b3b2a137e55e..8c964907d291 100644
2768 +--- a/drivers/input/mouse/elantech.c
2769 ++++ b/drivers/input/mouse/elantech.c
2770 +@@ -1550,12 +1550,7 @@ static int elantech_set_properties(struct elantech_data *etd)
2771 + case 5:
2772 + etd->hw_version = 3;
2773 + break;
2774 +- case 6:
2775 +- case 7:
2776 +- case 8:
2777 +- case 9:
2778 +- case 10:
2779 +- case 13:
2780 ++ case 6 ... 14:
2781 + etd->hw_version = 4;
2782 + break;
2783 + default:
2784 +diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
2785 +index 2792ca397dd0..3ed0ce1e4dcb 100644
2786 +--- a/drivers/input/touchscreen/wacom_w8001.c
2787 ++++ b/drivers/input/touchscreen/wacom_w8001.c
2788 +@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@×××××.com>");
2789 + MODULE_DESCRIPTION(DRIVER_DESC);
2790 + MODULE_LICENSE("GPL");
2791 +
2792 +-#define W8001_MAX_LENGTH 11
2793 ++#define W8001_MAX_LENGTH 13
2794 + #define W8001_LEAD_MASK 0x80
2795 + #define W8001_LEAD_BYTE 0x80
2796 + #define W8001_TAB_MASK 0x40
2797 +diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
2798 +index 8ba48f5eff7b..ce1a855d8b9a 100644
2799 +--- a/drivers/input/touchscreen/zforce_ts.c
2800 ++++ b/drivers/input/touchscreen/zforce_ts.c
2801 +@@ -359,8 +359,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
2802 + point.coord_x = point.coord_y = 0;
2803 + }
2804 +
2805 +- point.state = payload[9 * i + 5] & 0x03;
2806 +- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
2807 ++ point.state = payload[9 * i + 5] & 0x0f;
2808 ++ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
2809 +
2810 + /* determine touch major, minor and orientation */
2811 + point.area_major = max(payload[9 * i + 6],
2812 +diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
2813 +index b7d3c8b9f189..47f1a4879837 100644
2814 +--- a/drivers/media/platform/vsp1/vsp1_sru.c
2815 ++++ b/drivers/media/platform/vsp1/vsp1_sru.c
2816 +@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
2817 + mutex_lock(sru->ctrls.lock);
2818 + ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
2819 + & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
2820 ++ vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
2821 + mutex_unlock(sru->ctrls.lock);
2822 +
2823 + vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
2824 +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
2825 +index bbeb4516facf..878d5430973c 100644
2826 +--- a/drivers/misc/Kconfig
2827 ++++ b/drivers/misc/Kconfig
2828 +@@ -429,7 +429,7 @@ config ARM_CHARLCD
2829 + still useful.
2830 +
2831 + config BMP085
2832 +- bool
2833 ++ tristate
2834 + depends on SYSFS
2835 +
2836 + config BMP085_I2C
2837 +diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
2838 +index a43053daad0e..46272b0ae2dd 100644
2839 +--- a/drivers/misc/ad525x_dpot.c
2840 ++++ b/drivers/misc/ad525x_dpot.c
2841 +@@ -215,7 +215,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
2842 + */
2843 + value = swab16(value);
2844 +
2845 +- if (dpot->uid == DPOT_UID(AD5271_ID))
2846 ++ if (dpot->uid == DPOT_UID(AD5274_ID))
2847 + value = value >> 2;
2848 + return value;
2849 + default:
2850 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2851 +index 0aa7087438fa..e40db2fd2da3 100644
2852 +--- a/drivers/mtd/ubi/eba.c
2853 ++++ b/drivers/mtd/ubi/eba.c
2854 +@@ -861,7 +861,7 @@ write_error:
2855 + int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
2856 + int lnum, const void *buf, int len)
2857 + {
2858 +- int err, pnum, tries = 0, vol_id = vol->vol_id;
2859 ++ int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
2860 + struct ubi_vid_hdr *vid_hdr;
2861 + uint32_t crc;
2862 +
2863 +@@ -924,16 +924,17 @@ retry:
2864 + goto write_error;
2865 + }
2866 +
2867 +- if (vol->eba_tbl[lnum] >= 0) {
2868 +- err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
2869 +- if (err)
2870 +- goto out_leb_unlock;
2871 +- }
2872 +-
2873 + down_read(&ubi->fm_sem);
2874 ++ old_pnum = vol->eba_tbl[lnum];
2875 + vol->eba_tbl[lnum] = pnum;
2876 + up_read(&ubi->fm_sem);
2877 +
2878 ++ if (old_pnum >= 0) {
2879 ++ err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
2880 ++ if (err)
2881 ++ goto out_leb_unlock;
2882 ++ }
2883 ++
2884 + out_leb_unlock:
2885 + leb_write_unlock(ubi, vol_id, lnum);
2886 + out_mutex:
2887 +diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
2888 +index 05e1aa090add..2c4e54f764c5 100644
2889 +--- a/drivers/net/can/at91_can.c
2890 ++++ b/drivers/net/can/at91_can.c
2891 +@@ -734,9 +734,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
2892 +
2893 + /* upper group completed, look again in lower */
2894 + if (priv->rx_next > get_mb_rx_low_last(priv) &&
2895 +- quota > 0 && mb > get_mb_rx_last(priv)) {
2896 ++ mb > get_mb_rx_last(priv)) {
2897 + priv->rx_next = get_mb_rx_first(priv);
2898 +- goto again;
2899 ++ if (quota > 0)
2900 ++ goto again;
2901 + }
2902 +
2903 + return received;
2904 +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
2905 +index 60285820f7b4..055457619c1e 100644
2906 +--- a/drivers/net/can/c_can/c_can.c
2907 ++++ b/drivers/net/can/c_can/c_can.c
2908 +@@ -331,9 +331,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
2909 +
2910 + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
2911 +
2912 +- for (i = 0; i < frame->can_dlc; i += 2) {
2913 +- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
2914 +- frame->data[i] | (frame->data[i + 1] << 8));
2915 ++ if (priv->type == BOSCH_D_CAN) {
2916 ++ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
2917 ++
2918 ++ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
2919 ++ data = (u32)frame->data[i];
2920 ++ data |= (u32)frame->data[i + 1] << 8;
2921 ++ data |= (u32)frame->data[i + 2] << 16;
2922 ++ data |= (u32)frame->data[i + 3] << 24;
2923 ++ priv->write_reg32(priv, dreg, data);
2924 ++ }
2925 ++ } else {
2926 ++ for (i = 0; i < frame->can_dlc; i += 2) {
2927 ++ priv->write_reg(priv,
2928 ++ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
2929 ++ frame->data[i] |
2930 ++ (frame->data[i + 1] << 8));
2931 ++ }
2932 + }
2933 + }
2934 +
2935 +@@ -401,10 +415,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
2936 + } else {
2937 + int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
2938 +
2939 +- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
2940 +- data = priv->read_reg(priv, dreg);
2941 +- frame->data[i] = data;
2942 +- frame->data[i + 1] = data >> 8;
2943 ++ if (priv->type == BOSCH_D_CAN) {
2944 ++ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
2945 ++ data = priv->read_reg32(priv, dreg);
2946 ++ frame->data[i] = data;
2947 ++ frame->data[i + 1] = data >> 8;
2948 ++ frame->data[i + 2] = data >> 16;
2949 ++ frame->data[i + 3] = data >> 24;
2950 ++ }
2951 ++ } else {
2952 ++ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
2953 ++ data = priv->read_reg(priv, dreg);
2954 ++ frame->data[i] = data;
2955 ++ frame->data[i + 1] = data >> 8;
2956 ++ }
2957 + }
2958 + }
2959 +
2960 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
2961 +index 80185ebc7a43..bcefb375d232 100644
2962 +--- a/drivers/net/can/dev.c
2963 ++++ b/drivers/net/can/dev.c
2964 +@@ -717,6 +717,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
2965 + * - control mode with CAN_CTRLMODE_FD set
2966 + */
2967 +
2968 ++ if (!data)
2969 ++ return 0;
2970 ++
2971 + if (data[IFLA_CAN_CTRLMODE]) {
2972 + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
2973 +
2974 +@@ -927,6 +930,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
2975 + return -EOPNOTSUPP;
2976 + }
2977 +
2978 ++static void can_dellink(struct net_device *dev, struct list_head *head)
2979 ++{
2980 ++ return;
2981 ++}
2982 ++
2983 + static struct rtnl_link_ops can_link_ops __read_mostly = {
2984 + .kind = "can",
2985 + .maxtype = IFLA_CAN_MAX,
2986 +@@ -935,6 +943,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
2987 + .validate = can_validate,
2988 + .newlink = can_newlink,
2989 + .changelink = can_changelink,
2990 ++ .dellink = can_dellink,
2991 + .get_size = can_get_size,
2992 + .fill_info = can_fill_info,
2993 + .get_xstats_size = can_get_xstats_size,
2994 +diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
2995 +index 84a09e8ddd9c..5086ec9214c3 100644
2996 +--- a/drivers/net/ethernet/atheros/atlx/atl2.c
2997 ++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
2998 +@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2999 +
3000 + err = -EIO;
3001 +
3002 +- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
3003 ++ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
3004 + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3005 +
3006 + /* Init PHY as early as possible due to power saving issue */
3007 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3008 +index bb27028d392b..1db35f8053a1 100644
3009 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3010 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3011 +@@ -912,7 +912,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
3012 + dev->stats.tx_bytes += tx_cb_ptr->skb->len;
3013 + dma_unmap_single(&dev->dev,
3014 + dma_unmap_addr(tx_cb_ptr, dma_addr),
3015 +- tx_cb_ptr->skb->len,
3016 ++ dma_unmap_len(tx_cb_ptr, dma_len),
3017 + DMA_TO_DEVICE);
3018 + bcmgenet_free_cb(tx_cb_ptr);
3019 + } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
3020 +@@ -1019,7 +1019,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
3021 + }
3022 +
3023 + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
3024 +- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
3025 ++ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
3026 + length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
3027 + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
3028 + DMA_TX_APPEND_CRC;
3029 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
3030 +index 96ba23e90111..2ee3c9537772 100644
3031 +--- a/drivers/net/ethernet/freescale/fec_main.c
3032 ++++ b/drivers/net/ethernet/freescale/fec_main.c
3033 +@@ -1539,9 +1539,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
3034 + struct fec_enet_private *fep = netdev_priv(ndev);
3035 +
3036 + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
3037 +- clear_bit(queue_id, &fep->work_rx);
3038 +- pkt_received += fec_enet_rx_queue(ndev,
3039 ++ int ret;
3040 ++
3041 ++ ret = fec_enet_rx_queue(ndev,
3042 + budget - pkt_received, queue_id);
3043 ++
3044 ++ if (ret < budget - pkt_received)
3045 ++ clear_bit(queue_id, &fep->work_rx);
3046 ++
3047 ++ pkt_received += ret;
3048 + }
3049 + return pkt_received;
3050 + }
3051 +diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
3052 +index 4a1be34d7214..70039afd68fa 100644
3053 +--- a/drivers/net/ethernet/jme.c
3054 ++++ b/drivers/net/ethernet/jme.c
3055 +@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
3056 + }
3057 +
3058 + static inline void
3059 +-jme_clear_pm(struct jme_adapter *jme)
3060 ++jme_clear_pm_enable_wol(struct jme_adapter *jme)
3061 + {
3062 + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
3063 + }
3064 +
3065 ++static inline void
3066 ++jme_clear_pm_disable_wol(struct jme_adapter *jme)
3067 ++{
3068 ++ jwrite32(jme, JME_PMCS, PMCS_STMASK);
3069 ++}
3070 ++
3071 + static int
3072 + jme_reload_eeprom(struct jme_adapter *jme)
3073 + {
3074 +@@ -1857,7 +1863,7 @@ jme_open(struct net_device *netdev)
3075 + struct jme_adapter *jme = netdev_priv(netdev);
3076 + int rc;
3077 +
3078 +- jme_clear_pm(jme);
3079 ++ jme_clear_pm_disable_wol(jme);
3080 + JME_NAPI_ENABLE(jme);
3081 +
3082 + tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
3083 +@@ -1929,11 +1935,11 @@ jme_wait_link(struct jme_adapter *jme)
3084 + static void
3085 + jme_powersave_phy(struct jme_adapter *jme)
3086 + {
3087 +- if (jme->reg_pmcs) {
3088 ++ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
3089 + jme_set_100m_half(jme);
3090 + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
3091 + jme_wait_link(jme);
3092 +- jme_clear_pm(jme);
3093 ++ jme_clear_pm_enable_wol(jme);
3094 + } else {
3095 + jme_phy_off(jme);
3096 + }
3097 +@@ -2650,9 +2656,6 @@ jme_set_wol(struct net_device *netdev,
3098 + if (wol->wolopts & WAKE_MAGIC)
3099 + jme->reg_pmcs |= PMCS_MFEN;
3100 +
3101 +- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
3102 +- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
3103 +-
3104 + return 0;
3105 + }
3106 +
3107 +@@ -3176,8 +3179,8 @@ jme_init_one(struct pci_dev *pdev,
3108 + jme->mii_if.mdio_read = jme_mdio_read;
3109 + jme->mii_if.mdio_write = jme_mdio_write;
3110 +
3111 +- jme_clear_pm(jme);
3112 +- device_set_wakeup_enable(&pdev->dev, true);
3113 ++ jme_clear_pm_disable_wol(jme);
3114 ++ device_init_wakeup(&pdev->dev, true);
3115 +
3116 + jme_set_phyfifo_5level(jme);
3117 + jme->pcirev = pdev->revision;
3118 +@@ -3308,7 +3311,7 @@ jme_resume(struct device *dev)
3119 + if (!netif_running(netdev))
3120 + return 0;
3121 +
3122 +- jme_clear_pm(jme);
3123 ++ jme_clear_pm_disable_wol(jme);
3124 + jme_phy_on(jme);
3125 + if (test_bit(JME_FLAG_SSET, &jme->flags))
3126 + jme_set_settings(netdev, &jme->old_ecmd);
3127 +@@ -3316,13 +3319,14 @@ jme_resume(struct device *dev)
3128 + jme_reset_phy_processor(jme);
3129 + jme_phy_calibration(jme);
3130 + jme_phy_setEA(jme);
3131 +- jme_start_irq(jme);
3132 + netif_device_attach(netdev);
3133 +
3134 + atomic_inc(&jme->link_changing);
3135 +
3136 + jme_reset_link(jme);
3137 +
3138 ++ jme_start_irq(jme);
3139 ++
3140 + return 0;
3141 + }
3142 +
3143 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
3144 +index 5980d3fe597c..fdc592ac2529 100644
3145 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
3146 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
3147 +@@ -391,7 +391,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
3148 + u32 packets = 0;
3149 + u32 bytes = 0;
3150 + int factor = priv->cqe_factor;
3151 +- u64 timestamp = 0;
3152 + int done = 0;
3153 + int budget = priv->tx_work_limit;
3154 + u32 last_nr_txbb;
3155 +@@ -431,9 +430,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
3156 + new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
3157 +
3158 + do {
3159 ++ u64 timestamp = 0;
3160 ++
3161 + txbbs_skipped += last_nr_txbb;
3162 + ring_index = (ring_index + last_nr_txbb) & size_mask;
3163 +- if (ring->tx_info[ring_index].ts_requested)
3164 ++
3165 ++ if (unlikely(ring->tx_info[ring_index].ts_requested))
3166 + timestamp = mlx4_en_get_cqe_ts(cqe);
3167 +
3168 + /* free next descriptor */
3169 +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3170 +index cd5cf6d957c7..e57df91bad06 100644
3171 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3172 ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3173 +@@ -2922,7 +2922,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
3174 + case QP_TRANS_RTS2RTS:
3175 + case QP_TRANS_SQD2SQD:
3176 + case QP_TRANS_SQD2RTS:
3177 +- if (slave != mlx4_master_func_num(dev))
3178 ++ if (slave != mlx4_master_func_num(dev)) {
3179 + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3180 + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3181 + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3182 +@@ -2941,6 +2941,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
3183 + if (qp_ctx->alt_path.mgid_index >= num_gids)
3184 + return -EINVAL;
3185 + }
3186 ++ }
3187 + break;
3188 + default:
3189 + break;
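
The resource_tracker hunk is a pure bracing fix: without braces, only the first statement after the slave check was guarded, so the alternate-path validation ran even for the master function. A compilable miniature of this bug class:

    #include <stdio.h>

    static int checks_run;

    static void validate_buggy(int is_slave)
    {
        /* BUG: only the first statement is guarded by the condition. */
        if (is_slave)
            checks_run++;   /* primary-path check */
        checks_run++;       /* alt-path check runs unconditionally! */
    }

    static void validate_fixed(int is_slave)
    {
        if (is_slave) {     /* braces scope both checks correctly */
            checks_run++;
            checks_run++;
        }
    }

    int main(void)
    {
        checks_run = 0; validate_buggy(0);
        printf("buggy: %d checks run for master\n", checks_run); /* 1 */
        checks_run = 0; validate_fixed(0);
        printf("fixed: %d checks run for master\n", checks_run); /* 0 */
        return 0;
    }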
3190 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
3191 +index e56c1bb36141..76b2cfe12504 100644
3192 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
3193 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
3194 +@@ -567,6 +567,7 @@ struct qlcnic_adapter_stats {
3195 + u64 tx_dma_map_error;
3196 + u64 spurious_intr;
3197 + u64 mac_filter_limit_overrun;
3198 ++ u64 mbx_spurious_intr;
3199 + };
3200 +
3201 + /*
3202 +@@ -1092,7 +1093,7 @@ struct qlcnic_mailbox {
3203 + unsigned long status;
3204 + spinlock_t queue_lock; /* Mailbox queue lock */
3205 + spinlock_t aen_lock; /* Mailbox response/AEN lock */
3206 +- atomic_t rsp_status;
3207 ++ u32 rsp_status;
3208 + u32 num_cmds;
3209 + };
3210 +
3211 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
3212 +index 840bf36b5e9d..dd618d7ed257 100644
3213 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
3214 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
3215 +@@ -489,7 +489,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
3216 +
3217 + static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
3218 + {
3219 +- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
3220 ++ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
3221 + complete(&mbx->completion);
3222 + }
3223 +
3224 +@@ -508,7 +508,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
3225 + if (event & QLCNIC_MBX_ASYNC_EVENT) {
3226 + __qlcnic_83xx_process_aen(adapter);
3227 + } else {
3228 +- if (atomic_read(&mbx->rsp_status) != rsp_status)
3229 ++ if (mbx->rsp_status != rsp_status)
3230 + qlcnic_83xx_notify_mbx_response(mbx);
3231 + }
3232 + out:
3233 +@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
3234 + if (event & QLCNIC_MBX_ASYNC_EVENT) {
3235 + __qlcnic_83xx_process_aen(adapter);
3236 + } else {
3237 +- if (atomic_read(&mbx->rsp_status) != rsp_status)
3238 ++ if (mbx->rsp_status != rsp_status)
3239 + qlcnic_83xx_notify_mbx_response(mbx);
3240 + }
3241 + }
3242 +@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
3243 +
3244 + static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
3245 + {
3246 ++ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
3247 + struct qlcnic_adapter *adapter = data;
3248 + struct qlcnic_mailbox *mbx;
3249 +- u32 mask, resp, event;
3250 + unsigned long flags;
3251 +
3252 + mbx = adapter->ahw->mailbox;
3253 +@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
3254 + goto out;
3255 +
3256 + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
3257 +- if (event & QLCNIC_MBX_ASYNC_EVENT)
3258 ++ if (event & QLCNIC_MBX_ASYNC_EVENT) {
3259 + __qlcnic_83xx_process_aen(adapter);
3260 +- else
3261 +- qlcnic_83xx_notify_mbx_response(mbx);
3262 ++ } else {
3263 ++ if (mbx->rsp_status != rsp_status)
3264 ++ qlcnic_83xx_notify_mbx_response(mbx);
3265 ++ else
3266 ++ adapter->stats.mbx_spurious_intr++;
3267 ++ }
3268 +
3269 + out:
3270 + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
3271 +@@ -4025,10 +4029,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3272 + struct qlcnic_adapter *adapter = mbx->adapter;
3273 + struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
3274 + struct device *dev = &adapter->pdev->dev;
3275 +- atomic_t *rsp_status = &mbx->rsp_status;
3276 + struct list_head *head = &mbx->cmd_q;
3277 + struct qlcnic_hardware_context *ahw;
3278 + struct qlcnic_cmd_args *cmd = NULL;
3279 ++ unsigned long flags;
3280 +
3281 + ahw = adapter->ahw;
3282 +
3283 +@@ -4038,7 +4042,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3284 + return;
3285 + }
3286 +
3287 +- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
3288 ++ spin_lock_irqsave(&mbx->aen_lock, flags);
3289 ++ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
3290 ++ spin_unlock_irqrestore(&mbx->aen_lock, flags);
3291 +
3292 + spin_lock(&mbx->queue_lock);
3293 +
3294 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
3295 +index 494e8105adee..0a2318cad34d 100644
3296 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
3297 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
3298 +@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
3299 + QLC_OFF(stats.mac_filter_limit_overrun)},
3300 + {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
3301 + QLC_OFF(stats.spurious_intr)},
3302 +-
3303 ++ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
3304 ++ QLC_OFF(stats.mbx_spurious_intr)},
3305 + };
3306 +
3307 + static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
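The qlcnic hunks replace the atomic_t status word with a plain u32 guarded by aen_lock, and add a counter for spurious mailbox interrupts. An atomic_t only makes each individual read or write atomic; here the read-compare-reset sequence has to be consistent as a whole against the interrupt handler, which only a lock can guarantee. A rough sketch of the idea (structure and constant names hypothetical):

    /* The status word is read and reset under the same lock the IRQ uses. */
    struct demo_mailbox {
        spinlock_t aen_lock;
        u32 rsp_status;     /* plain u32: every access is lock-protected */
    };

    static void demo_begin_command(struct demo_mailbox *mbx)
    {
        unsigned long flags;

        spin_lock_irqsave(&mbx->aen_lock, flags);
        mbx->rsp_status = DEMO_MBX_RESPONSE_WAIT;
        spin_unlock_irqrestore(&mbx->aen_lock, flags);
    }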
3308 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
3309 +index 6c904a6cad2a..7bbb04124dc1 100644
3310 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
3311 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
3312 +@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
3313 + return;
3314 + }
3315 + skb_reserve(new_skb, NET_IP_ALIGN);
3316 ++
3317 ++ pci_dma_sync_single_for_cpu(qdev->pdev,
3318 ++ dma_unmap_addr(sbq_desc, mapaddr),
3319 ++ dma_unmap_len(sbq_desc, maplen),
3320 ++ PCI_DMA_FROMDEVICE);
3321 ++
3322 + memcpy(skb_put(new_skb, length), skb->data, length);
3323 ++
3324 ++ pci_dma_sync_single_for_device(qdev->pdev,
3325 ++ dma_unmap_addr(sbq_desc, mapaddr),
3326 ++ dma_unmap_len(sbq_desc, maplen),
3327 ++ PCI_DMA_FROMDEVICE);
3328 + skb = new_skb;
3329 +
3330 + /* Frame error, so drop the packet. */
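
The qlge fix brackets the CPU-side memcpy() with DMA sync calls: on non-coherent platforms the CPU could otherwise read stale cache lines from a buffer the device has just written. A hedged sketch of the general rule, using the generic dma_* API rather than the legacy pci_dma_* wrappers shown in the hunk:

    /* CPU access to a streaming DMA buffer must be fenced by sync calls. */
    static void demo_copy_rx_frame(struct device *dev, dma_addr_t mapaddr,
                                   size_t maplen, void *dst, const void *src,
                                   size_t len)
    {
        /* Give the buffer to the CPU: invalidate stale cache lines. */
        dma_sync_single_for_cpu(dev, mapaddr, maplen, DMA_FROM_DEVICE);

        memcpy(dst, src, len);

        /* Hand it back to the device for reuse. */
        dma_sync_single_for_device(dev, mapaddr, maplen, DMA_FROM_DEVICE);
    }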
3331 +diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
3332 +index f77b58911558..f55be6ddf3a7 100644
3333 +--- a/drivers/net/ethernet/qualcomm/qca_spi.c
3334 ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
3335 +@@ -812,7 +812,7 @@ qcaspi_netdev_setup(struct net_device *dev)
3336 + dev->netdev_ops = &qcaspi_netdev_ops;
3337 + qcaspi_set_ethtool_ops(dev);
3338 + dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
3339 +- dev->flags = IFF_MULTICAST;
3340 ++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
3341 + dev->tx_queue_len = 100;
3342 +
3343 + qca = netdev_priv(dev);
3344 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
3345 +index b474dbfcdb4f..c44bae495804 100644
3346 +--- a/drivers/net/ethernet/renesas/sh_eth.c
3347 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
3348 +@@ -1112,6 +1112,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
3349 + int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
3350 + int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
3351 + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
3352 ++ dma_addr_t dma_addr;
3353 +
3354 + mdp->cur_rx = 0;
3355 + mdp->cur_tx = 0;
3356 +@@ -1125,18 +1126,23 @@ static void sh_eth_ring_format(struct net_device *ndev)
3357 + /* skb */
3358 + mdp->rx_skbuff[i] = NULL;
3359 + skb = netdev_alloc_skb(ndev, skbuff_size);
3360 +- mdp->rx_skbuff[i] = skb;
3361 + if (skb == NULL)
3362 + break;
3363 + sh_eth_set_receive_align(skb);
3364 +
3365 + /* RX descriptor */
3366 + rxdesc = &mdp->rx_ring[i];
3367 +- /* The size of the buffer is a multiple of 16 bytes. */
3368 +- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
3369 +- dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
3370 +- DMA_FROM_DEVICE);
3371 +- rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
3372 ++ /* The size of the buffer is a multiple of 32 bytes. */
3373 ++ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
3374 ++ dma_addr = dma_map_single(&ndev->dev, skb->data,
3375 ++ rxdesc->buffer_length,
3376 ++ DMA_FROM_DEVICE);
3377 ++ if (dma_mapping_error(&ndev->dev, dma_addr)) {
3378 ++ kfree_skb(skb);
3379 ++ break;
3380 ++ }
3381 ++ mdp->rx_skbuff[i] = skb;
3382 ++ rxdesc->addr = dma_addr;
3383 + rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
3384 +
3385 + /* Rx descriptor address set */
3386 +@@ -1151,7 +1157,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
3387 + mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
3388 +
3389 + /* Mark the last entry as wrapping the ring. */
3390 +- rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
3391 ++ if (rxdesc)
3392 ++ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
3393 +
3394 + memset(mdp->tx_ring, 0, tx_ringsize);
3395 +
3396 +@@ -1391,6 +1398,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
3397 + u16 pkt_len = 0;
3398 + u32 desc_status;
3399 + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
3400 ++ dma_addr_t dma_addr;
3401 +
3402 + rxdesc = &mdp->rx_ring[entry];
3403 + while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
3404 +@@ -1441,9 +1449,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
3405 + mdp->rx_skbuff[entry] = NULL;
3406 + if (mdp->cd->rpadir)
3407 + skb_reserve(skb, NET_IP_ALIGN);
3408 +- dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
3409 +- ALIGN(mdp->rx_buf_sz, 16),
3410 +- DMA_FROM_DEVICE);
3411 ++ dma_unmap_single(&ndev->dev, rxdesc->addr,
3412 ++ ALIGN(mdp->rx_buf_sz, 32),
3413 ++ DMA_FROM_DEVICE);
3414 + skb_put(skb, pkt_len);
3415 + skb->protocol = eth_type_trans(skb, ndev);
3416 + netif_receive_skb(skb);
3417 +@@ -1458,20 +1466,25 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
3418 + for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
3419 + entry = mdp->dirty_rx % mdp->num_rx_ring;
3420 + rxdesc = &mdp->rx_ring[entry];
3421 +- /* The size of the buffer is 16 byte boundary. */
3422 +- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
3423 ++ /* The size of the buffer is 32 byte boundary. */
3424 ++ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
3425 +
3426 + if (mdp->rx_skbuff[entry] == NULL) {
3427 + skb = netdev_alloc_skb(ndev, skbuff_size);
3428 +- mdp->rx_skbuff[entry] = skb;
3429 + if (skb == NULL)
3430 + break; /* Better luck next round. */
3431 + sh_eth_set_receive_align(skb);
3432 +- dma_map_single(&ndev->dev, skb->data,
3433 +- rxdesc->buffer_length, DMA_FROM_DEVICE);
3434 ++ dma_addr = dma_map_single(&ndev->dev, skb->data,
3435 ++ rxdesc->buffer_length,
3436 ++ DMA_FROM_DEVICE);
3437 ++ if (dma_mapping_error(&ndev->dev, dma_addr)) {
3438 ++ kfree_skb(skb);
3439 ++ break;
3440 ++ }
3441 ++ mdp->rx_skbuff[entry] = skb;
3442 +
3443 + skb_checksum_none_assert(skb);
3444 +- rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
3445 ++ rxdesc->addr = dma_addr;
3446 + }
3447 + if (entry >= mdp->num_rx_ring - 1)
3448 + rxdesc->status |=
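
Two recurring points in the sh_eth hunks: dma_map_single() can fail and must be checked with dma_mapping_error() before the descriptor is published, and the skb pointer is stored in the ring only after the mapping succeeded, so teardown never tries to unmap a buffer that was never mapped. A sketch, assuming a simplified ring layout:

    /* Map an rx buffer; publish it to the ring only on success. */
    static int demo_refill_slot(struct net_device *ndev, struct demo_ring *ring,
                                int entry, struct sk_buff *skb, size_t buf_len)
    {
        dma_addr_t dma_addr;

        dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
                                  DMA_FROM_DEVICE);
        if (dma_mapping_error(&ndev->dev, dma_addr)) {
            kfree_skb(skb);         /* slot stays empty; retry later */
            return -ENOMEM;
        }

        ring->skb[entry] = skb;     /* only now does the ring own the skb */
        ring->desc[entry].addr = dma_addr;
        return 0;
    }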
3449 +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
3450 +index 63ec209cdfd3..010009d64017 100644
3451 +--- a/drivers/net/ethernet/sfc/ef10.c
3452 ++++ b/drivers/net/ethernet/sfc/ef10.c
3453 +@@ -452,6 +452,17 @@ fail:
3454 + return rc;
3455 + }
3456 +
3457 ++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
3458 ++{
3459 ++ struct efx_channel *channel;
3460 ++ struct efx_tx_queue *tx_queue;
3461 ++
3462 ++ /* All our existing PIO buffers went away */
3463 ++ efx_for_each_channel(channel, efx)
3464 ++ efx_for_each_channel_tx_queue(tx_queue, channel)
3465 ++ tx_queue->piobuf = NULL;
3466 ++}
3467 ++
3468 + #else /* !EFX_USE_PIO */
3469 +
3470 + static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
3471 +@@ -468,6 +479,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
3472 + {
3473 + }
3474 +
3475 ++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
3476 ++{
3477 ++}
3478 ++
3479 + #endif /* EFX_USE_PIO */
3480 +
3481 + static void efx_ef10_remove(struct efx_nic *efx)
3482 +@@ -699,6 +714,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
3483 + nic_data->must_realloc_vis = true;
3484 + nic_data->must_restore_filters = true;
3485 + nic_data->must_restore_piobufs = true;
3486 ++ efx_ef10_forget_old_piobufs(efx);
3487 + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
3488 + }
3489 +
3490 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
3491 +index fc7b3d76f08e..e3fbbbbd84e7 100644
3492 +--- a/drivers/net/ppp/ppp_generic.c
3493 ++++ b/drivers/net/ppp/ppp_generic.c
3494 +@@ -561,7 +561,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
3495 +
3496 + static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3497 + {
3498 +- struct ppp_file *pf = file->private_data;
3499 ++ struct ppp_file *pf;
3500 + struct ppp *ppp;
3501 + int err = -EFAULT, val, val2, i;
3502 + struct ppp_idle idle;
3503 +@@ -571,9 +571,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3504 + void __user *argp = (void __user *)arg;
3505 + int __user *p = argp;
3506 +
3507 +- if (!pf)
3508 +- return ppp_unattached_ioctl(current->nsproxy->net_ns,
3509 +- pf, file, cmd, arg);
3510 ++ mutex_lock(&ppp_mutex);
3511 ++
3512 ++ pf = file->private_data;
3513 ++ if (!pf) {
3514 ++ err = ppp_unattached_ioctl(current->nsproxy->net_ns,
3515 ++ pf, file, cmd, arg);
3516 ++ goto out;
3517 ++ }
3518 +
3519 + if (cmd == PPPIOCDETACH) {
3520 + /*
3521 +@@ -588,7 +593,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3522 + * this fd and reopening /dev/ppp.
3523 + */
3524 + err = -EINVAL;
3525 +- mutex_lock(&ppp_mutex);
3526 + if (pf->kind == INTERFACE) {
3527 + ppp = PF_TO_PPP(pf);
3528 + if (file == ppp->owner)
3529 +@@ -600,15 +604,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3530 + } else
3531 + pr_warn("PPPIOCDETACH file->f_count=%ld\n",
3532 + atomic_long_read(&file->f_count));
3533 +- mutex_unlock(&ppp_mutex);
3534 +- return err;
3535 ++ goto out;
3536 + }
3537 +
3538 + if (pf->kind == CHANNEL) {
3539 + struct channel *pch;
3540 + struct ppp_channel *chan;
3541 +
3542 +- mutex_lock(&ppp_mutex);
3543 + pch = PF_TO_CHANNEL(pf);
3544 +
3545 + switch (cmd) {
3546 +@@ -630,17 +632,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3547 + err = chan->ops->ioctl(chan, cmd, arg);
3548 + up_read(&pch->chan_sem);
3549 + }
3550 +- mutex_unlock(&ppp_mutex);
3551 +- return err;
3552 ++ goto out;
3553 + }
3554 +
3555 + if (pf->kind != INTERFACE) {
3556 + /* can't happen */
3557 + pr_err("PPP: not interface or channel??\n");
3558 +- return -EINVAL;
3559 ++ err = -EINVAL;
3560 ++ goto out;
3561 + }
3562 +
3563 +- mutex_lock(&ppp_mutex);
3564 + ppp = PF_TO_PPP(pf);
3565 + switch (cmd) {
3566 + case PPPIOCSMRU:
3567 +@@ -815,7 +816,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3568 + default:
3569 + err = -ENOTTY;
3570 + }
3571 ++
3572 ++out:
3573 + mutex_unlock(&ppp_mutex);
3574 ++
3575 + return err;
3576 + }
3577 +
3578 +@@ -828,7 +832,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
3579 + struct ppp_net *pn;
3580 + int __user *p = (int __user *)arg;
3581 +
3582 +- mutex_lock(&ppp_mutex);
3583 + switch (cmd) {
3584 + case PPPIOCNEWUNIT:
3585 + /* Create a new ppp unit */
3586 +@@ -879,7 +882,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
3587 + default:
3588 + err = -ENOTTY;
3589 + }
3590 +- mutex_unlock(&ppp_mutex);
3591 ++
3592 + return err;
3593 + }
3594 +
3595 +@@ -2242,7 +2245,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
3596 +
3597 + pch->ppp = NULL;
3598 + pch->chan = chan;
3599 +- pch->chan_net = net;
3600 ++ pch->chan_net = get_net(net);
3601 + chan->ppp = pch;
3602 + init_ppp_file(&pch->file, CHANNEL);
3603 + pch->file.hdrlen = chan->hdrlen;
3604 +@@ -2339,6 +2342,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
3605 + spin_lock_bh(&pn->all_channels_lock);
3606 + list_del(&pch->list);
3607 + spin_unlock_bh(&pn->all_channels_lock);
3608 ++ put_net(pch->chan_net);
3609 ++ pch->chan_net = NULL;
3610 +
3611 + pch->file.dead = 1;
3612 + wake_up_interruptible(&pch->file.rwait);
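
The ppp_ioctl() rework takes ppp_mutex before file->private_data is even read, so a concurrent PPPIOCDETACH can no longer free the ppp_file out from under the ioctl, and every exit path funnels through a single unlock at out:. The second part pins the channel's network namespace with get_net()/put_net() so it cannot be freed while the channel still references it. A condensed sketch of the single-lock ioctl shape (demo_* names hypothetical):

    /* One lock, one exit: private_data is only read under the mutex. */
    static long demo_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
    {
        struct demo_file *pf;
        long err = -ENOTTY;

        mutex_lock(&demo_mutex);
        pf = file->private_data;    /* safe: detach also holds demo_mutex */
        if (!pf) {
            err = demo_unattached_ioctl(file, cmd, arg);
            goto out;
        }
        /* ... per-kind command handling, every branch ending in goto out ... */
    out:
        mutex_unlock(&demo_mutex);
        return err;
    }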
3613 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
3614 +index 9dd3746994a4..a4685a22f665 100644
3615 +--- a/drivers/net/tun.c
3616 ++++ b/drivers/net/tun.c
3617 +@@ -499,11 +499,13 @@ static void tun_detach_all(struct net_device *dev)
3618 + for (i = 0; i < n; i++) {
3619 + tfile = rtnl_dereference(tun->tfiles[i]);
3620 + BUG_ON(!tfile);
3621 ++ tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
3622 + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
3623 + RCU_INIT_POINTER(tfile->tun, NULL);
3624 + --tun->numqueues;
3625 + }
3626 + list_for_each_entry(tfile, &tun->disabled, next) {
3627 ++ tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
3628 + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
3629 + RCU_INIT_POINTER(tfile->tun, NULL);
3630 + }
3631 +@@ -558,6 +560,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
3632 + goto out;
3633 + }
3634 + tfile->queue_index = tun->numqueues;
3635 ++ tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
3636 + rcu_assign_pointer(tfile->tun, tun);
3637 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
3638 + tun->numqueues++;
3639 +@@ -1356,9 +1359,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
3640 + if (!len)
3641 + return ret;
3642 +
3643 +- if (tun->dev->reg_state != NETREG_REGISTERED)
3644 +- return -EIO;
3645 +-
3646 + /* Read frames from queue */
3647 + skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
3648 + &peeked, &off, &err);
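
The tun fix replaces the racy reg_state test in the read path with socket state: tun_detach_all() marks each queue's socket RCV_SHUTDOWN before waking readers, and tun_attach() clears the flag again, so blocked readers see EOF through the normal datagram path instead of sampling device state without synchronization. Sketch of the wake-on-shutdown side:

    /* Mark the socket shut down before waking readers blocked in recv. */
    static void demo_detach_queue(struct sock *sk)
    {
        sk->sk_shutdown = RCV_SHUTDOWN; /* pending reads now return EOF */
        sk->sk_data_ready(sk);          /* wake anyone sleeping in recv */
    }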
3649 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
3650 +index 8067b8fbb0ee..614b4ca6420a 100644
3651 +--- a/drivers/net/usb/cdc_ncm.c
3652 ++++ b/drivers/net/usb/cdc_ncm.c
3653 +@@ -815,7 +815,11 @@ advance:
3654 +
3655 + iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
3656 +
3657 +- /* reset data interface */
3658 ++ /* Reset data interface. Some devices will not reset properly
3659 ++ * unless they are configured first. Toggle the altsetting to
3660 ++ * force a reset.

3661 ++ */
3662 ++ usb_set_interface(dev->udev, iface_no, data_altsetting);
3663 + temp = usb_set_interface(dev->udev, iface_no, 0);
3664 + if (temp) {
3665 + dev_dbg(&intf->dev, "set interface failed\n");
3666 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3667 +index a5771515d9ab..73f55a98798f 100644
3668 +--- a/drivers/net/usb/qmi_wwan.c
3669 ++++ b/drivers/net/usb/qmi_wwan.c
3670 +@@ -748,6 +748,7 @@ static const struct usb_device_id products[] = {
3671 + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
3672 + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
3673 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
3674 ++ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
3675 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
3676 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
3677 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
3678 +@@ -766,8 +767,10 @@ static const struct usb_device_id products[] = {
3679 + {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
3680 + {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
3681 + {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
3682 +- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
3683 +- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
3684 ++ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
3685 ++ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
3686 ++ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
3687 ++ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
3688 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
3689 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
3690 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
3691 +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
3692 +index 7a598932f922..bb570916e340 100644
3693 +--- a/drivers/net/usb/usbnet.c
3694 ++++ b/drivers/net/usb/usbnet.c
3695 +@@ -1755,6 +1755,13 @@ out3:
3696 + if (info->unbind)
3697 + info->unbind (dev, udev);
3698 + out1:
3699 ++ /* subdrivers must undo all they did in bind() if they
3700 ++ * fail it, but we may fail later and a deferred kevent
3701 ++ * may trigger an error resubmitting itself and, worse,
3702 ++ * schedule a timer. So we kill it all just in case.
3703 ++ */
3704 ++ cancel_work_sync(&dev->kevent);
3705 ++ del_timer_sync(&dev->delay);
3706 + free_netdev(net);
3707 + out:
3708 + return status;
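
The usbnet hunk is the error-path mirror of the comment it adds: bind() may already have scheduled the kevent work or the delay timer, and free_netdev() would then free memory a callback still uses. Quiescing all deferred work before freeing is the general rule; a sketch of the unwind order:

    /* Quiesce anything bind() may have scheduled before freeing the netdev. */
    static void demo_probe_unwind(struct demo_usbnet *dev, struct net_device *net)
    {
        cancel_work_sync(&dev->kevent); /* waits out a running work item */
        del_timer_sync(&dev->delay);    /* waits out a running timer handler */
        free_netdev(net);               /* nothing can touch dev anymore */
    }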
3709 +diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
3710 +index 44541dbc5c28..69b994f3b8c5 100644
3711 +--- a/drivers/net/wan/farsync.c
3712 ++++ b/drivers/net/wan/farsync.c
3713 +@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3714 + dev->mem_start = card->phys_mem
3715 + + BUF_OFFSET ( txBuffer[i][0][0]);
3716 + dev->mem_end = card->phys_mem
3717 +- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
3718 ++ + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
3719 + dev->base_addr = card->pci_conf;
3720 + dev->irq = card->irq;
3721 +
3722 +diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
3723 +index 971d770722cf..2ac05486424b 100644
3724 +--- a/drivers/net/wireless/ath/ath9k/eeprom.c
3725 ++++ b/drivers/net/wireless/ath/ath9k/eeprom.c
3726 +@@ -408,10 +408,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
3727 +
3728 + if (match) {
3729 + if (AR_SREV_9287(ah)) {
3730 +- /* FIXME: array overrun? */
3731 + for (i = 0; i < numXpdGains; i++) {
3732 + minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
3733 +- maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
3734 ++ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
3735 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3736 + data_9287[idxL].pwrPdg[i],
3737 + data_9287[idxL].vpdPdg[i],
3738 +@@ -421,7 +420,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
3739 + } else if (eeprom_4k) {
3740 + for (i = 0; i < numXpdGains; i++) {
3741 + minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
3742 +- maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
3743 ++ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
3744 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3745 + data_4k[idxL].pwrPdg[i],
3746 + data_4k[idxL].vpdPdg[i],
3747 +@@ -431,7 +430,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
3748 + } else {
3749 + for (i = 0; i < numXpdGains; i++) {
3750 + minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
3751 +- maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
3752 ++ maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
3753 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3754 + data_def[idxL].pwrPdg[i],
3755 + data_def[idxL].vpdPdg[i],
3756 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3757 +index ce0aa47222f6..94ff20e9a8a0 100644
3758 +--- a/drivers/pci/pci.c
3759 ++++ b/drivers/pci/pci.c
3760 +@@ -10,6 +10,8 @@
3761 + #include <linux/kernel.h>
3762 + #include <linux/delay.h>
3763 + #include <linux/init.h>
3764 ++#include <linux/of.h>
3765 ++#include <linux/of_pci.h>
3766 + #include <linux/pci.h>
3767 + #include <linux/pm.h>
3768 + #include <linux/slab.h>
3769 +@@ -4490,6 +4492,55 @@ int pci_get_new_domain_nr(void)
3770 + {
3771 + return atomic_inc_return(&__domain_nr);
3772 + }
3773 ++
3774 ++#ifdef CONFIG_PCI_DOMAINS_GENERIC
3775 ++void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
3776 ++{
3777 ++ static int use_dt_domains = -1;
3778 ++ int domain = -1;
3779 ++
3780 ++ if (parent)
3781 ++ domain = of_get_pci_domain_nr(parent->of_node);
3782 ++ /*
3783 ++ * Check DT domain and use_dt_domains values.
3784 ++ *
3785 ++ * If DT domain property is valid (domain >= 0) and
3786 ++ * use_dt_domains != 0, the DT assignment is valid since this means
3787 ++ * we have not previously allocated a domain number by using
3788 ++ * pci_get_new_domain_nr(); we should also update use_dt_domains to
3789 ++ * 1, to indicate that we have just assigned a domain number from
3790 ++ * DT.
3791 ++ *
3792 ++ * If DT domain property value is not valid (i.e. domain < 0), and we
3793 ++ * have not previously assigned a domain number from DT
3794 ++ * (use_dt_domains != 1) we should assign a domain number by
3795 ++ * using the:
3796 ++ *
3797 ++ * pci_get_new_domain_nr()
3798 ++ *
3799 ++ * API and update the use_dt_domains value to keep track of the
3800 ++ * method we are using to assign domain numbers (use_dt_domains = 0).
3801 ++ *
3802 ++ * All other combinations imply we have a platform that is trying
3803 ++ * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
3804 ++ * which is a recipe for domain mishandling and it is prevented by
3805 ++ * invalidating the domain value (domain = -1) and printing a
3806 ++ * corresponding error.
3807 ++ */
3808 ++ if (domain >= 0 && use_dt_domains) {
3809 ++ use_dt_domains = 1;
3810 ++ } else if (domain < 0 && use_dt_domains != 1) {
3811 ++ use_dt_domains = 0;
3812 ++ domain = pci_get_new_domain_nr();
3813 ++ } else {
3814 ++ dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
3815 ++ parent->of_node->full_name);
3816 ++ domain = -1;
3817 ++ }
3818 ++
3819 ++ bus->domain_nr = domain;
3820 ++}
3821 ++#endif
3822 + #endif
3823 +
3824 + /**
3825 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
3826 +index 6f806f93662a..857f29ba406a 100644
3827 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
3828 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
3829 +@@ -205,9 +205,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
3830 + pin_reg = &info->pin_regs[pin_id];
3831 +
3832 + if (pin_reg->mux_reg == -1) {
3833 +- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
3834 ++ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
3835 + info->pins[pin_id].name);
3836 +- return -EINVAL;
3837 ++ continue;
3838 + }
3839 +
3840 + if (info->flags & SHARE_MUX_CONF_REG) {
3841 +diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
3842 +index 746db6acf648..25d5a21c51e5 100644
3843 +--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
3844 ++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
3845 +@@ -1025,7 +1025,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
3846 + int pullidx = 0;
3847 +
3848 + if (pull)
3849 +- pullidx = data_out ? 1 : 2;
3850 ++ pullidx = data_out ? 2 : 1;
3851 +
3852 + seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
3853 + gpio,
3854 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
3855 +index fb94b772ad62..f94d46c57dc5 100644
3856 +--- a/drivers/pinctrl/pinctrl-single.c
3857 ++++ b/drivers/pinctrl/pinctrl-single.c
3858 +@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
3859 +
3860 + /* Parse pins in each row from LSB */
3861 + while (mask) {
3862 +- bit_pos = ffs(mask);
3863 ++ bit_pos = __ffs(mask);
3864 + pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
3865 +- mask_pos = ((pcs->fmask) << (bit_pos - 1));
3866 ++ mask_pos = ((pcs->fmask) << bit_pos);
3867 + val_pos = val & mask_pos;
3868 + submask = mask & mask_pos;
3869 +
3870 +@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
3871 + else
3872 + mask &= ~soc_mask;
3873 + pcs->write(mask, pcswi->reg);
3874 ++
3875 ++ /* flush posted write */
3876 ++ mask = pcs->read(pcswi->reg);
3877 + raw_spin_unlock(&pcs->lock);
3878 + }
3879 +
3880 +@@ -1851,7 +1854,7 @@ static int pcs_probe(struct platform_device *pdev)
3881 + ret = of_property_read_u32(np, "pinctrl-single,function-mask",
3882 + &pcs->fmask);
3883 + if (!ret) {
3884 +- pcs->fshift = ffs(pcs->fmask) - 1;
3885 ++ pcs->fshift = __ffs(pcs->fmask);
3886 + pcs->fmax = pcs->fmask >> pcs->fshift;
3887 + } else {
3888 + /* If mask property doesn't exist, function mux is invalid. */
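
The pinctrl-single fix is the classic ffs()/__ffs() off-by-one: ffs() returns a 1-based bit position (0 meaning no bit set), while __ffs() is 0-based and assumes a non-zero argument. The old code compensated with "- 1" in one place but not the other, corrupting mask_pos. A small userspace demonstration of the difference (my__ffs() stands in for the kernel's __ffs()):

    #include <stdio.h>
    #include <strings.h>    /* POSIX ffs() */

    /* 0-based first-set-bit, like the kernel's __ffs(); x must be nonzero. */
    static unsigned int my__ffs(unsigned long x)
    {
        return __builtin_ctzl(x);
    }

    int main(void)
    {
        unsigned long mask = 0x30;  /* bits 4 and 5 set */

        printf("ffs(0x30)   = %d (1-based)\n", ffs((int)mask));  /* 5 */
        printf("__ffs(0x30) = %u (0-based)\n", my__ffs(mask));   /* 4 */

        /* Shifting a field mask into place needs the 0-based position. */
        printf("fmask << __ffs = 0x%lx\n", 0x7UL << my__ffs(mask)); /* 0x70 */
        return 0;
    }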
3889 +diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
3890 +index 0ab5cbeeb797..c53fe2645548 100644
3891 +--- a/drivers/regulator/s5m8767.c
3892 ++++ b/drivers/regulator/s5m8767.c
3893 +@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
3894 + }
3895 + }
3896 +
3897 +- if (i < s5m8767->num_regulators)
3898 +- *enable_ctrl =
3899 +- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
3900 ++ if (i >= s5m8767->num_regulators)
3901 ++ return -EINVAL;
3902 ++
3903 ++ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
3904 +
3905 + return 0;
3906 + }
3907 +@@ -936,8 +937,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
3908 + else
3909 + regulators[id].vsel_mask = 0xff;
3910 +
3911 +- s5m8767_get_register(s5m8767, id, &enable_reg,
3912 ++ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
3913 + &enable_val);
3914 ++ if (ret) {
3915 ++ dev_err(s5m8767->dev, "error reading registers\n");
3916 ++ return ret;
3917 ++ }
3918 + regulators[id].enable_reg = enable_reg;
3919 + regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
3920 + regulators[id].enable_val = enable_val;
3921 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
3922 +index b936bb4096b5..280584b2813b 100644
3923 +--- a/drivers/rtc/rtc-hym8563.c
3924 ++++ b/drivers/rtc/rtc-hym8563.c
3925 +@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
3926 + * it does not seem to carry it over a subsequent write/read.
3927 + * So we'll limit ourself to 100 years, starting at 2000 for now.
3928 + */
3929 +- buf[6] = tm->tm_year - 100;
3930 ++ buf[6] = bin2bcd(tm->tm_year - 100);
3931 +
3932 + /*
3933 + * CTL1 only contains TEST-mode bits apart from stop,
3934 +diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
3935 +index cf73e969c8cc..b69d409c9d9c 100644
3936 +--- a/drivers/rtc/rtc-max77686.c
3937 ++++ b/drivers/rtc/rtc-max77686.c
3938 +@@ -463,7 +463,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
3939 +
3940 + info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
3941 + MAX77686_RTCIRQ_RTCA1);
3942 +- if (!info->virq) {
3943 ++ if (info->virq <= 0) {
3944 + ret = -ENXIO;
3945 + goto err_rtc;
3946 + }
3947 +diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
3948 +index 88c9c92e89fd..4b0966ed5394 100644
3949 +--- a/drivers/rtc/rtc-vr41xx.c
3950 ++++ b/drivers/rtc/rtc-vr41xx.c
3951 +@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
3952 + }
3953 +
3954 + static const struct rtc_class_ops vr41xx_rtc_ops = {
3955 +- .release = vr41xx_rtc_release,
3956 +- .ioctl = vr41xx_rtc_ioctl,
3957 +- .read_time = vr41xx_rtc_read_time,
3958 +- .set_time = vr41xx_rtc_set_time,
3959 +- .read_alarm = vr41xx_rtc_read_alarm,
3960 +- .set_alarm = vr41xx_rtc_set_alarm,
3961 ++ .release = vr41xx_rtc_release,
3962 ++ .ioctl = vr41xx_rtc_ioctl,
3963 ++ .read_time = vr41xx_rtc_read_time,
3964 ++ .set_time = vr41xx_rtc_set_time,
3965 ++ .read_alarm = vr41xx_rtc_read_alarm,
3966 ++ .set_alarm = vr41xx_rtc_set_alarm,
3967 ++ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
3968 + };
3969 +
3970 + static int rtc_probe(struct platform_device *pdev)
3971 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
3972 +index 0b2c53af85c7..6a6b3db5780d 100644
3973 +--- a/drivers/scsi/lpfc/lpfc_init.c
3974 ++++ b/drivers/scsi/lpfc/lpfc_init.c
3975 +@@ -2822,7 +2822,7 @@ lpfc_online(struct lpfc_hba *phba)
3976 + }
3977 +
3978 + vports = lpfc_create_vport_work_array(phba);
3979 +- if (vports != NULL)
3980 ++ if (vports != NULL) {
3981 + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3982 + struct Scsi_Host *shost;
3983 + shost = lpfc_shost_from_vport(vports[i]);
3984 +@@ -2839,7 +2839,8 @@ lpfc_online(struct lpfc_hba *phba)
3985 + }
3986 + spin_unlock_irq(shost->host_lock);
3987 + }
3988 +- lpfc_destroy_vport_work_array(phba, vports);
3989 ++ }
3990 ++ lpfc_destroy_vport_work_array(phba, vports);
3991 +
3992 + lpfc_unblock_mgmt_io(phba);
3993 + return 0;
3994 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
3995 +index 6e503802947a..b1af2987b0c8 100644
3996 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
3997 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
3998 +@@ -6096,12 +6096,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
3999 + }
4000 +
4001 + for (i = 0; i < ioc->sge_count; i++) {
4002 +- if (kbuff_arr[i])
4003 ++ if (kbuff_arr[i]) {
4004 + dma_free_coherent(&instance->pdev->dev,
4005 + le32_to_cpu(kern_sge32[i].length),
4006 + kbuff_arr[i],
4007 + le32_to_cpu(kern_sge32[i].phys_addr));
4008 + kbuff_arr[i] = NULL;
4009 ++ }
4010 + }
4011 +
4012 + if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
4013 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4014 +index 3d12c52c3f81..29a67a85ee71 100644
4015 +--- a/drivers/scsi/scsi_error.c
4016 ++++ b/drivers/scsi/scsi_error.c
4017 +@@ -1115,7 +1115,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
4018 + */
4019 + void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
4020 + {
4021 +- scmd->device->host->host_failed--;
4022 + scmd->eh_eflags = 0;
4023 + list_move_tail(&scmd->eh_entry, done_q);
4024 + }
4025 +@@ -2213,6 +2212,9 @@ int scsi_error_handler(void *data)
4026 + else
4027 + scsi_unjam_host(shost);
4028 +
4029 ++ /* All scmds have been handled */
4030 ++ shost->host_failed = 0;
4031 ++
4032 + /*
4033 + * Note - if the above fails completely, the action is to take
4034 + * individual devices offline and flush the queue of any
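
The scsi_error change moves host_failed accounting out of scsi_eh_finish_cmd(): rather than decrementing once per recovered command, the error-handler thread zeroes the counter in one step after scsi_unjam_host() (or the transport's strategy handler) has disposed of every failed command, so the finish path no longer needs to synchronize a shared counter. Sketch (demo_* names hypothetical):

    /* Per-command decrements need synchronization; one bulk reset does not. */
    static void demo_error_handler(struct demo_host *shost)
    {
        demo_unjam_host(shost);     /* EH-finishes or retries every scmd */

        /* All failed commands have been handled at this point. */
        shost->host_failed = 0;
    }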
4035 +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
4036 +index 87bc16f491f0..4703aeb7c25d 100644
4037 +--- a/drivers/spi/spi-rockchip.c
4038 ++++ b/drivers/spi/spi-rockchip.c
4039 +@@ -264,7 +264,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
4040 + static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
4041 + {
4042 + u32 ser;
4043 +- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
4044 ++ struct spi_master *master = spi->master;
4045 ++ struct rockchip_spi *rs = spi_master_get_devdata(master);
4046 ++
4047 ++ pm_runtime_get_sync(rs->dev);
4048 +
4049 + ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
4050 +
4051 +@@ -289,6 +292,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
4052 + ser &= ~(1 << spi->chip_select);
4053 +
4054 + writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
4055 ++
4056 ++ pm_runtime_put_sync(rs->dev);
4057 + }
4058 +
4059 + static int rockchip_spi_prepare_message(struct spi_master *master,
4060 +diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
4061 +index 85204c93f3d3..19169bf9bf4d 100644
4062 +--- a/drivers/spi/spi-sun4i.c
4063 ++++ b/drivers/spi/spi-sun4i.c
4064 +@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
4065 + {
4066 + struct sun4i_spi *sspi = spi_master_get_devdata(master);
4067 + unsigned int mclk_rate, div, timeout;
4068 ++ unsigned int start, end, tx_time;
4069 + unsigned int tx_len = 0;
4070 + int ret = 0;
4071 + u32 reg;
4072 +
4073 + /* We don't support transfer larger than the FIFO */
4074 + if (tfr->len > SUN4I_FIFO_DEPTH)
4075 +- return -EINVAL;
4076 ++ return -EMSGSIZE;
4077 ++
4078 ++ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
4079 ++ return -EMSGSIZE;
4080 +
4081 + reinit_completion(&sspi->done);
4082 + sspi->tx_buf = tfr->tx_buf;
4083 +@@ -229,8 +233,8 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
4084 +
4085 + /* Ensure that we have a parent clock fast enough */
4086 + mclk_rate = clk_get_rate(sspi->mclk);
4087 +- if (mclk_rate < (2 * spi->max_speed_hz)) {
4088 +- clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
4089 ++ if (mclk_rate < (2 * tfr->speed_hz)) {
4090 ++ clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
4091 + mclk_rate = clk_get_rate(sspi->mclk);
4092 + }
4093 +
4094 +@@ -248,14 +252,14 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
4095 + * First try CDR2, and if we can't reach the expected
4096 + * frequency, fall back to CDR1.
4097 + */
4098 +- div = mclk_rate / (2 * spi->max_speed_hz);
4099 ++ div = mclk_rate / (2 * tfr->speed_hz);
4100 + if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
4101 + if (div > 0)
4102 + div--;
4103 +
4104 + reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
4105 + } else {
4106 +- div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
4107 ++ div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
4108 + reg = SUN4I_CLK_CTL_CDR1(div);
4109 + }
4110 +
4111 +@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
4112 + sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
4113 + sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
4114 +
4115 +- /* Fill the TX FIFO */
4116 +- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
4117 ++ /*
4118 ++ * Fill the TX FIFO.
4119 ++ * Filling the FIFO completely causes a timeout for some reason,
4120 ++ * at least on spi2 on the A10s, so leave one slot free.
4121 ++ */
4122 ++ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
4123 +
4124 + /* Enable the interrupts */
4125 + sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
4126 +@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
4127 + reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
4128 + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
4129 +
4130 ++ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
4131 ++ start = jiffies;
4132 + timeout = wait_for_completion_timeout(&sspi->done,
4133 +- msecs_to_jiffies(1000));
4134 ++ msecs_to_jiffies(tx_time));
4135 ++ end = jiffies;
4136 + if (!timeout) {
4137 ++ dev_warn(&master->dev,
4138 ++ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
4139 ++ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
4140 ++ jiffies_to_msecs(end - start), tx_time);
4141 + ret = -ETIMEDOUT;
4142 + goto out;
4143 + }
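
This sun4i hunk (and the identical sun6i one below) replaces the fixed one-second wait with a timeout proportional to the transfer: len bytes times 8 bits, doubled for margin, divided by the clock rate in kHz, floored at 100 ms. A compilable check of the arithmetic:

    #include <stdio.h>

    /* Worst-case transfer time in ms, with 2x margin and a 100 ms floor. */
    static unsigned int tx_time_ms(unsigned int len, unsigned int speed_hz)
    {
        unsigned int t = len * 8 * 2 / (speed_hz / 1000);

        return t > 100 ? t : 100;
    }

    int main(void)
    {
        /* 64 bytes at 100 kHz: 1024/100 = 10 ms, floored to 100 ms. */
        printf("%u ms\n", tx_time_ms(64, 100000));
        /* 64 bytes at 1 kHz: 1024 ms -- the old fixed 1000 ms timeout
         * would have fired just before the transfer could finish. */
        printf("%u ms\n", tx_time_ms(64, 1000));
        return 0;
    }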
4144 +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
4145 +index bd24093f4038..04e90851504c 100644
4146 +--- a/drivers/spi/spi-sun6i.c
4147 ++++ b/drivers/spi/spi-sun6i.c
4148 +@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
4149 + {
4150 + struct sun6i_spi *sspi = spi_master_get_devdata(master);
4151 + unsigned int mclk_rate, div, timeout;
4152 ++ unsigned int start, end, tx_time;
4153 + unsigned int tx_len = 0;
4154 + int ret = 0;
4155 + u32 reg;
4156 +@@ -217,8 +218,8 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
4157 +
4158 + /* Ensure that we have a parent clock fast enough */
4159 + mclk_rate = clk_get_rate(sspi->mclk);
4160 +- if (mclk_rate < (2 * spi->max_speed_hz)) {
4161 +- clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
4162 ++ if (mclk_rate < (2 * tfr->speed_hz)) {
4163 ++ clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
4164 + mclk_rate = clk_get_rate(sspi->mclk);
4165 + }
4166 +
4167 +@@ -236,14 +237,14 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
4168 + * First try CDR2, and if we can't reach the expected
4169 + * frequency, fall back to CDR1.
4170 + */
4171 +- div = mclk_rate / (2 * spi->max_speed_hz);
4172 ++ div = mclk_rate / (2 * tfr->speed_hz);
4173 + if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
4174 + if (div > 0)
4175 + div--;
4176 +
4177 + reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
4178 + } else {
4179 +- div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
4180 ++ div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
4181 + reg = SUN6I_CLK_CTL_CDR1(div);
4182 + }
4183 +
4184 +@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
4185 + reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
4186 + sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
4187 +
4188 ++ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
4189 ++ start = jiffies;
4190 + timeout = wait_for_completion_timeout(&sspi->done,
4191 +- msecs_to_jiffies(1000));
4192 ++ msecs_to_jiffies(tx_time));
4193 ++ end = jiffies;
4194 + if (!timeout) {
4195 ++ dev_warn(&master->dev,
4196 ++ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
4197 ++ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
4198 ++ jiffies_to_msecs(end - start), tx_time);
4199 + ret = -ETIMEDOUT;
4200 + goto out;
4201 + }
4202 +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
4203 +index e5c31eadb0ac..04da6f0e3326 100644
4204 +--- a/drivers/tty/serial/8250/8250_core.c
4205 ++++ b/drivers/tty/serial/8250/8250_core.c
4206 +@@ -738,22 +738,16 @@ static int size_fifo(struct uart_8250_port *up)
4207 + */
4208 + static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
4209 + {
4210 +- unsigned char old_dll, old_dlm, old_lcr;
4211 +- unsigned int id;
4212 ++ unsigned char old_lcr;
4213 ++ unsigned int id, old_dl;
4214 +
4215 + old_lcr = serial_in(p, UART_LCR);
4216 + serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
4217 ++ old_dl = serial_dl_read(p);
4218 ++ serial_dl_write(p, 0);
4219 ++ id = serial_dl_read(p);
4220 ++ serial_dl_write(p, old_dl);
4221 +
4222 +- old_dll = serial_in(p, UART_DLL);
4223 +- old_dlm = serial_in(p, UART_DLM);
4224 +-
4225 +- serial_out(p, UART_DLL, 0);
4226 +- serial_out(p, UART_DLM, 0);
4227 +-
4228 +- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
4229 +-
4230 +- serial_out(p, UART_DLL, old_dll);
4231 +- serial_out(p, UART_DLM, old_dlm);
4232 + serial_out(p, UART_LCR, old_lcr);
4233 +
4234 + return id;
4235 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
4236 +index 587d63bcbd0e..b7213637f498 100644
4237 +--- a/drivers/tty/serial/samsung.c
4238 ++++ b/drivers/tty/serial/samsung.c
4239 +@@ -749,6 +749,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
4240 + /* check to see if we need to change clock source */
4241 +
4242 + if (ourport->baudclk != clk) {
4243 ++ clk_prepare_enable(clk);
4244 ++
4245 + s3c24xx_serial_setsource(port, clk_sel);
4246 +
4247 + if (!IS_ERR(ourport->baudclk)) {
4248 +@@ -756,8 +758,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
4249 + ourport->baudclk = ERR_PTR(-EINVAL);
4250 + }
4251 +
4252 +- clk_prepare_enable(clk);
4253 +-
4254 + ourport->baudclk = clk;
4255 + ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
4256 + }
4257 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4258 +index 53c25bca7d05..9062636d3154 100644
4259 +--- a/drivers/tty/vt/vt.c
4260 ++++ b/drivers/tty/vt/vt.c
4261 +@@ -3591,9 +3591,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
4262 + goto err;
4263 +
4264 + desc = csw->con_startup();
4265 +-
4266 +- if (!desc)
4267 ++ if (!desc) {
4268 ++ retval = -ENODEV;
4269 + goto err;
4270 ++ }
4271 +
4272 + retval = -EINVAL;
4273 +
4274 +diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
4275 +index c812fefe0e50..27baa36cca00 100644
4276 +--- a/drivers/usb/common/usb-otg-fsm.c
4277 ++++ b/drivers/usb/common/usb-otg-fsm.c
4278 +@@ -21,6 +21,7 @@
4279 + * 675 Mass Ave, Cambridge, MA 02139, USA.
4280 + */
4281 +
4282 ++#include <linux/module.h>
4283 + #include <linux/kernel.h>
4284 + #include <linux/types.h>
4285 + #include <linux/mutex.h>
4286 +@@ -365,3 +366,4 @@ int otg_statemachine(struct otg_fsm *fsm)
4287 + return state_changed;
4288 + }
4289 + EXPORT_SYMBOL_GPL(otg_statemachine);
4290 ++MODULE_LICENSE("GPL");
4291 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4292 +index 87cc0654b49e..096bb82c69c4 100644
4293 +--- a/drivers/usb/core/hcd.c
4294 ++++ b/drivers/usb/core/hcd.c
4295 +@@ -2523,26 +2523,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
4296 + * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
4297 + * deallocated.
4298 + *
4299 +- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
4300 +- * freed. When hcd_release() is called for either hcd in a peer set
4301 +- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
4302 +- * block new peering attempts
4303 ++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
4304 ++ * freed. When hcd_release() is called for either hcd in a peer set,
4305 ++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
4306 + */
4307 + static void hcd_release(struct kref *kref)
4308 + {
4309 + struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
4310 +
4311 + mutex_lock(&usb_port_peer_mutex);
4312 +- if (usb_hcd_is_primary_hcd(hcd)) {
4313 +- kfree(hcd->address0_mutex);
4314 +- kfree(hcd->bandwidth_mutex);
4315 +- }
4316 + if (hcd->shared_hcd) {
4317 + struct usb_hcd *peer = hcd->shared_hcd;
4318 +
4319 + peer->shared_hcd = NULL;
4320 +- if (peer->primary_hcd == hcd)
4321 +- peer->primary_hcd = NULL;
4322 ++ peer->primary_hcd = NULL;
4323 ++ } else {
4324 ++ kfree(hcd->address0_mutex);
4325 ++ kfree(hcd->bandwidth_mutex);
4326 + }
4327 + mutex_unlock(&usb_port_peer_mutex);
4328 + kfree(hcd);
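
The hcd_release() fix changes "free the shared mutexes when the primary is released" to "free them when the last of the pair is released": the primary can legitimately be released first, which previously left the shared HCD pointing at freed mutexes. The test "still have a peer? unlink it; otherwise I am last, so free" is the standard idiom. A compilable miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct pair_member {
        struct pair_member *peer;   /* the other half of the pair, or NULL */
        int *shared;                /* resource shared by both halves */
    };

    static void member_release(struct pair_member *m)
    {
        if (m->peer) {
            m->peer->peer = NULL;   /* unlink: the peer is now the last one */
        } else {
            free(m->shared);        /* last reference frees the shared data */
            printf("shared freed\n");
        }
        free(m);
    }

    int main(void)
    {
        struct pair_member *a = calloc(1, sizeof(*a));
        struct pair_member *b = calloc(1, sizeof(*b));

        a->shared = b->shared = malloc(sizeof(int));
        a->peer = b;
        b->peer = a;

        member_release(a);  /* "primary" first: must NOT free shared yet */
        member_release(b);  /* last one out frees it exactly once */
        return 0;
    }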
4329 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
4330 +index 6b53fc3ec636..0673a5abc21d 100644
4331 +--- a/drivers/usb/core/quirks.c
4332 ++++ b/drivers/usb/core/quirks.c
4333 +@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
4334 + /* Creative SB Audigy 2 NX */
4335 + { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
4336 +
4337 ++ /* USB3503 */
4338 ++ { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
4339 ++
4340 + /* Microsoft Wireless Laser Mouse 6000 Receiver */
4341 + { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
4342 +
4343 +@@ -167,6 +170,10 @@ static const struct usb_device_id usb_quirk_list[] = {
4344 + /* MAYA44USB sound device */
4345 + { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
4346 +
4347 ++ /* ASUS Base Station(T100) */
4348 ++ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
4349 ++ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
4350 ++
4351 + /* Action Semiconductor flash disk */
4352 + { USB_DEVICE(0x10d6, 0x2200), .driver_info =
4353 + USB_QUIRK_STRING_FETCH_255 },
4354 +@@ -182,26 +189,22 @@ static const struct usb_device_id usb_quirk_list[] = {
4355 + { USB_DEVICE(0x1908, 0x1315), .driver_info =
4356 + USB_QUIRK_HONOR_BNUMINTERFACES },
4357 +
4358 +- /* INTEL VALUE SSD */
4359 +- { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
4360 +-
4361 +- /* USB3503 */
4362 +- { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
4363 +-
4364 +- /* ASUS Base Station(T100) */
4365 +- { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
4366 +- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
4367 +-
4368 + /* Protocol and OTG Electrical Test Device */
4369 + { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
4370 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
4371 +
4372 ++ /* Acer C120 LED Projector */
4373 ++ { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
4374 ++
4375 + /* Blackmagic Design Intensity Shuttle */
4376 + { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
4377 +
4378 + /* Blackmagic Design UltraStudio SDI */
4379 + { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
4380 +
4381 ++ /* INTEL VALUE SSD */
4382 ++ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
4383 ++
4384 + { } /* terminating entry must be last */
4385 + };
4386 +
4387 +diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
4388 +index 3951a65fea04..e184d17588ab 100644
4389 +--- a/drivers/usb/dwc3/dwc3-exynos.c
4390 ++++ b/drivers/usb/dwc3/dwc3-exynos.c
4391 +@@ -106,7 +106,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused)
4392 + static int dwc3_exynos_probe(struct platform_device *pdev)
4393 + {
4394 + struct dwc3_exynos *exynos;
4395 +- struct clk *clk;
4396 + struct device *dev = &pdev->dev;
4397 + struct device_node *node = dev->of_node;
4398 +
4399 +@@ -127,21 +126,13 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
4400 +
4401 + platform_set_drvdata(pdev, exynos);
4402 +
4403 +- ret = dwc3_exynos_register_phys(exynos);
4404 +- if (ret) {
4405 +- dev_err(dev, "couldn't register PHYs\n");
4406 +- return ret;
4407 +- }
4408 ++ exynos->dev = dev;
4409 +
4410 +- clk = devm_clk_get(dev, "usbdrd30");
4411 +- if (IS_ERR(clk)) {
4412 ++ exynos->clk = devm_clk_get(dev, "usbdrd30");
4413 ++ if (IS_ERR(exynos->clk)) {
4414 + dev_err(dev, "couldn't get clock\n");
4415 + return -EINVAL;
4416 + }
4417 +-
4418 +- exynos->dev = dev;
4419 +- exynos->clk = clk;
4420 +-
4421 + clk_prepare_enable(exynos->clk);
4422 +
4423 + exynos->vdd33 = devm_regulator_get(dev, "vdd33");
4424 +@@ -166,26 +157,35 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
4425 + goto err3;
4426 + }
4427 +
4428 ++ ret = dwc3_exynos_register_phys(exynos);
4429 ++ if (ret) {
4430 ++ dev_err(dev, "couldn't register PHYs\n");
4431 ++ goto err4;
4432 ++ }
4433 ++
4434 + if (node) {
4435 + ret = of_platform_populate(node, NULL, NULL, dev);
4436 + if (ret) {
4437 + dev_err(dev, "failed to add dwc3 core\n");
4438 +- goto err4;
4439 ++ goto err5;
4440 + }
4441 + } else {
4442 + dev_err(dev, "no device node, failed to add dwc3 core\n");
4443 + ret = -ENODEV;
4444 +- goto err4;
4445 ++ goto err5;
4446 + }
4447 +
4448 + return 0;
4449 +
4450 ++err5:
4451 ++ platform_device_unregister(exynos->usb2_phy);
4452 ++ platform_device_unregister(exynos->usb3_phy);
4453 + err4:
4454 + regulator_disable(exynos->vdd10);
4455 + err3:
4456 + regulator_disable(exynos->vdd33);
4457 + err2:
4458 +- clk_disable_unprepare(clk);
4459 ++ clk_disable_unprepare(exynos->clk);
4460 + return ret;
4461 + }
4462 +
4463 +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
4464 +index db2becd31a51..54f964bbc79a 100644
4465 +--- a/drivers/usb/gadget/legacy/inode.c
4466 ++++ b/drivers/usb/gadget/legacy/inode.c
4467 +@@ -1018,8 +1018,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
4468 + struct usb_ep *ep = dev->gadget->ep0;
4469 + struct usb_request *req = dev->req;
4470 +
4471 +- if ((retval = setup_req (ep, req, 0)) == 0)
4472 +- retval = usb_ep_queue (ep, req, GFP_ATOMIC);
4473 ++ if ((retval = setup_req (ep, req, 0)) == 0) {
4474 ++ spin_unlock_irq (&dev->lock);
4475 ++ retval = usb_ep_queue (ep, req, GFP_KERNEL);
4476 ++ spin_lock_irq (&dev->lock);
4477 ++ }
4478 + dev->state = STATE_DEV_CONNECTED;
4479 +
4480 + /* assume that was SET_CONFIGURATION */
4481 +@@ -1550,8 +1553,11 @@ delegate:
4482 + w_length);
4483 + if (value < 0)
4484 + break;
4485 ++
4486 ++ spin_unlock (&dev->lock);
4487 + value = usb_ep_queue (gadget->ep0, dev->req,
4488 +- GFP_ATOMIC);
4489 ++ GFP_KERNEL);
4490 ++ spin_lock (&dev->lock);
4491 + if (value < 0) {
4492 + clean_req (gadget->ep0, dev->req);
4493 + break;
4494 +@@ -1574,11 +1580,14 @@ delegate:
4495 + if (value >= 0 && dev->state != STATE_DEV_SETUP) {
4496 + req->length = value;
4497 + req->zero = value < w_length;
4498 +- value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
4499 ++
4500 ++ spin_unlock (&dev->lock);
4501 ++ value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
4502 + if (value < 0) {
4503 + DBG (dev, "ep_queue --> %d\n", value);
4504 + req->status = 0;
4505 + }
4506 ++ return value;
4507 + }
4508 +
4509 + /* device stalls when value < 0 */
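
The inode.c hunks all apply one rule: usb_ep_queue() with GFP_KERNEL may sleep, so the spinlock has to be dropped around the call and re-taken afterwards (the old GFP_ATOMIC variant could instead fail under memory pressure). The shape of the pattern, as a hedged sketch rather than the gadgetfs code itself:

    /* Drop the lock around a call that may sleep, then revalidate state. */
    static int demo_queue_ep0(struct demo_dev *dev, struct usb_ep *ep,
                              struct usb_request *req)
    {
        int value;

        spin_unlock_irq(&dev->lock);
        value = usb_ep_queue(ep, req, GFP_KERNEL);  /* may sleep here */
        spin_lock_irq(&dev->lock);

        /* Anything derived from dev state before the unlock may be stale
         * now and must be rechecked by the caller. */
        return value;
    }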
4510 +diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
4511 +index aaa01971efe9..aad253559bcd 100644
4512 +--- a/drivers/usb/host/ehci-tegra.c
4513 ++++ b/drivers/usb/host/ehci-tegra.c
4514 +@@ -89,7 +89,7 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
4515 + if (!usb1_reset_attempted) {
4516 + struct reset_control *usb1_reset;
4517 +
4518 +- usb1_reset = of_reset_control_get(phy_np, "usb");
4519 ++ usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
4520 + if (IS_ERR(usb1_reset)) {
4521 + dev_warn(&pdev->dev,
4522 + "can't get utmi-pads reset from the PHY\n");
4523 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4524 +index c6027acb6263..54caaf87c567 100644
4525 +--- a/drivers/usb/host/xhci-pci.c
4526 ++++ b/drivers/usb/host/xhci-pci.c
4527 +@@ -37,6 +37,7 @@
4528 + /* Device for a quirk */
4529 + #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
4530 + #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
4531 ++#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
4532 + #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
4533 +
4534 + #define PCI_VENDOR_ID_ETRON 0x1b6f
4535 +@@ -108,6 +109,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4536 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
4537 + }
4538 +
4539 ++ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
4540 ++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
4541 ++ xhci->quirks |= XHCI_BROKEN_STREAMS;
4542 ++
4543 + if (pdev->vendor == PCI_VENDOR_ID_NEC)
4544 + xhci->quirks |= XHCI_NEC_HOST;
4545 +
4546 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
4547 +index 22516f41c6f4..3dd487872bf1 100644
4548 +--- a/drivers/usb/host/xhci-plat.c
4549 ++++ b/drivers/usb/host/xhci-plat.c
4550 +@@ -118,6 +118,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
4551 + ret = clk_prepare_enable(clk);
4552 + if (ret)
4553 + goto put_hcd;
4554 ++ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
4555 ++ ret = -EPROBE_DEFER;
4556 ++ goto put_hcd;
4557 + }
4558 +
4559 + if (of_device_is_compatible(pdev->dev.of_node,
4560 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4561 +index 04e75258fb46..69464630be54 100644
4562 +--- a/drivers/usb/host/xhci-ring.c
4563 ++++ b/drivers/usb/host/xhci-ring.c
4564 +@@ -289,6 +289,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
4565 +
4566 + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
4567 + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
4568 ++
4569 ++ /*
4570 ++ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
4571 ++ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
4572 ++ * but the completion event in never sent. Use the cmd timeout timer to
4573 ++ * handle those cases. Use twice the time to cover the bit polling retry
4574 ++ */
4575 ++ mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
4576 + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
4577 + &xhci->op_regs->cmd_ring);
4578 +
4579 +@@ -304,6 +312,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
4580 + if (ret < 0) {
4581 + xhci_err(xhci, "Stopped the command ring failed, "
4582 + "maybe the host is dead\n");
4583 ++ del_timer(&xhci->cmd_timer);
4584 + xhci->xhc_state |= XHCI_STATE_DYING;
4585 + xhci_quiesce(xhci);
4586 + xhci_halt(xhci);
4587 +@@ -1245,22 +1254,21 @@ void xhci_handle_command_timeout(unsigned long data)
4588 + int ret;
4589 + unsigned long flags;
4590 + u64 hw_ring_state;
4591 +- struct xhci_command *cur_cmd = NULL;
4592 ++ bool second_timeout = false;
4593 + xhci = (struct xhci_hcd *) data;
4594 +
4595 + /* mark this command to be cancelled */
4596 + spin_lock_irqsave(&xhci->lock, flags);
4597 + if (xhci->current_cmd) {
4598 +- cur_cmd = xhci->current_cmd;
4599 +- cur_cmd->status = COMP_CMD_ABORT;
4600 ++ if (xhci->current_cmd->status == COMP_CMD_ABORT)
4601 ++ second_timeout = true;
4602 ++ xhci->current_cmd->status = COMP_CMD_ABORT;
4603 + }
4604 +
4605 +-
4606 + /* Make sure command ring is running before aborting it */
4607 + hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
4608 + if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
4609 + (hw_ring_state & CMD_RING_RUNNING)) {
4610 +-
4611 + spin_unlock_irqrestore(&xhci->lock, flags);
4612 + xhci_dbg(xhci, "Command timeout\n");
4613 + ret = xhci_abort_cmd_ring(xhci);
4614 +@@ -1272,6 +1280,15 @@ void xhci_handle_command_timeout(unsigned long data)
4615 + }
4616 + return;
4617 + }
4618 ++
4619 ++ /* command ring failed to restart, or host removed. Bail out */
4620 ++ if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
4621 ++ spin_unlock_irqrestore(&xhci->lock, flags);
4622 ++ xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
4623 ++ xhci_cleanup_command_queue(xhci);
4624 ++ return;
4625 ++ }
4626 ++
4627 + /* command timeout on stopped ring, ring can't be aborted */
4628 + xhci_dbg(xhci, "Command timeout on stopped ring\n");
4629 + xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
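
The timeout handler above encodes a small two-strike state machine: the first expiry marks the current command aborted and tries to abort the ring; if the same command is still marked aborted at the next expiry, the abort never completed and the whole command queue is flushed. A simplified sketch of that logic, with invented names and locking omitted:

    #include <stdbool.h>
    #include <stdio.h>

    enum cmd_status { CMD_PENDING, CMD_ABORTED };

    struct cmd_ring {
            enum cmd_status current;  /* status of the timed command */
            bool running;             /* roughly CMD_RING_RUNNING */
    };

    /* Returns true when the caller should give up and flush the queue. */
    static bool handle_timeout(struct cmd_ring *r)
    {
            bool second_timeout = (r->current == CMD_ABORTED);

            r->current = CMD_ABORTED;        /* mark for cancellation */
            if (r->running && !second_timeout)
                    return false;            /* first strike: issue abort */
            return true;                     /* abort never completed */
    }

    int main(void)
    {
            struct cmd_ring r = { CMD_PENDING, true };

            printf("%d\n", handle_timeout(&r));  /* 0: abort attempted */
            printf("%d\n", handle_timeout(&r));  /* 1: bail out and flush */
            return 0;
    }
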
4630 +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
4631 +index 4500610356f2..7da914bc2094 100644
4632 +--- a/drivers/usb/musb/musb_host.c
4633 ++++ b/drivers/usb/musb/musb_host.c
4634 +@@ -583,14 +583,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
4635 + musb_writew(ep->regs, MUSB_TXCSR, 0);
4636 +
4637 + /* scrub all previous state, clearing toggle */
4638 +- } else {
4639 +- csr = musb_readw(ep->regs, MUSB_RXCSR);
4640 +- if (csr & MUSB_RXCSR_RXPKTRDY)
4641 +- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
4642 +- musb_readw(ep->regs, MUSB_RXCOUNT));
4643 +-
4644 +- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
4645 + }
4646 ++ csr = musb_readw(ep->regs, MUSB_RXCSR);
4647 ++ if (csr & MUSB_RXCSR_RXPKTRDY)
4648 ++ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
4649 ++ musb_readw(ep->regs, MUSB_RXCOUNT));
4650 ++
4651 ++ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
4652 +
4653 + /* target addr and (for multipoint) hub addr/port */
4654 + if (musb->is_multipoint) {
4655 +@@ -950,9 +949,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
4656 + if (is_in) {
4657 + dma = is_dma_capable() ? ep->rx_channel : NULL;
4658 +
4659 +- /* clear nak timeout bit */
4660 ++ /*
4661 ++ * Need to stop the transaction by clearing REQPKT first,
4662 ++ * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
4663 ++ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2.
4664 ++ */
4665 + rx_csr = musb_readw(epio, MUSB_RXCSR);
4666 + rx_csr |= MUSB_RXCSR_H_WZC_BITS;
4667 ++ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
4668 ++ musb_writew(epio, MUSB_RXCSR, rx_csr);
4669 + rx_csr &= ~MUSB_RXCSR_DATAERROR;
4670 + musb_writew(epio, MUSB_RXCSR, rx_csr);
4671 +
4672 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
4673 +index facaaf003f19..e40da7759a0e 100644
4674 +--- a/drivers/usb/usbip/usbip_common.c
4675 ++++ b/drivers/usb/usbip/usbip_common.c
4676 +@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
4677 + if (!(size > 0))
4678 + return 0;
4679 +
4680 ++ if (size > urb->transfer_buffer_length) {
4681 ++ /* should not happen, probably malicious packet */
4682 ++ if (ud->side == USBIP_STUB) {
4683 ++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
4684 ++ return 0;
4685 ++ } else {
4686 ++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
4687 ++ return -EPIPE;
4688 ++ }
4689 ++ }
4690 ++
4691 + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
4692 + if (ret != size) {
4693 + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
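
The usbip hunk above is a never-trust-the-wire guard: the payload size announced by the peer must be validated against the local buffer's capacity before anything is read into it. A minimal sketch, with the actual socket read elided:

    #include <errno.h>
    #include <stdio.h>

    /* 'size' comes from the peer's header; 'buf_len' is what the local
     * URB transfer buffer can actually hold. */
    static int recv_xbuff(size_t size, size_t buf_len)
    {
            if (size == 0)
                    return 0;
            if (size > buf_len)
                    return -EPIPE;   /* malformed or malicious: reject */
            /* ... read exactly 'size' bytes into the buffer here ... */
            return (int)size;
    }

    int main(void)
    {
            printf("%d\n", recv_xbuff(64, 512));    /* 64: accepted */
            printf("%d\n", recv_xbuff(4096, 512));  /* negative: rejected */
            return 0;
    }
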
4694 +diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
4695 +index a5f88377cec5..cdd965fae22c 100644
4696 +--- a/drivers/video/fbdev/Kconfig
4697 ++++ b/drivers/video/fbdev/Kconfig
4698 +@@ -2266,7 +2266,6 @@ config XEN_FBDEV_FRONTEND
4699 + select FB_SYS_IMAGEBLIT
4700 + select FB_SYS_FOPS
4701 + select FB_DEFERRED_IO
4702 +- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
4703 + select XEN_XENBUS_FRONTEND
4704 + default y
4705 + help
4706 +diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
4707 +index 10c876c95772..4515e4cddbca 100644
4708 +--- a/drivers/video/fbdev/da8xx-fb.c
4709 ++++ b/drivers/video/fbdev/da8xx-fb.c
4710 +@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
4711 + .lower_margin = 2,
4712 + .hsync_len = 0,
4713 + .vsync_len = 0,
4714 +- .sync = FB_SYNC_CLK_INVERT |
4715 +- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4716 ++ .sync = FB_SYNC_CLK_INVERT,
4717 + },
4718 + /* Sharp LK043T1DG01 */
4719 + [1] = {
4720 +@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
4721 + .lower_margin = 2,
4722 + .hsync_len = 41,
4723 + .vsync_len = 10,
4724 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4725 ++ .sync = 0,
4726 + .flag = 0,
4727 + },
4728 + [2] = {
4729 +@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
4730 + .lower_margin = 10,
4731 + .hsync_len = 10,
4732 + .vsync_len = 10,
4733 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4734 ++ .sync = 0,
4735 + .flag = 0,
4736 + },
4737 + [3] = {
4738 +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
4739 +index 9c234209d8b5..47a4177b16d2 100644
4740 +--- a/drivers/xen/xen-pciback/conf_space.c
4741 ++++ b/drivers/xen/xen-pciback/conf_space.c
4742 +@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
4743 + field_start = OFFSET(cfg_entry);
4744 + field_end = OFFSET(cfg_entry) + field->size;
4745 +
4746 +- if ((req_start >= field_start && req_start < field_end)
4747 +- || (req_end > field_start && req_end <= field_end)) {
4748 ++ if (req_end > field_start && field_end > req_start) {
4749 + err = conf_space_read(dev, cfg_entry, field_start,
4750 + &tmp_val);
4751 + if (err)
4752 +@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
4753 + field_start = OFFSET(cfg_entry);
4754 + field_end = OFFSET(cfg_entry) + field->size;
4755 +
4756 +- if ((req_start >= field_start && req_start < field_end)
4757 +- || (req_end > field_start && req_end <= field_end)) {
4758 ++ if (req_end > field_start && field_end > req_start) {
4759 + tmp_val = 0;
4760 +
4761 + err = xen_pcibk_config_read(dev, field_start,
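
The xen-pciback rewrite above replaces a two-clause test with the canonical overlap check for half-open ranges. The old form only matched when an endpoint of the request fell inside the field, so a request that strictly contained a whole field was skipped; requiring each range to start before the other ends covers every case. A small demonstration:

    #include <stdbool.h>
    #include <stdio.h>

    /* [a_start, a_end) and [b_start, b_end) overlap exactly when each
     * range starts before the other one ends. */
    static bool ranges_overlap(unsigned a_start, unsigned a_end,
                               unsigned b_start, unsigned b_end)
    {
            return a_end > b_start && b_end > a_start;
    }

    int main(void)
    {
            printf("%d\n", ranges_overlap(0, 4, 2, 6));  /* 1: partial overlap */
            printf("%d\n", ranges_overlap(0, 8, 2, 6));  /* 1: request contains field */
            printf("%d\n", ranges_overlap(0, 2, 2, 6));  /* 0: merely touching */
            return 0;
    }
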
4762 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4763 +index f54511dd287e..39c68ef10808 100644
4764 +--- a/fs/btrfs/ctree.c
4765 ++++ b/fs/btrfs/ctree.c
4766 +@@ -1542,6 +1542,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
4767 + trans->transid, root->fs_info->generation);
4768 +
4769 + if (!should_cow_block(trans, root, buf)) {
4770 ++ trans->dirty = true;
4771 + *cow_ret = buf;
4772 + return 0;
4773 + }
4774 +@@ -2762,8 +2763,10 @@ again:
4775 + * then we don't want to set the path blocking,
4776 + * so we test it here
4777 + */
4778 +- if (!should_cow_block(trans, root, b))
4779 ++ if (!should_cow_block(trans, root, b)) {
4780 ++ trans->dirty = true;
4781 + goto cow_done;
4782 ++ }
4783 +
4784 + /*
4785 + * must have write locks on this node and the
4786 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4787 +index 950479f2d337..a067065efa6b 100644
4788 +--- a/fs/btrfs/extent-tree.c
4789 ++++ b/fs/btrfs/extent-tree.c
4790 +@@ -7237,7 +7237,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4791 + set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4792 + buf->start + buf->len - 1, GFP_NOFS);
4793 + }
4794 +- trans->blocks_used++;
4795 ++ trans->dirty = true;
4796 + /* this returns a buffer locked for blocking */
4797 + return buf;
4798 + }
4799 +@@ -9196,9 +9196,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
4800 + int ret = 0;
4801 +
4802 + list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
4803 +- list_del_init(&block_group->bg_list);
4804 + if (ret)
4805 +- continue;
4806 ++ goto next;
4807 +
4808 + spin_lock(&block_group->lock);
4809 + memcpy(&item, &block_group->item, sizeof(item));
4810 +@@ -9213,6 +9212,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
4811 + key.objectid, key.offset);
4812 + if (ret)
4813 + btrfs_abort_transaction(trans, extent_root, ret);
4814 ++next:
4815 ++ list_del_init(&block_group->bg_list);
4816 + }
4817 + }
4818 +
4819 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4820 +index 1c1ee12ab0cf..d96b2bc444c8 100644
4821 +--- a/fs/btrfs/ioctl.c
4822 ++++ b/fs/btrfs/ioctl.c
4823 +@@ -1656,7 +1656,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
4824 +
4825 + src_inode = file_inode(src.file);
4826 + if (src_inode->i_sb != file_inode(file)->i_sb) {
4827 +- btrfs_info(BTRFS_I(src_inode)->root->fs_info,
4828 ++ btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
4829 + "Snapshot src from another FS");
4830 + ret = -EXDEV;
4831 + } else if (!inode_owner_or_capable(src_inode)) {
4832 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
4833 +index 7ceaaf2010f9..cf6d11bb8dcb 100644
4834 +--- a/fs/btrfs/super.c
4835 ++++ b/fs/btrfs/super.c
4836 +@@ -262,7 +262,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
4837 + trans->aborted = errno;
4838 + /* Nothing used. The other threads that have joined this
4839 + * transaction may be able to continue. */
4840 +- if (!trans->blocks_used) {
4841 ++ if (!trans->dirty && list_empty(&trans->new_bgs)) {
4842 + const char *errstr;
4843 +
4844 + errstr = btrfs_decode_error(errno);
4845 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
4846 +index 240019f36b2a..30dbf315c2d6 100644
4847 +--- a/fs/btrfs/transaction.c
4848 ++++ b/fs/btrfs/transaction.c
4849 +@@ -472,7 +472,6 @@ again:
4850 +
4851 + h->transid = cur_trans->transid;
4852 + h->transaction = cur_trans;
4853 +- h->blocks_used = 0;
4854 + h->bytes_reserved = 0;
4855 + h->root = root;
4856 + h->delayed_ref_updates = 0;
4857 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
4858 +index 1ba9c3e04191..1cf5de30368a 100644
4859 +--- a/fs/btrfs/transaction.h
4860 ++++ b/fs/btrfs/transaction.h
4861 +@@ -88,7 +88,6 @@ struct btrfs_trans_handle {
4862 + u64 qgroup_reserved;
4863 + unsigned long use_count;
4864 + unsigned long blocks_reserved;
4865 +- unsigned long blocks_used;
4866 + unsigned long delayed_ref_updates;
4867 + struct btrfs_transaction *transaction;
4868 + struct btrfs_block_rsv *block_rsv;
4869 +@@ -98,6 +97,7 @@ struct btrfs_trans_handle {
4870 + bool allocating_chunk;
4871 + bool reloc_reserved;
4872 + bool sync;
4873 ++ bool dirty;
4874 + unsigned int type;
4875 + /*
4876 + * this root is only needed to validate that the root passed to
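
The btrfs changes above replace the blocks_used counter with a simple dirty flag: an aborted transaction handle is only fatal when it actually modified something or still has pending block groups; a clean handle lets other joiners of the same transaction continue. A sketch of that policy, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct trans_handle {
            bool dirty;        /* set whenever a block is CoW'd or dirtied */
            bool pending_bgs;  /* new block groups not yet inserted */
            int aborted;
    };

    static const char *on_abort(struct trans_handle *t, int err)
    {
            t->aborted = err;
            if (!t->dirty && !t->pending_bgs)
                    return "handle unused: other joiners may continue";
            return "handle dirtied the transaction: abort it";
    }

    int main(void)
    {
            struct trans_handle clean = { false, false, 0 };
            struct trans_handle used  = { true, false, 0 };

            puts(on_abort(&clean, -5));
            puts(on_abort(&used, -5));
            return 0;
    }
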
4877 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
4878 +index 0303c6793d90..f7fb4b8658ba 100644
4879 +--- a/fs/cifs/cifs_unicode.c
4880 ++++ b/fs/cifs/cifs_unicode.c
4881 +@@ -136,6 +136,12 @@ convert_sfm_char(const __u16 src_char, char *target)
4882 + case SFM_SLASH:
4883 + *target = '\\';
4884 + break;
4885 ++ case SFM_SPACE:
4886 ++ *target = ' ';
4887 ++ break;
4888 ++ case SFM_PERIOD:
4889 ++ *target = '.';
4890 ++ break;
4891 + default:
4892 + return false;
4893 + }
4894 +@@ -364,7 +370,7 @@ static __le16 convert_to_sfu_char(char src_char)
4895 + return dest_char;
4896 + }
4897 +
4898 +-static __le16 convert_to_sfm_char(char src_char)
4899 ++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
4900 + {
4901 + __le16 dest_char;
4902 +
4903 +@@ -387,6 +393,18 @@ static __le16 convert_to_sfm_char(char src_char)
4904 + case '|':
4905 + dest_char = cpu_to_le16(SFM_PIPE);
4906 + break;
4907 ++ case '.':
4908 ++ if (end_of_string)
4909 ++ dest_char = cpu_to_le16(SFM_PERIOD);
4910 ++ else
4911 ++ dest_char = 0;
4912 ++ break;
4913 ++ case ' ':
4914 ++ if (end_of_string)
4915 ++ dest_char = cpu_to_le16(SFM_SPACE);
4916 ++ else
4917 ++ dest_char = 0;
4918 ++ break;
4919 + default:
4920 + dest_char = 0;
4921 + }
4922 +@@ -424,9 +442,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
4923 + /* see if we must remap this char */
4924 + if (map_chars == SFU_MAP_UNI_RSVD)
4925 + dst_char = convert_to_sfu_char(src_char);
4926 +- else if (map_chars == SFM_MAP_UNI_RSVD)
4927 +- dst_char = convert_to_sfm_char(src_char);
4928 +- else
4929 ++ else if (map_chars == SFM_MAP_UNI_RSVD) {
4930 ++ bool end_of_string;
4931 ++
4932 ++ if (i == srclen - 1)
4933 ++ end_of_string = true;
4934 ++ else
4935 ++ end_of_string = false;
4936 ++
4937 ++ dst_char = convert_to_sfm_char(src_char, end_of_string);
4938 ++ } else
4939 + dst_char = 0;
4940 + /*
4941 + * FIXME: We can not handle remapping backslash (UNI_SLASH)
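
The SFM mapping above only remaps ' ' and '.' when they are the final character of the name, since servers reject trailing spaces and periods but accept them elsewhere. A small sketch of the position-dependent remap, using the constants defined in the header hunk below:

    #include <stdio.h>

    #define SFM_PERIOD 0xF028
    #define SFM_SPACE  0xF029

    /* Returns the remapped codepoint, or 0 when no remap is needed. */
    static unsigned short sfm_remap(char c, int is_last)
    {
            switch (c) {
            case '.':
                    return is_last ? SFM_PERIOD : 0;
            case ' ':
                    return is_last ? SFM_SPACE : 0;
            default:
                    return 0;
            }
    }

    int main(void)
    {
            const char *name = "a.b ";

            for (int i = 0; name[i]; i++)
                    printf("'%c' -> 0x%04x\n", name[i],
                           sfm_remap(name[i], name[i + 1] == '\0'));
            return 0;
    }
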
4942 +diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
4943 +index bdc52cb9a676..479bc0a941f3 100644
4944 +--- a/fs/cifs/cifs_unicode.h
4945 ++++ b/fs/cifs/cifs_unicode.h
4946 +@@ -64,6 +64,8 @@
4947 + #define SFM_LESSTHAN ((__u16) 0xF023)
4948 + #define SFM_PIPE ((__u16) 0xF027)
4949 + #define SFM_SLASH ((__u16) 0xF026)
4950 ++#define SFM_PERIOD ((__u16) 0xF028)
4951 ++#define SFM_SPACE ((__u16) 0xF029)
4952 +
4953 + /*
4954 + * Mapping mechanism to use when one of the seven reserved characters is
4955 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4956 +index 82ebe7dbd834..db97215a23b2 100644
4957 +--- a/fs/cifs/connect.c
4958 ++++ b/fs/cifs/connect.c
4959 +@@ -413,7 +413,9 @@ cifs_echo_request(struct work_struct *work)
4960 + * server->ops->need_neg() == true. Also, no need to ping if
4961 + * we got a response recently.
4962 + */
4963 +- if (!server->ops->need_neg || server->ops->need_neg(server) ||
4964 ++
4965 ++ if (server->tcpStatus == CifsNeedReconnect ||
4966 ++ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
4967 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
4968 + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
4969 + goto requeue_echo;
4970 +diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
4971 +index 848249fa120f..3079b38f0afb 100644
4972 +--- a/fs/cifs/ntlmssp.h
4973 ++++ b/fs/cifs/ntlmssp.h
4974 +@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
4975 +
4976 + int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
4977 + void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
4978 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
4979 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
4980 + struct cifs_ses *ses,
4981 + const struct nls_table *nls_cp);
4982 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
4983 +index fe423e18450f..693da83a65f0 100644
4984 +--- a/fs/cifs/sess.c
4985 ++++ b/fs/cifs/sess.c
4986 +@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
4987 + sec_blob->DomainName.MaximumLength = 0;
4988 + }
4989 +
4990 +-/* We do not malloc the blob, it is passed in pbuffer, because its
4991 +- maximum possible size is fixed and small, making this approach cleaner.
4992 +- This function returns the length of the data in the blob */
4993 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4994 ++static int size_of_ntlmssp_blob(struct cifs_ses *ses)
4995 ++{
4996 ++ int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
4997 ++ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
4998 ++
4999 ++ if (ses->domainName)
5000 ++ sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
5001 ++ else
5002 ++ sz += 2;
5003 ++
5004 ++ if (ses->user_name)
5005 ++ sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
5006 ++ else
5007 ++ sz += 2;
5008 ++
5009 ++ return sz;
5010 ++}
5011 ++
5012 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
5013 + u16 *buflen,
5014 + struct cifs_ses *ses,
5015 + const struct nls_table *nls_cp)
5016 + {
5017 + int rc;
5018 +- AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
5019 ++ AUTHENTICATE_MESSAGE *sec_blob;
5020 + __u32 flags;
5021 + unsigned char *tmp;
5022 +
5023 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
5024 ++ if (rc) {
5025 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
5026 ++ *buflen = 0;
5027 ++ goto setup_ntlmv2_ret;
5028 ++ }
5029 ++ *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
5030 ++ sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
5031 ++
5032 + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
5033 + sec_blob->MessageType = NtLmAuthenticate;
5034 +
5035 +@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
5036 + flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
5037 + }
5038 +
5039 +- tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
5040 ++ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
5041 + sec_blob->NegotiateFlags = cpu_to_le32(flags);
5042 +
5043 + sec_blob->LmChallengeResponse.BufferOffset =
5044 +@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
5045 + sec_blob->LmChallengeResponse.Length = 0;
5046 + sec_blob->LmChallengeResponse.MaximumLength = 0;
5047 +
5048 +- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
5049 ++ sec_blob->NtChallengeResponse.BufferOffset =
5050 ++ cpu_to_le32(tmp - *pbuffer);
5051 + if (ses->user_name != NULL) {
5052 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
5053 +- if (rc) {
5054 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
5055 +- goto setup_ntlmv2_ret;
5056 +- }
5057 + memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
5058 + ses->auth_key.len - CIFS_SESS_KEY_SIZE);
5059 + tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
5060 +@@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
5061 + }
5062 +
5063 + if (ses->domainName == NULL) {
5064 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
5065 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5066 + sec_blob->DomainName.Length = 0;
5067 + sec_blob->DomainName.MaximumLength = 0;
5068 + tmp += 2;
5069 +@@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
5070 + len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
5071 + CIFS_MAX_USERNAME_LEN, nls_cp);
5072 + len *= 2; /* unicode is 2 bytes each */
5073 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
5074 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5075 + sec_blob->DomainName.Length = cpu_to_le16(len);
5076 + sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
5077 + tmp += len;
5078 + }
5079 +
5080 + if (ses->user_name == NULL) {
5081 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
5082 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5083 + sec_blob->UserName.Length = 0;
5084 + sec_blob->UserName.MaximumLength = 0;
5085 + tmp += 2;
5086 +@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
5087 + len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
5088 + CIFS_MAX_USERNAME_LEN, nls_cp);
5089 + len *= 2; /* unicode is 2 bytes each */
5090 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
5091 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5092 + sec_blob->UserName.Length = cpu_to_le16(len);
5093 + sec_blob->UserName.MaximumLength = cpu_to_le16(len);
5094 + tmp += len;
5095 + }
5096 +
5097 +- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
5098 ++ sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5099 + sec_blob->WorkstationName.Length = 0;
5100 + sec_blob->WorkstationName.MaximumLength = 0;
5101 + tmp += 2;
5102 +@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
5103 + (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
5104 + && !calc_seckey(ses)) {
5105 + memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
5106 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
5107 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5108 + sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
5109 + sec_blob->SessionKey.MaximumLength =
5110 + cpu_to_le16(CIFS_CPHTXT_SIZE);
5111 + tmp += CIFS_CPHTXT_SIZE;
5112 + } else {
5113 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
5114 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
5115 + sec_blob->SessionKey.Length = 0;
5116 + sec_blob->SessionKey.MaximumLength = 0;
5117 + }
5118 +
5119 ++ *buflen = tmp - *pbuffer;
5120 + setup_ntlmv2_ret:
5121 +- *buflen = tmp - pbuffer;
5122 + return rc;
5123 + }
5124 +
5125 +@@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
5126 + struct cifs_ses *ses = sess_data->ses;
5127 + __u16 bytes_remaining;
5128 + char *bcc_ptr;
5129 +- char *ntlmsspblob = NULL;
5130 ++ unsigned char *ntlmsspblob = NULL;
5131 + u16 blob_len;
5132 +
5133 + cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
5134 +@@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
5135 + /* Build security blob before we assemble the request */
5136 + pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
5137 + smb_buf = (struct smb_hdr *)pSMB;
5138 +- /*
5139 +- * 5 is an empirical value, large enough to hold
5140 +- * authenticate message plus max 10 of av paris,
5141 +- * domain, user, workstation names, flags, etc.
5142 +- */
5143 +- ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
5144 +- GFP_KERNEL);
5145 +- if (!ntlmsspblob) {
5146 +- rc = -ENOMEM;
5147 +- goto out;
5148 +- }
5149 +-
5150 +- rc = build_ntlmssp_auth_blob(ntlmsspblob,
5151 ++ rc = build_ntlmssp_auth_blob(&ntlmsspblob,
5152 + &blob_len, ses, sess_data->nls_cp);
5153 + if (rc)
5154 + goto out_free_ntlmsspblob;
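
Instead of the old guess of five AUTHENTICATE_MESSAGE structs, the blob is now sized exactly: a fixed header, the NTLMv2 response, and each optional string encoded as UTF-16 at two bytes per character (or two bytes when absent). A rough userspace sketch of that computation, with a hypothetical header size:

    #include <stdio.h>
    #include <string.h>

    #define HDR_SIZE 88   /* hypothetical fixed AUTHENTICATE_MESSAGE size */

    static size_t blob_size(const char *domain, const char *user,
                            size_t ntlmv2_len)
    {
            size_t sz = HDR_SIZE + ntlmv2_len + 2;

            sz += domain ? 2 * strlen(domain) : 2;  /* UTF-16: 2 bytes/char */
            sz += user ? 2 * strlen(user) : 2;
            return sz;
    }

    int main(void)
    {
            printf("%zu\n", blob_size("WORKGROUP", "alice", 48));
            printf("%zu\n", blob_size(NULL, NULL, 48));
            return 0;
    }
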
5155 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5156 +index e8d1f8c59b56..d759ecdfa9d6 100644
5157 +--- a/fs/cifs/smb2pdu.c
5158 ++++ b/fs/cifs/smb2pdu.c
5159 +@@ -46,6 +46,7 @@
5160 + #include "smb2status.h"
5161 + #include "smb2glob.h"
5162 + #include "cifspdu.h"
5163 ++#include "cifs_spnego.h"
5164 +
5165 + /*
5166 + * The following table defines the expected "StructureSize" of SMB2 requests
5167 +@@ -427,20 +428,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
5168 + cifs_dbg(FYI, "missing security blob on negprot\n");
5169 +
5170 + rc = cifs_enable_signing(server, ses->sign);
5171 +-#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
5172 + if (rc)
5173 + goto neg_exit;
5174 +- if (blob_length)
5175 +- rc = decode_neg_token_init(security_blob, blob_length,
5176 +- &server->sec_type);
5177 +- if (rc == 1)
5178 +- rc = 0;
5179 +- else if (rc == 0) {
5180 +- rc = -EIO;
5181 +- goto neg_exit;
5182 ++ if (blob_length) {
5183 ++ rc = decode_negTokenInit(security_blob, blob_length, server);
5184 ++ if (rc == 1)
5185 ++ rc = 0;
5186 ++ else if (rc == 0)
5187 ++ rc = -EIO;
5188 + }
5189 +-#endif
5190 +-
5191 + neg_exit:
5192 + free_rsp_buf(resp_buftype, rsp);
5193 + return rc;
5194 +@@ -534,8 +530,9 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
5195 + __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
5196 + struct TCP_Server_Info *server = ses->server;
5197 + u16 blob_length = 0;
5198 +- char *security_blob;
5199 +- char *ntlmssp_blob = NULL;
5200 ++ struct key *spnego_key = NULL;
5201 ++ char *security_blob = NULL;
5202 ++ unsigned char *ntlmssp_blob = NULL;
5203 + bool use_spnego = false; /* else use raw ntlmssp */
5204 +
5205 + cifs_dbg(FYI, "Session Setup\n");
5206 +@@ -562,7 +559,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
5207 + ses->ntlmssp->sesskey_per_smbsess = true;
5208 +
5209 + /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
5210 +- ses->sectype = RawNTLMSSP;
5211 ++ if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
5212 ++ ses->sectype = RawNTLMSSP;
5213 +
5214 + ssetup_ntlmssp_authenticate:
5215 + if (phase == NtLmChallenge)
5216 +@@ -591,7 +589,48 @@ ssetup_ntlmssp_authenticate:
5217 + iov[0].iov_base = (char *)req;
5218 + /* 4 for rfc1002 length field and 1 for pad */
5219 + iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
5220 +- if (phase == NtLmNegotiate) {
5221 ++
5222 ++ if (ses->sectype == Kerberos) {
5223 ++#ifdef CONFIG_CIFS_UPCALL
5224 ++ struct cifs_spnego_msg *msg;
5225 ++
5226 ++ spnego_key = cifs_get_spnego_key(ses);
5227 ++ if (IS_ERR(spnego_key)) {
5228 ++ rc = PTR_ERR(spnego_key);
5229 ++ spnego_key = NULL;
5230 ++ goto ssetup_exit;
5231 ++ }
5232 ++
5233 ++ msg = spnego_key->payload.data;
5234 ++ /*
5235 ++ * check version field to make sure that cifs.upcall is
5236 ++ * sending us a response in an expected form
5237 ++ */
5238 ++ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
5239 ++ cifs_dbg(VFS,
5240 ++ "bad cifs.upcall version. Expected %d got %d",
5241 ++ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
5242 ++ rc = -EKEYREJECTED;
5243 ++ goto ssetup_exit;
5244 ++ }
5245 ++ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
5246 ++ GFP_KERNEL);
5247 ++ if (!ses->auth_key.response) {
5248 ++ cifs_dbg(VFS,
5249 ++ "Kerberos can't allocate (%u bytes) memory",
5250 ++ msg->sesskey_len);
5251 ++ rc = -ENOMEM;
5252 ++ goto ssetup_exit;
5253 ++ }
5254 ++ ses->auth_key.len = msg->sesskey_len;
5255 ++ blob_length = msg->secblob_len;
5256 ++ iov[1].iov_base = msg->data + msg->sesskey_len;
5257 ++ iov[1].iov_len = blob_length;
5258 ++#else
5259 ++ rc = -EOPNOTSUPP;
5260 ++ goto ssetup_exit;
5261 ++#endif /* CONFIG_CIFS_UPCALL */
5262 ++ } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
5263 + ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
5264 + GFP_KERNEL);
5265 + if (ntlmssp_blob == NULL) {
5266 +@@ -614,15 +653,11 @@ ssetup_ntlmssp_authenticate:
5267 + /* with raw NTLMSSP we don't encapsulate in SPNEGO */
5268 + security_blob = ntlmssp_blob;
5269 + }
5270 ++ iov[1].iov_base = security_blob;
5271 ++ iov[1].iov_len = blob_length;
5272 + } else if (phase == NtLmAuthenticate) {
5273 + req->hdr.SessionId = ses->Suid;
5274 +- ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
5275 +- GFP_KERNEL);
5276 +- if (ntlmssp_blob == NULL) {
5277 +- rc = -ENOMEM;
5278 +- goto ssetup_exit;
5279 +- }
5280 +- rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
5281 ++ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
5282 + nls_cp);
5283 + if (rc) {
5284 + cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
5285 +@@ -641,6 +676,8 @@ ssetup_ntlmssp_authenticate:
5286 + } else {
5287 + security_blob = ntlmssp_blob;
5288 + }
5289 ++ iov[1].iov_base = security_blob;
5290 ++ iov[1].iov_len = blob_length;
5291 + } else {
5292 + cifs_dbg(VFS, "illegal ntlmssp phase\n");
5293 + rc = -EIO;
5294 +@@ -652,8 +689,6 @@ ssetup_ntlmssp_authenticate:
5295 + cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
5296 + 1 /* pad */ - 4 /* rfc1001 len */);
5297 + req->SecurityBufferLength = cpu_to_le16(blob_length);
5298 +- iov[1].iov_base = security_blob;
5299 +- iov[1].iov_len = blob_length;
5300 +
5301 + inc_rfc1001_len(req, blob_length - 1 /* pad */);
5302 +
5303 +@@ -664,6 +699,7 @@ ssetup_ntlmssp_authenticate:
5304 +
5305 + kfree(security_blob);
5306 + rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
5307 ++ ses->Suid = rsp->hdr.SessionId;
5308 + if (resp_buftype != CIFS_NO_BUFFER &&
5309 + rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
5310 + if (phase != NtLmNegotiate) {
5311 +@@ -681,7 +717,6 @@ ssetup_ntlmssp_authenticate:
5312 + /* NTLMSSP Negotiate sent now processing challenge (response) */
5313 + phase = NtLmChallenge; /* process ntlmssp challenge */
5314 + rc = 0; /* MORE_PROCESSING is not an error here but expected */
5315 +- ses->Suid = rsp->hdr.SessionId;
5316 + rc = decode_ntlmssp_challenge(rsp->Buffer,
5317 + le16_to_cpu(rsp->SecurityBufferLength), ses);
5318 + }
5319 +@@ -738,6 +773,10 @@ keygen_exit:
5320 + kfree(ses->auth_key.response);
5321 + ses->auth_key.response = NULL;
5322 + }
5323 ++ if (spnego_key) {
5324 ++ key_invalidate(spnego_key);
5325 ++ key_put(spnego_key);
5326 ++ }
5327 + kfree(ses->ntlmssp);
5328 +
5329 + return rc;
5330 +@@ -1584,6 +1623,33 @@ SMB2_echo(struct TCP_Server_Info *server)
5331 +
5332 + cifs_dbg(FYI, "In echo request\n");
5333 +
5334 ++ if (server->tcpStatus == CifsNeedNegotiate) {
5335 ++ struct list_head *tmp, *tmp2;
5336 ++ struct cifs_ses *ses;
5337 ++ struct cifs_tcon *tcon;
5338 ++
5339 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
5340 ++ spin_lock(&cifs_tcp_ses_lock);
5341 ++ list_for_each(tmp, &server->smb_ses_list) {
5342 ++ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
5343 ++ list_for_each(tmp2, &ses->tcon_list) {
5344 ++ tcon = list_entry(tmp2, struct cifs_tcon,
5345 ++ tcon_list);
5346 ++ /* add check for persistent handle reconnect */
5347 ++ if (tcon && tcon->need_reconnect) {
5348 ++ spin_unlock(&cifs_tcp_ses_lock);
5349 ++ rc = smb2_reconnect(SMB2_ECHO, tcon);
5350 ++ spin_lock(&cifs_tcp_ses_lock);
5351 ++ }
5352 ++ }
5353 ++ }
5354 ++ spin_unlock(&cifs_tcp_ses_lock);
5355 ++ }
5356 ++
5357 ++ /* if no session, renegotiate failed above */
5358 ++ if (server->tcpStatus == CifsNeedNegotiate)
5359 ++ return -EIO;
5360 ++
5361 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
5362 + if (rc)
5363 + return rc;
5364 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5365 +index 2ea75cbeb697..6d7293082086 100644
5366 +--- a/fs/ext4/inode.c
5367 ++++ b/fs/ext4/inode.c
5368 +@@ -4841,6 +4841,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5369 + might_sleep();
5370 + trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5371 + err = ext4_reserve_inode_write(handle, inode, &iloc);
5372 ++ if (err)
5373 ++ return err;
5374 + if (ext4_handle_valid(handle) &&
5375 + EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5376 + !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5377 +@@ -4871,9 +4873,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5378 + }
5379 + }
5380 + }
5381 +- if (!err)
5382 +- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5383 +- return err;
5384 ++ return ext4_mark_iloc_dirty(handle, inode, &iloc);
5385 + }
5386 +
5387 + /*
5388 +diff --git a/fs/locks.c b/fs/locks.c
5389 +index 298d1f5c66f0..253ca5c2b9c7 100644
5390 +--- a/fs/locks.c
5391 ++++ b/fs/locks.c
5392 +@@ -1580,7 +1580,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
5393 + {
5394 + struct file_lock *fl, **before, **my_before = NULL, *lease;
5395 + struct dentry *dentry = filp->f_path.dentry;
5396 +- struct inode *inode = dentry->d_inode;
5397 ++ struct inode *inode = file_inode(filp);
5398 + bool is_deleg = (*flp)->fl_flags & FL_DELEG;
5399 + int error;
5400 + LIST_HEAD(dispose);
5401 +diff --git a/fs/namespace.c b/fs/namespace.c
5402 +index db31a49e01fd..d0cd3f4012ec 100644
5403 +--- a/fs/namespace.c
5404 ++++ b/fs/namespace.c
5405 +@@ -1494,6 +1494,7 @@ void __detach_mounts(struct dentry *dentry)
5406 + goto out_unlock;
5407 +
5408 + lock_mount_hash();
5409 ++ event++;
5410 + while (!hlist_empty(&mp->m_list)) {
5411 + mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
5412 + umount_tree(mnt, 0);
5413 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
5414 +index 7a8d67cd823d..29b7a75064af 100644
5415 +--- a/fs/nfs/dir.c
5416 ++++ b/fs/nfs/dir.c
5417 +@@ -1529,9 +1529,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
5418 + err = PTR_ERR(inode);
5419 + trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
5420 + put_nfs_open_context(ctx);
5421 ++ d_drop(dentry);
5422 + switch (err) {
5423 + case -ENOENT:
5424 +- d_drop(dentry);
5425 + d_add(dentry, NULL);
5426 + nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
5427 + break;
5428 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5429 +index 575fb0ebf980..43fb57831567 100644
5430 +--- a/fs/nfs/nfs4proc.c
5431 ++++ b/fs/nfs/nfs4proc.c
5432 +@@ -2635,12 +2635,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
5433 + call_close |= is_wronly;
5434 + else if (is_wronly)
5435 + calldata->arg.fmode |= FMODE_WRITE;
5436 ++ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
5437 ++ call_close |= is_rdwr;
5438 + } else if (is_rdwr)
5439 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
5440 +
5441 +- if (calldata->arg.fmode == 0)
5442 +- call_close |= is_rdwr;
5443 +-
5444 + if (!nfs4_valid_open_stateid(state))
5445 + call_close = 0;
5446 + spin_unlock(&state->owner->so_lock);
5447 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
5448 +index 7cbdf1b2e4ab..9701a76f562e 100644
5449 +--- a/fs/nfsd/nfs4callback.c
5450 ++++ b/fs/nfsd/nfs4callback.c
5451 +@@ -630,22 +630,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
5452 + }
5453 + }
5454 +
5455 +-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
5456 +-{
5457 +- struct rpc_xprt *xprt;
5458 +-
5459 +- if (args->protocol != XPRT_TRANSPORT_BC_TCP)
5460 +- return rpc_create(args);
5461 +-
5462 +- xprt = args->bc_xprt->xpt_bc_xprt;
5463 +- if (xprt) {
5464 +- xprt_get(xprt);
5465 +- return rpc_create_xprt(args, xprt);
5466 +- }
5467 +-
5468 +- return rpc_create(args);
5469 +-}
5470 +-
5471 + static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
5472 + {
5473 + int maxtime = max_cb_time(clp->net);
5474 +@@ -688,7 +672,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
5475 + args.authflavor = ses->se_cb_sec.flavor;
5476 + }
5477 + /* Create RPC client */
5478 +- client = create_backchannel_client(&args);
5479 ++ client = rpc_create(&args);
5480 + if (IS_ERR(client)) {
5481 + dprintk("NFSD: couldn't create callback client: %ld\n",
5482 + PTR_ERR(client));
5483 +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
5484 +index 9da25fe9ea61..ed3a2d9ec5f7 100644
5485 +--- a/fs/nilfs2/the_nilfs.c
5486 ++++ b/fs/nilfs2/the_nilfs.c
5487 +@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
5488 + if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
5489 + return 0;
5490 + bytes = le16_to_cpu(sbp->s_bytes);
5491 +- if (bytes > BLOCK_SIZE)
5492 ++ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
5493 + return 0;
5494 + crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
5495 + sumoff);
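
The nilfs2 hunk tightens superblock validation: the checksummed length read from disk must at least cover the fields about to be trusted, here everything up to and including the 4-byte CRC at 'sumoff', and must not exceed the block. A standalone sketch of the bounds check:

    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCK_SIZE 1024

    static bool length_valid(unsigned bytes, unsigned sumoff)
    {
            return bytes >= sumoff + 4 && bytes <= BLOCK_SIZE;
    }

    int main(void)
    {
            printf("%d\n", length_valid(1024, 8));  /* 1: fits the block */
            printf("%d\n", length_valid(6, 8));     /* 0: shorter than the CRC */
            printf("%d\n", length_valid(4096, 8));  /* 0: beyond the block */
            return 0;
    }
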
5496 +diff --git a/fs/pipe.c b/fs/pipe.c
5497 +index 21981e58e2a6..e3ba6c3a1743 100644
5498 +--- a/fs/pipe.c
5499 ++++ b/fs/pipe.c
5500 +@@ -39,6 +39,12 @@ unsigned int pipe_max_size = 1048576;
5501 + */
5502 + unsigned int pipe_min_size = PAGE_SIZE;
5503 +
5504 ++/* Maximum allocatable pages per user. Hard limit is unset by default; the
5505 ++ * soft limit matches the default values.
5506 ++ */
5507 ++unsigned long pipe_user_pages_hard;
5508 ++unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
5509 ++
5510 + /*
5511 + * We use a start+len construction, which provides full use of the
5512 + * allocated memory.
5513 +@@ -585,20 +591,49 @@ pipe_fasync(int fd, struct file *filp, int on)
5514 + return retval;
5515 + }
5516 +
5517 ++static void account_pipe_buffers(struct pipe_inode_info *pipe,
5518 ++ unsigned long old, unsigned long new)
5519 ++{
5520 ++ atomic_long_add(new - old, &pipe->user->pipe_bufs);
5521 ++}
5522 ++
5523 ++static bool too_many_pipe_buffers_soft(struct user_struct *user)
5524 ++{
5525 ++ return pipe_user_pages_soft &&
5526 ++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
5527 ++}
5528 ++
5529 ++static bool too_many_pipe_buffers_hard(struct user_struct *user)
5530 ++{
5531 ++ return pipe_user_pages_hard &&
5532 ++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
5533 ++}
5534 ++
5535 + struct pipe_inode_info *alloc_pipe_info(void)
5536 + {
5537 + struct pipe_inode_info *pipe;
5538 +
5539 + pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
5540 + if (pipe) {
5541 +- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
5542 ++ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
5543 ++ struct user_struct *user = get_current_user();
5544 ++
5545 ++ if (!too_many_pipe_buffers_hard(user)) {
5546 ++ if (too_many_pipe_buffers_soft(user))
5547 ++ pipe_bufs = 1;
5548 ++ pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
5549 ++ }
5550 ++
5551 + if (pipe->bufs) {
5552 + init_waitqueue_head(&pipe->wait);
5553 + pipe->r_counter = pipe->w_counter = 1;
5554 +- pipe->buffers = PIPE_DEF_BUFFERS;
5555 ++ pipe->buffers = pipe_bufs;
5556 ++ pipe->user = user;
5557 ++ account_pipe_buffers(pipe, 0, pipe_bufs);
5558 + mutex_init(&pipe->mutex);
5559 + return pipe;
5560 + }
5561 ++ free_uid(user);
5562 + kfree(pipe);
5563 + }
5564 +
5565 +@@ -609,6 +644,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
5566 + {
5567 + int i;
5568 +
5569 ++ account_pipe_buffers(pipe, pipe->buffers, 0);
5570 ++ free_uid(pipe->user);
5571 + for (i = 0; i < pipe->buffers; i++) {
5572 + struct pipe_buffer *buf = pipe->bufs + i;
5573 + if (buf->ops)
5574 +@@ -999,6 +1036,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
5575 + memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
5576 + }
5577 +
5578 ++ account_pipe_buffers(pipe, pipe->buffers, nr_pages);
5579 + pipe->curbuf = 0;
5580 + kfree(pipe->bufs);
5581 + pipe->bufs = bufs;
5582 +@@ -1070,6 +1108,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
5583 + if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
5584 + ret = -EPERM;
5585 + goto out;
5586 ++ } else if ((too_many_pipe_buffers_hard(pipe->user) ||
5587 ++ too_many_pipe_buffers_soft(pipe->user)) &&
5588 ++ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
5589 ++ ret = -EPERM;
5590 ++ goto out;
5591 + }
5592 + ret = pipe_set_size(pipe, nr_pages);
5593 + break;
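
The pipe.c additions introduce three regimes for a non-privileged user: the normal buffer count below the soft limit, a single buffer once the soft limit is reached, and no new pipes at the hard limit (which defaults to 0, meaning unlimited). A toy model of that allocation policy, with the soft default of PIPE_DEF_BUFFERS * INR_OPEN_CUR = 16384 hard-coded:

    #include <stdio.h>

    static long pipe_user_pages_hard = 0;      /* 0 = no hard limit */
    static long pipe_user_pages_soft = 16384;

    struct user { long pipe_bufs; };

    /* How many buffers a new pipe may get for this user. */
    static int bufs_for_new_pipe(struct user *u, int want)
    {
            if (pipe_user_pages_hard && u->pipe_bufs >= pipe_user_pages_hard)
                    return 0;   /* hard limit: allocation fails */
            if (pipe_user_pages_soft && u->pipe_bufs >= pipe_user_pages_soft)
                    return 1;   /* soft limit: minimal pipe */
            return want;
    }

    int main(void)
    {
            struct user u = { .pipe_bufs = 0 };

            printf("%d\n", bufs_for_new_pipe(&u, 16));  /* 16 */
            u.pipe_bufs = 20000;
            printf("%d\n", bufs_for_new_pipe(&u, 16));  /* 1: over soft limit */
            return 0;
    }
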
5594 +diff --git a/fs/proc/base.c b/fs/proc/base.c
5595 +index 76b296fe93c9..dc98620634a3 100644
5596 +--- a/fs/proc/base.c
5597 ++++ b/fs/proc/base.c
5598 +@@ -752,7 +752,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
5599 + int ret = 0;
5600 + struct mm_struct *mm = file->private_data;
5601 +
5602 +- if (!mm)
5603 ++ /* Ensure the process spawned far enough to have an environment. */
5604 ++ if (!mm || !mm->env_end)
5605 + return 0;
5606 +
5607 + page = (char *)__get_free_page(GFP_TEMPORARY);
5608 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
5609 +index b5b593c45270..d37140e5b722 100644
5610 +--- a/fs/ubifs/file.c
5611 ++++ b/fs/ubifs/file.c
5612 +@@ -54,6 +54,7 @@
5613 + #include <linux/mount.h>
5614 + #include <linux/namei.h>
5615 + #include <linux/slab.h>
5616 ++#include <linux/migrate.h>
5617 +
5618 + static int read_block(struct inode *inode, void *addr, unsigned int block,
5619 + struct ubifs_data_node *dn)
5620 +@@ -1419,6 +1420,26 @@ static int ubifs_set_page_dirty(struct page *page)
5621 + return ret;
5622 + }
5623 +
5624 ++#ifdef CONFIG_MIGRATION
5625 ++static int ubifs_migrate_page(struct address_space *mapping,
5626 ++ struct page *newpage, struct page *page, enum migrate_mode mode)
5627 ++{
5628 ++ int rc;
5629 ++
5630 ++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
5631 ++ if (rc != MIGRATEPAGE_SUCCESS)
5632 ++ return rc;
5633 ++
5634 ++ if (PagePrivate(page)) {
5635 ++ ClearPagePrivate(page);
5636 ++ SetPagePrivate(newpage);
5637 ++ }
5638 ++
5639 ++ migrate_page_copy(newpage, page);
5640 ++ return MIGRATEPAGE_SUCCESS;
5641 ++}
5642 ++#endif
5643 ++
5644 + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
5645 + {
5646 + /*
5647 +@@ -1556,6 +1577,9 @@ const struct address_space_operations ubifs_file_address_operations = {
5648 + .write_end = ubifs_write_end,
5649 + .invalidatepage = ubifs_invalidatepage,
5650 + .set_page_dirty = ubifs_set_page_dirty,
5651 ++#ifdef CONFIG_MIGRATION
5652 ++ .migratepage = ubifs_migrate_page,
5653 ++#endif
5654 + .releasepage = ubifs_releasepage,
5655 + };
5656 +
5657 +diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
5658 +index eff34218f405..2300743e701e 100644
5659 +--- a/fs/xfs/libxfs/xfs_alloc.c
5660 ++++ b/fs/xfs/libxfs/xfs_alloc.c
5661 +@@ -515,6 +515,7 @@ xfs_agfl_write_verify(
5662 + }
5663 +
5664 + const struct xfs_buf_ops xfs_agfl_buf_ops = {
5665 ++ .name = "xfs_agfl",
5666 + .verify_read = xfs_agfl_read_verify,
5667 + .verify_write = xfs_agfl_write_verify,
5668 + };
5669 +@@ -2271,6 +2272,7 @@ xfs_agf_write_verify(
5670 + }
5671 +
5672 + const struct xfs_buf_ops xfs_agf_buf_ops = {
5673 ++ .name = "xfs_agf",
5674 + .verify_read = xfs_agf_read_verify,
5675 + .verify_write = xfs_agf_write_verify,
5676 + };
5677 +diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
5678 +index e0e83e24d3ef..3bd94d5a5068 100644
5679 +--- a/fs/xfs/libxfs/xfs_alloc_btree.c
5680 ++++ b/fs/xfs/libxfs/xfs_alloc_btree.c
5681 +@@ -380,6 +380,7 @@ xfs_allocbt_write_verify(
5682 + }
5683 +
5684 + const struct xfs_buf_ops xfs_allocbt_buf_ops = {
5685 ++ .name = "xfs_allocbt",
5686 + .verify_read = xfs_allocbt_read_verify,
5687 + .verify_write = xfs_allocbt_write_verify,
5688 + };
5689 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
5690 +index b7cd0a0541af..905b0cdb0107 100644
5691 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c
5692 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
5693 +@@ -251,6 +251,7 @@ xfs_attr3_leaf_read_verify(
5694 + }
5695 +
5696 + const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
5697 ++ .name = "xfs_attr3_leaf",
5698 + .verify_read = xfs_attr3_leaf_read_verify,
5699 + .verify_write = xfs_attr3_leaf_write_verify,
5700 + };
5701 +diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
5702 +index 7510ab8058a4..fda9c2aad37c 100644
5703 +--- a/fs/xfs/libxfs/xfs_attr_remote.c
5704 ++++ b/fs/xfs/libxfs/xfs_attr_remote.c
5705 +@@ -198,6 +198,7 @@ xfs_attr3_rmt_write_verify(
5706 + }
5707 +
5708 + const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
5709 ++ .name = "xfs_attr3_rmt",
5710 + .verify_read = xfs_attr3_rmt_read_verify,
5711 + .verify_write = xfs_attr3_rmt_write_verify,
5712 + };
5713 +diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
5714 +index fba753308f31..e6bde91a7d0c 100644
5715 +--- a/fs/xfs/libxfs/xfs_bmap_btree.c
5716 ++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
5717 +@@ -722,6 +722,7 @@ xfs_bmbt_write_verify(
5718 + }
5719 +
5720 + const struct xfs_buf_ops xfs_bmbt_buf_ops = {
5721 ++ .name = "xfs_bmbt",
5722 + .verify_read = xfs_bmbt_read_verify,
5723 + .verify_write = xfs_bmbt_write_verify,
5724 + };
5725 +diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
5726 +index fd827530afec..837b366c3fcf 100644
5727 +--- a/fs/xfs/libxfs/xfs_da_btree.c
5728 ++++ b/fs/xfs/libxfs/xfs_da_btree.c
5729 +@@ -243,6 +243,7 @@ xfs_da3_node_read_verify(
5730 + }
5731 +
5732 + const struct xfs_buf_ops xfs_da3_node_buf_ops = {
5733 ++ .name = "xfs_da3_node",
5734 + .verify_read = xfs_da3_node_read_verify,
5735 + .verify_write = xfs_da3_node_write_verify,
5736 + };
5737 +diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
5738 +index 9628ceccfa02..11a3beed2e57 100644
5739 +--- a/fs/xfs/libxfs/xfs_dir2_block.c
5740 ++++ b/fs/xfs/libxfs/xfs_dir2_block.c
5741 +@@ -123,6 +123,7 @@ xfs_dir3_block_write_verify(
5742 + }
5743 +
5744 + const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
5745 ++ .name = "xfs_dir3_block",
5746 + .verify_read = xfs_dir3_block_read_verify,
5747 + .verify_write = xfs_dir3_block_write_verify,
5748 + };
5749 +diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
5750 +index fdd803fecb8e..47504a191bb3 100644
5751 +--- a/fs/xfs/libxfs/xfs_dir2_data.c
5752 ++++ b/fs/xfs/libxfs/xfs_dir2_data.c
5753 +@@ -302,11 +302,13 @@ xfs_dir3_data_write_verify(
5754 + }
5755 +
5756 + const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
5757 ++ .name = "xfs_dir3_data",
5758 + .verify_read = xfs_dir3_data_read_verify,
5759 + .verify_write = xfs_dir3_data_write_verify,
5760 + };
5761 +
5762 + static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
5763 ++ .name = "xfs_dir3_data_reada",
5764 + .verify_read = xfs_dir3_data_reada_verify,
5765 + .verify_write = xfs_dir3_data_write_verify,
5766 + };
5767 +diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
5768 +index a19174eb3cb2..a03f595674bf 100644
5769 +--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
5770 ++++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
5771 +@@ -244,11 +244,13 @@ xfs_dir3_leafn_write_verify(
5772 + }
5773 +
5774 + const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
5775 ++ .name = "xfs_dir3_leaf1",
5776 + .verify_read = xfs_dir3_leaf1_read_verify,
5777 + .verify_write = xfs_dir3_leaf1_write_verify,
5778 + };
5779 +
5780 + const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
5781 ++ .name = "xfs_dir3_leafn",
5782 + .verify_read = xfs_dir3_leafn_read_verify,
5783 + .verify_write = xfs_dir3_leafn_write_verify,
5784 + };
5785 +diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
5786 +index 2ae6ac2c11ae..75431b220174 100644
5787 +--- a/fs/xfs/libxfs/xfs_dir2_node.c
5788 ++++ b/fs/xfs/libxfs/xfs_dir2_node.c
5789 +@@ -149,6 +149,7 @@ xfs_dir3_free_write_verify(
5790 + }
5791 +
5792 + const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
5793 ++ .name = "xfs_dir3_free",
5794 + .verify_read = xfs_dir3_free_read_verify,
5795 + .verify_write = xfs_dir3_free_write_verify,
5796 + };
5797 +diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
5798 +index 89b7ddfe5377..c6fba7314e8f 100644
5799 +--- a/fs/xfs/libxfs/xfs_dquot_buf.c
5800 ++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
5801 +@@ -303,6 +303,7 @@ xfs_dquot_buf_write_verify(
5802 + }
5803 +
5804 + const struct xfs_buf_ops xfs_dquot_buf_ops = {
5805 ++ .name = "xfs_dquot",
5806 + .verify_read = xfs_dquot_buf_read_verify,
5807 + .verify_write = xfs_dquot_buf_write_verify,
5808 + };
5809 +diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
5810 +index 23dcb72fc5e6..196f29690484 100644
5811 +--- a/fs/xfs/libxfs/xfs_ialloc.c
5812 ++++ b/fs/xfs/libxfs/xfs_ialloc.c
5813 +@@ -2105,6 +2105,7 @@ xfs_agi_write_verify(
5814 + }
5815 +
5816 + const struct xfs_buf_ops xfs_agi_buf_ops = {
5817 ++ .name = "xfs_agi",
5818 + .verify_read = xfs_agi_read_verify,
5819 + .verify_write = xfs_agi_write_verify,
5820 + };
5821 +diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
5822 +index c9b06f30fe86..14ae3e295e7c 100644
5823 +--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
5824 ++++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
5825 +@@ -297,6 +297,7 @@ xfs_inobt_write_verify(
5826 + }
5827 +
5828 + const struct xfs_buf_ops xfs_inobt_buf_ops = {
5829 ++ .name = "xfs_inobt",
5830 + .verify_read = xfs_inobt_read_verify,
5831 + .verify_write = xfs_inobt_write_verify,
5832 + };
5833 +diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
5834 +index 8249599238c3..1bf2f1ccd957 100644
5835 +--- a/fs/xfs/libxfs/xfs_inode_buf.c
5836 ++++ b/fs/xfs/libxfs/xfs_inode_buf.c
5837 +@@ -141,11 +141,13 @@ xfs_inode_buf_write_verify(
5838 + }
5839 +
5840 + const struct xfs_buf_ops xfs_inode_buf_ops = {
5841 ++ .name = "xfs_inode",
5842 + .verify_read = xfs_inode_buf_read_verify,
5843 + .verify_write = xfs_inode_buf_write_verify,
5844 + };
5845 +
5846 + const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
5847 ++ .name = "xxfs_inode_ra",
5848 + .verify_read = xfs_inode_buf_readahead_verify,
5849 + .verify_write = xfs_inode_buf_write_verify,
5850 + };
5851 +diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
5852 +index 5f902fa7913f..acfcdc625aca 100644
5853 +--- a/fs/xfs/libxfs/xfs_sb.c
5854 ++++ b/fs/xfs/libxfs/xfs_sb.c
5855 +@@ -704,11 +704,13 @@ xfs_sb_write_verify(
5856 + }
5857 +
5858 + const struct xfs_buf_ops xfs_sb_buf_ops = {
5859 ++ .name = "xfs_sb",
5860 + .verify_read = xfs_sb_read_verify,
5861 + .verify_write = xfs_sb_write_verify,
5862 + };
5863 +
5864 + const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
5865 ++ .name = "xfs_sb_quiet",
5866 + .verify_read = xfs_sb_quiet_read_verify,
5867 + .verify_write = xfs_sb_write_verify,
5868 + };
5869 +diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
5870 +index a7dce9aea5b5..ccbbbbc7f25a 100644
5871 +--- a/fs/xfs/libxfs/xfs_symlink_remote.c
5872 ++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
5873 +@@ -166,6 +166,7 @@ xfs_symlink_write_verify(
5874 + }
5875 +
5876 + const struct xfs_buf_ops xfs_symlink_buf_ops = {
5877 ++ .name = "xfs_symlink",
5878 + .verify_read = xfs_symlink_read_verify,
5879 + .verify_write = xfs_symlink_write_verify,
5880 + };
5881 +diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
5882 +index 82002c00af90..136319ccabf5 100644
5883 +--- a/fs/xfs/xfs_buf.h
5884 ++++ b/fs/xfs/xfs_buf.h
5885 +@@ -131,6 +131,7 @@ struct xfs_buf_map {
5886 + struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
5887 +
5888 + struct xfs_buf_ops {
5889 ++ char *name;
5890 + void (*verify_read)(struct xfs_buf *);
5891 + void (*verify_write)(struct xfs_buf *);
5892 + };
5893 +diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
5894 +index b92fd7bc49e3..d55a1ce6d880 100644
5895 +--- a/fs/xfs/xfs_error.c
5896 ++++ b/fs/xfs/xfs_error.c
5897 +@@ -166,9 +166,9 @@ xfs_verifier_error(
5898 + {
5899 + struct xfs_mount *mp = bp->b_target->bt_mount;
5900 +
5901 +- xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx",
5902 ++ xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
5903 + bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
5904 +- __return_address, bp->b_bn);
5905 ++ __return_address, bp->b_ops->name, bp->b_bn);
5906 +
5907 + xfs_alert(mp, "Unmount and run xfs_repair");
5908 +
5909 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
5910 +index a3e215bb0241..7741efa43b35 100644
5911 +--- a/include/linux/netfilter/x_tables.h
5912 ++++ b/include/linux/netfilter/x_tables.h
5913 +@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
5914 + int xt_register_matches(struct xt_match *match, unsigned int n);
5915 + void xt_unregister_matches(struct xt_match *match, unsigned int n);
5916 +
5917 ++int xt_check_entry_offsets(const void *base, const char *elems,
5918 ++ unsigned int target_offset,
5919 ++ unsigned int next_offset);
5920 ++
5921 + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
5922 + bool inv_proto);
5923 + int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
5924 + bool inv_proto);
5925 +
5926 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
5927 ++ struct xt_counters_info *info, bool compat);
5928 ++
5929 + struct xt_table *xt_register_table(struct net *net,
5930 + const struct xt_table *table,
5931 + struct xt_table_info *bootstrap,
5932 +@@ -421,7 +428,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
5933 + int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
5934 +
5935 + int xt_compat_match_offset(const struct xt_match *match);
5936 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
5937 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
5938 + unsigned int *size);
5939 + int xt_compat_match_to_user(const struct xt_entry_match *m,
5940 + void __user **dstptr, unsigned int *size);
5941 +@@ -431,6 +438,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
5942 + unsigned int *size);
5943 + int xt_compat_target_to_user(const struct xt_entry_target *t,
5944 + void __user **dstptr, unsigned int *size);
5945 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
5946 ++ unsigned int target_offset,
5947 ++ unsigned int next_offset);
5948 +
5949 + #endif /* CONFIG_COMPAT */
5950 + #endif /* _X_TABLES_H */
5951 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
5952 +index eb8b8ac6df3c..24f5470d3944 100644
5953 +--- a/include/linux/pipe_fs_i.h
5954 ++++ b/include/linux/pipe_fs_i.h
5955 +@@ -42,6 +42,7 @@ struct pipe_buffer {
5956 + * @fasync_readers: reader side fasync
5957 + * @fasync_writers: writer side fasync
5958 + * @bufs: the circular array of pipe buffers
5959 ++ * @user: the user who created this pipe
5960 + **/
5961 + struct pipe_inode_info {
5962 + struct mutex mutex;
5963 +@@ -57,6 +58,7 @@ struct pipe_inode_info {
5964 + struct fasync_struct *fasync_readers;
5965 + struct fasync_struct *fasync_writers;
5966 + struct pipe_buffer *bufs;
5967 ++ struct user_struct *user;
5968 + };
5969 +
5970 + /*
5971 +@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info *);
5972 + void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
5973 +
5974 + extern unsigned int pipe_max_size, pipe_min_size;
5975 ++extern unsigned long pipe_user_pages_hard;
5976 ++extern unsigned long pipe_user_pages_soft;
5977 + int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
5978 +
5979 +
5980 +diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
5981 +index a6591c693ebb..e8d9f84ec931 100644
5982 +--- a/include/linux/platform_data/asoc-s3c.h
5983 ++++ b/include/linux/platform_data/asoc-s3c.h
5984 +@@ -38,6 +38,10 @@ struct samsung_i2s {
5985 + */
5986 + struct s3c_audio_pdata {
5987 + int (*cfg_gpio)(struct platform_device *);
5988 ++ void *dma_playback;
5989 ++ void *dma_capture;
5990 ++ void *dma_play_sec;
5991 ++ void *dma_capture_mic;
5992 + union {
5993 + struct samsung_i2s i2s;
5994 + } type;
5995 +diff --git a/include/linux/sched.h b/include/linux/sched.h
5996 +index 2a0bcc8411dc..ebfd7a5ea6d4 100644
5997 +--- a/include/linux/sched.h
5998 ++++ b/include/linux/sched.h
5999 +@@ -759,6 +759,7 @@ struct user_struct {
6000 + #endif
6001 + unsigned long locked_shm; /* How many pages of mlocked shm ? */
6002 + unsigned long unix_inflight; /* How many files in flight in unix sockets */
6003 ++ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
6004 +
6005 + #ifdef CONFIG_KEYS
6006 + struct key *uid_keyring; /* UID specific keyring */
6007 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
6008 +index b363a0f6dfe1..3fbae6853df3 100644
6009 +--- a/include/linux/sunrpc/clnt.h
6010 ++++ b/include/linux/sunrpc/clnt.h
6011 +@@ -131,8 +131,6 @@ struct rpc_create_args {
6012 + #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
6013 +
6014 + struct rpc_clnt *rpc_create(struct rpc_create_args *args);
6015 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6016 +- struct rpc_xprt *xprt);
6017 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
6018 + const struct rpc_program *, u32);
6019 + void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
6020 +diff --git a/include/linux/swap.h b/include/linux/swap.h
6021 +index 37a585beef5c..1dc0e886227d 100644
6022 +--- a/include/linux/swap.h
6023 ++++ b/include/linux/swap.h
6024 +@@ -315,7 +315,7 @@ extern void lru_add_drain(void);
6025 + extern void lru_add_drain_cpu(int cpu);
6026 + extern void lru_add_drain_all(void);
6027 + extern void rotate_reclaimable_page(struct page *page);
6028 +-extern void deactivate_page(struct page *page);
6029 ++extern void deactivate_file_page(struct page *page);
6030 + extern void swap_setup(void);
6031 +
6032 + extern void add_page_to_unevictable_list(struct page *page);
6033 +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
6034 +index daec99af5d54..1c88b177cb9c 100644
6035 +--- a/include/linux/usb/ehci_def.h
6036 ++++ b/include/linux/usb/ehci_def.h
6037 +@@ -178,11 +178,11 @@ struct ehci_regs {
6038 + * PORTSCx
6039 + */
6040 + /* HOSTPC: offset 0x84 */
6041 +- u32 hostpc[1]; /* HOSTPC extension */
6042 ++ u32 hostpc[0]; /* HOSTPC extension */
6043 + #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
6044 + #define HOSTPC_PSPD (3<<25) /* Port speed detection */
6045 +
6046 +- u32 reserved5[16];
6047 ++ u32 reserved5[17];
6048 +
6049 + /* USBMODE_EX: offset 0xc8 */
6050 + u32 usbmode_ex; /* USB Device mode extension */
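
The ehci_def.h fix relies on two layout facts: a zero-length array (a GNU C extension) marks an offset without occupying space, and growing reserved5[] by one word keeps every later register at its original offset. A standalone check of the resulting offsets, with the registers before HOSTPC collapsed into one padding array:

    #include <stddef.h>
    #include <stdio.h>

    struct ehci_regs_tail {
            unsigned int pad[33];        /* stands in for everything up to 0x84 */
            unsigned int hostpc[0];      /* offset 0x84, occupies no space */
            unsigned int reserved5[17];
            unsigned int usbmode_ex;     /* must remain at offset 0xc8 */
    };

    int main(void)
    {
            printf("hostpc at 0x%zx\n", offsetof(struct ehci_regs_tail, hostpc));
            printf("usbmode_ex at 0x%zx\n",
                   offsetof(struct ehci_regs_tail, usbmode_ex));
            return 0;
    }
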
6051 +diff --git a/include/net/codel.h b/include/net/codel.h
6052 +index aeee28081245..7302a4df618c 100644
6053 +--- a/include/net/codel.h
6054 ++++ b/include/net/codel.h
6055 +@@ -158,11 +158,13 @@ struct codel_vars {
6056 + * struct codel_stats - contains codel shared variables and stats
6057 + * @maxpacket: largest packet we've seen so far
6058 + * @drop_count: temp count of dropped packets in dequeue()
6059 ++ * @drop_len: bytes of dropped packets in dequeue()
6060 + * ecn_mark: number of packets we ECN marked instead of dropping
6061 + */
6062 + struct codel_stats {
6063 + u32 maxpacket;
6064 + u32 drop_count;
6065 ++ u32 drop_len;
6066 + u32 ecn_mark;
6067 + };
6068 +
6069 +@@ -297,6 +299,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
6070 + vars->rec_inv_sqrt);
6071 + goto end;
6072 + }
6073 ++ stats->drop_len += qdisc_pkt_len(skb);
6074 + qdisc_drop(skb, sch);
6075 + stats->drop_count++;
6076 + skb = dequeue_func(vars, sch);
6077 +@@ -319,6 +322,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
6078 + if (params->ecn && INET_ECN_set_ce(skb)) {
6079 + stats->ecn_mark++;
6080 + } else {
6081 ++ stats->drop_len += qdisc_pkt_len(skb);
6082 + qdisc_drop(skb, sch);
6083 + stats->drop_count++;
6084 +
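Note: drop_len lets CoDel report dropped bytes as well as packets, which the
renamed qdisc_tree_reduce_backlog() (next hunk) needs to keep parent backlog
counters accurate. Sketch of the flush a codel-based qdisc performs after
dequeue, mirroring the upstream sch_codel pattern:

    if (q->stats.drop_count && sch->q.qlen) {
            qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
                                      q->stats.drop_len);
            q->stats.drop_count = 0;
            q->stats.drop_len = 0;
    }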
6085 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
6086 +index 5ccfe161f359..f10b01467f07 100644
6087 +--- a/include/net/sch_generic.h
6088 ++++ b/include/net/sch_generic.h
6089 +@@ -393,7 +393,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
6090 + struct Qdisc *qdisc);
6091 + void qdisc_reset(struct Qdisc *qdisc);
6092 + void qdisc_destroy(struct Qdisc *qdisc);
6093 +-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
6094 ++void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
6095 ++ unsigned int len);
6096 + struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
6097 + const struct Qdisc_ops *ops);
6098 + struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
6099 +@@ -689,6 +690,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
6100 + sch->qstats.backlog = 0;
6101 + }
6102 +
6103 ++static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
6104 ++ struct Qdisc **pold)
6105 ++{
6106 ++ struct Qdisc *old;
6107 ++
6108 ++ sch_tree_lock(sch);
6109 ++ old = *pold;
6110 ++ *pold = new;
6111 ++ if (old != NULL) {
6112 ++ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
6113 ++ qdisc_reset(old);
6114 ++ }
6115 ++ sch_tree_unlock(sch);
6116 ++
6117 ++ return old;
6118 ++}
6119 ++
6120 + static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
6121 + struct sk_buff_head *list)
6122 + {
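Note: qdisc_replace() centralizes the swap-under-tree-lock, backlog adjustment
and reset that classful qdiscs used to open-code. Typical use from a ->graft()
operation (sketch; example_sched_data stands in for the real qdisc's private
struct):

    static int example_graft(struct Qdisc *sch, unsigned long arg,
                             struct Qdisc *new, struct Qdisc **old)
    {
            struct example_sched_data *q = qdisc_priv(sch);

            if (new == NULL)
                    new = &noop_qdisc;

            *old = qdisc_replace(sch, new, &q->qdisc);
            return 0;
    }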
6123 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
6124 +index 67f5f177c071..eac45b688c08 100644
6125 +--- a/kernel/bpf/verifier.c
6126 ++++ b/kernel/bpf/verifier.c
6127 +@@ -1737,7 +1737,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
6128 + if (IS_ERR(map)) {
6129 + verbose("fd %d is not pointing to valid bpf_map\n",
6130 + insn->imm);
6131 +- fdput(f);
6132 + return PTR_ERR(map);
6133 + }
6134 +
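Note: the deleted fdput(f) was a double put. The helper that resolves the fd
to a map pointer already drops the file reference when it fails, so the
caller's error path must not drop it again (sketch of the ownership rule, not
the exact 3.18 code):

    struct fd f = fdget(insn->imm);
    struct bpf_map *map = bpf_map_get(f);   /* fdput()s internally on error */

    if (IS_ERR(map))
            return PTR_ERR(map);            /* no second fdput() here */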
6135 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
6136 +index 136eceadeed1..f033fbd94ac3 100644
6137 +--- a/kernel/cgroup.c
6138 ++++ b/kernel/cgroup.c
6139 +@@ -4489,7 +4489,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
6140 +
6141 + err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
6142 + if (err < 0)
6143 +- goto err_free_percpu_ref;
6144 ++ goto err_free_css;
6145 + css->id = err;
6146 +
6147 + if (visible) {
6148 +@@ -4521,9 +4521,6 @@ err_list_del:
6149 + list_del_rcu(&css->sibling);
6150 + cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
6151 + err_free_id:
6152 +- cgroup_idr_remove(&ss->css_idr, css->id);
6153 +-err_free_percpu_ref:
6154 +- percpu_ref_exit(&css->refcnt);
6155 + err_free_css:
6156 + call_rcu(&css->rcu_head, css_free_rcu_fn);
6157 + return err;
6158 +diff --git a/kernel/futex.c b/kernel/futex.c
6159 +index d58859d62b8b..8874a7b431e4 100644
6160 +--- a/kernel/futex.c
6161 ++++ b/kernel/futex.c
6162 +@@ -1381,8 +1381,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
6163 + if (likely(&hb1->chain != &hb2->chain)) {
6164 + plist_del(&q->list, &hb1->chain);
6165 + hb_waiters_dec(hb1);
6166 +- plist_add(&q->list, &hb2->chain);
6167 + hb_waiters_inc(hb2);
6168 ++ plist_add(&q->list, &hb2->chain);
6169 + q->lock_ptr = &hb2->lock;
6170 + }
6171 + get_futex_key_refs(key2);
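Note: swapping hb_waiters_inc() ahead of plist_add() is an ordering fix, not
cosmetic. futex_wake() tests hb_waiters_pending() before taking hb->lock, so
the counter must never lag behind the list:

    /*
     *  requeue (this path)               futex_wake(uaddr2)
     *  -------------------               ------------------
     *  plist_add(&q->list, &hb2->chain)
     *                                    if (!hb_waiters_pending(hb2))
     *                                            return;   <- lost wakeup
     *  hb_waiters_inc(hb2)
     *
     * Incrementing first means the waker either sees the count or
     * serializes on hb2->lock and finds the queued waiter.
     */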
6172 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6173 +index 4317f0156092..2f774edcc4e8 100644
6174 +--- a/kernel/sched/core.c
6175 ++++ b/kernel/sched/core.c
6176 +@@ -4595,14 +4595,16 @@ void show_state_filter(unsigned long state_filter)
6177 + /*
6178 + * reset the NMI-timeout, listing all files on a slow
6179 + * console might take a lot of time:
6180 ++ * Also, reset softlockup watchdogs on all CPUs, because
6181 ++ * another CPU might be blocked waiting for us to process
6182 ++ * an IPI.
6183 + */
6184 + touch_nmi_watchdog();
6185 ++ touch_all_softlockup_watchdogs();
6186 + if (!state_filter || (p->state & state_filter))
6187 + sched_show_task(p);
6188 + }
6189 +
6190 +- touch_all_softlockup_watchdogs();
6191 +-
6192 + #ifdef CONFIG_SCHED_DEBUG
6193 + sysrq_sched_debug_show();
6194 + #endif
6195 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
6196 +index cd0e835ecb85..07da431802e4 100644
6197 +--- a/kernel/sysctl.c
6198 ++++ b/kernel/sysctl.c
6199 +@@ -1656,6 +1656,20 @@ static struct ctl_table fs_table[] = {
6200 + .proc_handler = &pipe_proc_fn,
6201 + .extra1 = &pipe_min_size,
6202 + },
6203 ++ {
6204 ++ .procname = "pipe-user-pages-hard",
6205 ++ .data = &pipe_user_pages_hard,
6206 ++ .maxlen = sizeof(pipe_user_pages_hard),
6207 ++ .mode = 0644,
6208 ++ .proc_handler = proc_doulongvec_minmax,
6209 ++ },
6210 ++ {
6211 ++ .procname = "pipe-user-pages-soft",
6212 ++ .data = &pipe_user_pages_soft,
6213 ++ .maxlen = sizeof(pipe_user_pages_soft),
6214 ++ .mode = 0644,
6215 ++ .proc_handler = proc_doulongvec_minmax,
6216 ++ },
6217 + { }
6218 + };
6219 +
6220 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
6221 +index 14ffaa59a9e9..9008103db583 100644
6222 +--- a/kernel/trace/trace_printk.c
6223 ++++ b/kernel/trace/trace_printk.c
6224 +@@ -38,6 +38,10 @@ struct trace_bprintk_fmt {
6225 + static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
6226 + {
6227 + struct trace_bprintk_fmt *pos;
6228 ++
6229 ++ if (!fmt)
6230 ++ return ERR_PTR(-EINVAL);
6231 ++
6232 + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
6233 + if (!strcmp(pos->fmt, fmt))
6234 + return pos;
6235 +@@ -59,7 +63,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
6236 + for (iter = start; iter < end; iter++) {
6237 + struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
6238 + if (tb_fmt) {
6239 +- *iter = tb_fmt->fmt;
6240 ++ if (!IS_ERR(tb_fmt))
6241 ++ *iter = tb_fmt->fmt;
6242 + continue;
6243 + }
6244 +
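Note: ERR_PTR values are non-NULL, so the existing `if (tb_fmt)` branch is
taken for them too; the added !IS_ERR() test is what keeps the error cookie
from being dereferenced. The three return classes the caller now handles, as
a sketch:

    struct trace_bprintk_fmt *tb = lookup_format(fmt);

    if (IS_ERR(tb))         /* fmt was NULL: nothing to hold, skip it */
            return;
    if (tb) {               /* already listed: share the existing string */
            fmt = tb->fmt;
            return;
    }
    /* otherwise: allocate and link a new trace_bprintk_fmt */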
6245 +diff --git a/mm/compaction.c b/mm/compaction.c
6246 +index 8d010df763dc..9f8b1dd5303c 100644
6247 +--- a/mm/compaction.c
6248 ++++ b/mm/compaction.c
6249 +@@ -371,6 +371,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
6250 +
6251 + if (!valid_page)
6252 + valid_page = page;
6253 ++
6254 ++ /*
6255 ++ * For compound pages such as THP and hugetlbfs, we can save
6256 ++ * potentially a lot of iterations if we skip them at once.
6257 ++ * The check is racy, but we can consider only valid values
6258 ++ * and the only danger is skipping too much.
6259 ++ */
6260 ++ if (PageCompound(page)) {
6261 ++ unsigned int comp_order = compound_order(page);
6262 ++
6263 ++ if (likely(comp_order < MAX_ORDER)) {
6264 ++ blockpfn += (1UL << comp_order) - 1;
6265 ++ cursor += (1UL << comp_order) - 1;
6266 ++ }
6267 ++
6268 ++ goto isolate_fail;
6269 ++ }
6270 ++
6271 + if (!PageBuddy(page))
6272 + goto isolate_fail;
6273 +
6274 +@@ -402,18 +420,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
6275 +
6276 + /* Found a free page, break it into order-0 pages */
6277 + isolated = split_free_page(page);
6278 ++ if (!isolated)
6279 ++ break;
6280 ++
6281 + total_isolated += isolated;
6282 ++ cc->nr_freepages += isolated;
6283 + for (i = 0; i < isolated; i++) {
6284 + list_add(&page->lru, freelist);
6285 + page++;
6286 + }
6287 +-
6288 +- /* If a page was split, advance to the end of it */
6289 +- if (isolated) {
6290 +- blockpfn += isolated - 1;
6291 +- cursor += isolated - 1;
6292 +- continue;
6293 ++ if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
6294 ++ blockpfn += isolated;
6295 ++ break;
6296 + }
6297 ++ /* Advance to the end of split page */
6298 ++ blockpfn += isolated - 1;
6299 ++ cursor += isolated - 1;
6300 ++ continue;
6301 +
6302 + isolate_fail:
6303 + if (strict)
6304 +@@ -423,6 +446,16 @@ isolate_fail:
6305 +
6306 + }
6307 +
6308 ++ if (locked)
6309 ++ spin_unlock_irqrestore(&cc->zone->lock, flags);
6310 ++
6311 ++ /*
6312 ++ * There is a tiny chance that we have read bogus compound_order(),
6313 ++ * so be careful to not go outside of the pageblock.
6314 ++ */
6315 ++ if (unlikely(blockpfn > end_pfn))
6316 ++ blockpfn = end_pfn;
6317 ++
6318 + /* Record how far we have got within the block */
6319 + *start_pfn = blockpfn;
6320 +
6321 +@@ -436,9 +469,6 @@ isolate_fail:
6322 + if (strict && blockpfn < end_pfn)
6323 + total_isolated = 0;
6324 +
6325 +- if (locked)
6326 +- spin_unlock_irqrestore(&cc->zone->lock, flags);
6327 +-
6328 + /* Update the pageblock-skip if the whole pageblock was scanned */
6329 + if (blockpfn == end_pfn)
6330 + update_pageblock_skip(cc, valid_page, total_isolated, false);
6331 +@@ -864,7 +894,12 @@ static void isolate_freepages(struct compact_control *cc)
6332 +
6333 + /* Found a block suitable for isolating free pages from. */
6334 + isolated = isolate_freepages_block(cc, &isolate_start_pfn,
6335 +- block_end_pfn, freelist, false);
6336 ++ block_end_pfn, freelist, false);
6337 ++ /* If isolation failed early, do not continue needlessly */
6338 ++ if (!isolated && isolate_start_pfn < block_end_pfn &&
6339 ++ cc->nr_migratepages > cc->nr_freepages)
6340 ++ break;
6341 ++
6342 + nr_freepages += isolated;
6343 +
6344 + /*
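Note on the compound-page skip above: both cursors advance by
(1 << order) - 1 because the for-loop's own increment supplies the final +1.
Worked example:

    /* A THP of order 9 spans 512 pages.  One iteration does:
     *      blockpfn += (1UL << 9) - 1;     i.e. +511
     *      cursor   += (1UL << 9) - 1;     i.e. +511
     * and the loop increment adds the last +1, stepping over all 512
     * pages instead of testing PageBuddy() on each one.
     */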
6345 +diff --git a/mm/migrate.c b/mm/migrate.c
6346 +index cd4fd10c4ec3..3594defceff9 100644
6347 +--- a/mm/migrate.c
6348 ++++ b/mm/migrate.c
6349 +@@ -421,6 +421,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
6350 +
6351 + return MIGRATEPAGE_SUCCESS;
6352 + }
6353 ++EXPORT_SYMBOL(migrate_page_move_mapping);
6354 +
6355 + /*
6356 + * The expected number of remaining references is the same as that
6357 +@@ -580,6 +581,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
6358 + if (PageWriteback(newpage))
6359 + end_page_writeback(newpage);
6360 + }
6361 ++EXPORT_SYMBOL(migrate_page_copy);
6362 +
6363 + /************************************************************
6364 + * Migration functions
6365 +diff --git a/mm/page_isolation.c b/mm/page_isolation.c
6366 +index ec66134fb2a5..03648ced21c2 100644
6367 +--- a/mm/page_isolation.c
6368 ++++ b/mm/page_isolation.c
6369 +@@ -299,11 +299,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
6370 + * now as a simple work-around, we use the next node for destination.
6371 + */
6372 + if (PageHuge(page)) {
6373 +- nodemask_t src = nodemask_of_node(page_to_nid(page));
6374 +- nodemask_t dst;
6375 +- nodes_complement(dst, src);
6376 ++ int node = next_online_node(page_to_nid(page));
6377 ++ if (node == MAX_NUMNODES)
6378 ++ node = first_online_node;
6379 + return alloc_huge_page_node(page_hstate(compound_head(page)),
6380 +- next_node(page_to_nid(page), dst));
6381 ++ node);
6382 + }
6383 +
6384 + if (PageHighMem(page))
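Note: next_online_node() returns MAX_NUMNODES past the last online node,
hence the explicit wrap. A two-node sketch:

    /* Online nodes {0, 1}: a huge page on node 1 gets
     *      next_online_node(1) == MAX_NUMNODES  ->  first_online_node (0)
     * while one on node 0 simply targets node 1.  On a single-node
     * system the wrap selects the same node again, which
     * alloc_huge_page_node() tolerates.
     */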
6385 +diff --git a/mm/percpu.c b/mm/percpu.c
6386 +index 88bb6c92d83a..5ae6e0284967 100644
6387 +--- a/mm/percpu.c
6388 ++++ b/mm/percpu.c
6389 +@@ -110,7 +110,7 @@ struct pcpu_chunk {
6390 + int map_used; /* # of map entries used before the sentry */
6391 + int map_alloc; /* # of map entries allocated */
6392 + int *map; /* allocation map */
6393 +- struct work_struct map_extend_work;/* async ->map[] extension */
6394 ++ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
6395 +
6396 + void *data; /* chunk data */
6397 + int first_free; /* no free below this */
6398 +@@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
6399 + static int pcpu_reserved_chunk_limit;
6400 +
6401 + static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
6402 +-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
6403 ++static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
6404 +
6405 + static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
6406 +
6407 ++/* chunks which need their map areas extended, protected by pcpu_lock */
6408 ++static LIST_HEAD(pcpu_map_extend_chunks);
6409 ++
6410 + /*
6411 + * The number of empty populated pages, protected by pcpu_lock. The
6412 + * reserved chunk doesn't contribute to the count.
6413 +@@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
6414 + {
6415 + int margin, new_alloc;
6416 +
6417 ++ lockdep_assert_held(&pcpu_lock);
6418 ++
6419 + if (is_atomic) {
6420 + margin = 3;
6421 +
6422 + if (chunk->map_alloc <
6423 +- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
6424 +- pcpu_async_enabled)
6425 +- schedule_work(&chunk->map_extend_work);
6426 ++ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
6427 ++ if (list_empty(&chunk->map_extend_list)) {
6428 ++ list_add_tail(&chunk->map_extend_list,
6429 ++ &pcpu_map_extend_chunks);
6430 ++ pcpu_schedule_balance_work();
6431 ++ }
6432 ++ }
6433 + } else {
6434 + margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
6435 + }
6436 +@@ -437,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
6437 + size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
6438 + unsigned long flags;
6439 +
6440 ++ lockdep_assert_held(&pcpu_alloc_mutex);
6441 ++
6442 + new = pcpu_mem_zalloc(new_size);
6443 + if (!new)
6444 + return -ENOMEM;
6445 +@@ -469,20 +480,6 @@ out_unlock:
6446 + return 0;
6447 + }
6448 +
6449 +-static void pcpu_map_extend_workfn(struct work_struct *work)
6450 +-{
6451 +- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
6452 +- map_extend_work);
6453 +- int new_alloc;
6454 +-
6455 +- spin_lock_irq(&pcpu_lock);
6456 +- new_alloc = pcpu_need_to_extend(chunk, false);
6457 +- spin_unlock_irq(&pcpu_lock);
6458 +-
6459 +- if (new_alloc)
6460 +- pcpu_extend_area_map(chunk, new_alloc);
6461 +-}
6462 +-
6463 + /**
6464 + * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
6465 + * @chunk: chunk the candidate area belongs to
6466 +@@ -742,7 +739,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
6467 + chunk->map_used = 1;
6468 +
6469 + INIT_LIST_HEAD(&chunk->list);
6470 +- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
6471 ++ INIT_LIST_HEAD(&chunk->map_extend_list);
6472 + chunk->free_size = pcpu_unit_size;
6473 + chunk->contig_hint = pcpu_unit_size;
6474 +
6475 +@@ -897,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
6476 + return NULL;
6477 + }
6478 +
6479 ++ if (!is_atomic)
6480 ++ mutex_lock(&pcpu_alloc_mutex);
6481 ++
6482 + spin_lock_irqsave(&pcpu_lock, flags);
6483 +
6484 + /* serve reserved allocations from the reserved chunk if available */
6485 +@@ -969,12 +969,9 @@ restart:
6486 + if (is_atomic)
6487 + goto fail;
6488 +
6489 +- mutex_lock(&pcpu_alloc_mutex);
6490 +-
6491 + if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
6492 + chunk = pcpu_create_chunk();
6493 + if (!chunk) {
6494 +- mutex_unlock(&pcpu_alloc_mutex);
6495 + err = "failed to allocate new chunk";
6496 + goto fail;
6497 + }
6498 +@@ -985,7 +982,6 @@ restart:
6499 + spin_lock_irqsave(&pcpu_lock, flags);
6500 + }
6501 +
6502 +- mutex_unlock(&pcpu_alloc_mutex);
6503 + goto restart;
6504 +
6505 + area_found:
6506 +@@ -995,8 +991,6 @@ area_found:
6507 + if (!is_atomic) {
6508 + int page_start, page_end, rs, re;
6509 +
6510 +- mutex_lock(&pcpu_alloc_mutex);
6511 +-
6512 + page_start = PFN_DOWN(off);
6513 + page_end = PFN_UP(off + size);
6514 +
6515 +@@ -1007,7 +1001,6 @@ area_found:
6516 +
6517 + spin_lock_irqsave(&pcpu_lock, flags);
6518 + if (ret) {
6519 +- mutex_unlock(&pcpu_alloc_mutex);
6520 + pcpu_free_area(chunk, off, &occ_pages);
6521 + err = "failed to populate";
6522 + goto fail_unlock;
6523 +@@ -1047,6 +1040,8 @@ fail:
6524 + /* see the flag handling in pcpu_balance_workfn() */
6525 + pcpu_atomic_alloc_failed = true;
6526 + pcpu_schedule_balance_work();
6527 ++ } else {
6528 ++ mutex_unlock(&pcpu_alloc_mutex);
6529 + }
6530 + return NULL;
6531 + }
6532 +@@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
6533 + if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
6534 + continue;
6535 +
6536 ++ list_del_init(&chunk->map_extend_list);
6537 + list_move(&chunk->list, &to_free);
6538 + }
6539 +
6540 +@@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
6541 + pcpu_destroy_chunk(chunk);
6542 + }
6543 +
6544 ++ /* service chunks which requested async area map extension */
6545 ++ do {
6546 ++ int new_alloc = 0;
6547 ++
6548 ++ spin_lock_irq(&pcpu_lock);
6549 ++
6550 ++ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
6551 ++ struct pcpu_chunk, map_extend_list);
6552 ++ if (chunk) {
6553 ++ list_del_init(&chunk->map_extend_list);
6554 ++ new_alloc = pcpu_need_to_extend(chunk, false);
6555 ++ }
6556 ++
6557 ++ spin_unlock_irq(&pcpu_lock);
6558 ++
6559 ++ if (new_alloc)
6560 ++ pcpu_extend_area_map(chunk, new_alloc);
6561 ++ } while (chunk);
6562 ++
6563 + /*
6564 + * Ensure there are certain number of free populated pages for
6565 + * atomic allocs. Fill up from the most packed so that atomic
6566 +@@ -1648,7 +1663,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
6567 + */
6568 + schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
6569 + INIT_LIST_HEAD(&schunk->list);
6570 +- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
6571 ++ INIT_LIST_HEAD(&schunk->map_extend_list);
6572 + schunk->base_addr = base_addr;
6573 + schunk->map = smap;
6574 + schunk->map_alloc = ARRAY_SIZE(smap);
6575 +@@ -1678,7 +1693,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
6576 + if (dyn_size) {
6577 + dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
6578 + INIT_LIST_HEAD(&dchunk->list);
6579 +- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
6580 ++ INIT_LIST_HEAD(&dchunk->map_extend_list);
6581 + dchunk->base_addr = base_addr;
6582 + dchunk->map = dmap;
6583 + dchunk->map_alloc = ARRAY_SIZE(dmap);
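Note: the per-chunk work_struct is replaced by one list drained from
pcpu_balance_workfn(), so map extension is serialized under pcpu_alloc_mutex
instead of racing with chunk destruction. The drain uses the usual
pop-under-lock pattern (sketch of one pass, condensed from the hunk above):

    spin_lock_irq(&pcpu_lock);
    chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
                                     struct pcpu_chunk, map_extend_list);
    if (chunk) {
            list_del_init(&chunk->map_extend_list);
            new_alloc = pcpu_need_to_extend(chunk, false);
    }
    spin_unlock_irq(&pcpu_lock);

    if (new_alloc)
            pcpu_extend_area_map(chunk, new_alloc);  /* may sleep/allocate */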
6584 +diff --git a/mm/shmem.c b/mm/shmem.c
6585 +index 0b4ba556703a..fac22b578eb9 100644
6586 +--- a/mm/shmem.c
6587 ++++ b/mm/shmem.c
6588 +@@ -2140,9 +2140,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
6589 + NULL);
6590 + if (error) {
6591 + /* Remove the !PageUptodate pages we added */
6592 +- shmem_undo_range(inode,
6593 +- (loff_t)start << PAGE_CACHE_SHIFT,
6594 +- (loff_t)index << PAGE_CACHE_SHIFT, true);
6595 ++ if (index > start) {
6596 ++ shmem_undo_range(inode,
6597 ++ (loff_t)start << PAGE_CACHE_SHIFT,
6598 ++ ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
6599 ++ }
6600 + goto undone;
6601 + }
6602 +
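Note on the new bounds: shmem_undo_range() takes an inclusive byte end.
Worked example with 4K pages: start = 2, index = 5 means pages 2..4 were
instantiated before the failure, so the end becomes
((loff_t)5 << 12) - 1 = 20479, the last byte of page 4; the old end of 20480
reached into page 5 and could undo a page this fallocate never added. When
index == start nothing was added, so the call is now skipped entirely.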
6603 +diff --git a/mm/swap.c b/mm/swap.c
6604 +index 8a12b33936b4..9ccec11ed3fb 100644
6605 +--- a/mm/swap.c
6606 ++++ b/mm/swap.c
6607 +@@ -42,7 +42,7 @@ int page_cluster;
6608 +
6609 + static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
6610 + static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
6611 +-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
6612 ++static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
6613 +
6614 + /*
6615 + * This path almost never happens for VM activity - pages are normally
6616 +@@ -475,7 +475,7 @@ void rotate_reclaimable_page(struct page *page)
6617 + page_cache_get(page);
6618 + local_irq_save(flags);
6619 + pvec = this_cpu_ptr(&lru_rotate_pvecs);
6620 +- if (!pagevec_add(pvec, page))
6621 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
6622 + pagevec_move_tail(pvec);
6623 + local_irq_restore(flags);
6624 + }
6625 +@@ -531,7 +531,7 @@ void activate_page(struct page *page)
6626 + struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
6627 +
6628 + page_cache_get(page);
6629 +- if (!pagevec_add(pvec, page))
6630 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
6631 + pagevec_lru_move_fn(pvec, __activate_page, NULL);
6632 + put_cpu_var(activate_page_pvecs);
6633 + }
6634 +@@ -623,9 +623,8 @@ static void __lru_cache_add(struct page *page)
6635 + struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
6636 +
6637 + page_cache_get(page);
6638 +- if (!pagevec_space(pvec))
6639 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
6640 + __pagevec_lru_add(pvec);
6641 +- pagevec_add(pvec, page);
6642 + put_cpu_var(lru_add_pvec);
6643 + }
6644 +
6645 +@@ -743,7 +742,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
6646 + * be write it out by flusher threads as this is much more effective
6647 + * than the single-page writeout from reclaim.
6648 + */
6649 +-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
6650 ++static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
6651 + void *arg)
6652 + {
6653 + int lru, file;
6654 +@@ -811,36 +810,36 @@ void lru_add_drain_cpu(int cpu)
6655 + local_irq_restore(flags);
6656 + }
6657 +
6658 +- pvec = &per_cpu(lru_deactivate_pvecs, cpu);
6659 ++ pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
6660 + if (pagevec_count(pvec))
6661 +- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
6662 ++ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
6663 +
6664 + activate_page_drain(cpu);
6665 + }
6666 +
6667 + /**
6668 +- * deactivate_page - forcefully deactivate a page
6669 ++ * deactivate_file_page - forcefully deactivate a file page
6670 + * @page: page to deactivate
6671 + *
6672 + * This function hints the VM that @page is a good reclaim candidate,
6673 + * for example if its invalidation fails due to the page being dirty
6674 + * or under writeback.
6675 + */
6676 +-void deactivate_page(struct page *page)
6677 ++void deactivate_file_page(struct page *page)
6678 + {
6679 + /*
6680 +- * In a workload with many unevictable page such as mprotect, unevictable
6681 +- * page deactivation for accelerating reclaim is pointless.
6682 ++ * In a workload with many unevictable pages such as mprotect,
6683 ++ * unevictable page deactivation for accelerating reclaim is pointless.
6684 + */
6685 + if (PageUnevictable(page))
6686 + return;
6687 +
6688 + if (likely(get_page_unless_zero(page))) {
6689 +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
6690 ++ struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
6691 +
6692 +- if (!pagevec_add(pvec, page))
6693 +- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
6694 +- put_cpu_var(lru_deactivate_pvecs);
6695 ++ if (!pagevec_add(pvec, page) || PageCompound(page))
6696 ++ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
6697 ++ put_cpu_var(lru_deactivate_file_pvecs);
6698 + }
6699 + }
6700 +
6701 +@@ -872,7 +871,7 @@ void lru_add_drain_all(void)
6702 +
6703 + if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
6704 + pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
6705 +- pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
6706 ++ pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
6707 + need_activate_page_drain(cpu)) {
6708 + INIT_WORK(work, lru_add_drain_per_cpu);
6709 + schedule_work_on(cpu, work);
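Note: the recurring change in this file is the `|| PageCompound(page)` flush
condition. A pagevec slot holds one reference regardless of page size, so a
compound page parked in a per-CPU cache can pin megabytes (a 2MB THP) until
the next drain; flushing immediately bounds that:

    if (!pagevec_add(pvec, page) || PageCompound(page))
            pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);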
6710 +diff --git a/mm/truncate.c b/mm/truncate.c
6711 +index f1e4d6052369..dff252c03f3b 100644
6712 +--- a/mm/truncate.c
6713 ++++ b/mm/truncate.c
6714 +@@ -513,7 +513,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
6715 + * of interest and try to speed up its reclaim.
6716 + */
6717 + if (!ret)
6718 +- deactivate_page(page);
6719 ++ deactivate_file_page(page);
6720 + count += ret;
6721 + }
6722 + pagevec_remove_exceptionals(&pvec);
6723 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
6724 +index 35f76f2f7824..bbb1d04e7f8f 100644
6725 +--- a/net/batman-adv/routing.c
6726 ++++ b/net/batman-adv/routing.c
6727 +@@ -88,6 +88,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
6728 + neigh_node = NULL;
6729 +
6730 + spin_lock_bh(&orig_node->neigh_list_lock);
6731 ++ /* curr_router used earlier may not be the current orig_ifinfo->router
6732 ++ * anymore because it was dereferenced outside of the neigh_list_lock
6733 ++ * protected region. After the new best neighbor has replaced the current
6734 ++ * best neighbor, the reference counter needs to decrease. Consequently,
6735 ++ * the code needs to ensure the curr_router variable contains a pointer
6736 ++ * to the replaced best neighbor.
6737 ++ */
6738 ++ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
6739 ++
6740 + rcu_assign_pointer(orig_ifinfo->router, neigh_node);
6741 + spin_unlock_bh(&orig_node->neigh_list_lock);
6742 + batadv_orig_ifinfo_free_ref(orig_ifinfo);
6743 +diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
6744 +index 3d64ed20c393..6004c2de7b2a 100644
6745 +--- a/net/batman-adv/send.c
6746 ++++ b/net/batman-adv/send.c
6747 +@@ -611,6 +611,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
6748 +
6749 + if (pending) {
6750 + hlist_del(&forw_packet->list);
6751 ++ if (!forw_packet->own)
6752 ++ atomic_inc(&bat_priv->bcast_queue_left);
6753 ++
6754 + batadv_forw_packet_free(forw_packet);
6755 + }
6756 + }
6757 +@@ -638,6 +641,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
6758 +
6759 + if (pending) {
6760 + hlist_del(&forw_packet->list);
6761 ++ if (!forw_packet->own)
6762 ++ atomic_inc(&bat_priv->batman_queue_left);
6763 ++
6764 + batadv_forw_packet_free(forw_packet);
6765 + }
6766 + }
6767 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
6768 +index 492b0593dc2f..0bb7cae486b3 100644
6769 +--- a/net/batman-adv/soft-interface.c
6770 ++++ b/net/batman-adv/soft-interface.c
6771 +@@ -378,11 +378,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
6772 + */
6773 + nf_reset(skb);
6774 +
6775 ++ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
6776 ++ goto dropped;
6777 ++
6778 + vid = batadv_get_vid(skb, 0);
6779 + ethhdr = eth_hdr(skb);
6780 +
6781 + switch (ntohs(ethhdr->h_proto)) {
6782 + case ETH_P_8021Q:
6783 ++ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
6784 ++ goto dropped;
6785 ++
6786 + vhdr = (struct vlan_ethhdr *)skb->data;
6787 +
6788 + if (vhdr->h_vlan_encapsulated_proto != ethertype)
6789 +@@ -394,8 +400,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
6790 + }
6791 +
6792 + /* skb->dev & skb->pkt_type are set here */
6793 +- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
6794 +- goto dropped;
6795 + skb->protocol = eth_type_trans(skb, soft_iface);
6796 +
6797 + /* should not be necessary anymore as we use skb_pull_rcsum()
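Note: eth_hdr(skb) is only safe after pskb_may_pull(skb, ETH_HLEN) has
confirmed the header lies in the linear area, which is why the check moves
ahead of the switch; the VLAN case needs the larger VLAN_ETH_HLEN pull before
vhdr is read. Sketch of the rule:

    if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
            goto dropped;                   /* eth header now linear */
    ethhdr = eth_hdr(skb);

    if (ethhdr->h_proto == htons(ETH_P_8021Q) &&
        !pskb_may_pull(skb, VLAN_ETH_HLEN)) /* room for vlan_ethhdr? */
            goto dropped;
    vhdr = (struct vlan_ethhdr *)skb->data; /* re-read: pull may move data */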
6798 +diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
6799 +index 8d423bc649b9..f876f707fd9e 100644
6800 +--- a/net/bridge/br_ioctl.c
6801 ++++ b/net/bridge/br_ioctl.c
6802 +@@ -21,18 +21,19 @@
6803 + #include <asm/uaccess.h>
6804 + #include "br_private.h"
6805 +
6806 +-/* called with RTNL */
6807 + static int get_bridge_ifindices(struct net *net, int *indices, int num)
6808 + {
6809 + struct net_device *dev;
6810 + int i = 0;
6811 +
6812 +- for_each_netdev(net, dev) {
6813 ++ rcu_read_lock();
6814 ++ for_each_netdev_rcu(net, dev) {
6815 + if (i >= num)
6816 + break;
6817 + if (dev->priv_flags & IFF_EBRIDGE)
6818 + indices[i++] = dev->ifindex;
6819 + }
6820 ++ rcu_read_unlock();
6821 +
6822 + return i;
6823 + }
6824 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
6825 +index c412db774603..e1574edf4e43 100644
6826 +--- a/net/core/rtnetlink.c
6827 ++++ b/net/core/rtnetlink.c
6828 +@@ -1019,14 +1019,16 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
6829 + goto nla_put_failure;
6830 +
6831 + if (1) {
6832 +- struct rtnl_link_ifmap map = {
6833 +- .mem_start = dev->mem_start,
6834 +- .mem_end = dev->mem_end,
6835 +- .base_addr = dev->base_addr,
6836 +- .irq = dev->irq,
6837 +- .dma = dev->dma,
6838 +- .port = dev->if_port,
6839 +- };
6840 ++ struct rtnl_link_ifmap map;
6841 ++
6842 ++ memset(&map, 0, sizeof(map));
6843 ++ map.mem_start = dev->mem_start;
6844 ++ map.mem_end = dev->mem_end;
6845 ++ map.base_addr = dev->base_addr;
6846 ++ map.irq = dev->irq;
6847 ++ map.dma = dev->dma;
6848 ++ map.port = dev->if_port;
6849 ++
6850 + if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
6851 + goto nla_put_failure;
6852 + }
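Note: the designated initializer is replaced by memset() because an
initializer zeroes named members but need not zero padding bytes, and the
whole struct is copied to userspace via nla_put(), so uninitialized padding
leaks stack contents. The safe pattern:

    struct rtnl_link_ifmap map;

    memset(&map, 0, sizeof(map));   /* clears padding bytes too */
    map.irq = dev->irq;             /* then fill the real fields */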
6853 +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
6854 +index daccc4a36d80..4047341f6c07 100644
6855 +--- a/net/decnet/dn_route.c
6856 ++++ b/net/decnet/dn_route.c
6857 +@@ -1042,10 +1042,13 @@ source_ok:
6858 + if (!fld.daddr) {
6859 + fld.daddr = fld.saddr;
6860 +
6861 +- err = -EADDRNOTAVAIL;
6862 + if (dev_out)
6863 + dev_put(dev_out);
6864 ++ err = -EINVAL;
6865 + dev_out = init_net.loopback_dev;
6866 ++ if (!dev_out->dn_ptr)
6867 ++ goto out;
6868 ++ err = -EADDRNOTAVAIL;
6869 + dev_hold(dev_out);
6870 + if (!fld.daddr) {
6871 + fld.daddr =
6872 +@@ -1118,6 +1121,8 @@ source_ok:
6873 + if (dev_out == NULL)
6874 + goto out;
6875 + dn_db = rcu_dereference_raw(dev_out->dn_ptr);
6876 ++ if (!dn_db)
6877 ++ goto e_inval;
6878 + /* Possible improvement - check all devices for local addr */
6879 + if (dn_dev_islocal(dev_out, fld.daddr)) {
6880 + dev_put(dev_out);
6881 +@@ -1159,6 +1164,8 @@ select_source:
6882 + dev_put(dev_out);
6883 + dev_out = init_net.loopback_dev;
6884 + dev_hold(dev_out);
6885 ++ if (!dev_out->dn_ptr)
6886 ++ goto e_inval;
6887 + fld.flowidn_oif = dev_out->ifindex;
6888 + if (res.fi)
6889 + dn_fib_info_put(res.fi);
6890 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
6891 +index 214882e7d6de..464293c34b7a 100644
6892 +--- a/net/ipv4/devinet.c
6893 ++++ b/net/ipv4/devinet.c
6894 +@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
6895 +
6896 + ASSERT_RTNL();
6897 +
6898 ++ if (in_dev->dead)
6899 ++ goto no_promotions;
6900 ++
6901 + /* 1. Deleting primary ifaddr forces deletion all secondaries
6902 + * unless alias promotion is set
6903 + **/
6904 +@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
6905 + fib_del_ifaddr(ifa, ifa1);
6906 + }
6907 +
6908 ++no_promotions:
6909 + /* 2. Unlink it */
6910 +
6911 + *ifap = ifa1->ifa_next;
6912 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
6913 +index 23104a3f2924..d4c698ce0838 100644
6914 +--- a/net/ipv4/fib_frontend.c
6915 ++++ b/net/ipv4/fib_frontend.c
6916 +@@ -814,6 +814,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
6917 + subnet = 1;
6918 + }
6919 +
6920 ++ if (in_dev->dead)
6921 ++ goto no_promotions;
6922 ++
6923 + /* Deletion is more complicated than add.
6924 + * We should take care of not to delete too much :-)
6925 + *
6926 +@@ -889,6 +892,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
6927 + }
6928 + }
6929 +
6930 ++no_promotions:
6931 + if (!(ok & BRD_OK))
6932 + fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
6933 + if (subnet && ifa->ifa_prefixlen < 31) {
6934 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
6935 +index f95b6f93814b..655e1b05ff74 100644
6936 +--- a/net/ipv4/netfilter/arp_tables.c
6937 ++++ b/net/ipv4/netfilter/arp_tables.c
6938 +@@ -355,11 +355,24 @@ unsigned int arpt_do_table(struct sk_buff *skb,
6939 + }
6940 +
6941 + /* All zeroes == unconditional rule. */
6942 +-static inline bool unconditional(const struct arpt_arp *arp)
6943 ++static inline bool unconditional(const struct arpt_entry *e)
6944 + {
6945 + static const struct arpt_arp uncond;
6946 +
6947 +- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
6948 ++ return e->target_offset == sizeof(struct arpt_entry) &&
6949 ++ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
6950 ++}
6951 ++
6952 ++static bool find_jump_target(const struct xt_table_info *t,
6953 ++ const struct arpt_entry *target)
6954 ++{
6955 ++ struct arpt_entry *iter;
6956 ++
6957 ++ xt_entry_foreach(iter, t->entries, t->size) {
6958 ++ if (iter == target)
6959 ++ return true;
6960 ++ }
6961 ++ return false;
6962 + }
6963 +
6964 + /* Figures out from what hook each rule can be called: returns 0 if
6965 +@@ -398,11 +411,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
6966 + |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
6967 +
6968 + /* Unconditional return/END. */
6969 +- if ((e->target_offset == sizeof(struct arpt_entry) &&
6970 ++ if ((unconditional(e) &&
6971 + (strcmp(t->target.u.user.name,
6972 + XT_STANDARD_TARGET) == 0) &&
6973 +- t->verdict < 0 && unconditional(&e->arp)) ||
6974 +- visited) {
6975 ++ t->verdict < 0) || visited) {
6976 + unsigned int oldpos, size;
6977 +
6978 + if ((strcmp(t->target.u.user.name,
6979 +@@ -435,6 +447,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
6980 + size = e->next_offset;
6981 + e = (struct arpt_entry *)
6982 + (entry0 + pos + size);
6983 ++ if (pos + size >= newinfo->size)
6984 ++ return 0;
6985 + e->counters.pcnt = pos;
6986 + pos += size;
6987 + } else {
6988 +@@ -454,9 +468,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
6989 + /* This a jump; chase it. */
6990 + duprintf("Jump rule %u -> %u\n",
6991 + pos, newpos);
6992 ++ e = (struct arpt_entry *)
6993 ++ (entry0 + newpos);
6994 ++ if (!find_jump_target(newinfo, e))
6995 ++ return 0;
6996 + } else {
6997 + /* ... this is a fallthru */
6998 + newpos = pos + e->next_offset;
6999 ++ if (newpos >= newinfo->size)
7000 ++ return 0;
7001 + }
7002 + e = (struct arpt_entry *)
7003 + (entry0 + newpos);
7004 +@@ -470,25 +490,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
7005 + return 1;
7006 + }
7007 +
7008 +-static inline int check_entry(const struct arpt_entry *e, const char *name)
7009 +-{
7010 +- const struct xt_entry_target *t;
7011 +-
7012 +- if (!arp_checkentry(&e->arp)) {
7013 +- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
7014 +- return -EINVAL;
7015 +- }
7016 +-
7017 +- if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
7018 +- return -EINVAL;
7019 +-
7020 +- t = arpt_get_target_c(e);
7021 +- if (e->target_offset + t->u.target_size > e->next_offset)
7022 +- return -EINVAL;
7023 +-
7024 +- return 0;
7025 +-}
7026 +-
7027 + static inline int check_target(struct arpt_entry *e, const char *name)
7028 + {
7029 + struct xt_entry_target *t = arpt_get_target(e);
7030 +@@ -518,10 +519,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
7031 + struct xt_target *target;
7032 + int ret;
7033 +
7034 +- ret = check_entry(e, name);
7035 +- if (ret)
7036 +- return ret;
7037 +-
7038 + t = arpt_get_target(e);
7039 + target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
7040 + t->u.user.revision);
7041 +@@ -547,7 +544,7 @@ static bool check_underflow(const struct arpt_entry *e)
7042 + const struct xt_entry_target *t;
7043 + unsigned int verdict;
7044 +
7045 +- if (!unconditional(&e->arp))
7046 ++ if (!unconditional(e))
7047 + return false;
7048 + t = arpt_get_target_c(e);
7049 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
7050 +@@ -566,9 +563,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
7051 + unsigned int valid_hooks)
7052 + {
7053 + unsigned int h;
7054 ++ int err;
7055 +
7056 + if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
7057 +- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
7058 ++ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
7059 ++ (unsigned char *)e + e->next_offset > limit) {
7060 + duprintf("Bad offset %p\n", e);
7061 + return -EINVAL;
7062 + }
7063 +@@ -580,6 +579,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
7064 + return -EINVAL;
7065 + }
7066 +
7067 ++ if (!arp_checkentry(&e->arp))
7068 ++ return -EINVAL;
7069 ++
7070 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
7071 ++ e->next_offset);
7072 ++ if (err)
7073 ++ return err;
7074 ++
7075 + /* Check hooks & underflows */
7076 + for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
7077 + if (!(valid_hooks & (1 << h)))
7078 +@@ -588,9 +595,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
7079 + newinfo->hook_entry[h] = hook_entries[h];
7080 + if ((unsigned char *)e - base == underflows[h]) {
7081 + if (!check_underflow(e)) {
7082 +- pr_err("Underflows must be unconditional and "
7083 +- "use the STANDARD target with "
7084 +- "ACCEPT/DROP\n");
7085 ++ pr_debug("Underflows must be unconditional and "
7086 ++ "use the STANDARD target with "
7087 ++ "ACCEPT/DROP\n");
7088 + return -EINVAL;
7089 + }
7090 + newinfo->underflow[h] = underflows[h];
7091 +@@ -680,10 +687,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
7092 + }
7093 + }
7094 +
7095 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
7096 +- duprintf("Looping hook\n");
7097 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
7098 + return -ELOOP;
7099 +- }
7100 +
7101 + /* Finally, each sanity check must pass */
7102 + i = 0;
7103 +@@ -1116,56 +1121,18 @@ static int do_add_counters(struct net *net, const void __user *user,
7104 + unsigned int i, curcpu;
7105 + struct xt_counters_info tmp;
7106 + struct xt_counters *paddc;
7107 +- unsigned int num_counters;
7108 +- const char *name;
7109 +- int size;
7110 +- void *ptmp;
7111 + struct xt_table *t;
7112 + const struct xt_table_info *private;
7113 + int ret = 0;
7114 + void *loc_cpu_entry;
7115 + struct arpt_entry *iter;
7116 + unsigned int addend;
7117 +-#ifdef CONFIG_COMPAT
7118 +- struct compat_xt_counters_info compat_tmp;
7119 +
7120 +- if (compat) {
7121 +- ptmp = &compat_tmp;
7122 +- size = sizeof(struct compat_xt_counters_info);
7123 +- } else
7124 +-#endif
7125 +- {
7126 +- ptmp = &tmp;
7127 +- size = sizeof(struct xt_counters_info);
7128 +- }
7129 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
7130 ++ if (IS_ERR(paddc))
7131 ++ return PTR_ERR(paddc);
7132 +
7133 +- if (copy_from_user(ptmp, user, size) != 0)
7134 +- return -EFAULT;
7135 +-
7136 +-#ifdef CONFIG_COMPAT
7137 +- if (compat) {
7138 +- num_counters = compat_tmp.num_counters;
7139 +- name = compat_tmp.name;
7140 +- } else
7141 +-#endif
7142 +- {
7143 +- num_counters = tmp.num_counters;
7144 +- name = tmp.name;
7145 +- }
7146 +-
7147 +- if (len != size + num_counters * sizeof(struct xt_counters))
7148 +- return -EINVAL;
7149 +-
7150 +- paddc = vmalloc(len - size);
7151 +- if (!paddc)
7152 +- return -ENOMEM;
7153 +-
7154 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
7155 +- ret = -EFAULT;
7156 +- goto free;
7157 +- }
7158 +-
7159 +- t = xt_find_table_lock(net, NFPROTO_ARP, name);
7160 ++ t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
7161 + if (IS_ERR_OR_NULL(t)) {
7162 + ret = t ? PTR_ERR(t) : -ENOENT;
7163 + goto free;
7164 +@@ -1173,7 +1140,7 @@ static int do_add_counters(struct net *net, const void __user *user,
7165 +
7166 + local_bh_disable();
7167 + private = t->private;
7168 +- if (private->number != num_counters) {
7169 ++ if (private->number != tmp.num_counters) {
7170 + ret = -EINVAL;
7171 + goto unlock_up_free;
7172 + }
7173 +@@ -1199,6 +1166,18 @@ static int do_add_counters(struct net *net, const void __user *user,
7174 + }
7175 +
7176 + #ifdef CONFIG_COMPAT
7177 ++struct compat_arpt_replace {
7178 ++ char name[XT_TABLE_MAXNAMELEN];
7179 ++ u32 valid_hooks;
7180 ++ u32 num_entries;
7181 ++ u32 size;
7182 ++ u32 hook_entry[NF_ARP_NUMHOOKS];
7183 ++ u32 underflow[NF_ARP_NUMHOOKS];
7184 ++ u32 num_counters;
7185 ++ compat_uptr_t counters;
7186 ++ struct compat_arpt_entry entries[0];
7187 ++};
7188 ++
7189 + static inline void compat_release_entry(struct compat_arpt_entry *e)
7190 + {
7191 + struct xt_entry_target *t;
7192 +@@ -1207,24 +1186,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
7193 + module_put(t->u.kernel.target->me);
7194 + }
7195 +
7196 +-static inline int
7197 ++static int
7198 + check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
7199 + struct xt_table_info *newinfo,
7200 + unsigned int *size,
7201 + const unsigned char *base,
7202 +- const unsigned char *limit,
7203 +- const unsigned int *hook_entries,
7204 +- const unsigned int *underflows,
7205 +- const char *name)
7206 ++ const unsigned char *limit)
7207 + {
7208 + struct xt_entry_target *t;
7209 + struct xt_target *target;
7210 + unsigned int entry_offset;
7211 +- int ret, off, h;
7212 ++ int ret, off;
7213 +
7214 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
7215 + if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
7216 +- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
7217 ++ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
7218 ++ (unsigned char *)e + e->next_offset > limit) {
7219 + duprintf("Bad offset %p, limit = %p\n", e, limit);
7220 + return -EINVAL;
7221 + }
7222 +@@ -1236,8 +1213,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
7223 + return -EINVAL;
7224 + }
7225 +
7226 +- /* For purposes of check_entry casting the compat entry is fine */
7227 +- ret = check_entry((struct arpt_entry *)e, name);
7228 ++ if (!arp_checkentry(&e->arp))
7229 ++ return -EINVAL;
7230 ++
7231 ++ ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
7232 ++ e->next_offset);
7233 + if (ret)
7234 + return ret;
7235 +
7236 +@@ -1261,17 +1241,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
7237 + if (ret)
7238 + goto release_target;
7239 +
7240 +- /* Check hooks & underflows */
7241 +- for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
7242 +- if ((unsigned char *)e - base == hook_entries[h])
7243 +- newinfo->hook_entry[h] = hook_entries[h];
7244 +- if ((unsigned char *)e - base == underflows[h])
7245 +- newinfo->underflow[h] = underflows[h];
7246 +- }
7247 +-
7248 +- /* Clear counters and comefrom */
7249 +- memset(&e->counters, 0, sizeof(e->counters));
7250 +- e->comefrom = 0;
7251 + return 0;
7252 +
7253 + release_target:
7254 +@@ -1280,18 +1249,17 @@ out:
7255 + return ret;
7256 + }
7257 +
7258 +-static int
7259 ++static void
7260 + compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
7261 +- unsigned int *size, const char *name,
7262 ++ unsigned int *size,
7263 + struct xt_table_info *newinfo, unsigned char *base)
7264 + {
7265 + struct xt_entry_target *t;
7266 + struct xt_target *target;
7267 + struct arpt_entry *de;
7268 + unsigned int origsize;
7269 +- int ret, h;
7270 ++ int h;
7271 +
7272 +- ret = 0;
7273 + origsize = *size;
7274 + de = (struct arpt_entry *)*dstptr;
7275 + memcpy(de, e, sizeof(struct arpt_entry));
7276 +@@ -1312,144 +1280,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
7277 + if ((unsigned char *)de - base < newinfo->underflow[h])
7278 + newinfo->underflow[h] -= origsize - *size;
7279 + }
7280 +- return ret;
7281 + }
7282 +
7283 +-static int translate_compat_table(const char *name,
7284 +- unsigned int valid_hooks,
7285 +- struct xt_table_info **pinfo,
7286 ++static int translate_compat_table(struct xt_table_info **pinfo,
7287 + void **pentry0,
7288 +- unsigned int total_size,
7289 +- unsigned int number,
7290 +- unsigned int *hook_entries,
7291 +- unsigned int *underflows)
7292 ++ const struct compat_arpt_replace *compatr)
7293 + {
7294 + unsigned int i, j;
7295 + struct xt_table_info *newinfo, *info;
7296 + void *pos, *entry0, *entry1;
7297 + struct compat_arpt_entry *iter0;
7298 +- struct arpt_entry *iter1;
7299 ++ struct arpt_replace repl;
7300 + unsigned int size;
7301 + int ret = 0;
7302 +
7303 + info = *pinfo;
7304 + entry0 = *pentry0;
7305 +- size = total_size;
7306 +- info->number = number;
7307 +-
7308 +- /* Init all hooks to impossible value. */
7309 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
7310 +- info->hook_entry[i] = 0xFFFFFFFF;
7311 +- info->underflow[i] = 0xFFFFFFFF;
7312 +- }
7313 ++ size = compatr->size;
7314 ++ info->number = compatr->num_entries;
7315 +
7316 + duprintf("translate_compat_table: size %u\n", info->size);
7317 + j = 0;
7318 + xt_compat_lock(NFPROTO_ARP);
7319 +- xt_compat_init_offsets(NFPROTO_ARP, number);
7320 ++ xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
7321 + /* Walk through entries, checking offsets. */
7322 +- xt_entry_foreach(iter0, entry0, total_size) {
7323 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
7324 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
7325 + entry0,
7326 +- entry0 + total_size,
7327 +- hook_entries,
7328 +- underflows,
7329 +- name);
7330 ++ entry0 + compatr->size);
7331 + if (ret != 0)
7332 + goto out_unlock;
7333 + ++j;
7334 + }
7335 +
7336 + ret = -EINVAL;
7337 +- if (j != number) {
7338 ++ if (j != compatr->num_entries) {
7339 + duprintf("translate_compat_table: %u not %u entries\n",
7340 +- j, number);
7341 ++ j, compatr->num_entries);
7342 + goto out_unlock;
7343 + }
7344 +
7345 +- /* Check hooks all assigned */
7346 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
7347 +- /* Only hooks which are valid */
7348 +- if (!(valid_hooks & (1 << i)))
7349 +- continue;
7350 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
7351 +- duprintf("Invalid hook entry %u %u\n",
7352 +- i, hook_entries[i]);
7353 +- goto out_unlock;
7354 +- }
7355 +- if (info->underflow[i] == 0xFFFFFFFF) {
7356 +- duprintf("Invalid underflow %u %u\n",
7357 +- i, underflows[i]);
7358 +- goto out_unlock;
7359 +- }
7360 +- }
7361 +-
7362 + ret = -ENOMEM;
7363 + newinfo = xt_alloc_table_info(size);
7364 + if (!newinfo)
7365 + goto out_unlock;
7366 +
7367 +- newinfo->number = number;
7368 ++ newinfo->number = compatr->num_entries;
7369 + for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
7370 + newinfo->hook_entry[i] = info->hook_entry[i];
7371 + newinfo->underflow[i] = info->underflow[i];
7372 + }
7373 + entry1 = newinfo->entries[raw_smp_processor_id()];
7374 + pos = entry1;
7375 +- size = total_size;
7376 +- xt_entry_foreach(iter0, entry0, total_size) {
7377 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
7378 +- name, newinfo, entry1);
7379 +- if (ret != 0)
7380 +- break;
7381 +- }
7382 ++ size = compatr->size;
7383 ++ xt_entry_foreach(iter0, entry0, compatr->size)
7384 ++ compat_copy_entry_from_user(iter0, &pos, &size,
7385 ++ newinfo, entry1);
7386 ++
7387 ++ /* all module references in entry0 are now gone */
7388 ++
7389 + xt_compat_flush_offsets(NFPROTO_ARP);
7390 + xt_compat_unlock(NFPROTO_ARP);
7391 +- if (ret)
7392 +- goto free_newinfo;
7393 +
7394 +- ret = -ELOOP;
7395 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
7396 +- goto free_newinfo;
7397 ++ memcpy(&repl, compatr, sizeof(*compatr));
7398 +
7399 +- i = 0;
7400 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
7401 +- ret = check_target(iter1, name);
7402 +- if (ret != 0)
7403 +- break;
7404 +- ++i;
7405 +- if (strcmp(arpt_get_target(iter1)->u.user.name,
7406 +- XT_ERROR_TARGET) == 0)
7407 +- ++newinfo->stacksize;
7408 +- }
7409 +- if (ret) {
7410 +- /*
7411 +- * The first i matches need cleanup_entry (calls ->destroy)
7412 +- * because they had called ->check already. The other j-i
7413 +- * entries need only release.
7414 +- */
7415 +- int skip = i;
7416 +- j -= i;
7417 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
7418 +- if (skip-- > 0)
7419 +- continue;
7420 +- if (j-- == 0)
7421 +- break;
7422 +- compat_release_entry(iter0);
7423 +- }
7424 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
7425 +- if (i-- == 0)
7426 +- break;
7427 +- cleanup_entry(iter1);
7428 +- }
7429 +- xt_free_table_info(newinfo);
7430 +- return ret;
7431 ++ for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
7432 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
7433 ++ repl.underflow[i] = newinfo->underflow[i];
7434 + }
7435 +
7436 +- /* And one copy for every other CPU */
7437 +- for_each_possible_cpu(i)
7438 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
7439 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
7440 ++ repl.num_counters = 0;
7441 ++ repl.counters = NULL;
7442 ++ repl.size = newinfo->size;
7443 ++ ret = translate_table(newinfo, entry1, &repl);
7444 ++ if (ret)
7445 ++ goto free_newinfo;
7446 +
7447 + *pinfo = newinfo;
7448 + *pentry0 = entry1;
7449 +@@ -1458,31 +1363,18 @@ static int translate_compat_table(const char *name,
7450 +
7451 + free_newinfo:
7452 + xt_free_table_info(newinfo);
7453 +-out:
7454 +- xt_entry_foreach(iter0, entry0, total_size) {
7455 ++ return ret;
7456 ++out_unlock:
7457 ++ xt_compat_flush_offsets(NFPROTO_ARP);
7458 ++ xt_compat_unlock(NFPROTO_ARP);
7459 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
7460 + if (j-- == 0)
7461 + break;
7462 + compat_release_entry(iter0);
7463 + }
7464 + return ret;
7465 +-out_unlock:
7466 +- xt_compat_flush_offsets(NFPROTO_ARP);
7467 +- xt_compat_unlock(NFPROTO_ARP);
7468 +- goto out;
7469 + }
7470 +
7471 +-struct compat_arpt_replace {
7472 +- char name[XT_TABLE_MAXNAMELEN];
7473 +- u32 valid_hooks;
7474 +- u32 num_entries;
7475 +- u32 size;
7476 +- u32 hook_entry[NF_ARP_NUMHOOKS];
7477 +- u32 underflow[NF_ARP_NUMHOOKS];
7478 +- u32 num_counters;
7479 +- compat_uptr_t counters;
7480 +- struct compat_arpt_entry entries[0];
7481 +-};
7482 +-
7483 + static int compat_do_replace(struct net *net, void __user *user,
7484 + unsigned int len)
7485 + {
7486 +@@ -1513,10 +1405,7 @@ static int compat_do_replace(struct net *net, void __user *user,
7487 + goto free_newinfo;
7488 + }
7489 +
7490 +- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
7491 +- &newinfo, &loc_cpu_entry, tmp.size,
7492 +- tmp.num_entries, tmp.hook_entry,
7493 +- tmp.underflow);
7494 ++ ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
7495 + if (ret != 0)
7496 + goto free_newinfo;
7497 +
7498 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
7499 +index 99e810f84671..865e738d3804 100644
7500 +--- a/net/ipv4/netfilter/ip_tables.c
7501 ++++ b/net/ipv4/netfilter/ip_tables.c
7502 +@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
7503 +
7504 + /* All zeroes == unconditional rule. */
7505 + /* Mildly perf critical (only if packet tracing is on) */
7506 +-static inline bool unconditional(const struct ipt_ip *ip)
7507 ++static inline bool unconditional(const struct ipt_entry *e)
7508 + {
7509 + static const struct ipt_ip uncond;
7510 +
7511 +- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
7512 ++ return e->target_offset == sizeof(struct ipt_entry) &&
7513 ++ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
7514 + #undef FWINV
7515 + }
7516 +
7517 +@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
7518 + } else if (s == e) {
7519 + (*rulenum)++;
7520 +
7521 +- if (s->target_offset == sizeof(struct ipt_entry) &&
7522 ++ if (unconditional(s) &&
7523 + strcmp(t->target.u.kernel.target->name,
7524 + XT_STANDARD_TARGET) == 0 &&
7525 +- t->verdict < 0 &&
7526 +- unconditional(&s->ip)) {
7527 ++ t->verdict < 0) {
7528 + /* Tail of chains: STANDARD target (return/policy) */
7529 + *comment = *chainname == hookname
7530 + ? comments[NF_IP_TRACE_COMMENT_POLICY]
7531 +@@ -439,6 +439,18 @@ ipt_do_table(struct sk_buff *skb,
7532 + #endif
7533 + }
7534 +
7535 ++static bool find_jump_target(const struct xt_table_info *t,
7536 ++ const struct ipt_entry *target)
7537 ++{
7538 ++ struct ipt_entry *iter;
7539 ++
7540 ++ xt_entry_foreach(iter, t->entries, t->size) {
7541 ++ if (iter == target)
7542 ++ return true;
7543 ++ }
7544 ++ return false;
7545 ++}
7546 ++
7547 + /* Figures out from what hook each rule can be called: returns 0 if
7548 + there are loops. Puts hook bitmask in comefrom. */
7549 + static int
7550 +@@ -472,11 +484,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
7551 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
7552 +
7553 + /* Unconditional return/END. */
7554 +- if ((e->target_offset == sizeof(struct ipt_entry) &&
7555 ++ if ((unconditional(e) &&
7556 + (strcmp(t->target.u.user.name,
7557 + XT_STANDARD_TARGET) == 0) &&
7558 +- t->verdict < 0 && unconditional(&e->ip)) ||
7559 +- visited) {
7560 ++ t->verdict < 0) || visited) {
7561 + unsigned int oldpos, size;
7562 +
7563 + if ((strcmp(t->target.u.user.name,
7564 +@@ -517,6 +528,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
7565 + size = e->next_offset;
7566 + e = (struct ipt_entry *)
7567 + (entry0 + pos + size);
7568 ++ if (pos + size >= newinfo->size)
7569 ++ return 0;
7570 + e->counters.pcnt = pos;
7571 + pos += size;
7572 + } else {
7573 +@@ -535,9 +548,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
7574 + /* This a jump; chase it. */
7575 + duprintf("Jump rule %u -> %u\n",
7576 + pos, newpos);
7577 ++ e = (struct ipt_entry *)
7578 ++ (entry0 + newpos);
7579 ++ if (!find_jump_target(newinfo, e))
7580 ++ return 0;
7581 + } else {
7582 + /* ... this is a fallthru */
7583 + newpos = pos + e->next_offset;
7584 ++ if (newpos >= newinfo->size)
7585 ++ return 0;
7586 + }
7587 + e = (struct ipt_entry *)
7588 + (entry0 + newpos);
7589 +@@ -565,27 +584,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
7590 + }
7591 +
7592 + static int
7593 +-check_entry(const struct ipt_entry *e, const char *name)
7594 +-{
7595 +- const struct xt_entry_target *t;
7596 +-
7597 +- if (!ip_checkentry(&e->ip)) {
7598 +- duprintf("ip check failed %p %s.\n", e, name);
7599 +- return -EINVAL;
7600 +- }
7601 +-
7602 +- if (e->target_offset + sizeof(struct xt_entry_target) >
7603 +- e->next_offset)
7604 +- return -EINVAL;
7605 +-
7606 +- t = ipt_get_target_c(e);
7607 +- if (e->target_offset + t->u.target_size > e->next_offset)
7608 +- return -EINVAL;
7609 +-
7610 +- return 0;
7611 +-}
7612 +-
7613 +-static int
7614 + check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
7615 + {
7616 + const struct ipt_ip *ip = par->entryinfo;
7617 +@@ -662,10 +660,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
7618 + struct xt_mtchk_param mtpar;
7619 + struct xt_entry_match *ematch;
7620 +
7621 +- ret = check_entry(e, name);
7622 +- if (ret)
7623 +- return ret;
7624 +-
7625 + j = 0;
7626 + mtpar.net = net;
7627 + mtpar.table = name;
7628 +@@ -709,7 +703,7 @@ static bool check_underflow(const struct ipt_entry *e)
7629 + const struct xt_entry_target *t;
7630 + unsigned int verdict;
7631 +
7632 +- if (!unconditional(&e->ip))
7633 ++ if (!unconditional(e))
7634 + return false;
7635 + t = ipt_get_target_c(e);
7636 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
7637 +@@ -729,9 +723,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
7638 + unsigned int valid_hooks)
7639 + {
7640 + unsigned int h;
7641 ++ int err;
7642 +
7643 + if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
7644 +- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
7645 ++ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
7646 ++ (unsigned char *)e + e->next_offset > limit) {
7647 + duprintf("Bad offset %p\n", e);
7648 + return -EINVAL;
7649 + }
7650 +@@ -743,6 +739,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
7651 + return -EINVAL;
7652 + }
7653 +
7654 ++ if (!ip_checkentry(&e->ip))
7655 ++ return -EINVAL;
7656 ++
7657 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
7658 ++ e->next_offset);
7659 ++ if (err)
7660 ++ return err;
7661 ++
7662 + /* Check hooks & underflows */
7663 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
7664 + if (!(valid_hooks & (1 << h)))
7665 +@@ -751,9 +755,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
7666 + newinfo->hook_entry[h] = hook_entries[h];
7667 + if ((unsigned char *)e - base == underflows[h]) {
7668 + if (!check_underflow(e)) {
7669 +- pr_err("Underflows must be unconditional and "
7670 +- "use the STANDARD target with "
7671 +- "ACCEPT/DROP\n");
7672 ++ pr_debug("Underflows must be unconditional and "
7673 ++ "use the STANDARD target with "
7674 ++ "ACCEPT/DROP\n");
7675 + return -EINVAL;
7676 + }
7677 + newinfo->underflow[h] = underflows[h];
7678 +@@ -1304,56 +1308,18 @@ do_add_counters(struct net *net, const void __user *user,
7679 + unsigned int i, curcpu;
7680 + struct xt_counters_info tmp;
7681 + struct xt_counters *paddc;
7682 +- unsigned int num_counters;
7683 +- const char *name;
7684 +- int size;
7685 +- void *ptmp;
7686 + struct xt_table *t;
7687 + const struct xt_table_info *private;
7688 + int ret = 0;
7689 + void *loc_cpu_entry;
7690 + struct ipt_entry *iter;
7691 + unsigned int addend;
7692 +-#ifdef CONFIG_COMPAT
7693 +- struct compat_xt_counters_info compat_tmp;
7694 +-
7695 +- if (compat) {
7696 +- ptmp = &compat_tmp;
7697 +- size = sizeof(struct compat_xt_counters_info);
7698 +- } else
7699 +-#endif
7700 +- {
7701 +- ptmp = &tmp;
7702 +- size = sizeof(struct xt_counters_info);
7703 +- }
7704 +-
7705 +- if (copy_from_user(ptmp, user, size) != 0)
7706 +- return -EFAULT;
7707 +-
7708 +-#ifdef CONFIG_COMPAT
7709 +- if (compat) {
7710 +- num_counters = compat_tmp.num_counters;
7711 +- name = compat_tmp.name;
7712 +- } else
7713 +-#endif
7714 +- {
7715 +- num_counters = tmp.num_counters;
7716 +- name = tmp.name;
7717 +- }
7718 +
7719 +- if (len != size + num_counters * sizeof(struct xt_counters))
7720 +- return -EINVAL;
7721 +-
7722 +- paddc = vmalloc(len - size);
7723 +- if (!paddc)
7724 +- return -ENOMEM;
7725 +-
7726 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
7727 +- ret = -EFAULT;
7728 +- goto free;
7729 +- }
7730 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
7731 ++ if (IS_ERR(paddc))
7732 ++ return PTR_ERR(paddc);
7733 +
7734 +- t = xt_find_table_lock(net, AF_INET, name);
7735 ++ t = xt_find_table_lock(net, AF_INET, tmp.name);
7736 + if (IS_ERR_OR_NULL(t)) {
7737 + ret = t ? PTR_ERR(t) : -ENOENT;
7738 + goto free;
7739 +@@ -1361,7 +1327,7 @@ do_add_counters(struct net *net, const void __user *user,
7740 +
7741 + local_bh_disable();
7742 + private = t->private;
7743 +- if (private->number != num_counters) {
7744 ++ if (private->number != tmp.num_counters) {
7745 + ret = -EINVAL;
7746 + goto unlock_up_free;
7747 + }
7748 +@@ -1440,7 +1406,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
7749 +
7750 + static int
7751 + compat_find_calc_match(struct xt_entry_match *m,
7752 +- const char *name,
7753 + const struct ipt_ip *ip,
7754 + unsigned int hookmask,
7755 + int *size)
7756 +@@ -1476,21 +1441,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
7757 + struct xt_table_info *newinfo,
7758 + unsigned int *size,
7759 + const unsigned char *base,
7760 +- const unsigned char *limit,
7761 +- const unsigned int *hook_entries,
7762 +- const unsigned int *underflows,
7763 +- const char *name)
7764 ++ const unsigned char *limit)
7765 + {
7766 + struct xt_entry_match *ematch;
7767 + struct xt_entry_target *t;
7768 + struct xt_target *target;
7769 + unsigned int entry_offset;
7770 + unsigned int j;
7771 +- int ret, off, h;
7772 ++ int ret, off;
7773 +
7774 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
7775 + if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
7776 +- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
7777 ++ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
7778 ++ (unsigned char *)e + e->next_offset > limit) {
7779 + duprintf("Bad offset %p, limit = %p\n", e, limit);
7780 + return -EINVAL;
7781 + }
7782 +@@ -1502,8 +1465,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
7783 + return -EINVAL;
7784 + }
7785 +
7786 +- /* For purposes of check_entry casting the compat entry is fine */
7787 +- ret = check_entry((struct ipt_entry *)e, name);
7788 ++ if (!ip_checkentry(&e->ip))
7789 ++ return -EINVAL;
7790 ++
7791 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
7792 ++ e->target_offset, e->next_offset);
7793 + if (ret)
7794 + return ret;
7795 +
7796 +@@ -1511,8 +1477,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
7797 + entry_offset = (void *)e - (void *)base;
7798 + j = 0;
7799 + xt_ematch_foreach(ematch, e) {
7800 +- ret = compat_find_calc_match(ematch, name,
7801 +- &e->ip, e->comefrom, &off);
7802 ++ ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
7803 ++ &off);
7804 + if (ret != 0)
7805 + goto release_matches;
7806 + ++j;
7807 +@@ -1535,17 +1501,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
7808 + if (ret)
7809 + goto out;
7810 +
7811 +- /* Check hooks & underflows */
7812 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
7813 +- if ((unsigned char *)e - base == hook_entries[h])
7814 +- newinfo->hook_entry[h] = hook_entries[h];
7815 +- if ((unsigned char *)e - base == underflows[h])
7816 +- newinfo->underflow[h] = underflows[h];
7817 +- }
7818 +-
7819 +- /* Clear counters and comefrom */
7820 +- memset(&e->counters, 0, sizeof(e->counters));
7821 +- e->comefrom = 0;
7822 + return 0;
7823 +
7824 + out:
7825 +@@ -1559,19 +1514,18 @@ release_matches:
7826 + return ret;
7827 + }
7828 +
7829 +-static int
7830 ++static void
7831 + compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
7832 +- unsigned int *size, const char *name,
7833 ++ unsigned int *size,
7834 + struct xt_table_info *newinfo, unsigned char *base)
7835 + {
7836 + struct xt_entry_target *t;
7837 + struct xt_target *target;
7838 + struct ipt_entry *de;
7839 + unsigned int origsize;
7840 +- int ret, h;
7841 ++ int h;
7842 + struct xt_entry_match *ematch;
7843 +
7844 +- ret = 0;
7845 + origsize = *size;
7846 + de = (struct ipt_entry *)*dstptr;
7847 + memcpy(de, e, sizeof(struct ipt_entry));
7848 +@@ -1580,198 +1534,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
7849 + *dstptr += sizeof(struct ipt_entry);
7850 + *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
7851 +
7852 +- xt_ematch_foreach(ematch, e) {
7853 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
7854 +- if (ret != 0)
7855 +- return ret;
7856 +- }
7857 ++ xt_ematch_foreach(ematch, e)
7858 ++ xt_compat_match_from_user(ematch, dstptr, size);
7859 ++
7860 + de->target_offset = e->target_offset - (origsize - *size);
7861 + t = compat_ipt_get_target(e);
7862 + target = t->u.kernel.target;
7863 + xt_compat_target_from_user(t, dstptr, size);
7864 +
7865 + de->next_offset = e->next_offset - (origsize - *size);
7866 ++
7867 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
7868 + if ((unsigned char *)de - base < newinfo->hook_entry[h])
7869 + newinfo->hook_entry[h] -= origsize - *size;
7870 + if ((unsigned char *)de - base < newinfo->underflow[h])
7871 + newinfo->underflow[h] -= origsize - *size;
7872 + }
7873 +- return ret;
7874 +-}
7875 +-
7876 +-static int
7877 +-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
7878 +-{
7879 +- struct xt_entry_match *ematch;
7880 +- struct xt_mtchk_param mtpar;
7881 +- unsigned int j;
7882 +- int ret = 0;
7883 +-
7884 +- j = 0;
7885 +- mtpar.net = net;
7886 +- mtpar.table = name;
7887 +- mtpar.entryinfo = &e->ip;
7888 +- mtpar.hook_mask = e->comefrom;
7889 +- mtpar.family = NFPROTO_IPV4;
7890 +- xt_ematch_foreach(ematch, e) {
7891 +- ret = check_match(ematch, &mtpar);
7892 +- if (ret != 0)
7893 +- goto cleanup_matches;
7894 +- ++j;
7895 +- }
7896 +-
7897 +- ret = check_target(e, net, name);
7898 +- if (ret)
7899 +- goto cleanup_matches;
7900 +- return 0;
7901 +-
7902 +- cleanup_matches:
7903 +- xt_ematch_foreach(ematch, e) {
7904 +- if (j-- == 0)
7905 +- break;
7906 +- cleanup_match(ematch, net);
7907 +- }
7908 +- return ret;
7909 + }
7910 +
7911 + static int
7912 + translate_compat_table(struct net *net,
7913 +- const char *name,
7914 +- unsigned int valid_hooks,
7915 + struct xt_table_info **pinfo,
7916 + void **pentry0,
7917 +- unsigned int total_size,
7918 +- unsigned int number,
7919 +- unsigned int *hook_entries,
7920 +- unsigned int *underflows)
7921 ++ const struct compat_ipt_replace *compatr)
7922 + {
7923 + unsigned int i, j;
7924 + struct xt_table_info *newinfo, *info;
7925 + void *pos, *entry0, *entry1;
7926 + struct compat_ipt_entry *iter0;
7927 +- struct ipt_entry *iter1;
7928 ++ struct ipt_replace repl;
7929 + unsigned int size;
7930 + int ret;
7931 +
7932 + info = *pinfo;
7933 + entry0 = *pentry0;
7934 +- size = total_size;
7935 +- info->number = number;
7936 +-
7937 +- /* Init all hooks to impossible value. */
7938 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
7939 +- info->hook_entry[i] = 0xFFFFFFFF;
7940 +- info->underflow[i] = 0xFFFFFFFF;
7941 +- }
7942 ++ size = compatr->size;
7943 ++ info->number = compatr->num_entries;
7944 +
7945 + duprintf("translate_compat_table: size %u\n", info->size);
7946 + j = 0;
7947 + xt_compat_lock(AF_INET);
7948 +- xt_compat_init_offsets(AF_INET, number);
7949 ++ xt_compat_init_offsets(AF_INET, compatr->num_entries);
7950 + /* Walk through entries, checking offsets. */
7951 +- xt_entry_foreach(iter0, entry0, total_size) {
7952 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
7953 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
7954 + entry0,
7955 +- entry0 + total_size,
7956 +- hook_entries,
7957 +- underflows,
7958 +- name);
7959 ++ entry0 + compatr->size);
7960 + if (ret != 0)
7961 + goto out_unlock;
7962 + ++j;
7963 + }
7964 +
7965 + ret = -EINVAL;
7966 +- if (j != number) {
7967 ++ if (j != compatr->num_entries) {
7968 + duprintf("translate_compat_table: %u not %u entries\n",
7969 +- j, number);
7970 ++ j, compatr->num_entries);
7971 + goto out_unlock;
7972 + }
7973 +
7974 +- /* Check hooks all assigned */
7975 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
7976 +- /* Only hooks which are valid */
7977 +- if (!(valid_hooks & (1 << i)))
7978 +- continue;
7979 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
7980 +- duprintf("Invalid hook entry %u %u\n",
7981 +- i, hook_entries[i]);
7982 +- goto out_unlock;
7983 +- }
7984 +- if (info->underflow[i] == 0xFFFFFFFF) {
7985 +- duprintf("Invalid underflow %u %u\n",
7986 +- i, underflows[i]);
7987 +- goto out_unlock;
7988 +- }
7989 +- }
7990 +-
7991 + ret = -ENOMEM;
7992 + newinfo = xt_alloc_table_info(size);
7993 + if (!newinfo)
7994 + goto out_unlock;
7995 +
7996 +- newinfo->number = number;
7997 ++ newinfo->number = compatr->num_entries;
7998 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
7999 +- newinfo->hook_entry[i] = info->hook_entry[i];
8000 +- newinfo->underflow[i] = info->underflow[i];
8001 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
8002 ++ newinfo->underflow[i] = compatr->underflow[i];
8003 + }
8004 + entry1 = newinfo->entries[raw_smp_processor_id()];
8005 + pos = entry1;
8006 +- size = total_size;
8007 +- xt_entry_foreach(iter0, entry0, total_size) {
8008 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
8009 +- name, newinfo, entry1);
8010 +- if (ret != 0)
8011 +- break;
8012 +- }
8013 ++ size = compatr->size;
8014 ++ xt_entry_foreach(iter0, entry0, compatr->size)
8015 ++ compat_copy_entry_from_user(iter0, &pos, &size,
8016 ++ newinfo, entry1);
8017 ++
8018 ++ /* all module references in entry0 are now gone.
8019 ++ * entry1/newinfo contains a 64bit ruleset that looks exactly as
8020 ++ * generated by 64bit userspace.
8021 ++ *
8022 ++ * Call standard translate_table() to validate all hook_entry values,
8023 ++ * underflows, check for loops, etc.
8024 ++ */
8025 + xt_compat_flush_offsets(AF_INET);
8026 + xt_compat_unlock(AF_INET);
8027 +- if (ret)
8028 +- goto free_newinfo;
8029 +
8030 +- ret = -ELOOP;
8031 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
8032 +- goto free_newinfo;
8033 ++ memcpy(&repl, compatr, sizeof(*compatr));
8034 +
8035 +- i = 0;
8036 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
8037 +- ret = compat_check_entry(iter1, net, name);
8038 +- if (ret != 0)
8039 +- break;
8040 +- ++i;
8041 +- if (strcmp(ipt_get_target(iter1)->u.user.name,
8042 +- XT_ERROR_TARGET) == 0)
8043 +- ++newinfo->stacksize;
8044 +- }
8045 +- if (ret) {
8046 +- /*
8047 +- * The first i matches need cleanup_entry (calls ->destroy)
8048 +- * because they had called ->check already. The other j-i
8049 +- * entries need only release.
8050 +- */
8051 +- int skip = i;
8052 +- j -= i;
8053 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
8054 +- if (skip-- > 0)
8055 +- continue;
8056 +- if (j-- == 0)
8057 +- break;
8058 +- compat_release_entry(iter0);
8059 +- }
8060 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
8061 +- if (i-- == 0)
8062 +- break;
8063 +- cleanup_entry(iter1, net);
8064 +- }
8065 +- xt_free_table_info(newinfo);
8066 +- return ret;
8067 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
8068 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
8069 ++ repl.underflow[i] = newinfo->underflow[i];
8070 + }
8071 +
8072 +- /* And one copy for every other CPU */
8073 +- for_each_possible_cpu(i)
8074 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
8075 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
8076 ++ repl.num_counters = 0;
8077 ++ repl.counters = NULL;
8078 ++ repl.size = newinfo->size;
8079 ++ ret = translate_table(net, newinfo, entry1, &repl);
8080 ++ if (ret)
8081 ++ goto free_newinfo;
8082 +
8083 + *pinfo = newinfo;
8084 + *pentry0 = entry1;
8085 +@@ -1780,17 +1640,16 @@ translate_compat_table(struct net *net,
8086 +
8087 + free_newinfo:
8088 + xt_free_table_info(newinfo);
8089 +-out:
8090 +- xt_entry_foreach(iter0, entry0, total_size) {
8091 ++ return ret;
8092 ++out_unlock:
8093 ++ xt_compat_flush_offsets(AF_INET);
8094 ++ xt_compat_unlock(AF_INET);
8095 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
8096 + if (j-- == 0)
8097 + break;
8098 + compat_release_entry(iter0);
8099 + }
8100 + return ret;
8101 +-out_unlock:
8102 +- xt_compat_flush_offsets(AF_INET);
8103 +- xt_compat_unlock(AF_INET);
8104 +- goto out;
8105 + }
8106 +
8107 + static int
8108 +@@ -1824,10 +1683,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
8109 + goto free_newinfo;
8110 + }
8111 +
8112 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
8113 +- &newinfo, &loc_cpu_entry, tmp.size,
8114 +- tmp.num_entries, tmp.hook_entry,
8115 +- tmp.underflow);
8116 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
8117 + if (ret != 0)
8118 + goto free_newinfo;
8119 +
8120 +diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
8121 +index c6eb42100e9a..ea91058b5f6f 100644
8122 +--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
8123 ++++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
8124 +@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
8125 + unsigned long event,
8126 + void *ptr)
8127 + {
8128 +- struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
8129 ++ struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
8130 + struct netdev_notifier_info info;
8131 +
8132 +- netdev_notifier_info_init(&info, dev);
8133 ++ /* The masq_dev_notifier will catch the case of the device going
8134 ++ * down. So if the inetdev is dead and being destroyed we have
8135 ++ * no work to do. Otherwise this is an individual address removal
8136 ++ * and we have to perform the flush.
8137 ++ */
8138 ++ if (idev->dead)
8139 ++ return NOTIFY_DONE;
8140 ++
8141 ++ netdev_notifier_info_init(&info, idev->dev);
8142 + return masq_device_event(this, event, &info);
8143 + }
8144 +
8145 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
8146 +index b7ac498fed5f..12c5df33c0b7 100644
8147 +--- a/net/ipv4/route.c
8148 ++++ b/net/ipv4/route.c
8149 +@@ -1924,6 +1924,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
8150 + */
8151 + if (fi && res->prefixlen < 4)
8152 + fi = NULL;
8153 ++ } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
8154 ++ (orig_oif != dev_out->ifindex)) {
8155 ++ /* For local routes that require a particular output interface
8156 ++ * we do not want to cache the result. Caching the result
8157 ++ * causes incorrect behaviour when there are multiple source
8158 ++ * addresses on the interface, the end result being that if the
8159 ++ * intended recipient is waiting on that interface for the
8160 ++ * packet he won't receive it because it will be delivered on
8161 ++ * the loopback interface and the IP_PKTINFO ipi_ifindex will
8162 ++ * be set to the loopback interface as well.
8163 ++ */
8164 ++ fi = NULL;
8165 + }
8166 +
8167 + fnhe = NULL;
8168 +diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
8169 +index ed9c9a91851c..c90e76c2eca0 100644
8170 +--- a/net/ipv4/tcp_metrics.c
8171 ++++ b/net/ipv4/tcp_metrics.c
8172 +@@ -550,7 +550,7 @@ reset:
8173 + */
8174 + if (crtt > tp->srtt_us) {
8175 + /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
8176 +- crtt /= 8 * USEC_PER_MSEC;
8177 ++ crtt /= 8 * USEC_PER_SEC / HZ;
8178 + inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
8179 + } else if (tp->srtt_us == 0) {
8180 + /* RFC6298: 5.7 We've failed to get a valid RTT sample from
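
The one-line tcp_metrics change above is a unit-conversion fix: the
cached RTT (crtt) is kept in microseconds, left-shifted by 3, while
icsk_rto is measured in jiffies. Dividing by 8 * USEC_PER_MSEC yields
milliseconds, which only equals jiffies when HZ is 1000; dividing by
8 * USEC_PER_SEC / HZ yields jiffies for any HZ. A small stand-alone
demo (the HZ value and the sample RTT are illustrative assumptions):

    #include <stdio.h>

    #define USEC_PER_SEC  1000000UL
    #define USEC_PER_MSEC 1000UL
    #define HZ 250UL    /* assumed config where the old math breaks */

    int main(void)
    {
        /* cached RTT of 100 ms, stored usec << 3 as tcp_metrics does */
        unsigned long crtt = (100 * USEC_PER_MSEC) << 3;
        unsigned long old_rto = crtt / (8 * USEC_PER_MSEC);     /* msec */
        unsigned long new_rto = crtt / (8 * USEC_PER_SEC / HZ); /* jiffies */

        /* old: 100, used as jiffies = 400 ms at HZ=250; new: 25 = 100 ms */
        printf("old=%lu (%lu ms)  new=%lu (%lu ms)\n",
               old_rto, old_rto * 1000 / HZ,
               new_rto, new_rto * 1000 / HZ);
        return 0;
    }
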
8181 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
8182 +index 9c7d88870e2b..af2f64eeb98f 100644
8183 +--- a/net/ipv4/tcp_output.c
8184 ++++ b/net/ipv4/tcp_output.c
8185 +@@ -2532,8 +2532,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
8186 + */
8187 + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
8188 + skb_headroom(skb) >= 0xFFFF)) {
8189 +- struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
8190 +- GFP_ATOMIC);
8191 ++ struct sk_buff *nskb;
8192 ++
8193 ++ skb_mstamp_get(&skb->skb_mstamp);
8194 ++ nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
8195 + err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
8196 + -ENOBUFS;
8197 + } else {
8198 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
8199 +index 4ea975324888..2a5d388d76a4 100644
8200 +--- a/net/ipv4/udp.c
8201 ++++ b/net/ipv4/udp.c
8202 +@@ -1965,10 +1965,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
8203 + if (!in_dev)
8204 + return;
8205 +
8206 +- ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
8207 +- iph->protocol);
8208 +- if (!ours)
8209 +- return;
8210 ++ /* we are supposed to accept bcast packets */
8211 ++ if (skb->pkt_type == PACKET_MULTICAST) {
8212 ++ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
8213 ++ iph->protocol);
8214 ++ if (!ours)
8215 ++ return;
8216 ++ }
8217 ++
8218 + sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
8219 + uh->source, iph->saddr, dif);
8220 + } else if (skb->pkt_type == PACKET_HOST) {
8221 +diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
8222 +index 8af3eb57f438..c7c8f71d0d48 100644
8223 +--- a/net/ipv6/exthdrs_core.c
8224 ++++ b/net/ipv6/exthdrs_core.c
8225 +@@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
8226 + *fragoff = _frag_off;
8227 + return hp->nexthdr;
8228 + }
8229 +- return -ENOENT;
8230 ++ if (!found)
8231 ++ return -ENOENT;
8232 ++ if (fragoff)
8233 ++ *fragoff = _frag_off;
8234 ++ break;
8235 + }
8236 + hdrlen = 8;
8237 + } else if (nexthdr == NEXTHDR_AUTH) {
8238 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
8239 +index 9cb94cfa0ae7..6987d3cb4163 100644
8240 +--- a/net/ipv6/ip6_tunnel.c
8241 ++++ b/net/ipv6/ip6_tunnel.c
8242 +@@ -273,12 +273,12 @@ static int ip6_tnl_create2(struct net_device *dev)
8243 +
8244 + t = netdev_priv(dev);
8245 +
8246 ++ dev->rtnl_link_ops = &ip6_link_ops;
8247 + err = register_netdevice(dev);
8248 + if (err < 0)
8249 + goto out;
8250 +
8251 + strcpy(t->parms.name, dev->name);
8252 +- dev->rtnl_link_ops = &ip6_link_ops;
8253 +
8254 + dev_hold(dev);
8255 + ip6_tnl_link(ip6n, t);
8256 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
8257 +index e080fbbbc0e5..1b6ed70bab37 100644
8258 +--- a/net/ipv6/netfilter/ip6_tables.c
8259 ++++ b/net/ipv6/netfilter/ip6_tables.c
8260 +@@ -195,11 +195,12 @@ get_entry(const void *base, unsigned int offset)
8261 +
8262 + /* All zeroes == unconditional rule. */
8263 + /* Mildly perf critical (only if packet tracing is on) */
8264 +-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
8265 ++static inline bool unconditional(const struct ip6t_entry *e)
8266 + {
8267 + static const struct ip6t_ip6 uncond;
8268 +
8269 +- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
8270 ++ return e->target_offset == sizeof(struct ip6t_entry) &&
8271 ++ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
8272 + }
8273 +
8274 + static inline const struct xt_entry_target *
8275 +@@ -255,11 +256,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
8276 + } else if (s == e) {
8277 + (*rulenum)++;
8278 +
8279 +- if (s->target_offset == sizeof(struct ip6t_entry) &&
8280 ++ if (unconditional(s) &&
8281 + strcmp(t->target.u.kernel.target->name,
8282 + XT_STANDARD_TARGET) == 0 &&
8283 +- t->verdict < 0 &&
8284 +- unconditional(&s->ipv6)) {
8285 ++ t->verdict < 0) {
8286 + /* Tail of chains: STANDARD target (return/policy) */
8287 + *comment = *chainname == hookname
8288 + ? comments[NF_IP6_TRACE_COMMENT_POLICY]
8289 +@@ -449,6 +449,18 @@ ip6t_do_table(struct sk_buff *skb,
8290 + #endif
8291 + }
8292 +
8293 ++static bool find_jump_target(const struct xt_table_info *t,
8294 ++ const struct ip6t_entry *target)
8295 ++{
8296 ++ struct ip6t_entry *iter;
8297 ++
8298 ++ xt_entry_foreach(iter, t->entries, t->size) {
8299 ++ if (iter == target)
8300 ++ return true;
8301 ++ }
8302 ++ return false;
8303 ++}
8304 ++
8305 + /* Figures out from what hook each rule can be called: returns 0 if
8306 + there are loops. Puts hook bitmask in comefrom. */
8307 + static int
8308 +@@ -482,11 +494,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
8309 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
8310 +
8311 + /* Unconditional return/END. */
8312 +- if ((e->target_offset == sizeof(struct ip6t_entry) &&
8313 ++ if ((unconditional(e) &&
8314 + (strcmp(t->target.u.user.name,
8315 + XT_STANDARD_TARGET) == 0) &&
8316 +- t->verdict < 0 &&
8317 +- unconditional(&e->ipv6)) || visited) {
8318 ++ t->verdict < 0) || visited) {
8319 + unsigned int oldpos, size;
8320 +
8321 + if ((strcmp(t->target.u.user.name,
8322 +@@ -527,6 +538,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
8323 + size = e->next_offset;
8324 + e = (struct ip6t_entry *)
8325 + (entry0 + pos + size);
8326 ++ if (pos + size >= newinfo->size)
8327 ++ return 0;
8328 + e->counters.pcnt = pos;
8329 + pos += size;
8330 + } else {
8331 +@@ -545,9 +558,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
8332 + /* This a jump; chase it. */
8333 + duprintf("Jump rule %u -> %u\n",
8334 + pos, newpos);
8335 ++ e = (struct ip6t_entry *)
8336 ++ (entry0 + newpos);
8337 ++ if (!find_jump_target(newinfo, e))
8338 ++ return 0;
8339 + } else {
8340 + /* ... this is a fallthru */
8341 + newpos = pos + e->next_offset;
8342 ++ if (newpos >= newinfo->size)
8343 ++ return 0;
8344 + }
8345 + e = (struct ip6t_entry *)
8346 + (entry0 + newpos);
8347 +@@ -574,27 +593,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
8348 + module_put(par.match->me);
8349 + }
8350 +
8351 +-static int
8352 +-check_entry(const struct ip6t_entry *e, const char *name)
8353 +-{
8354 +- const struct xt_entry_target *t;
8355 +-
8356 +- if (!ip6_checkentry(&e->ipv6)) {
8357 +- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
8358 +- return -EINVAL;
8359 +- }
8360 +-
8361 +- if (e->target_offset + sizeof(struct xt_entry_target) >
8362 +- e->next_offset)
8363 +- return -EINVAL;
8364 +-
8365 +- t = ip6t_get_target_c(e);
8366 +- if (e->target_offset + t->u.target_size > e->next_offset)
8367 +- return -EINVAL;
8368 +-
8369 +- return 0;
8370 +-}
8371 +-
8372 + static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
8373 + {
8374 + const struct ip6t_ip6 *ipv6 = par->entryinfo;
8375 +@@ -673,10 +671,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
8376 + struct xt_mtchk_param mtpar;
8377 + struct xt_entry_match *ematch;
8378 +
8379 +- ret = check_entry(e, name);
8380 +- if (ret)
8381 +- return ret;
8382 +-
8383 + j = 0;
8384 + mtpar.net = net;
8385 + mtpar.table = name;
8386 +@@ -720,7 +714,7 @@ static bool check_underflow(const struct ip6t_entry *e)
8387 + const struct xt_entry_target *t;
8388 + unsigned int verdict;
8389 +
8390 +- if (!unconditional(&e->ipv6))
8391 ++ if (!unconditional(e))
8392 + return false;
8393 + t = ip6t_get_target_c(e);
8394 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
8395 +@@ -740,9 +734,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
8396 + unsigned int valid_hooks)
8397 + {
8398 + unsigned int h;
8399 ++ int err;
8400 +
8401 + if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
8402 +- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
8403 ++ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
8404 ++ (unsigned char *)e + e->next_offset > limit) {
8405 + duprintf("Bad offset %p\n", e);
8406 + return -EINVAL;
8407 + }
8408 +@@ -754,6 +750,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
8409 + return -EINVAL;
8410 + }
8411 +
8412 ++ if (!ip6_checkentry(&e->ipv6))
8413 ++ return -EINVAL;
8414 ++
8415 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
8416 ++ e->next_offset);
8417 ++ if (err)
8418 ++ return err;
8419 ++
8420 + /* Check hooks & underflows */
8421 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
8422 + if (!(valid_hooks & (1 << h)))
8423 +@@ -762,9 +766,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
8424 + newinfo->hook_entry[h] = hook_entries[h];
8425 + if ((unsigned char *)e - base == underflows[h]) {
8426 + if (!check_underflow(e)) {
8427 +- pr_err("Underflows must be unconditional and "
8428 +- "use the STANDARD target with "
8429 +- "ACCEPT/DROP\n");
8430 ++ pr_debug("Underflows must be unconditional and "
8431 ++ "use the STANDARD target with "
8432 ++ "ACCEPT/DROP\n");
8433 + return -EINVAL;
8434 + }
8435 + newinfo->underflow[h] = underflows[h];
8436 +@@ -1314,56 +1318,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
8437 + unsigned int i, curcpu;
8438 + struct xt_counters_info tmp;
8439 + struct xt_counters *paddc;
8440 +- unsigned int num_counters;
8441 +- char *name;
8442 +- int size;
8443 +- void *ptmp;
8444 + struct xt_table *t;
8445 + const struct xt_table_info *private;
8446 + int ret = 0;
8447 + const void *loc_cpu_entry;
8448 + struct ip6t_entry *iter;
8449 + unsigned int addend;
8450 +-#ifdef CONFIG_COMPAT
8451 +- struct compat_xt_counters_info compat_tmp;
8452 +-
8453 +- if (compat) {
8454 +- ptmp = &compat_tmp;
8455 +- size = sizeof(struct compat_xt_counters_info);
8456 +- } else
8457 +-#endif
8458 +- {
8459 +- ptmp = &tmp;
8460 +- size = sizeof(struct xt_counters_info);
8461 +- }
8462 +-
8463 +- if (copy_from_user(ptmp, user, size) != 0)
8464 +- return -EFAULT;
8465 +
8466 +-#ifdef CONFIG_COMPAT
8467 +- if (compat) {
8468 +- num_counters = compat_tmp.num_counters;
8469 +- name = compat_tmp.name;
8470 +- } else
8471 +-#endif
8472 +- {
8473 +- num_counters = tmp.num_counters;
8474 +- name = tmp.name;
8475 +- }
8476 +-
8477 +- if (len != size + num_counters * sizeof(struct xt_counters))
8478 +- return -EINVAL;
8479 +-
8480 +- paddc = vmalloc(len - size);
8481 +- if (!paddc)
8482 +- return -ENOMEM;
8483 +-
8484 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
8485 +- ret = -EFAULT;
8486 +- goto free;
8487 +- }
8488 +-
8489 +- t = xt_find_table_lock(net, AF_INET6, name);
8490 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
8491 ++ if (IS_ERR(paddc))
8492 ++ return PTR_ERR(paddc);
8493 ++ t = xt_find_table_lock(net, AF_INET6, tmp.name);
8494 + if (IS_ERR_OR_NULL(t)) {
8495 + ret = t ? PTR_ERR(t) : -ENOENT;
8496 + goto free;
8497 +@@ -1372,7 +1337,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
8498 +
8499 + local_bh_disable();
8500 + private = t->private;
8501 +- if (private->number != num_counters) {
8502 ++ if (private->number != tmp.num_counters) {
8503 + ret = -EINVAL;
8504 + goto unlock_up_free;
8505 + }
8506 +@@ -1452,7 +1417,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
8507 +
8508 + static int
8509 + compat_find_calc_match(struct xt_entry_match *m,
8510 +- const char *name,
8511 + const struct ip6t_ip6 *ipv6,
8512 + unsigned int hookmask,
8513 + int *size)
8514 +@@ -1488,21 +1452,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
8515 + struct xt_table_info *newinfo,
8516 + unsigned int *size,
8517 + const unsigned char *base,
8518 +- const unsigned char *limit,
8519 +- const unsigned int *hook_entries,
8520 +- const unsigned int *underflows,
8521 +- const char *name)
8522 ++ const unsigned char *limit)
8523 + {
8524 + struct xt_entry_match *ematch;
8525 + struct xt_entry_target *t;
8526 + struct xt_target *target;
8527 + unsigned int entry_offset;
8528 + unsigned int j;
8529 +- int ret, off, h;
8530 ++ int ret, off;
8531 +
8532 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
8533 + if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
8534 +- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
8535 ++ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
8536 ++ (unsigned char *)e + e->next_offset > limit) {
8537 + duprintf("Bad offset %p, limit = %p\n", e, limit);
8538 + return -EINVAL;
8539 + }
8540 +@@ -1514,8 +1476,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
8541 + return -EINVAL;
8542 + }
8543 +
8544 +- /* For purposes of check_entry casting the compat entry is fine */
8545 +- ret = check_entry((struct ip6t_entry *)e, name);
8546 ++ if (!ip6_checkentry(&e->ipv6))
8547 ++ return -EINVAL;
8548 ++
8549 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
8550 ++ e->target_offset, e->next_offset);
8551 + if (ret)
8552 + return ret;
8553 +
8554 +@@ -1523,8 +1488,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
8555 + entry_offset = (void *)e - (void *)base;
8556 + j = 0;
8557 + xt_ematch_foreach(ematch, e) {
8558 +- ret = compat_find_calc_match(ematch, name,
8559 +- &e->ipv6, e->comefrom, &off);
8560 ++ ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
8561 ++ &off);
8562 + if (ret != 0)
8563 + goto release_matches;
8564 + ++j;
8565 +@@ -1547,17 +1512,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
8566 + if (ret)
8567 + goto out;
8568 +
8569 +- /* Check hooks & underflows */
8570 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
8571 +- if ((unsigned char *)e - base == hook_entries[h])
8572 +- newinfo->hook_entry[h] = hook_entries[h];
8573 +- if ((unsigned char *)e - base == underflows[h])
8574 +- newinfo->underflow[h] = underflows[h];
8575 +- }
8576 +-
8577 +- /* Clear counters and comefrom */
8578 +- memset(&e->counters, 0, sizeof(e->counters));
8579 +- e->comefrom = 0;
8580 + return 0;
8581 +
8582 + out:
8583 +@@ -1571,18 +1525,17 @@ release_matches:
8584 + return ret;
8585 + }
8586 +
8587 +-static int
8588 ++static void
8589 + compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
8590 +- unsigned int *size, const char *name,
8591 ++ unsigned int *size,
8592 + struct xt_table_info *newinfo, unsigned char *base)
8593 + {
8594 + struct xt_entry_target *t;
8595 + struct ip6t_entry *de;
8596 + unsigned int origsize;
8597 +- int ret, h;
8598 ++ int h;
8599 + struct xt_entry_match *ematch;
8600 +
8601 +- ret = 0;
8602 + origsize = *size;
8603 + de = (struct ip6t_entry *)*dstptr;
8604 + memcpy(de, e, sizeof(struct ip6t_entry));
8605 +@@ -1591,11 +1544,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
8606 + *dstptr += sizeof(struct ip6t_entry);
8607 + *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
8608 +
8609 +- xt_ematch_foreach(ematch, e) {
8610 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
8611 +- if (ret != 0)
8612 +- return ret;
8613 +- }
8614 ++ xt_ematch_foreach(ematch, e)
8615 ++ xt_compat_match_from_user(ematch, dstptr, size);
8616 ++
8617 + de->target_offset = e->target_offset - (origsize - *size);
8618 + t = compat_ip6t_get_target(e);
8619 + xt_compat_target_from_user(t, dstptr, size);
8620 +@@ -1607,181 +1558,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
8621 + if ((unsigned char *)de - base < newinfo->underflow[h])
8622 + newinfo->underflow[h] -= origsize - *size;
8623 + }
8624 +- return ret;
8625 +-}
8626 +-
8627 +-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
8628 +- const char *name)
8629 +-{
8630 +- unsigned int j;
8631 +- int ret = 0;
8632 +- struct xt_mtchk_param mtpar;
8633 +- struct xt_entry_match *ematch;
8634 +-
8635 +- j = 0;
8636 +- mtpar.net = net;
8637 +- mtpar.table = name;
8638 +- mtpar.entryinfo = &e->ipv6;
8639 +- mtpar.hook_mask = e->comefrom;
8640 +- mtpar.family = NFPROTO_IPV6;
8641 +- xt_ematch_foreach(ematch, e) {
8642 +- ret = check_match(ematch, &mtpar);
8643 +- if (ret != 0)
8644 +- goto cleanup_matches;
8645 +- ++j;
8646 +- }
8647 +-
8648 +- ret = check_target(e, net, name);
8649 +- if (ret)
8650 +- goto cleanup_matches;
8651 +- return 0;
8652 +-
8653 +- cleanup_matches:
8654 +- xt_ematch_foreach(ematch, e) {
8655 +- if (j-- == 0)
8656 +- break;
8657 +- cleanup_match(ematch, net);
8658 +- }
8659 +- return ret;
8660 + }
8661 +
8662 + static int
8663 + translate_compat_table(struct net *net,
8664 +- const char *name,
8665 +- unsigned int valid_hooks,
8666 + struct xt_table_info **pinfo,
8667 + void **pentry0,
8668 +- unsigned int total_size,
8669 +- unsigned int number,
8670 +- unsigned int *hook_entries,
8671 +- unsigned int *underflows)
8672 ++ const struct compat_ip6t_replace *compatr)
8673 + {
8674 + unsigned int i, j;
8675 + struct xt_table_info *newinfo, *info;
8676 + void *pos, *entry0, *entry1;
8677 + struct compat_ip6t_entry *iter0;
8678 +- struct ip6t_entry *iter1;
8679 ++ struct ip6t_replace repl;
8680 + unsigned int size;
8681 + int ret = 0;
8682 +
8683 + info = *pinfo;
8684 + entry0 = *pentry0;
8685 +- size = total_size;
8686 +- info->number = number;
8687 +-
8688 +- /* Init all hooks to impossible value. */
8689 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
8690 +- info->hook_entry[i] = 0xFFFFFFFF;
8691 +- info->underflow[i] = 0xFFFFFFFF;
8692 +- }
8693 ++ size = compatr->size;
8694 ++ info->number = compatr->num_entries;
8695 +
8696 + duprintf("translate_compat_table: size %u\n", info->size);
8697 + j = 0;
8698 + xt_compat_lock(AF_INET6);
8699 +- xt_compat_init_offsets(AF_INET6, number);
8700 ++ xt_compat_init_offsets(AF_INET6, compatr->num_entries);
8701 + /* Walk through entries, checking offsets. */
8702 +- xt_entry_foreach(iter0, entry0, total_size) {
8703 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
8704 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
8705 + entry0,
8706 +- entry0 + total_size,
8707 +- hook_entries,
8708 +- underflows,
8709 +- name);
8710 ++ entry0 + compatr->size);
8711 + if (ret != 0)
8712 + goto out_unlock;
8713 + ++j;
8714 + }
8715 +
8716 + ret = -EINVAL;
8717 +- if (j != number) {
8718 ++ if (j != compatr->num_entries) {
8719 + duprintf("translate_compat_table: %u not %u entries\n",
8720 +- j, number);
8721 ++ j, compatr->num_entries);
8722 + goto out_unlock;
8723 + }
8724 +
8725 +- /* Check hooks all assigned */
8726 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
8727 +- /* Only hooks which are valid */
8728 +- if (!(valid_hooks & (1 << i)))
8729 +- continue;
8730 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
8731 +- duprintf("Invalid hook entry %u %u\n",
8732 +- i, hook_entries[i]);
8733 +- goto out_unlock;
8734 +- }
8735 +- if (info->underflow[i] == 0xFFFFFFFF) {
8736 +- duprintf("Invalid underflow %u %u\n",
8737 +- i, underflows[i]);
8738 +- goto out_unlock;
8739 +- }
8740 +- }
8741 +-
8742 + ret = -ENOMEM;
8743 + newinfo = xt_alloc_table_info(size);
8744 + if (!newinfo)
8745 + goto out_unlock;
8746 +
8747 +- newinfo->number = number;
8748 ++ newinfo->number = compatr->num_entries;
8749 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
8750 +- newinfo->hook_entry[i] = info->hook_entry[i];
8751 +- newinfo->underflow[i] = info->underflow[i];
8752 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
8753 ++ newinfo->underflow[i] = compatr->underflow[i];
8754 + }
8755 + entry1 = newinfo->entries[raw_smp_processor_id()];
8756 + pos = entry1;
8757 +- size = total_size;
8758 +- xt_entry_foreach(iter0, entry0, total_size) {
8759 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
8760 +- name, newinfo, entry1);
8761 +- if (ret != 0)
8762 +- break;
8763 +- }
8764 ++ size = compatr->size;
8765 ++ xt_entry_foreach(iter0, entry0, compatr->size)
8766 ++ compat_copy_entry_from_user(iter0, &pos, &size,
8767 ++ newinfo, entry1);
8768 ++
8769 ++ /* all module references in entry0 are now gone. */
8770 + xt_compat_flush_offsets(AF_INET6);
8771 + xt_compat_unlock(AF_INET6);
8772 +- if (ret)
8773 +- goto free_newinfo;
8774 +
8775 +- ret = -ELOOP;
8776 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
8777 +- goto free_newinfo;
8778 ++ memcpy(&repl, compatr, sizeof(*compatr));
8779 +
8780 +- i = 0;
8781 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
8782 +- ret = compat_check_entry(iter1, net, name);
8783 +- if (ret != 0)
8784 +- break;
8785 +- ++i;
8786 +- if (strcmp(ip6t_get_target(iter1)->u.user.name,
8787 +- XT_ERROR_TARGET) == 0)
8788 +- ++newinfo->stacksize;
8789 +- }
8790 +- if (ret) {
8791 +- /*
8792 +- * The first i matches need cleanup_entry (calls ->destroy)
8793 +- * because they had called ->check already. The other j-i
8794 +- * entries need only release.
8795 +- */
8796 +- int skip = i;
8797 +- j -= i;
8798 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
8799 +- if (skip-- > 0)
8800 +- continue;
8801 +- if (j-- == 0)
8802 +- break;
8803 +- compat_release_entry(iter0);
8804 +- }
8805 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
8806 +- if (i-- == 0)
8807 +- break;
8808 +- cleanup_entry(iter1, net);
8809 +- }
8810 +- xt_free_table_info(newinfo);
8811 +- return ret;
8812 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
8813 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
8814 ++ repl.underflow[i] = newinfo->underflow[i];
8815 + }
8816 +
8817 +- /* And one copy for every other CPU */
8818 +- for_each_possible_cpu(i)
8819 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
8820 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
8821 ++ repl.num_counters = 0;
8822 ++ repl.counters = NULL;
8823 ++ repl.size = newinfo->size;
8824 ++ ret = translate_table(net, newinfo, entry1, &repl);
8825 ++ if (ret)
8826 ++ goto free_newinfo;
8827 +
8828 + *pinfo = newinfo;
8829 + *pentry0 = entry1;
8830 +@@ -1790,17 +1642,16 @@ translate_compat_table(struct net *net,
8831 +
8832 + free_newinfo:
8833 + xt_free_table_info(newinfo);
8834 +-out:
8835 +- xt_entry_foreach(iter0, entry0, total_size) {
8836 ++ return ret;
8837 ++out_unlock:
8838 ++ xt_compat_flush_offsets(AF_INET6);
8839 ++ xt_compat_unlock(AF_INET6);
8840 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
8841 + if (j-- == 0)
8842 + break;
8843 + compat_release_entry(iter0);
8844 + }
8845 + return ret;
8846 +-out_unlock:
8847 +- xt_compat_flush_offsets(AF_INET6);
8848 +- xt_compat_unlock(AF_INET6);
8849 +- goto out;
8850 + }
8851 +
8852 + static int
8853 +@@ -1834,10 +1685,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
8854 + goto free_newinfo;
8855 + }
8856 +
8857 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
8858 +- &newinfo, &loc_cpu_entry, tmp.size,
8859 +- tmp.num_entries, tmp.hook_entry,
8860 +- tmp.underflow);
8861 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
8862 + if (ret != 0)
8863 + goto free_newinfo;
8864 +
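
Both the ip_tables and ip6_tables hunks install the same two defenses:
unconditional() now also insists that target_offset sits immediately
after the base entry, and every jump verdict is checked with
find_jump_target() so it can only land on the start of a real rule,
never in the middle of one. A user-space sketch of the jump check over
a flat rule blob (simplified layout, not the kernel structs):

    #include <stdbool.h>
    #include <stdint.h>

    struct fake_entry {
        uint16_t target_offset;
        uint16_t next_offset;   /* size of this rule */
    };

    /* a jump to newpos is legal only if it coincides with an entry
     * boundary while walking the blob rule by rule */
    static bool find_jump_target(const unsigned char *blob,
                                 unsigned int blob_size,
                                 unsigned int newpos)
    {
        unsigned int pos = 0;

        while (pos < blob_size) {
            const struct fake_entry *e = (const void *)(blob + pos);

            if (pos == newpos)
                return true;
            if (e->next_offset == 0)
                return false;   /* malformed; real code bounds this earlier */
            pos += e->next_offset;
        }
        return false;           /* mid-rule or out-of-range jump: reject */
    }

The kernel version walks with xt_entry_foreach() and compares entry
pointers directly, but the effect is the same.
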
8865 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
8866 +index 78d180615495..b8e14a5ae0b1 100644
8867 +--- a/net/ipv6/tcp_ipv6.c
8868 ++++ b/net/ipv6/tcp_ipv6.c
8869 +@@ -1749,7 +1749,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
8870 + destp = ntohs(inet->inet_dport);
8871 + srcp = ntohs(inet->inet_sport);
8872 +
8873 +- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
8874 ++ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
8875 ++ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
8876 ++ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
8877 + timer_active = 1;
8878 + timer_expires = icsk->icsk_timeout;
8879 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
8880 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
8881 +index a5ce70502699..7d0111696190 100644
8882 +--- a/net/ipv6/udp.c
8883 ++++ b/net/ipv6/udp.c
8884 +@@ -898,11 +898,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
8885 + ret = udpv6_queue_rcv_skb(sk, skb);
8886 + sock_put(sk);
8887 +
8888 +- /* a return value > 0 means to resubmit the input, but
8889 +- * it wants the return to be -protocol, or 0
8890 +- */
8891 ++ /* a return value > 0 means to resubmit the input */
8892 + if (ret > 0)
8893 +- return -ret;
8894 ++ return ret;
8895 +
8896 + return 0;
8897 + }
8898 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
8899 +index 369a9822488c..e45d2b77bb42 100644
8900 +--- a/net/l2tp/l2tp_ip.c
8901 ++++ b/net/l2tp/l2tp_ip.c
8902 +@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
8903 + struct l2tp_tunnel *tunnel = NULL;
8904 + int length;
8905 +
8906 +- /* Point to L2TP header */
8907 +- optr = ptr = skb->data;
8908 +-
8909 + if (!pskb_may_pull(skb, 4))
8910 + goto discard;
8911 +
8912 ++ /* Point to L2TP header */
8913 ++ optr = ptr = skb->data;
8914 + session_id = ntohl(*((__be32 *) ptr));
8915 + ptr += 4;
8916 +
8917 +@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
8918 + if (!pskb_may_pull(skb, length))
8919 + goto discard;
8920 +
8921 ++ /* Point to L2TP header */
8922 ++ optr = ptr = skb->data;
8923 ++ ptr += 4;
8924 + pr_debug("%s: ip recv\n", tunnel->name);
8925 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
8926 + }
8927 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
8928 +index 38658826175c..b8e469b832df 100644
8929 +--- a/net/l2tp/l2tp_ip6.c
8930 ++++ b/net/l2tp/l2tp_ip6.c
8931 +@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
8932 + struct l2tp_tunnel *tunnel = NULL;
8933 + int length;
8934 +
8935 +- /* Point to L2TP header */
8936 +- optr = ptr = skb->data;
8937 +-
8938 + if (!pskb_may_pull(skb, 4))
8939 + goto discard;
8940 +
8941 ++ /* Point to L2TP header */
8942 ++ optr = ptr = skb->data;
8943 + session_id = ntohl(*((__be32 *) ptr));
8944 + ptr += 4;
8945 +
8946 +@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
8947 + if (!pskb_may_pull(skb, length))
8948 + goto discard;
8949 +
8950 ++ /* Point to L2TP header */
8951 ++ optr = ptr = skb->data;
8952 ++ ptr += 4;
8953 + pr_debug("%s: ip recv\n", tunnel->name);
8954 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
8955 + }
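
The two l2tp hunks above fix the same pattern: pskb_may_pull() may
reallocate the skb header, so any pointer derived from skb->data before
the pull is stale afterwards, and the fix derives optr/ptr only after
each successful pull. A rough user-space analogue with a buffer whose
backing storage may move (may_pull() is a hypothetical helper, not the
skb API):

    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char *data; size_t len; };

    /* stand-in for pskb_may_pull(): on success the storage may have
     * moved, just like a reallocated skb head */
    static int may_pull(struct buf *b, size_t need)
    {
        unsigned char *n;

        if (b->len < need)
            return 0;
        n = malloc(b->len);     /* simulate the head moving */
        if (!n)
            return 0;
        memcpy(n, b->data, b->len);
        free(b->data);
        b->data = n;
        return 1;
    }

    static void parse(struct buf *b)
    {
        unsigned char *ptr;

        if (!may_pull(b, 4))
            return;
        ptr = b->data;          /* derive the pointer after the pull */
        (void)ptr;
        /* any later may_pull() must be followed by re-deriving ptr,
         * which is what the second hunk in each l2tp file does */
    }
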
8956 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
8957 +index bb9cbc17d926..3e8691895385 100644
8958 +--- a/net/llc/af_llc.c
8959 ++++ b/net/llc/af_llc.c
8960 +@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
8961 + if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
8962 + struct llc_pktinfo info;
8963 +
8964 ++ memset(&info, 0, sizeof(info));
8965 + info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
8966 + llc_pdu_decode_dsap(skb, &info.lpi_sap);
8967 + llc_pdu_decode_da(skb, info.lpi_mac);
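
The af_llc hunk is an information-leak fix: llc_pktinfo lives on the
stack, and without the memset its padding bytes (plus any field the
decode helpers never write) would reach userspace via the cmsg copy.
The af_packet hunk further down (packet_mc_add) applies the same idea
to the unused tail of a fixed-size address array. A minimal
illustration of the pattern, with a simplified stand-in struct:

    #include <string.h>

    struct pktinfo_like {
        int ifindex;
        unsigned char sap;
        unsigned char mac[6];   /* compiler may pad this struct */
    };

    static void fill_info(struct pktinfo_like *info, int ifindex)
    {
        memset(info, 0, sizeof(*info)); /* zero padding + unset fields */
        info->ifindex = ifindex;
        /* sap/mac would be filled by decode helpers in the real code;
         * anything left untouched is now zero, not stack garbage */
    }
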
8968 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
8969 +index 074cdfa04cc4..c848d0b38c86 100644
8970 +--- a/net/mac80211/rx.c
8971 ++++ b/net/mac80211/rx.c
8972 +@@ -2090,7 +2090,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
8973 + struct ieee80211_sub_if_data *sdata = rx->sdata;
8974 + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
8975 + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
8976 +- u16 q, hdrlen;
8977 ++ u16 ac, q, hdrlen;
8978 +
8979 + hdr = (struct ieee80211_hdr *) skb->data;
8980 + hdrlen = ieee80211_hdrlen(hdr->frame_control);
8981 +@@ -2160,7 +2160,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
8982 + ether_addr_equal(sdata->vif.addr, hdr->addr3))
8983 + return RX_CONTINUE;
8984 +
8985 +- q = ieee80211_select_queue_80211(sdata, skb, hdr);
8986 ++ ac = ieee80211_select_queue_80211(sdata, skb, hdr);
8987 ++ q = sdata->vif.hw_queue[ac];
8988 + if (ieee80211_queue_stopped(&local->hw, q)) {
8989 + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
8990 + return RX_DROP_MONITOR;
8991 +diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
8992 +index bed5f7042529..bb318e4623a3 100644
8993 +--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
8994 ++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
8995 +@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
8996 + dptr = skb->data + dataoff;
8997 + datalen = skb->len - dataoff;
8998 +
8999 +- if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
9000 ++ if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
9001 + return -EINVAL;
9002 +
9003 + /* N.B: pe_data is only set on success,
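
The ip_vs_pe_sip change is an off-by-offset fix: dptr already points
dataoff bytes into the packet, so passing dataoff to get_callid() again
made the search start 2*dataoff bytes in, skipping the first part of
the SIP payload. A toy demonstration (the header length and payload are
made up for illustration):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char pkt[64] = "UDPHDR..";          /* 8 bytes of pretend header */
        const size_t dataoff = 8;
        const char payload[] = "Call-ID: abc\r\n";

        memcpy(pkt + dataoff, payload, sizeof(payload));
        const char *dptr = pkt + dataoff;   /* payload start */

        /* old: search began at dptr + dataoff, past "Call-ID:" */
        printf("old start: %s", dptr + dataoff);
        /* new: search begins at dptr + 0, the true payload start */
        printf("new start: %s", dptr);
        return 0;
    }
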
9004 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
9005 +index 133eb4772f12..0c01ad4e078f 100644
9006 +--- a/net/netfilter/x_tables.c
9007 ++++ b/net/netfilter/x_tables.c
9008 +@@ -418,6 +418,47 @@ int xt_check_match(struct xt_mtchk_param *par,
9009 + }
9010 + EXPORT_SYMBOL_GPL(xt_check_match);
9011 +
9012 ++/** xt_check_entry_match - check that matches end before start of target
9013 ++ *
9014 ++ * @match: beginning of xt_entry_match
9015 ++ * @target: beginning of this rule's target (alleged end of matches)
9016 ++ * @alignment: alignment requirement of match structures
9017 ++ *
9018 ++ * Validates that all matches add up to the beginning of the target,
9019 ++ * and that each match covers at least the base structure size.
9020 ++ *
9021 ++ * Return: 0 on success, negative errno on failure.
9022 ++ */
9023 ++static int xt_check_entry_match(const char *match, const char *target,
9024 ++ const size_t alignment)
9025 ++{
9026 ++ const struct xt_entry_match *pos;
9027 ++ int length = target - match;
9028 ++
9029 ++ if (length == 0) /* no matches */
9030 ++ return 0;
9031 ++
9032 ++ pos = (struct xt_entry_match *)match;
9033 ++ do {
9034 ++ if ((unsigned long)pos % alignment)
9035 ++ return -EINVAL;
9036 ++
9037 ++ if (length < (int)sizeof(struct xt_entry_match))
9038 ++ return -EINVAL;
9039 ++
9040 ++ if (pos->u.match_size < sizeof(struct xt_entry_match))
9041 ++ return -EINVAL;
9042 ++
9043 ++ if (pos->u.match_size > length)
9044 ++ return -EINVAL;
9045 ++
9046 ++ length -= pos->u.match_size;
9047 ++ pos = ((void *)((char *)(pos) + (pos)->u.match_size));
9048 ++ } while (length > 0);
9049 ++
9050 ++ return 0;
9051 ++}
9052 ++
9053 + #ifdef CONFIG_COMPAT
9054 + int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
9055 + {
9056 +@@ -487,13 +528,14 @@ int xt_compat_match_offset(const struct xt_match *match)
9057 + }
9058 + EXPORT_SYMBOL_GPL(xt_compat_match_offset);
9059 +
9060 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
9061 +- unsigned int *size)
9062 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
9063 ++ unsigned int *size)
9064 + {
9065 + const struct xt_match *match = m->u.kernel.match;
9066 + struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
9067 + int pad, off = xt_compat_match_offset(match);
9068 + u_int16_t msize = cm->u.user.match_size;
9069 ++ char name[sizeof(m->u.user.name)];
9070 +
9071 + m = *dstptr;
9072 + memcpy(m, cm, sizeof(*cm));
9073 +@@ -507,10 +549,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
9074 +
9075 + msize += off;
9076 + m->u.user.match_size = msize;
9077 ++ strlcpy(name, match->name, sizeof(name));
9078 ++ module_put(match->me);
9079 ++ strncpy(m->u.user.name, name, sizeof(m->u.user.name));
9080 +
9081 + *size += off;
9082 + *dstptr += msize;
9083 +- return 0;
9084 + }
9085 + EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
9086 +
9087 +@@ -541,8 +585,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
9088 + return 0;
9089 + }
9090 + EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
9091 ++
9092 ++/* non-compat version may have padding after verdict */
9093 ++struct compat_xt_standard_target {
9094 ++ struct compat_xt_entry_target t;
9095 ++ compat_uint_t verdict;
9096 ++};
9097 ++
9098 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
9099 ++ unsigned int target_offset,
9100 ++ unsigned int next_offset)
9101 ++{
9102 ++ long size_of_base_struct = elems - (const char *)base;
9103 ++ const struct compat_xt_entry_target *t;
9104 ++ const char *e = base;
9105 ++
9106 ++ if (target_offset < size_of_base_struct)
9107 ++ return -EINVAL;
9108 ++
9109 ++ if (target_offset + sizeof(*t) > next_offset)
9110 ++ return -EINVAL;
9111 ++
9112 ++ t = (void *)(e + target_offset);
9113 ++ if (t->u.target_size < sizeof(*t))
9114 ++ return -EINVAL;
9115 ++
9116 ++ if (target_offset + t->u.target_size > next_offset)
9117 ++ return -EINVAL;
9118 ++
9119 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
9120 ++ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
9121 ++ return -EINVAL;
9122 ++
9123 ++ /* compat_xt_entry match has less strict alignment requirements,
9124 ++ * otherwise they are identical. In case of padding differences
9125 ++ * we need to add compat version of xt_check_entry_match.
9126 ++ */
9127 ++ BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
9128 ++
9129 ++ return xt_check_entry_match(elems, base + target_offset,
9130 ++ __alignof__(struct compat_xt_entry_match));
9131 ++}
9132 ++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
9133 + #endif /* CONFIG_COMPAT */
9134 +
9135 ++/**
9136 ++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
9137 ++ *
9138 ++ * @base: pointer to arp/ip/ip6t_entry
9139 ++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
9140 ++ * @target_offset: the arp/ip/ip6_t->target_offset
9141 ++ * @next_offset: the arp/ip/ip6_t->next_offset
9142 ++ *
9143 ++ * validates that target_offset and next_offset are sane and that all
9144 ++ * match sizes (if any) align with the target offset.
9145 ++ *
9146 ++ * This function does not validate the targets or matches themselves, it
9147 ++ * only tests that all the offsets and sizes are correct, that all
9148 ++ * match structures are aligned, and that the last structure ends where
9149 ++ * the target structure begins.
9150 ++ *
9151 ++ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
9152 ++ *
9153 ++ * The arp/ip/ip6t_entry structure @base must have passed the following tests:
9154 ++ * - it must point to a valid memory location
9155 ++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
9156 ++ * length.
9157 ++ *
9158 ++ * A well-formed entry looks like this:
9159 ++ *
9160 ++ * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
9161 ++ * e->elems[]-----' | |
9162 ++ * matchsize | |
9163 ++ * matchsize | |
9164 ++ * | |
9165 ++ * target_offset---------------------------------' |
9166 ++ * next_offset---------------------------------------------------'
9167 ++ *
9168 ++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
9169 ++ * This is where matches (if any) and the target reside.
9170 ++ * target_offset: beginning of target.
9171 ++ * next_offset: start of the next rule; also: size of this rule.
9172 ++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
9173 ++ *
9174 ++ * Every match stores its size, sum of sizes must not exceed target_offset.
9175 ++ *
9176 ++ * Return: 0 on success, negative errno on failure.
9177 ++ */
9178 ++int xt_check_entry_offsets(const void *base,
9179 ++ const char *elems,
9180 ++ unsigned int target_offset,
9181 ++ unsigned int next_offset)
9182 ++{
9183 ++ long size_of_base_struct = elems - (const char *)base;
9184 ++ const struct xt_entry_target *t;
9185 ++ const char *e = base;
9186 ++
9187 ++ /* target start is within the ip/ip6/arpt_entry struct */
9188 ++ if (target_offset < size_of_base_struct)
9189 ++ return -EINVAL;
9190 ++
9191 ++ if (target_offset + sizeof(*t) > next_offset)
9192 ++ return -EINVAL;
9193 ++
9194 ++ t = (void *)(e + target_offset);
9195 ++ if (t->u.target_size < sizeof(*t))
9196 ++ return -EINVAL;
9197 ++
9198 ++ if (target_offset + t->u.target_size > next_offset)
9199 ++ return -EINVAL;
9200 ++
9201 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
9202 ++ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
9203 ++ return -EINVAL;
9204 ++
9205 ++ return xt_check_entry_match(elems, base + target_offset,
9206 ++ __alignof__(struct xt_entry_match));
9207 ++}
9208 ++EXPORT_SYMBOL(xt_check_entry_offsets);
9209 ++
9210 + int xt_check_target(struct xt_tgchk_param *par,
9211 + unsigned int size, u_int8_t proto, bool inv_proto)
9212 + {
9213 +@@ -593,6 +754,80 @@ int xt_check_target(struct xt_tgchk_param *par,
9214 + }
9215 + EXPORT_SYMBOL_GPL(xt_check_target);
9216 +
9217 ++/**
9218 ++ * xt_copy_counters_from_user - copy counters and metadata from userspace
9219 ++ *
9220 ++ * @user: src pointer to userspace memory
9221 ++ * @len: alleged size of userspace memory
9222 ++ * @info: where to store the xt_counters_info metadata
9223 ++ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
9224 ++ *
9225 ++ * Copies counter metadata from @user and stores it in @info.
9226 ++ *
9227 ++ * vmallocs memory to hold the counters, then copies the counter data
9228 ++ * from @user to the new memory and returns a pointer to it.
9229 ++ *
9230 ++ * If @compat is true, @info gets converted automatically to the 64bit
9231 ++ * representation.
9232 ++ *
9233 ++ * The metadata associated with the counters is stored in @info.
9234 ++ *
9235 ++ * Return: pointer that the caller has to test via IS_ERR().
9236 ++ * If IS_ERR() is false, the caller must vfree the pointer.
9237 ++ */
9238 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
9239 ++ struct xt_counters_info *info, bool compat)
9240 ++{
9241 ++ void *mem;
9242 ++ u64 size;
9243 ++
9244 ++#ifdef CONFIG_COMPAT
9245 ++ if (compat) {
9246 ++ /* structures only differ in size due to alignment */
9247 ++ struct compat_xt_counters_info compat_tmp;
9248 ++
9249 ++ if (len <= sizeof(compat_tmp))
9250 ++ return ERR_PTR(-EINVAL);
9251 ++
9252 ++ len -= sizeof(compat_tmp);
9253 ++ if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
9254 ++ return ERR_PTR(-EFAULT);
9255 ++
9256 ++ strlcpy(info->name, compat_tmp.name, sizeof(info->name));
9257 ++ info->num_counters = compat_tmp.num_counters;
9258 ++ user += sizeof(compat_tmp);
9259 ++ } else
9260 ++#endif
9261 ++ {
9262 ++ if (len <= sizeof(*info))
9263 ++ return ERR_PTR(-EINVAL);
9264 ++
9265 ++ len -= sizeof(*info);
9266 ++ if (copy_from_user(info, user, sizeof(*info)) != 0)
9267 ++ return ERR_PTR(-EFAULT);
9268 ++
9269 ++ info->name[sizeof(info->name) - 1] = '\0';
9270 ++ user += sizeof(*info);
9271 ++ }
9272 ++
9273 ++ size = sizeof(struct xt_counters);
9274 ++ size *= info->num_counters;
9275 ++
9276 ++ if (size != (u64)len)
9277 ++ return ERR_PTR(-EINVAL);
9278 ++
9279 ++ mem = vmalloc(len);
9280 ++ if (!mem)
9281 ++ return ERR_PTR(-ENOMEM);
9282 ++
9283 ++ if (copy_from_user(mem, user, len) == 0)
9284 ++ return mem;
9285 ++
9286 ++ vfree(mem);
9287 ++ return ERR_PTR(-EFAULT);
9288 ++}
9289 ++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
9290 ++
9291 + #ifdef CONFIG_COMPAT
9292 + int xt_compat_target_offset(const struct xt_target *target)
9293 + {
9294 +@@ -608,6 +843,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
9295 + struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
9296 + int pad, off = xt_compat_target_offset(target);
9297 + u_int16_t tsize = ct->u.user.target_size;
9298 ++ char name[sizeof(t->u.user.name)];
9299 +
9300 + t = *dstptr;
9301 + memcpy(t, ct, sizeof(*ct));
9302 +@@ -621,6 +857,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
9303 +
9304 + tsize += off;
9305 + t->u.user.target_size = tsize;
9306 ++ strlcpy(name, target->name, sizeof(name));
9307 ++ module_put(target->me);
9308 ++ strncpy(t->u.user.name, name, sizeof(t->u.user.name));
9309 +
9310 + *size += off;
9311 + *dstptr += tsize;
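
The new shared helper xt_copy_counters_from_user() replaces three
near-identical copies in arp/ip/ip6_tables and tightens their length
check: the expected size is now computed in 64 bits, so a huge
num_counters can no longer wrap the multiplication. A sketch of just
that validation step, simplified and outside the kernel:

    #include <stdint.h>
    #include <stddef.h>

    struct counter { uint64_t pcnt, bcnt; };  /* like struct xt_counters */

    /* len is what userspace claims to have passed; it must be the
     * header plus exactly num_counters records, with no 32-bit wrap */
    static int counters_len_ok(size_t len, size_t hdr_size,
                               uint64_t num_counters)
    {
        uint64_t size;

        if (len <= hdr_size)
            return 0;               /* not even a full header */
        size = (uint64_t)sizeof(struct counter) * num_counters;
        return size == (uint64_t)(len - hdr_size);  /* exact fit only */
    }

Only when the length matches exactly does the real helper vmalloc the
buffer and copy the counter array in.
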
9312 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
9313 +index fe106b50053e..15fc0938e1c4 100644
9314 +--- a/net/netlink/af_netlink.c
9315 ++++ b/net/netlink/af_netlink.c
9316 +@@ -2630,6 +2630,7 @@ static int netlink_dump(struct sock *sk)
9317 + struct netlink_callback *cb;
9318 + struct sk_buff *skb = NULL;
9319 + struct nlmsghdr *nlh;
9320 ++ struct module *module;
9321 + int len, err = -ENOBUFS;
9322 + int alloc_size;
9323 +
9324 +@@ -2699,9 +2700,11 @@ static int netlink_dump(struct sock *sk)
9325 + cb->done(cb);
9326 +
9327 + nlk->cb_running = false;
9328 ++ module = cb->module;
9329 ++ skb = cb->skb;
9330 + mutex_unlock(nlk->cb_mutex);
9331 +- module_put(cb->module);
9332 +- consume_skb(cb->skb);
9333 ++ module_put(module);
9334 ++ consume_skb(skb);
9335 + return 0;
9336 +
9337 + errout_skb:
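
The netlink_dump change reorders the teardown: cb->module and cb->skb
are read into locals while cb_mutex is still held, because once the
mutex is dropped the callback slot may be reused by a new dump and the
fields can change underneath us. The shape of the fix as a generic
snapshot-under-lock pattern (a pthreads stand-in, not the kernel
locking API):

    #include <pthread.h>

    struct callback { void *module; void *skb; };

    static pthread_mutex_t cb_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void finish_dump(struct callback *cb)
    {
        void *module, *skb;

        pthread_mutex_lock(&cb_mutex);
        /* snapshot everything still needed while the lock is held */
        module = cb->module;
        skb = cb->skb;
        pthread_mutex_unlock(&cb_mutex);

        /* release using the snapshots; cb itself may be reused by now */
        (void)module;   /* module_put(module) in the kernel */
        (void)skb;      /* consume_skb(skb) in the kernel */
    }
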
9338 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
9339 +index 22853af1b6b7..e73b3fb1a144 100644
9340 +--- a/net/packet/af_packet.c
9341 ++++ b/net/packet/af_packet.c
9342 +@@ -3143,6 +3143,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
9343 + i->ifindex = mreq->mr_ifindex;
9344 + i->alen = mreq->mr_alen;
9345 + memcpy(i->addr, mreq->mr_address, i->alen);
9346 ++ memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
9347 + i->count = 1;
9348 + i->next = po->mclist;
9349 + po->mclist = i;
9350 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
9351 +index a25fae3c8ad6..a2a7a81b2b0b 100644
9352 +--- a/net/sched/sch_api.c
9353 ++++ b/net/sched/sch_api.c
9354 +@@ -740,14 +740,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
9355 + return 0;
9356 + }
9357 +
9358 +-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
9359 ++void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
9360 ++ unsigned int len)
9361 + {
9362 + const struct Qdisc_class_ops *cops;
9363 + unsigned long cl;
9364 + u32 parentid;
9365 + int drops;
9366 +
9367 +- if (n == 0)
9368 ++ if (n == 0 && len == 0)
9369 + return;
9370 + drops = max_t(int, n, 0);
9371 + while ((parentid = sch->parent)) {
9372 +@@ -766,10 +767,11 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
9373 + cops->put(sch, cl);
9374 + }
9375 + sch->q.qlen -= n;
9376 ++ sch->qstats.backlog -= len;
9377 + __qdisc_qstats_drop(sch, drops);
9378 + }
9379 + }
9380 +-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
9381 ++EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
9382 +
9383 + static void notify_and_destroy(struct net *net, struct sk_buff *skb,
9384 + struct nlmsghdr *n, u32 clid,
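
[This sch_api.c hunk is the core of the long scheduler series that
follows: qdisc_tree_decrease_qlen() becomes qdisc_tree_reduce_backlog()
and now propagates the dropped *byte* count up the hierarchy, so parent
qdiscs keep qstats.backlog consistent with q.qlen rather than tracking
packets alone. Most graft callbacks below are converted to a new
qdisc_replace() helper whose definition is not part of this hunk; from
the open-coded sequences it replaces, it plausibly looks like this
sketch (assumed shape, inferred purely from the call sites):

    static inline struct Qdisc *qdisc_replace(struct Qdisc *sch,
                                              struct Qdisc *new,
                                              struct Qdisc **pold)
    {
            struct Qdisc *old;

            sch_tree_lock(sch);
            old = *pold;
            *pold = new;
            if (old) {
                    /* flush the old child's packets *and* bytes
                     * out of every ancestor's counters */
                    qdisc_tree_reduce_backlog(old, old->q.qlen,
                                              old->qstats.backlog);
                    qdisc_reset(old);
            }
            sch_tree_unlock(sch);
            return old;
    }]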
9385 +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
9386 +index beeb75f80fdb..f6e7a60012b1 100644
9387 +--- a/net/sched/sch_cbq.c
9388 ++++ b/net/sched/sch_cbq.c
9389 +@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
9390 + new->reshape_fail = cbq_reshape_fail;
9391 + #endif
9392 + }
9393 +- sch_tree_lock(sch);
9394 +- *old = cl->q;
9395 +- cl->q = new;
9396 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
9397 +- qdisc_reset(*old);
9398 +- sch_tree_unlock(sch);
9399 +
9400 ++ *old = qdisc_replace(sch, new, &cl->q);
9401 + return 0;
9402 + }
9403 +
9404 +@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
9405 + {
9406 + struct cbq_sched_data *q = qdisc_priv(sch);
9407 + struct cbq_class *cl = (struct cbq_class *)arg;
9408 +- unsigned int qlen;
9409 ++ unsigned int qlen, backlog;
9410 +
9411 + if (cl->filters || cl->children || cl == &q->link)
9412 + return -EBUSY;
9413 +@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
9414 + sch_tree_lock(sch);
9415 +
9416 + qlen = cl->q->q.qlen;
9417 ++ backlog = cl->q->qstats.backlog;
9418 + qdisc_reset(cl->q);
9419 +- qdisc_tree_decrease_qlen(cl->q, qlen);
9420 ++ qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
9421 +
9422 + if (cl->next_alive)
9423 + cbq_deactivate_class(cl);
9424 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
9425 +index c009eb9045ce..3f6437db9b0f 100644
9426 +--- a/net/sched/sch_choke.c
9427 ++++ b/net/sched/sch_choke.c
9428 +@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
9429 + choke_zap_tail_holes(q);
9430 +
9431 + qdisc_qstats_backlog_dec(sch, skb);
9432 ++ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
9433 + qdisc_drop(skb, sch);
9434 +- qdisc_tree_decrease_qlen(sch, 1);
9435 + --sch->q.qlen;
9436 + }
9437 +
9438 +@@ -449,6 +449,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
9439 + old = q->tab;
9440 + if (old) {
9441 + unsigned int oqlen = sch->q.qlen, tail = 0;
9442 ++ unsigned dropped = 0;
9443 +
9444 + while (q->head != q->tail) {
9445 + struct sk_buff *skb = q->tab[q->head];
9446 +@@ -460,11 +461,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
9447 + ntab[tail++] = skb;
9448 + continue;
9449 + }
9450 ++ dropped += qdisc_pkt_len(skb);
9451 + qdisc_qstats_backlog_dec(sch, skb);
9452 + --sch->q.qlen;
9453 + qdisc_drop(skb, sch);
9454 + }
9455 +- qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
9456 ++ qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
9457 + q->head = 0;
9458 + q->tail = tail;
9459 + }
9460 +diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
9461 +index de28f8e968e8..0d60ea5a5bb6 100644
9462 +--- a/net/sched/sch_codel.c
9463 ++++ b/net/sched/sch_codel.c
9464 +@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
9465 +
9466 + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
9467 +
9468 +- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
9469 ++ /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
9470 + * or HTB crashes. Defer it for next round.
9471 + */
9472 + if (q->stats.drop_count && sch->q.qlen) {
9473 +- qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
9474 ++ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
9475 + q->stats.drop_count = 0;
9476 ++ q->stats.drop_len = 0;
9477 + }
9478 + if (skb)
9479 + qdisc_bstats_update(sch, skb);
9480 +@@ -115,7 +116,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
9481 + {
9482 + struct codel_sched_data *q = qdisc_priv(sch);
9483 + struct nlattr *tb[TCA_CODEL_MAX + 1];
9484 +- unsigned int qlen;
9485 ++ unsigned int qlen, dropped = 0;
9486 + int err;
9487 +
9488 + if (!opt)
9489 +@@ -149,10 +150,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
9490 + while (sch->q.qlen > sch->limit) {
9491 + struct sk_buff *skb = __skb_dequeue(&sch->q);
9492 +
9493 ++ dropped += qdisc_pkt_len(skb);
9494 + qdisc_qstats_backlog_dec(sch, skb);
9495 + qdisc_drop(skb, sch);
9496 + }
9497 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
9498 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
9499 +
9500 + sch_tree_unlock(sch);
9501 + return 0;
9502 +diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
9503 +index 338706092c27..e599803caa1e 100644
9504 +--- a/net/sched/sch_drr.c
9505 ++++ b/net/sched/sch_drr.c
9506 +@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
9507 + static void drr_purge_queue(struct drr_class *cl)
9508 + {
9509 + unsigned int len = cl->qdisc->q.qlen;
9510 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
9511 +
9512 + qdisc_reset(cl->qdisc);
9513 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
9514 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
9515 + }
9516 +
9517 + static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
9518 +@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
9519 + new = &noop_qdisc;
9520 + }
9521 +
9522 +- sch_tree_lock(sch);
9523 +- drr_purge_queue(cl);
9524 +- *old = cl->qdisc;
9525 +- cl->qdisc = new;
9526 +- sch_tree_unlock(sch);
9527 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
9528 + return 0;
9529 + }
9530 +
9531 +diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
9532 +index 227114f27f94..eb87a2a94d19 100644
9533 +--- a/net/sched/sch_dsmark.c
9534 ++++ b/net/sched/sch_dsmark.c
9535 +@@ -67,13 +67,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
9536 + new = &noop_qdisc;
9537 + }
9538 +
9539 +- sch_tree_lock(sch);
9540 +- *old = p->q;
9541 +- p->q = new;
9542 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
9543 +- qdisc_reset(*old);
9544 +- sch_tree_unlock(sch);
9545 +-
9546 ++ *old = qdisc_replace(sch, new, &p->q);
9547 + return 0;
9548 + }
9549 +
9550 +@@ -262,6 +256,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9551 + return err;
9552 + }
9553 +
9554 ++ qdisc_qstats_backlog_inc(sch, skb);
9555 + sch->q.qlen++;
9556 +
9557 + return NET_XMIT_SUCCESS;
9558 +@@ -284,6 +279,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
9559 + return NULL;
9560 +
9561 + qdisc_bstats_update(sch, skb);
9562 ++ qdisc_qstats_backlog_dec(sch, skb);
9563 + sch->q.qlen--;
9564 +
9565 + index = skb->tc_index & (p->indices - 1);
9566 +@@ -399,6 +395,7 @@ static void dsmark_reset(struct Qdisc *sch)
9567 +
9568 + pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
9569 + qdisc_reset(p->q);
9570 ++ sch->qstats.backlog = 0;
9571 + sch->q.qlen = 0;
9572 + }
9573 +
9574 +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
9575 +index cbd7e1fd23b4..01fe9d532075 100644
9576 +--- a/net/sched/sch_fq.c
9577 ++++ b/net/sched/sch_fq.c
9578 +@@ -644,6 +644,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
9579 + struct fq_sched_data *q = qdisc_priv(sch);
9580 + struct nlattr *tb[TCA_FQ_MAX + 1];
9581 + int err, drop_count = 0;
9582 ++ unsigned drop_len = 0;
9583 + u32 fq_log;
9584 +
9585 + if (!opt)
9586 +@@ -709,10 +710,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
9587 +
9588 + if (!skb)
9589 + break;
9590 ++ drop_len += qdisc_pkt_len(skb);
9591 + kfree_skb(skb);
9592 + drop_count++;
9593 + }
9594 +- qdisc_tree_decrease_qlen(sch, drop_count);
9595 ++ qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
9596 +
9597 + sch_tree_unlock(sch);
9598 + return err;
9599 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
9600 +index 1e52decb7b59..8d21f1b5d5b9 100644
9601 +--- a/net/sched/sch_fq_codel.c
9602 ++++ b/net/sched/sch_fq_codel.c
9603 +@@ -173,7 +173,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
9604 + static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9605 + {
9606 + struct fq_codel_sched_data *q = qdisc_priv(sch);
9607 +- unsigned int idx;
9608 ++ unsigned int idx, prev_backlog;
9609 + struct fq_codel_flow *flow;
9610 + int uninitialized_var(ret);
9611 +
9612 +@@ -201,6 +201,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9613 + if (++sch->q.qlen <= sch->limit)
9614 + return NET_XMIT_SUCCESS;
9615 +
9616 ++ prev_backlog = sch->qstats.backlog;
9617 + q->drop_overlimit++;
9618 + /* Return Congestion Notification only if we dropped a packet
9619 + * from this flow.
9620 +@@ -209,7 +210,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9621 + return NET_XMIT_CN;
9622 +
9623 + /* As we dropped a packet, better let upper stack know this */
9624 +- qdisc_tree_decrease_qlen(sch, 1);
9625 ++ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
9626 + return NET_XMIT_SUCCESS;
9627 + }
9628 +
9629 +@@ -239,6 +240,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
9630 + struct fq_codel_flow *flow;
9631 + struct list_head *head;
9632 + u32 prev_drop_count, prev_ecn_mark;
9633 ++ unsigned int prev_backlog;
9634 +
9635 + begin:
9636 + head = &q->new_flows;
9637 +@@ -257,6 +259,7 @@ begin:
9638 +
9639 + prev_drop_count = q->cstats.drop_count;
9640 + prev_ecn_mark = q->cstats.ecn_mark;
9641 ++ prev_backlog = sch->qstats.backlog;
9642 +
9643 + skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
9644 + dequeue);
9645 +@@ -274,12 +277,14 @@ begin:
9646 + }
9647 + qdisc_bstats_update(sch, skb);
9648 + flow->deficit -= qdisc_pkt_len(skb);
9649 +- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
9650 ++ /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
9651 + * or HTB crashes. Defer it for next round.
9652 + */
9653 + if (q->cstats.drop_count && sch->q.qlen) {
9654 +- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
9655 ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
9656 ++ q->cstats.drop_len);
9657 + q->cstats.drop_count = 0;
9658 ++ q->cstats.drop_len = 0;
9659 + }
9660 + return skb;
9661 + }
9662 +@@ -347,11 +352,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
9663 + while (sch->q.qlen > sch->limit) {
9664 + struct sk_buff *skb = fq_codel_dequeue(sch);
9665 +
9666 ++ q->cstats.drop_len += qdisc_pkt_len(skb);
9667 + kfree_skb(skb);
9668 + q->cstats.drop_count++;
9669 + }
9670 +- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
9671 ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
9672 + q->cstats.drop_count = 0;
9673 ++ q->cstats.drop_len = 0;
9674 +
9675 + sch_tree_unlock(sch);
9676 + return 0;
9677 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
9678 +index 507edcf64088..49003540186c 100644
9679 +--- a/net/sched/sch_generic.c
9680 ++++ b/net/sched/sch_generic.c
9681 +@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
9682 + if (validate)
9683 + skb = validate_xmit_skb_list(skb, dev);
9684 +
9685 +- if (skb) {
9686 ++ if (likely(skb)) {
9687 + HARD_TX_LOCK(dev, txq, smp_processor_id());
9688 + if (!netif_xmit_frozen_or_stopped(txq))
9689 + skb = dev_hard_start_xmit(skb, dev, txq, &ret);
9690 +
9691 + HARD_TX_UNLOCK(dev, txq);
9692 ++ } else {
9693 ++ spin_lock(root_lock);
9694 ++ return qdisc_qlen(q);
9695 + }
9696 + spin_lock(root_lock);
9697 +
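
[The sch_direct_xmit() hunk handles validate_xmit_skb_list() dropping
the entire list: skb is then NULL and the old code would fall through
toward transmit and requeue handling with nothing to send. Since the
function is expected to return with the qdisc root lock held, the new
branch re-takes root_lock itself and reports the current queue length
so the caller can decide whether to reschedule:

    } else {
            /* whole list dropped during validation: nothing to send,
             * but the caller expects root_lock to be held on return */
            spin_lock(root_lock);
            return qdisc_qlen(q);
    }]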
9698 +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
9699 +index e6c7416d0332..d3e21dac8b40 100644
9700 +--- a/net/sched/sch_hfsc.c
9701 ++++ b/net/sched/sch_hfsc.c
9702 +@@ -895,9 +895,10 @@ static void
9703 + hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
9704 + {
9705 + unsigned int len = cl->qdisc->q.qlen;
9706 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
9707 +
9708 + qdisc_reset(cl->qdisc);
9709 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
9710 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
9711 + }
9712 +
9713 + static void
9714 +@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
9715 + new = &noop_qdisc;
9716 + }
9717 +
9718 +- sch_tree_lock(sch);
9719 +- hfsc_purge_queue(sch, cl);
9720 +- *old = cl->qdisc;
9721 +- cl->qdisc = new;
9722 +- sch_tree_unlock(sch);
9723 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
9724 + return 0;
9725 + }
9726 +
9727 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
9728 +index 15d3aabfe250..792c6f330f77 100644
9729 +--- a/net/sched/sch_hhf.c
9730 ++++ b/net/sched/sch_hhf.c
9731 +@@ -390,6 +390,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9732 + struct hhf_sched_data *q = qdisc_priv(sch);
9733 + enum wdrr_bucket_idx idx;
9734 + struct wdrr_bucket *bucket;
9735 ++ unsigned int prev_backlog;
9736 +
9737 + idx = hhf_classify(skb, sch);
9738 +
9739 +@@ -417,6 +418,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9740 + if (++sch->q.qlen <= sch->limit)
9741 + return NET_XMIT_SUCCESS;
9742 +
9743 ++ prev_backlog = sch->qstats.backlog;
9744 + q->drop_overlimit++;
9745 + /* Return Congestion Notification only if we dropped a packet from this
9746 + * bucket.
9747 +@@ -425,7 +427,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9748 + return NET_XMIT_CN;
9749 +
9750 + /* As we dropped a packet, better let upper stack know this. */
9751 +- qdisc_tree_decrease_qlen(sch, 1);
9752 ++ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
9753 + return NET_XMIT_SUCCESS;
9754 + }
9755 +
9756 +@@ -535,7 +537,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
9757 + {
9758 + struct hhf_sched_data *q = qdisc_priv(sch);
9759 + struct nlattr *tb[TCA_HHF_MAX + 1];
9760 +- unsigned int qlen;
9761 ++ unsigned int qlen, prev_backlog;
9762 + int err;
9763 + u64 non_hh_quantum;
9764 + u32 new_quantum = q->quantum;
9765 +@@ -585,12 +587,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
9766 + }
9767 +
9768 + qlen = sch->q.qlen;
9769 ++ prev_backlog = sch->qstats.backlog;
9770 + while (sch->q.qlen > sch->limit) {
9771 + struct sk_buff *skb = hhf_dequeue(sch);
9772 +
9773 + kfree_skb(skb);
9774 + }
9775 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
9776 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
9777 ++ prev_backlog - sch->qstats.backlog);
9778 +
9779 + sch_tree_unlock(sch);
9780 + return 0;
9781 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
9782 +index f1acb0f60dc3..ccff00640713 100644
9783 +--- a/net/sched/sch_htb.c
9784 ++++ b/net/sched/sch_htb.c
9785 +@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9786 + htb_activate(q, cl);
9787 + }
9788 +
9789 ++ qdisc_qstats_backlog_inc(sch, skb);
9790 + sch->q.qlen++;
9791 + return NET_XMIT_SUCCESS;
9792 + }
9793 +@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
9794 + ok:
9795 + qdisc_bstats_update(sch, skb);
9796 + qdisc_unthrottled(sch);
9797 ++ qdisc_qstats_backlog_dec(sch, skb);
9798 + sch->q.qlen--;
9799 + return skb;
9800 + }
9801 +@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
9802 + unsigned int len;
9803 + if (cl->un.leaf.q->ops->drop &&
9804 + (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
9805 ++ sch->qstats.backlog -= len;
9806 + sch->q.qlen--;
9807 + if (!cl->un.leaf.q->q.qlen)
9808 + htb_deactivate(q, cl);
9809 +@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
9810 + }
9811 + cl->prio_activity = 0;
9812 + cl->cmode = HTB_CAN_SEND;
9813 +-
9814 + }
9815 + }
9816 + qdisc_watchdog_cancel(&q->watchdog);
9817 + __skb_queue_purge(&q->direct_queue);
9818 + sch->q.qlen = 0;
9819 ++ sch->qstats.backlog = 0;
9820 + memset(q->hlevel, 0, sizeof(q->hlevel));
9821 + memset(q->row_mask, 0, sizeof(q->row_mask));
9822 + for (i = 0; i < TC_HTB_NUMPRIO; i++)
9823 +@@ -1165,14 +1168,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
9824 + cl->common.classid)) == NULL)
9825 + return -ENOBUFS;
9826 +
9827 +- sch_tree_lock(sch);
9828 +- *old = cl->un.leaf.q;
9829 +- cl->un.leaf.q = new;
9830 +- if (*old != NULL) {
9831 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
9832 +- qdisc_reset(*old);
9833 +- }
9834 +- sch_tree_unlock(sch);
9835 ++ *old = qdisc_replace(sch, new, &cl->un.leaf.q);
9836 + return 0;
9837 + }
9838 +
9839 +@@ -1274,7 +1270,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
9840 + {
9841 + struct htb_sched *q = qdisc_priv(sch);
9842 + struct htb_class *cl = (struct htb_class *)arg;
9843 +- unsigned int qlen;
9844 + struct Qdisc *new_q = NULL;
9845 + int last_child = 0;
9846 +
9847 +@@ -1294,9 +1289,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
9848 + sch_tree_lock(sch);
9849 +
9850 + if (!cl->level) {
9851 +- qlen = cl->un.leaf.q->q.qlen;
9852 ++ unsigned int qlen = cl->un.leaf.q->q.qlen;
9853 ++ unsigned int backlog = cl->un.leaf.q->qstats.backlog;
9854 ++
9855 + qdisc_reset(cl->un.leaf.q);
9856 +- qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
9857 ++ qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
9858 + }
9859 +
9860 + /* delete from hash and active; remainder in destroy_class */
9861 +@@ -1430,10 +1427,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
9862 + sch_tree_lock(sch);
9863 + if (parent && !parent->level) {
9864 + unsigned int qlen = parent->un.leaf.q->q.qlen;
9865 ++ unsigned int backlog = parent->un.leaf.q->qstats.backlog;
9866 +
9867 + /* turn parent into inner node */
9868 + qdisc_reset(parent->un.leaf.q);
9869 +- qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
9870 ++ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
9871 + qdisc_destroy(parent->un.leaf.q);
9872 + if (parent->prio_activity)
9873 + htb_deactivate(q, parent);
9874 +diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
9875 +index 42dd218871e0..23437d62a8db 100644
9876 +--- a/net/sched/sch_multiq.c
9877 ++++ b/net/sched/sch_multiq.c
9878 +@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
9879 + if (q->queues[i] != &noop_qdisc) {
9880 + struct Qdisc *child = q->queues[i];
9881 + q->queues[i] = &noop_qdisc;
9882 +- qdisc_tree_decrease_qlen(child, child->q.qlen);
9883 ++ qdisc_tree_reduce_backlog(child, child->q.qlen,
9884 ++ child->qstats.backlog);
9885 + qdisc_destroy(child);
9886 + }
9887 + }
9888 +@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
9889 + q->queues[i] = child;
9890 +
9891 + if (old != &noop_qdisc) {
9892 +- qdisc_tree_decrease_qlen(old,
9893 +- old->q.qlen);
9894 ++ qdisc_tree_reduce_backlog(old,
9895 ++ old->q.qlen,
9896 ++ old->qstats.backlog);
9897 + qdisc_destroy(old);
9898 + }
9899 + sch_tree_unlock(sch);
9900 +@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
9901 + if (new == NULL)
9902 + new = &noop_qdisc;
9903 +
9904 +- sch_tree_lock(sch);
9905 +- *old = q->queues[band];
9906 +- q->queues[band] = new;
9907 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
9908 +- qdisc_reset(*old);
9909 +- sch_tree_unlock(sch);
9910 +-
9911 ++ *old = qdisc_replace(sch, new, &q->queues[band]);
9912 + return 0;
9913 + }
9914 +
9915 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
9916 +index b34331967e02..f60db2b3e5ef 100644
9917 +--- a/net/sched/sch_netem.c
9918 ++++ b/net/sched/sch_netem.c
9919 +@@ -408,6 +408,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
9920 + sch->q.qlen++;
9921 + }
9922 +
9923 ++/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
9924 ++ * when we statistically choose to corrupt one, we instead segment it, returning
9925 ++ * the first packet to be corrupted, and re-enqueue the remaining frames
9926 ++ */
9927 ++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
9928 ++{
9929 ++ struct sk_buff *segs;
9930 ++ netdev_features_t features = netif_skb_features(skb);
9931 ++
9932 ++ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
9933 ++
9934 ++ if (IS_ERR_OR_NULL(segs)) {
9935 ++ qdisc_reshape_fail(skb, sch);
9936 ++ return NULL;
9937 ++ }
9938 ++ consume_skb(skb);
9939 ++ return segs;
9940 ++}
9941 ++
9942 + /*
9943 + * Insert one skb into qdisc.
9944 + * Note: parent depends on return value to account for queue length.
9945 +@@ -420,7 +439,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9946 + /* We don't fill cb now as skb_unshare() may invalidate it */
9947 + struct netem_skb_cb *cb;
9948 + struct sk_buff *skb2;
9949 ++ struct sk_buff *segs = NULL;
9950 ++ unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
9951 ++ int nb = 0;
9952 + int count = 1;
9953 ++ int rc = NET_XMIT_SUCCESS;
9954 +
9955 + /* Random duplication */
9956 + if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
9957 +@@ -466,10 +489,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9958 + * do it now in software before we mangle it.
9959 + */
9960 + if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
9961 ++ if (skb_is_gso(skb)) {
9962 ++ segs = netem_segment(skb, sch);
9963 ++ if (!segs)
9964 ++ return NET_XMIT_DROP;
9965 ++ } else {
9966 ++ segs = skb;
9967 ++ }
9968 ++
9969 ++ skb = segs;
9970 ++ segs = segs->next;
9971 ++
9972 + if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
9973 + (skb->ip_summed == CHECKSUM_PARTIAL &&
9974 +- skb_checksum_help(skb)))
9975 +- return qdisc_drop(skb, sch);
9976 ++ skb_checksum_help(skb))) {
9977 ++ rc = qdisc_drop(skb, sch);
9978 ++ goto finish_segs;
9979 ++ }
9980 +
9981 + skb->data[prandom_u32() % skb_headlen(skb)] ^=
9982 + 1<<(prandom_u32() % 8);
9983 +@@ -529,6 +565,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9984 + sch->qstats.requeues++;
9985 + }
9986 +
9987 ++finish_segs:
9988 ++ if (segs) {
9989 ++ while (segs) {
9990 ++ skb2 = segs->next;
9991 ++ segs->next = NULL;
9992 ++ qdisc_skb_cb(segs)->pkt_len = segs->len;
9993 ++ last_len = segs->len;
9994 ++ rc = qdisc_enqueue(segs, sch);
9995 ++ if (rc != NET_XMIT_SUCCESS) {
9996 ++ if (net_xmit_drop_count(rc))
9997 ++ qdisc_qstats_drop(sch);
9998 ++ } else {
9999 ++ nb++;
10000 ++ len += last_len;
10001 ++ }
10002 ++ segs = skb2;
10003 ++ }
10004 ++ sch->q.qlen += nb;
10005 ++ if (nb > 1)
10006 ++ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
10007 ++ }
10008 + return NET_XMIT_SUCCESS;
10009 + }
10010 +
10011 +@@ -610,7 +667,8 @@ deliver:
10012 + if (unlikely(err != NET_XMIT_SUCCESS)) {
10013 + if (net_xmit_drop_count(err)) {
10014 + qdisc_qstats_drop(sch);
10015 +- qdisc_tree_decrease_qlen(sch, 1);
10016 ++ qdisc_tree_reduce_backlog(sch, 1,
10017 ++ qdisc_pkt_len(skb));
10018 + }
10019 + }
10020 + goto tfifo_dequeue;
10021 +@@ -1049,15 +1107,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10022 + {
10023 + struct netem_sched_data *q = qdisc_priv(sch);
10024 +
10025 +- sch_tree_lock(sch);
10026 +- *old = q->qdisc;
10027 +- q->qdisc = new;
10028 +- if (*old) {
10029 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
10030 +- qdisc_reset(*old);
10031 +- }
10032 +- sch_tree_unlock(sch);
10033 +-
10034 ++ *old = qdisc_replace(sch, new, &q->qdisc);
10035 + return 0;
10036 + }
10037 +
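
[The netem corruption path above now copes with GSO: flipping a byte
in a GSO "megapacket" would only ever touch the head segment's linear
data and would invalidate the offloaded checksum, so the skb is
segmented first, the first segment is corrupted, and the rest are
re-enqueued under finish_segs. The parent already accounted one packet
of prev_len bytes at enqueue time, while nb segments totalling len
bytes were actually queued, so the tree is corrected with deltas that
may be negative:

    /* n = 1 - nb (parent gained nb - 1 packets),
     * backlog delta = prev_len - len (segments repeat headers, so
     * len usually exceeds prev_len).  Both values are only ever
     * subtracted from the counters, so the unsigned wraparound of a
     * negative delta produces the intended increase. */
    if (nb > 1)
            qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);]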
10038 +diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
10039 +index b783a446d884..71ae3b9629f9 100644
10040 +--- a/net/sched/sch_pie.c
10041 ++++ b/net/sched/sch_pie.c
10042 +@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
10043 + {
10044 + struct pie_sched_data *q = qdisc_priv(sch);
10045 + struct nlattr *tb[TCA_PIE_MAX + 1];
10046 +- unsigned int qlen;
10047 ++ unsigned int qlen, dropped = 0;
10048 + int err;
10049 +
10050 + if (!opt)
10051 +@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
10052 + while (sch->q.qlen > sch->limit) {
10053 + struct sk_buff *skb = __skb_dequeue(&sch->q);
10054 +
10055 ++ dropped += qdisc_pkt_len(skb);
10056 + qdisc_qstats_backlog_dec(sch, skb);
10057 + qdisc_drop(skb, sch);
10058 + }
10059 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
10060 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
10061 +
10062 + sch_tree_unlock(sch);
10063 + return 0;
10064 +diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
10065 +index 8e5cd34aaa74..e671b1a4e815 100644
10066 +--- a/net/sched/sch_prio.c
10067 ++++ b/net/sched/sch_prio.c
10068 +@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
10069 + struct Qdisc *child = q->queues[i];
10070 + q->queues[i] = &noop_qdisc;
10071 + if (child != &noop_qdisc) {
10072 +- qdisc_tree_decrease_qlen(child, child->q.qlen);
10073 ++ qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
10074 + qdisc_destroy(child);
10075 + }
10076 + }
10077 +@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
10078 + q->queues[i] = child;
10079 +
10080 + if (old != &noop_qdisc) {
10081 +- qdisc_tree_decrease_qlen(old,
10082 +- old->q.qlen);
10083 ++ qdisc_tree_reduce_backlog(old,
10084 ++ old->q.qlen,
10085 ++ old->qstats.backlog);
10086 + qdisc_destroy(old);
10087 + }
10088 + sch_tree_unlock(sch);
10089 +@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10090 + if (new == NULL)
10091 + new = &noop_qdisc;
10092 +
10093 +- sch_tree_lock(sch);
10094 +- *old = q->queues[band];
10095 +- q->queues[band] = new;
10096 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
10097 +- qdisc_reset(*old);
10098 +- sch_tree_unlock(sch);
10099 +-
10100 ++ *old = qdisc_replace(sch, new, &q->queues[band]);
10101 + return 0;
10102 + }
10103 +
10104 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
10105 +index 3ec7e88a43ca..e2b8fd47008b 100644
10106 +--- a/net/sched/sch_qfq.c
10107 ++++ b/net/sched/sch_qfq.c
10108 +@@ -221,9 +221,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
10109 + static void qfq_purge_queue(struct qfq_class *cl)
10110 + {
10111 + unsigned int len = cl->qdisc->q.qlen;
10112 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
10113 +
10114 + qdisc_reset(cl->qdisc);
10115 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
10116 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
10117 + }
10118 +
10119 + static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
10120 +@@ -619,11 +620,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
10121 + new = &noop_qdisc;
10122 + }
10123 +
10124 +- sch_tree_lock(sch);
10125 +- qfq_purge_queue(cl);
10126 +- *old = cl->qdisc;
10127 +- cl->qdisc = new;
10128 +- sch_tree_unlock(sch);
10129 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
10130 + return 0;
10131 + }
10132 +
10133 +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
10134 +index 6c0534cc7758..8c0508c0e287 100644
10135 +--- a/net/sched/sch_red.c
10136 ++++ b/net/sched/sch_red.c
10137 +@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
10138 + q->flags = ctl->flags;
10139 + q->limit = ctl->limit;
10140 + if (child) {
10141 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
10142 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
10143 ++ q->qdisc->qstats.backlog);
10144 + qdisc_destroy(q->qdisc);
10145 + q->qdisc = child;
10146 + }
10147 +@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10148 + if (new == NULL)
10149 + new = &noop_qdisc;
10150 +
10151 +- sch_tree_lock(sch);
10152 +- *old = q->qdisc;
10153 +- q->qdisc = new;
10154 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
10155 +- qdisc_reset(*old);
10156 +- sch_tree_unlock(sch);
10157 ++ *old = qdisc_replace(sch, new, &q->qdisc);
10158 + return 0;
10159 + }
10160 +
10161 +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
10162 +index 5819dd82630d..e1d634e3c255 100644
10163 +--- a/net/sched/sch_sfb.c
10164 ++++ b/net/sched/sch_sfb.c
10165 +@@ -518,7 +518,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
10166 +
10167 + sch_tree_lock(sch);
10168 +
10169 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
10170 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
10171 ++ q->qdisc->qstats.backlog);
10172 + qdisc_destroy(q->qdisc);
10173 + q->qdisc = child;
10174 +
10175 +@@ -614,12 +615,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10176 + if (new == NULL)
10177 + new = &noop_qdisc;
10178 +
10179 +- sch_tree_lock(sch);
10180 +- *old = q->qdisc;
10181 +- q->qdisc = new;
10182 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
10183 +- qdisc_reset(*old);
10184 +- sch_tree_unlock(sch);
10185 ++ *old = qdisc_replace(sch, new, &q->qdisc);
10186 + return 0;
10187 + }
10188 +
10189 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
10190 +index b877140beda5..4417fb25166f 100644
10191 +--- a/net/sched/sch_sfq.c
10192 ++++ b/net/sched/sch_sfq.c
10193 +@@ -369,7 +369,7 @@ static int
10194 + sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
10195 + {
10196 + struct sfq_sched_data *q = qdisc_priv(sch);
10197 +- unsigned int hash;
10198 ++ unsigned int hash, dropped;
10199 + sfq_index x, qlen;
10200 + struct sfq_slot *slot;
10201 + int uninitialized_var(ret);
10202 +@@ -484,7 +484,7 @@ enqueue:
10203 + return NET_XMIT_SUCCESS;
10204 +
10205 + qlen = slot->qlen;
10206 +- sfq_drop(sch);
10207 ++ dropped = sfq_drop(sch);
10208 + /* Return Congestion Notification only if we dropped a packet
10209 + * from this flow.
10210 + */
10211 +@@ -492,7 +492,7 @@ enqueue:
10212 + return NET_XMIT_CN;
10213 +
10214 + /* As we dropped a packet, better let upper stack know this */
10215 +- qdisc_tree_decrease_qlen(sch, 1);
10216 ++ qdisc_tree_reduce_backlog(sch, 1, dropped);
10217 + return NET_XMIT_SUCCESS;
10218 + }
10219 +
10220 +@@ -560,6 +560,7 @@ static void sfq_rehash(struct Qdisc *sch)
10221 + struct sfq_slot *slot;
10222 + struct sk_buff_head list;
10223 + int dropped = 0;
10224 ++ unsigned int drop_len = 0;
10225 +
10226 + __skb_queue_head_init(&list);
10227 +
10228 +@@ -588,6 +589,7 @@ static void sfq_rehash(struct Qdisc *sch)
10229 + if (x >= SFQ_MAX_FLOWS) {
10230 + drop:
10231 + qdisc_qstats_backlog_dec(sch, skb);
10232 ++ drop_len += qdisc_pkt_len(skb);
10233 + kfree_skb(skb);
10234 + dropped++;
10235 + continue;
10236 +@@ -617,7 +619,7 @@ drop:
10237 + }
10238 + }
10239 + sch->q.qlen -= dropped;
10240 +- qdisc_tree_decrease_qlen(sch, dropped);
10241 ++ qdisc_tree_reduce_backlog(sch, dropped, drop_len);
10242 + }
10243 +
10244 + static void sfq_perturbation(unsigned long arg)
10245 +@@ -641,7 +643,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
10246 + struct sfq_sched_data *q = qdisc_priv(sch);
10247 + struct tc_sfq_qopt *ctl = nla_data(opt);
10248 + struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
10249 +- unsigned int qlen;
10250 ++ unsigned int qlen, dropped = 0;
10251 + struct red_parms *p = NULL;
10252 +
10253 + if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
10254 +@@ -690,8 +692,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
10255 +
10256 + qlen = sch->q.qlen;
10257 + while (sch->q.qlen > q->limit)
10258 +- sfq_drop(sch);
10259 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
10260 ++ dropped += sfq_drop(sch);
10261 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
10262 +
10263 + del_timer(&q->perturb_timer);
10264 + if (q->perturb_period) {
10265 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
10266 +index a4afde14e865..c2fbde742f37 100644
10267 +--- a/net/sched/sch_tbf.c
10268 ++++ b/net/sched/sch_tbf.c
10269 +@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
10270 + struct tbf_sched_data *q = qdisc_priv(sch);
10271 + struct sk_buff *segs, *nskb;
10272 + netdev_features_t features = netif_skb_features(skb);
10273 ++ unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
10274 + int ret, nb;
10275 +
10276 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
10277 +@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
10278 + nskb = segs->next;
10279 + segs->next = NULL;
10280 + qdisc_skb_cb(segs)->pkt_len = segs->len;
10281 ++ len += segs->len;
10282 + ret = qdisc_enqueue(segs, q->qdisc);
10283 + if (ret != NET_XMIT_SUCCESS) {
10284 + if (net_xmit_drop_count(ret))
10285 +@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
10286 + }
10287 + sch->q.qlen += nb;
10288 + if (nb > 1)
10289 +- qdisc_tree_decrease_qlen(sch, 1 - nb);
10290 ++ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
10291 + consume_skb(skb);
10292 + return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
10293 + }
10294 +@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
10295 +
10296 + sch_tree_lock(sch);
10297 + if (child) {
10298 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
10299 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
10300 ++ q->qdisc->qstats.backlog);
10301 + qdisc_destroy(q->qdisc);
10302 + q->qdisc = child;
10303 + }
10304 +@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10305 + if (new == NULL)
10306 + new = &noop_qdisc;
10307 +
10308 +- sch_tree_lock(sch);
10309 +- *old = q->qdisc;
10310 +- q->qdisc = new;
10311 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
10312 +- qdisc_reset(*old);
10313 +- sch_tree_unlock(sch);
10314 +-
10315 ++ *old = qdisc_replace(sch, new, &q->qdisc);
10316 + return 0;
10317 + }
10318 +
10319 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
10320 +index 3267a5cbb3e8..18361cbfc882 100644
10321 +--- a/net/sctp/ipv6.c
10322 ++++ b/net/sctp/ipv6.c
10323 +@@ -519,6 +519,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
10324 + }
10325 + return 0;
10326 + }
10327 ++ if (addr1->v6.sin6_port != addr2->v6.sin6_port)
10328 ++ return 0;
10329 + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
10330 + return 0;
10331 + /* If this is a linklocal address, compare the scope_id. */
10332 +diff --git a/net/socket.c b/net/socket.c
10333 +index 02fc7c8ea9ed..7f61789c78ff 100644
10334 +--- a/net/socket.c
10335 ++++ b/net/socket.c
10336 +@@ -2410,31 +2410,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
10337 + break;
10338 + }
10339 +
10340 +-out_put:
10341 +- fput_light(sock->file, fput_needed);
10342 +-
10343 + if (err == 0)
10344 +- return datagrams;
10345 ++ goto out_put;
10346 +
10347 +- if (datagrams != 0) {
10348 ++ if (datagrams == 0) {
10349 ++ datagrams = err;
10350 ++ goto out_put;
10351 ++ }
10352 ++
10353 ++ /*
10354 ++ * We may return less entries than requested (vlen) if the
10355 ++ * sock is non block and there aren't enough datagrams...
10356 ++ */
10357 ++ if (err != -EAGAIN) {
10358 + /*
10359 +- * We may return less entries than requested (vlen) if the
10360 +- * sock is non block and there aren't enough datagrams...
10361 ++ * ... or if recvmsg returns an error after we
10362 ++ * received some datagrams, where we record the
10363 ++ * error to return on the next call or if the
10364 ++ * app asks about it using getsockopt(SO_ERROR).
10365 + */
10366 +- if (err != -EAGAIN) {
10367 +- /*
10368 +- * ... or if recvmsg returns an error after we
10369 +- * received some datagrams, where we record the
10370 +- * error to return on the next call or if the
10371 +- * app asks about it using getsockopt(SO_ERROR).
10372 +- */
10373 +- sock->sk->sk_err = -err;
10374 +- }
10375 +-
10376 +- return datagrams;
10377 ++ sock->sk->sk_err = -err;
10378 + }
10379 ++out_put:
10380 ++ fput_light(sock->file, fput_needed);
10381 +
10382 +- return err;
10383 ++ return datagrams;
10384 + }
10385 +
10386 + SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
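
[The __sys_recvmmsg() restructuring above is a use-after-free fix, not
just tidying: the old code ran fput_light() first and then, on the
error path, wrote sock->sk->sk_err. With the file reference already
dropped, a concurrent close() can free the socket before that store
lands. The rewrite does all bookkeeping first and drops the reference
as the very last step, and every exit now returns the count of
datagrams received, parking any error in sk_err for the next call or
getsockopt(SO_ERROR). The safe ordering in miniature:

    if (datagrams != 0 && err != -EAGAIN)
            sock->sk->sk_err = -err;  /* sk still pinned via the file */
    fput_light(sock->file, fput_needed);  /* drop the reference last */
    return datagrams;]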
10387 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
10388 +index 14d38ec5e53d..11e7b55f04e2 100644
10389 +--- a/net/sunrpc/cache.c
10390 ++++ b/net/sunrpc/cache.c
10391 +@@ -1187,14 +1187,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
10392 + }
10393 +
10394 + crq->q.reader = 0;
10395 +- crq->item = cache_get(h);
10396 + crq->buf = buf;
10397 + crq->len = 0;
10398 + crq->readers = 0;
10399 + spin_lock(&queue_lock);
10400 +- if (test_bit(CACHE_PENDING, &h->flags))
10401 ++ if (test_bit(CACHE_PENDING, &h->flags)) {
10402 ++ crq->item = cache_get(h);
10403 + list_add_tail(&crq->q.list, &detail->queue);
10404 +- else
10405 ++ } else
10406 + /* Lost a race, no longer PENDING, so don't enqueue */
10407 + ret = -EAGAIN;
10408 + spin_unlock(&queue_lock);
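
[The cache upcall fix moves cache_get() inside the locked region and
onto the branch that actually queues the request: previously the
reference was taken unconditionally, so the lost-race -EAGAIN path
returned with nobody owning the extra reference, leaking the
cache_head. The general shape — take a reference only where its owner
is published:

    spin_lock(&queue_lock);
    if (test_bit(CACHE_PENDING, &h->flags)) {
            crq->item = cache_get(h);   /* ref now owned by the queue */
            list_add_tail(&crq->q.list, &detail->queue);
    } else
            ret = -EAGAIN;              /* no ref taken, nothing leaks */
    spin_unlock(&queue_lock);]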
10409 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
10410 +index ae46f0198608..7b738b53f061 100644
10411 +--- a/net/sunrpc/clnt.c
10412 ++++ b/net/sunrpc/clnt.c
10413 +@@ -439,7 +439,7 @@ out_no_rpciod:
10414 + return ERR_PTR(err);
10415 + }
10416 +
10417 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
10418 ++static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
10419 + struct rpc_xprt *xprt)
10420 + {
10421 + struct rpc_clnt *clnt = NULL;
10422 +@@ -471,7 +471,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
10423 +
10424 + return clnt;
10425 + }
10426 +-EXPORT_SYMBOL_GPL(rpc_create_xprt);
10427 +
10428 + /**
10429 + * rpc_create - create an RPC client and transport with one call
10430 +@@ -497,6 +496,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
10431 + };
10432 + char servername[48];
10433 +
10434 ++ if (args->bc_xprt) {
10435 ++ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
10436 ++ xprt = args->bc_xprt->xpt_bc_xprt;
10437 ++ if (xprt) {
10438 ++ xprt_get(xprt);
10439 ++ return rpc_create_xprt(args, xprt);
10440 ++ }
10441 ++ }
10442 ++
10443 + if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
10444 + xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
10445 + if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
10446 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
10447 +index 20d752634efb..287087b10a7e 100644
10448 +--- a/net/unix/af_unix.c
10449 ++++ b/net/unix/af_unix.c
10450 +@@ -305,7 +305,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
10451 + &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
10452 + struct dentry *dentry = unix_sk(s)->path.dentry;
10453 +
10454 +- if (dentry && dentry->d_inode == i) {
10455 ++ if (dentry && d_backing_inode(dentry) == i) {
10456 + sock_hold(s);
10457 + goto found;
10458 + }
10459 +@@ -898,7 +898,7 @@ static struct sock *unix_find_other(struct net *net,
10460 + err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
10461 + if (err)
10462 + goto fail;
10463 +- inode = path.dentry->d_inode;
10464 ++ inode = d_backing_inode(path.dentry);
10465 + err = inode_permission(inode, MAY_WRITE);
10466 + if (err)
10467 + goto put_fail;
10468 +@@ -940,32 +940,20 @@ fail:
10469 + return NULL;
10470 + }
10471 +
10472 +-static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
10473 ++static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
10474 ++ struct path *res)
10475 + {
10476 +- struct dentry *dentry;
10477 +- struct path path;
10478 +- int err = 0;
10479 +- /*
10480 +- * Get the parent directory, calculate the hash for last
10481 +- * component.
10482 +- */
10483 +- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
10484 +- err = PTR_ERR(dentry);
10485 +- if (IS_ERR(dentry))
10486 +- return err;
10487 ++ int err;
10488 +
10489 +- /*
10490 +- * All right, let's create it.
10491 +- */
10492 +- err = security_path_mknod(&path, dentry, mode, 0);
10493 ++ err = security_path_mknod(path, dentry, mode, 0);
10494 + if (!err) {
10495 +- err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
10496 ++ err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
10497 + if (!err) {
10498 +- res->mnt = mntget(path.mnt);
10499 ++ res->mnt = mntget(path->mnt);
10500 + res->dentry = dget(dentry);
10501 + }
10502 + }
10503 +- done_path_create(&path, dentry);
10504 ++
10505 + return err;
10506 + }
10507 +
10508 +@@ -976,10 +964,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
10509 + struct unix_sock *u = unix_sk(sk);
10510 + struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
10511 + char *sun_path = sunaddr->sun_path;
10512 +- int err;
10513 ++ int err, name_err;
10514 + unsigned int hash;
10515 + struct unix_address *addr;
10516 + struct hlist_head *list;
10517 ++ struct path path;
10518 ++ struct dentry *dentry;
10519 +
10520 + err = -EINVAL;
10521 + if (sunaddr->sun_family != AF_UNIX)
10522 +@@ -995,14 +985,34 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
10523 + goto out;
10524 + addr_len = err;
10525 +
10526 ++ name_err = 0;
10527 ++ dentry = NULL;
10528 ++ if (sun_path[0]) {
10529 ++ /* Get the parent directory, calculate the hash for last
10530 ++ * component.
10531 ++ */
10532 ++ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
10533 ++
10534 ++ if (IS_ERR(dentry)) {
10535 ++ /* delay report until after 'already bound' check */
10536 ++ name_err = PTR_ERR(dentry);
10537 ++ dentry = NULL;
10538 ++ }
10539 ++ }
10540 ++
10541 + err = mutex_lock_interruptible(&u->readlock);
10542 + if (err)
10543 +- goto out;
10544 ++ goto out_path;
10545 +
10546 + err = -EINVAL;
10547 + if (u->addr)
10548 + goto out_up;
10549 +
10550 ++ if (name_err) {
10551 ++ err = name_err == -EEXIST ? -EADDRINUSE : name_err;
10552 ++ goto out_up;
10553 ++ }
10554 ++
10555 + err = -ENOMEM;
10556 + addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
10557 + if (!addr)
10558 +@@ -1013,11 +1023,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
10559 + addr->hash = hash ^ sk->sk_type;
10560 + atomic_set(&addr->refcnt, 1);
10561 +
10562 +- if (sun_path[0]) {
10563 +- struct path path;
10564 ++ if (dentry) {
10565 ++ struct path u_path;
10566 + umode_t mode = S_IFSOCK |
10567 + (SOCK_INODE(sock)->i_mode & ~current_umask());
10568 +- err = unix_mknod(sun_path, mode, &path);
10569 ++ err = unix_mknod(dentry, &path, mode, &u_path);
10570 + if (err) {
10571 + if (err == -EEXIST)
10572 + err = -EADDRINUSE;
10573 +@@ -1025,9 +1035,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
10574 + goto out_up;
10575 + }
10576 + addr->hash = UNIX_HASH_SIZE;
10577 +- hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
10578 ++ hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
10579 + spin_lock(&unix_table_lock);
10580 +- u->path = path;
10581 ++ u->path = u_path;
10582 + list = &unix_socket_table[hash];
10583 + } else {
10584 + spin_lock(&unix_table_lock);
10585 +@@ -1050,6 +1060,10 @@ out_unlock:
10586 + spin_unlock(&unix_table_lock);
10587 + out_up:
10588 + mutex_unlock(&u->readlock);
10589 ++out_path:
10590 ++ if (dentry)
10591 ++ done_path_create(&path, dentry);
10592 ++
10593 + out:
10594 + return err;
10595 + }
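
[The unix_bind() rework above fixes a lock-ordering deadlock:
unix_mknod() used to call kern_path_create(), which takes the parent
directory's i_mutex, while already holding u->readlock, and the splice
path into a unix socket can end up taking the two lock classes in the
opposite order. Path creation is now resolved before readlock is
acquired (with the error stashed in name_err so the "already bound"
check still takes precedence) and released after it is dropped — a
sketch of the ordering, details elided:

    dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); /* fs lock */
    err = mutex_lock_interruptible(&u->readlock);  /* then the socket */
    /* bind work happens here */
    mutex_unlock(&u->readlock);
    done_path_create(&path, dentry);               /* release in reverse */]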
10596 +diff --git a/net/unix/diag.c b/net/unix/diag.c
10597 +index 86fa0f3b2caf..9d4218fc0a61 100644
10598 +--- a/net/unix/diag.c
10599 ++++ b/net/unix/diag.c
10600 +@@ -25,7 +25,7 @@ static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
10601 +
10602 + if (dentry) {
10603 + struct unix_diag_vfs uv = {
10604 +- .udiag_vfs_ino = dentry->d_inode->i_ino,
10605 ++ .udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
10606 + .udiag_vfs_dev = dentry->d_sb->s_dev,
10607 + };
10608 +
10609 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
10610 +index 85d232bed87d..e8d3313ea2c9 100644
10611 +--- a/net/vmw_vsock/af_vsock.c
10612 ++++ b/net/vmw_vsock/af_vsock.c
10613 +@@ -1796,27 +1796,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
10614 + else if (sk->sk_shutdown & RCV_SHUTDOWN)
10615 + err = 0;
10616 +
10617 +- if (copied > 0) {
10618 +- /* We only do these additional bookkeeping/notification steps
10619 +- * if we actually copied something out of the queue pair
10620 +- * instead of just peeking ahead.
10621 +- */
10622 +-
10623 +- if (!(flags & MSG_PEEK)) {
10624 +- /* If the other side has shutdown for sending and there
10625 +- * is nothing more to read, then modify the socket
10626 +- * state.
10627 +- */
10628 +- if (vsk->peer_shutdown & SEND_SHUTDOWN) {
10629 +- if (vsock_stream_has_data(vsk) <= 0) {
10630 +- sk->sk_state = SS_UNCONNECTED;
10631 +- sock_set_flag(sk, SOCK_DONE);
10632 +- sk->sk_state_change(sk);
10633 +- }
10634 +- }
10635 +- }
10636 ++ if (copied > 0)
10637 + err = copied;
10638 +- }
10639 +
10640 + out_wait:
10641 + finish_wait(sk_sleep(sk), &wait);
10642 +diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
10643 +index 7ecd04c21360..997ff7b2509b 100644
10644 +--- a/net/x25/x25_facilities.c
10645 ++++ b/net/x25/x25_facilities.c
10646 +@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
10647 +
10648 + memset(&theirs, 0, sizeof(theirs));
10649 + memcpy(new, ours, sizeof(*new));
10650 ++ memset(dte, 0, sizeof(*dte));
10651 +
10652 + len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
10653 + if (len < 0)
10654 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
10655 +index 00977873300c..d0e6d351aba8 100644
10656 +--- a/net/xfrm/xfrm_input.c
10657 ++++ b/net/xfrm/xfrm_input.c
10658 +@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
10659 + XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
10660 +
10661 + skb_dst_force(skb);
10662 ++ dev_hold(skb->dev);
10663 +
10664 + nexthdr = x->type->input(x, skb);
10665 +
10666 + if (nexthdr == -EINPROGRESS)
10667 + return 0;
10668 + resume:
10669 ++ dev_put(skb->dev);
10670 ++
10671 + spin_lock(&x->lock);
10672 + if (nexthdr <= 0) {
10673 + if (nexthdr == -EBADMSG) {
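
[The xfrm_input() hunk pins skb->dev across the asynchronous case:
x->type->input() may return -EINPROGRESS and complete later from a
crypto callback, by which point the device could be on its way out.
Taking dev_hold() before the call and dropping it at the resume label
brackets the async window on both the synchronous and deferred paths:

    dev_hold(skb->dev);                /* pin across async crypto */
    nexthdr = x->type->input(x, skb);
    if (nexthdr == -EINPROGRESS)
            return 0;                  /* callback re-enters at resume */
    resume:
    dev_put(skb->dev);                 /* both paths drop it here */]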
10674 +diff --git a/security/keys/key.c b/security/keys/key.c
10675 +index e17ba6aefdc0..f8bde20bed5d 100644
10676 +--- a/security/keys/key.c
10677 ++++ b/security/keys/key.c
10678 +@@ -580,7 +580,7 @@ int key_reject_and_link(struct key *key,
10679 +
10680 + mutex_unlock(&key_construction_mutex);
10681 +
10682 +- if (keyring)
10683 ++ if (keyring && link_ret == 0)
10684 + __key_link_end(keyring, &key->index_key, edit);
10685 +
10686 + /* wake up anyone waiting for a key to be constructed */
10687 +diff --git a/sound/core/timer.c b/sound/core/timer.c
10688 +index 4927a3c88340..2fd0dccf8505 100644
10689 +--- a/sound/core/timer.c
10690 ++++ b/sound/core/timer.c
10691 +@@ -1053,8 +1053,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
10692 + njiff += timer->sticks - priv->correction;
10693 + priv->correction = 0;
10694 + }
10695 +- priv->last_expires = priv->tlist.expires = njiff;
10696 +- add_timer(&priv->tlist);
10697 ++ priv->last_expires = njiff;
10698 ++ mod_timer(&priv->tlist, njiff);
10699 + return 0;
10700 + }
10701 +
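
[The snd_timer_s_start() change swaps add_timer() for mod_timer():
calling add_timer() on a timer that is already pending corrupts the
timer list (and BUGs with timer debugging enabled), which is reachable
here when a start races a still-armed system timer. mod_timer() is the
idempotent form:

    /* behaves like: delete if pending, set expiry, re-add */
    priv->last_expires = njiff;
    mod_timer(&priv->tlist, njiff);]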
10702 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
10703 +index 387bb8f603ac..35cc884bca6b 100644
10704 +--- a/sound/drivers/dummy.c
10705 ++++ b/sound/drivers/dummy.c
10706 +@@ -422,6 +422,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
10707 +
10708 + static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
10709 + {
10710 ++ hrtimer_cancel(&dpcm->timer);
10711 + tasklet_kill(&dpcm->tasklet);
10712 + }
10713 +
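
[The dummy-driver sync fix above is about teardown order: the hrtimer
callback schedules the tasklet, so killing the tasklet first leaves a
window in which a late timer callback re-raises it after tasklet_kill()
has returned. Cancelling the producer before the consumer closes that
window:

    hrtimer_cancel(&dpcm->timer);   /* waits out a running callback;
                                     * no new tasklet after this */
    tasklet_kill(&dpcm->tasklet);   /* now final */]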
10714 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
10715 +index 5b2c7fe0ac85..77bd16739dbd 100644
10716 +--- a/sound/pci/hda/patch_realtek.c
10717 ++++ b/sound/pci/hda/patch_realtek.c
10718 +@@ -5544,8 +5544,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
10719 + {0x15, 0x0221401f}, \
10720 + {0x1a, 0x411111f0}, \
10721 + {0x1b, 0x411111f0}, \
10722 +- {0x1d, 0x40700001}, \
10723 +- {0x1e, 0x411111f0}
10724 ++ {0x1d, 0x40700001}
10725 +
10726 + #define ALC298_STANDARD_PINS \
10727 + {0x18, 0x411111f0}, \
10728 +@@ -5813,35 +5812,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
10729 + {0x13, 0x411111f0},
10730 + {0x16, 0x01014020},
10731 + {0x18, 0x411111f0},
10732 +- {0x19, 0x01a19030}),
10733 ++ {0x19, 0x01a19030},
10734 ++ {0x1e, 0x411111f0}),
10735 + SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
10736 + ALC292_STANDARD_PINS,
10737 + {0x12, 0x90a60140},
10738 + {0x13, 0x411111f0},
10739 + {0x16, 0x01014020},
10740 + {0x18, 0x02a19031},
10741 +- {0x19, 0x01a1903e}),
10742 ++ {0x19, 0x01a1903e},
10743 ++ {0x1e, 0x411111f0}),
10744 + SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
10745 + ALC292_STANDARD_PINS,
10746 + {0x12, 0x90a60140},
10747 + {0x13, 0x411111f0},
10748 + {0x16, 0x411111f0},
10749 + {0x18, 0x411111f0},
10750 +- {0x19, 0x411111f0}),
10751 ++ {0x19, 0x411111f0},
10752 ++ {0x1e, 0x411111f0}),
10753 + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
10754 + ALC292_STANDARD_PINS,
10755 + {0x12, 0x40000000},
10756 + {0x13, 0x90a60140},
10757 + {0x16, 0x21014020},
10758 + {0x18, 0x411111f0},
10759 +- {0x19, 0x21a19030}),
10760 ++ {0x19, 0x21a19030},
10761 ++ {0x1e, 0x411111f0}),
10762 + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
10763 + ALC292_STANDARD_PINS,
10764 + {0x12, 0x40000000},
10765 + {0x13, 0x90a60140},
10766 + {0x16, 0x411111f0},
10767 + {0x18, 0x411111f0},
10768 +- {0x19, 0x411111f0}),
10769 ++ {0x19, 0x411111f0},
10770 ++ {0x1e, 0x411111f0}),
10771 ++ SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
10772 ++ ALC292_STANDARD_PINS,
10773 ++ {0x12, 0x40000000},
10774 ++ {0x13, 0x90a60140},
10775 ++ {0x16, 0x21014020},
10776 ++ {0x18, 0x411111f0},
10777 ++ {0x19, 0x21a19030},
10778 ++ {0x1e, 0x411111ff}),
10779 + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
10780 + ALC298_STANDARD_PINS,
10781 + {0x12, 0x90a60130},
10782 +diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
10783 +index 4b5c17f8507e..9bb976e7d609 100644
10784 +--- a/sound/soc/codecs/ssm4567.c
10785 ++++ b/sound/soc/codecs/ssm4567.c
10786 +@@ -206,6 +206,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
10787 + regcache_cache_only(ssm4567->regmap, !enable);
10788 +
10789 + if (enable) {
10790 ++ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
10791 ++ 0x00);
10792 ++ if (ret)
10793 ++ return ret;
10794 ++
10795 + ret = regmap_update_bits(ssm4567->regmap,
10796 + SSM4567_REG_POWER_CTRL,
10797 + SSM4567_POWER_SPWDN, 0x00);
10798 +diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
10799 +index e1615113fd84..98e3b9313d55 100644
10800 +--- a/sound/soc/samsung/ac97.c
10801 ++++ b/sound/soc/samsung/ac97.c
10802 +@@ -324,7 +324,7 @@ static const struct snd_soc_component_driver s3c_ac97_component = {
10803 +
10804 + static int s3c_ac97_probe(struct platform_device *pdev)
10805 + {
10806 +- struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
10807 ++ struct resource *mem_res, *irq_res;
10808 + struct s3c_audio_pdata *ac97_pdata;
10809 + int ret;
10810 +
10811 +@@ -335,24 +335,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
10812 + }
10813 +
10814 + /* Check for availability of necessary resource */
10815 +- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
10816 +- if (!dmatx_res) {
10817 +- dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
10818 +- return -ENXIO;
10819 +- }
10820 +-
10821 +- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
10822 +- if (!dmarx_res) {
10823 +- dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
10824 +- return -ENXIO;
10825 +- }
10826 +-
10827 +- dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
10828 +- if (!dmamic_res) {
10829 +- dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
10830 +- return -ENXIO;
10831 +- }
10832 +-
10833 + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
10834 + if (!irq_res) {
10835 + dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
10836 +@@ -364,11 +346,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
10837 + if (IS_ERR(s3c_ac97.regs))
10838 + return PTR_ERR(s3c_ac97.regs);
10839 +
10840 +- s3c_ac97_pcm_out.channel = dmatx_res->start;
10841 ++ s3c_ac97_pcm_out.slave = ac97_pdata->dma_playback;
10842 + s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
10843 +- s3c_ac97_pcm_in.channel = dmarx_res->start;
10844 ++ s3c_ac97_pcm_in.slave = ac97_pdata->dma_capture;
10845 + s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
10846 +- s3c_ac97_mic_in.channel = dmamic_res->start;
10847 ++ s3c_ac97_mic_in.slave = ac97_pdata->dma_capture_mic;
10848 + s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
10849 +
10850 + init_completion(&s3c_ac97.done);
10851 +diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
10852 +index 0e85dcfec023..085ef30f5ca2 100644
10853 +--- a/sound/soc/samsung/dma.h
10854 ++++ b/sound/soc/samsung/dma.h
10855 +@@ -15,7 +15,7 @@
10856 + #include <sound/dmaengine_pcm.h>
10857 +
10858 + struct s3c_dma_params {
10859 +- int channel; /* Channel ID */
10860 ++ void *slave; /* Channel ID */
10861 + dma_addr_t dma_addr;
10862 + int dma_size; /* Size of the DMA transfer */
10863 + char *ch_name;
10864 +diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
10865 +index 506f5bf6d082..727008d57d14 100644
10866 +--- a/sound/soc/samsung/dmaengine.c
10867 ++++ b/sound/soc/samsung/dmaengine.c
10868 +@@ -50,14 +50,14 @@ void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
10869 +
10870 + if (playback) {
10871 + playback_data = &playback->dma_data;
10872 +- playback_data->filter_data = (void *)playback->channel;
10873 ++ playback_data->filter_data = playback->slave;
10874 + playback_data->chan_name = playback->ch_name;
10875 + playback_data->addr = playback->dma_addr;
10876 + playback_data->addr_width = playback->dma_size;
10877 + }
10878 + if (capture) {
10879 + capture_data = &capture->dma_data;
10880 +- capture_data->filter_data = (void *)capture->channel;
10881 ++ capture_data->filter_data = capture->slave;
10882 + capture_data->chan_name = capture->ch_name;
10883 + capture_data->addr = capture->dma_addr;
10884 + capture_data->addr_width = capture->dma_size;
10885 +diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
10886 +index 9d513473b300..f2ce03dbf02f 100644
10887 +--- a/sound/soc/samsung/i2s.c
10888 ++++ b/sound/soc/samsung/i2s.c
10889 +@@ -1167,27 +1167,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
10890 + }
10891 +
10892 + if (!np) {
10893 +- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
10894 +- if (!res) {
10895 +- dev_err(&pdev->dev,
10896 +- "Unable to get I2S-TX dma resource\n");
10897 +- return -ENXIO;
10898 +- }
10899 +- pri_dai->dma_playback.channel = res->start;
10900 +-
10901 +- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
10902 +- if (!res) {
10903 +- dev_err(&pdev->dev,
10904 +- "Unable to get I2S-RX dma resource\n");
10905 +- return -ENXIO;
10906 +- }
10907 +- pri_dai->dma_capture.channel = res->start;
10908 +-
10909 + if (i2s_pdata == NULL) {
10910 + dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
10911 + return -EINVAL;
10912 + }
10913 +
10914 ++ pri_dai->dma_playback.slave = i2s_pdata->dma_playback;
10915 ++ pri_dai->dma_capture.slave = i2s_pdata->dma_capture;
10916 ++
10917 + if (&i2s_pdata->type)
10918 + i2s_cfg = &i2s_pdata->type.i2s;
10919 +
10920 +@@ -1242,11 +1229,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
10921 + sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
10922 + sec_dai->dma_playback.ch_name = "tx-sec";
10923 +
10924 +- if (!np) {
10925 +- res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
10926 +- if (res)
10927 +- sec_dai->dma_playback.channel = res->start;
10928 +- }
10929 ++ if (!np)
10930 ++ sec_dai->dma_playback.slave = i2s_pdata->dma_play_sec;
10931 +
10932 + sec_dai->dma_playback.dma_size = 4;
10933 + sec_dai->base = regs_base;
10934 +diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
10935 +index bac034b15a27..9e6e33b17213 100644
10936 +--- a/sound/soc/samsung/pcm.c
10937 ++++ b/sound/soc/samsung/pcm.c
10938 +@@ -486,7 +486,7 @@ static const struct snd_soc_component_driver s3c_pcm_component = {
10939 + static int s3c_pcm_dev_probe(struct platform_device *pdev)
10940 + {
10941 + struct s3c_pcm_info *pcm;
10942 +- struct resource *mem_res, *dmatx_res, *dmarx_res;
10943 ++ struct resource *mem_res;
10944 + struct s3c_audio_pdata *pcm_pdata;
10945 + int ret;
10946 +
10947 +@@ -499,18 +499,6 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
10948 + pcm_pdata = pdev->dev.platform_data;
10949 +
10950 + /* Check for availability of necessary resource */
10951 +- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
10952 +- if (!dmatx_res) {
10953 +- dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
10954 +- return -ENXIO;
10955 +- }
10956 +-
10957 +- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
10958 +- if (!dmarx_res) {
10959 +- dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
10960 +- return -ENXIO;
10961 +- }
10962 +-
10963 + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
10964 + if (!mem_res) {
10965 + dev_err(&pdev->dev, "Unable to get register resource\n");
10966 +@@ -568,8 +556,10 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
10967 + s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
10968 + + S3C_PCM_TXFIFO;
10969 +
10970 +- s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
10971 +- s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;
10972 ++ if (pcm_pdata) {
10973 ++ s3c_pcm_stereo_in[pdev->id].slave = pcm_pdata->dma_capture;
10974 ++ s3c_pcm_stereo_out[pdev->id].slave = pcm_pdata->dma_playback;
10975 ++ }
10976 +
10977 + pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
10978 + pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
10979 +diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
10980 +index df65c5b494b1..b6ab3fc5789e 100644
10981 +--- a/sound/soc/samsung/s3c-i2s-v2.c
10982 ++++ b/sound/soc/samsung/s3c-i2s-v2.c
10983 +@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
10984 + #endif
10985 +
10986 + int s3c_i2sv2_register_component(struct device *dev, int id,
10987 +- struct snd_soc_component_driver *cmp_drv,
10988 ++ const struct snd_soc_component_driver *cmp_drv,
10989 + struct snd_soc_dai_driver *dai_drv)
10990 + {
10991 + struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
10992 +diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
10993 +index 90abab364b49..d0684145ed1f 100644
10994 +--- a/sound/soc/samsung/s3c-i2s-v2.h
10995 ++++ b/sound/soc/samsung/s3c-i2s-v2.h
10996 +@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
10997 + * soc core.
10998 + */
10999 + extern int s3c_i2sv2_register_component(struct device *dev, int id,
11000 +- struct snd_soc_component_driver *cmp_drv,
11001 ++ const struct snd_soc_component_driver *cmp_drv,
11002 + struct snd_soc_dai_driver *dai_drv);
11003 +
11004 + #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
11005 +diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
11006 +index 27b339c6580e..e3650d1497ec 100644
11007 +--- a/sound/soc/samsung/s3c2412-i2s.c
11008 ++++ b/sound/soc/samsung/s3c2412-i2s.c
11009 +@@ -34,13 +34,13 @@
11010 + #include "s3c2412-i2s.h"
11011 +
11012 + static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
11013 +- .channel = DMACH_I2S_OUT,
11014 ++ .slave = (void *)(uintptr_t)DMACH_I2S_OUT,
11015 + .ch_name = "tx",
11016 + .dma_size = 4,
11017 + };
11018 +
11019 + static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
11020 +- .channel = DMACH_I2S_IN,
11021 ++ .slave = (void *)(uintptr_t)DMACH_I2S_IN,
11022 + .ch_name = "rx",
11023 + .dma_size = 4,
11024 + };
11025 +diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
11026 +index fb1d39324a65..60be3985a4ce 100644
11027 +--- a/sound/soc/samsung/s3c24xx-i2s.c
11028 ++++ b/sound/soc/samsung/s3c24xx-i2s.c
11029 +@@ -32,13 +32,13 @@
11030 + #include "s3c24xx-i2s.h"
11031 +
11032 + static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
11033 +- .channel = DMACH_I2S_OUT,
11034 ++ .slave = (void *)(uintptr_t)DMACH_I2S_OUT,
11035 + .ch_name = "tx",
11036 + .dma_size = 2,
11037 + };
11038 +
11039 + static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
11040 +- .channel = DMACH_I2S_IN,
11041 ++ .slave = (void *)(uintptr_t)DMACH_I2S_IN,
11042 + .ch_name = "rx",
11043 + .dma_size = 2,
11044 + };
11045 +diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
11046 +index d7d2e208f486..1de2686819cb 100644
11047 +--- a/sound/soc/samsung/spdif.c
11048 ++++ b/sound/soc/samsung/spdif.c
11049 +@@ -359,7 +359,7 @@ static const struct snd_soc_component_driver samsung_spdif_component = {
11050 + static int spdif_probe(struct platform_device *pdev)
11051 + {
11052 + struct s3c_audio_pdata *spdif_pdata;
11053 +- struct resource *mem_res, *dma_res;
11054 ++ struct resource *mem_res;
11055 + struct samsung_spdif_info *spdif;
11056 + int ret;
11057 +
11058 +@@ -367,12 +367,6 @@ static int spdif_probe(struct platform_device *pdev)
11059 +
11060 + dev_dbg(&pdev->dev, "Entered %s\n", __func__);
11061 +
11062 +- dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
11063 +- if (!dma_res) {
11064 +- dev_err(&pdev->dev, "Unable to get dma resource.\n");
11065 +- return -ENXIO;
11066 +- }
11067 +-
11068 + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11069 + if (!mem_res) {
11070 + dev_err(&pdev->dev, "Unable to get register resource.\n");
11071 +@@ -432,7 +426,7 @@ static int spdif_probe(struct platform_device *pdev)
11072 +
11073 + spdif_stereo_out.dma_size = 2;
11074 + spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
11075 +- spdif_stereo_out.channel = dma_res->start;
11076 ++ spdif_stereo_out.slave = spdif_pdata ? spdif_pdata->dma_playback : NULL;
11077 +
11078 + spdif->dma_playback = &spdif_stereo_out;
11079 +
11080 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
11081 +index 982c2df6d0b5..7a44d07c797d 100644
11082 +--- a/sound/usb/quirks.c
11083 ++++ b/sound/usb/quirks.c
11084 +@@ -138,6 +138,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
11085 + usb_audio_err(chip, "cannot memdup\n");
11086 + return -ENOMEM;
11087 + }
11088 ++ INIT_LIST_HEAD(&fp->list);
11089 + if (fp->nr_rates > MAX_NR_RATES) {
11090 + kfree(fp);
11091 + return -EINVAL;
11092 +@@ -155,23 +156,18 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
11093 + stream = (fp->endpoint & USB_DIR_IN)
11094 + ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
11095 + err = snd_usb_add_audio_stream(chip, stream, fp);
11096 +- if (err < 0) {
11097 +- kfree(fp);
11098 +- kfree(rate_table);
11099 +- return err;
11100 +- }
11101 ++ if (err < 0)
11102 ++ goto error;
11103 + if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
11104 + fp->altset_idx >= iface->num_altsetting) {
11105 +- kfree(fp);
11106 +- kfree(rate_table);
11107 +- return -EINVAL;
11108 ++ err = -EINVAL;
11109 ++ goto error;
11110 + }
11111 + alts = &iface->altsetting[fp->altset_idx];
11112 + altsd = get_iface_desc(alts);
11113 + if (altsd->bNumEndpoints < 1) {
11114 +- kfree(fp);
11115 +- kfree(rate_table);
11116 +- return -EINVAL;
11117 ++ err = -EINVAL;
11118 ++ goto error;
11119 + }
11120 +
11121 + fp->protocol = altsd->bInterfaceProtocol;
11122 +@@ -184,6 +180,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
11123 + snd_usb_init_pitch(chip, fp->iface, alts, fp);
11124 + snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
11125 + return 0;
11126 ++
11127 ++ error:
11128 ++ list_del(&fp->list); /* unlink to avoid double-free */
11129 ++ kfree(fp);
11130 ++ kfree(rate_table);
11131 ++ return err;
11132 + }
11133 +
11134 + static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
11135 +@@ -456,6 +458,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
11136 + fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
11137 + fp->datainterval = 0;
11138 + fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
11139 ++ INIT_LIST_HEAD(&fp->list);
11140 +
11141 + switch (fp->maxpacksize) {
11142 + case 0x120:
11143 +@@ -479,6 +482,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
11144 + ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
11145 + err = snd_usb_add_audio_stream(chip, stream, fp);
11146 + if (err < 0) {
11147 ++ list_del(&fp->list); /* unlink to avoid double-free */
11148 + kfree(fp);
11149 + return err;
11150 + }
11151 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
11152 +index 310a3822d2b7..25e8075f9ea3 100644
11153 +--- a/sound/usb/stream.c
11154 ++++ b/sound/usb/stream.c
11155 +@@ -315,7 +315,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
11156 + /*
11157 + * add this endpoint to the chip instance.
11158 + * if a stream with the same endpoint already exists, append to it.
11159 +- * if not, create a new pcm stream.
11160 ++ * if not, create a new pcm stream. note that fp is added to the substream
11161 ++ * fmt_list and will be freed when the chip instance is released. do not
11162 ++ * free fp or remove it from the substream fmt_list, to avoid a double-free.
11163 + */
11164 + int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
11165 + int stream,
11166 +@@ -668,6 +670,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
11167 + * (fp->maxpacksize & 0x7ff);
11168 + fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
11169 + fp->clock = clock;
11170 ++ INIT_LIST_HEAD(&fp->list);
11171 +
11172 + /* some quirks for attributes here */
11173 +
11174 +@@ -716,6 +719,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
11175 + dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
11176 + err = snd_usb_add_audio_stream(chip, stream, fp);
11177 + if (err < 0) {
11178 ++ list_del(&fp->list); /* unlink to avoid double-free */
11179 + kfree(fp->rate_table);
11180 + kfree(fp->chmap);
11181 + kfree(fp);
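
All three list_del() additions above enforce the ownership rule spelled
out in the snd_usb_add_audio_stream() comment: once fp is linked into the
substream fmt_list, teardown of the chip instance frees it, so an error
path must unlink the node before kfree()ing it. A self-contained
user-space sketch of that rule, with tiny stand-ins for the kernel's list
helpers (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct audioformat { struct list_head list; int rate; };

int main(void)
{
	struct list_head fmt_list;         /* owned by the "chip" */
	struct audioformat *fp = malloc(sizeof(*fp));

	INIT_LIST_HEAD(&fmt_list);
	INIT_LIST_HEAD(&fp->list);         /* as the patch now does */
	list_add(&fp->list, &fmt_list);    /* the list takes ownership */

	/* error path: unlink first, then free -- otherwise the later
	 * teardown walk over fmt_list would reach (and free) fp again */
	list_del(&fp->list);
	free(fp);

	printf("list empty again: %d\n", fmt_list.next == &fmt_list); /* 1 */
	return 0;
}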
11182 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
11183 +index 29ee857c09c6..6f9fbb44cd19 100644
11184 +--- a/tools/perf/Documentation/perf-stat.txt
11185 ++++ b/tools/perf/Documentation/perf-stat.txt
11186 +@@ -50,6 +50,14 @@ OPTIONS
11187 + --scale::
11188 + scale/normalize counter values
11189 +
11190 ++-d::
11191 ++--detailed::
11192 ++ print more detailed statistics; can be specified up to 3 times
11193 ++
11194 ++ -d: detailed events, L1 and LLC data cache
11195 ++ -d -d: more detailed events, dTLB and iTLB events
11196 ++ -d -d -d: very detailed events, adding prefetch events
11197 ++
11198 + -r::
11199 + --repeat=<n>::
11200 + repeat command and print average + stddev (max: 100). 0 means forever.
11201 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
11202 +index 4af6b279e34a..da4c17cccb71 100644
11203 +--- a/tools/perf/util/event.c
11204 ++++ b/tools/perf/util/event.c
11205 +@@ -197,7 +197,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
11206 + strcpy(execname, "");
11207 +
11208 + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
11209 +- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
11210 ++ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
11211 + &event->mmap2.start, &event->mmap2.len, prot,
11212 + &event->mmap2.pgoff, &event->mmap2.maj,
11213 + &event->mmap2.min,
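
The conversion change above matters when the pathname column itself
contains blanks (for instance the " (deleted)" suffix that
/proc/<pid>/maps appends to unlinked files): %s stops at the first
whitespace, while %[^\n] consumes the rest of the line. A compilable
sketch of the difference (the sample line is made up):

#include <stdio.h>

int main(void)
{
	const char line[] =
		"00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat (deleted)";
	char word[64], rest[64];

	sscanf(line, "%*s %*s %*s %*s %*s %63s", word);      /* stops at blank */
	sscanf(line, "%*s %*s %*s %*s %*s %63[^\n]", rest);  /* rest of line   */

	printf("%%s     -> \"%s\"\n", word);  /* "/bin/cat"           */
	printf("%%[^\\n] -> \"%s\"\n", rest); /* "/bin/cat (deleted)" */
	return 0;
}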
11214 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
11215 +index 2c9d47fbc498..7d32b4e82e86 100644
11216 +--- a/virt/kvm/kvm_main.c
11217 ++++ b/virt/kvm/kvm_main.c
11218 +@@ -2559,7 +2559,7 @@ static long kvm_vm_ioctl(struct file *filp,
11219 + if (copy_from_user(&routing, argp, sizeof(routing)))
11220 + goto out;
11221 + r = -EINVAL;
11222 +- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
11223 ++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
11224 + goto out;
11225 + if (routing.flags)
11226 + goto out;
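
The relaxed bound above fixes an off-by-one: routing.nr is a count of
entries, so asking for exactly KVM_MAX_IRQ_ROUTES of them fills the table
and is legitimate; only a strictly larger request must be refused.
Schematically (the constant's value here is illustrative):

#include <stdio.h>

#define KVM_MAX_IRQ_ROUTES 1024            /* illustrative value only */

int main(void)
{
	unsigned int nr = KVM_MAX_IRQ_ROUTES;   /* a full, valid table */

	printf("old check rejects it: %d\n", nr >= KVM_MAX_IRQ_ROUTES); /* 1 */
	printf("new check rejects it: %d\n", nr >  KVM_MAX_IRQ_ROUTES); /* 0 */
	return 0;
}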