From: "Tom Wijsman (tomwij)" <tomwij@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2714 - genpatches-2.6/trunk/3.12
Date: Wed, 26 Mar 2014 23:50:33
Message-Id: 20140326235015.D46052004E@flycatcher.gentoo.org
Author: tomwij
Date: 2014-03-26 23:50:15 +0000 (Wed, 26 Mar 2014)
New Revision: 2714

Added:
genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch
Modified:
genpatches-2.6/trunk/3.12/0000_README
Log:
Linux patch 3.12.15.

Modified: genpatches-2.6/trunk/3.12/0000_README
===================================================================
--- genpatches-2.6/trunk/3.12/0000_README 2014-03-24 13:48:24 UTC (rev 2713)
+++ genpatches-2.6/trunk/3.12/0000_README 2014-03-26 23:50:15 UTC (rev 2714)
@@ -98,6 +98,10 @@
From: http://www.kernel.org
Desc: Linux 3.12.14

+Patch: 1014_linux-3.12.15.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.15
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch
===================================================================
--- genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch (rev 0)
+++ genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch 2014-03-26 23:50:15 UTC (rev 2714)
@@ -0,0 +1,7386 @@
+diff --git a/Makefile b/Makefile
+index 5d38a5a79b3a..517391a3093e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
+index f33679d2d3ee..50e1d850ee2e 100644
+--- a/arch/arm/mach-sa1100/include/mach/collie.h
++++ b/arch/arm/mach-sa1100/include/mach/collie.h
+@@ -13,6 +13,8 @@
+ #ifndef __ASM_ARCH_COLLIE_H
+ #define __ASM_ARCH_COLLIE_H
+
++#include "hardware.h" /* Gives GPIO_MAX */
++
+ extern void locomolcd_power(int on);
+
+ #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 0b27b6574296..965c28ff7b3b 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -136,10 +136,10 @@ extern struct page *empty_zero_page;
+ /*
+ * The following only work if pte_present(). Undefined behaviour otherwise.
+ */
+-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
+-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
+-#define pte_young(pte) (pte_val(pte) & PTE_AF)
+-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
++#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
++#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
++#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
++#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+ #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+ #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
+
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index e0331414c7d6..86479bbf4714 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -14,6 +14,7 @@
+ #define _ASM_MIPSREGS_H
+
+ #include <linux/linkage.h>
++#include <linux/types.h>
+ #include <asm/hazards.h>
+ #include <asm/war.h>
+
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index 599545738af3..c2dcfaa51987 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -478,13 +478,6 @@ BEGIN_FTR_SECTION_NESTED(945) \
+ std ra,TASKTHREADPPR(rb); \
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
+
+-#define RESTORE_PPR(ra, rb) \
+-BEGIN_FTR_SECTION_NESTED(946) \
+- ld ra,PACACURRENT(r13); \
+- ld rb,TASKTHREADPPR(ra); \
+- mtspr SPRN_PPR,rb; /* Restore PPR */ \
+-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
+-
+ #endif
+
+ /*
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index c04cdf70d487..7be37170fda7 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -820,6 +820,12 @@ fast_exception_return:
+ andi. r0,r3,MSR_RI
+ beq- unrecov_restore
+
++ /* Load PPR from thread struct before we clear MSR:RI */
++BEGIN_FTR_SECTION
++ ld r2,PACACURRENT(r13)
++ ld r2,TASKTHREADPPR(r2)
++END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
++
+ /*
+ * Clear RI before restoring r13. If we are returning to
+ * userspace and we take an exception after restoring r13,
+@@ -840,8 +846,10 @@ fast_exception_return:
+ */
+ andi. r0,r3,MSR_PR
+ beq 1f
++BEGIN_FTR_SECTION
++ mtspr SPRN_PPR,r2 /* Restore PPR */
++END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ACCOUNT_CPU_USER_EXIT(r2, r4)
+- RESTORE_PPR(r2, r4)
+ REST_GPR(13, r1)
+ 1:
+ mtspr SPRN_SRR1,r3
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 96d2fdf3aa9e..aa75b2beba7d 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -928,6 +928,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ flush_altivec_to_thread(src);
+ flush_vsx_to_thread(src);
+ flush_spe_to_thread(src);
++ /*
++ * Flush TM state out so we can copy it. __switch_to_tm() does this
++ * flush but it removes the checkpointed state from the current CPU and
++ * transitions the CPU out of TM mode. Hence we need to call
++ * tm_recheckpoint_new_task() (on the same task) to restore the
++ * checkpointed state back and the TM mode.
++ */
++ __switch_to_tm(src);
++ tm_recheckpoint_new_task(src);
+
+ *dst = *src;
+
+diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
+index b47a0e1ab001..c712ecec13ba 100644
+--- a/arch/powerpc/kernel/reloc_64.S
++++ b/arch/powerpc/kernel/reloc_64.S
+@@ -81,6 +81,7 @@ _GLOBAL(relocate)
+
+ 6: blr
+
++.balign 8
+p_dyn: .llong __dynamic_start - 0b
+p_rela: .llong __rela_dyn_start - 0b
+p_st: .llong _stext - 0b
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 7143793859fa..3e01afa21710 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -100,7 +100,7 @@ config S390
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_SMP_IDLE_THREAD
+- select GENERIC_TIME_VSYSCALL_OLD
++ select GENERIC_TIME_VSYSCALL
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+ select HAVE_ARCH_SECCOMP_FILTER
+diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
+index 87a22092b68f..6c0281f30d44 100644
+--- a/arch/s390/appldata/appldata_base.c
++++ b/arch/s390/appldata/appldata_base.c
+@@ -527,6 +527,7 @@ static int __init appldata_init(void)
+ {
+ int rc;
+
++ init_virt_timer(&appldata_timer);
+ appldata_timer.function = appldata_timer_function;
+ appldata_timer.data = (unsigned long) &appldata_work;
+
+diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
+index a73eb2e1e918..bc9746a7d47c 100644
+--- a/arch/s390/include/asm/vdso.h
++++ b/arch/s390/include/asm/vdso.h
+@@ -26,8 +26,9 @@ struct vdso_data {
+ __u64 wtom_clock_nsec; /* 0x28 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
+ __u32 tz_dsttime; /* Type of dst correction 0x34 */
+- __u32 ectg_available;
+- __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
++ __u32 ectg_available; /* ECTG instruction present 0x38 */
++ __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
++ __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
+ };
+
+ struct vdso_per_cpu_data {
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 2416138ebd3e..496116cd65ec 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -65,7 +65,8 @@ int main(void)
+ DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+ DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+- DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
++ DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
++ DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
+ DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+ DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
+ /* constants used by the vdso */
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 064c3082ab33..dd95f1631621 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ }
+
+-static int s390_next_ktime(ktime_t expires,
++static int s390_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+ {
+- struct timespec ts;
+- u64 nsecs;
+-
+- ts.tv_sec = ts.tv_nsec = 0;
+- monotonic_to_bootbased(&ts);
+- nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+- do_div(nsecs, 125);
+- S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+- /* Program the maximum value if we have an overflow (== year 2042) */
+- if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+- S390_lowcore.clock_comparator = -1ULL;
++ S390_lowcore.clock_comparator = get_tod_clock() + delta;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return 0;
+ }
+@@ -146,15 +136,14 @@ void init_cpu_timer(void)
+ cpu = smp_processor_id();
+ cd = &per_cpu(comparators, cpu);
+ cd->name = "comparator";
+- cd->features = CLOCK_EVT_FEAT_ONESHOT |
+- CLOCK_EVT_FEAT_KTIME;
++ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ cd->mult = 16777;
+ cd->shift = 12;
+ cd->min_delta_ns = 1;
+ cd->max_delta_ns = LONG_MAX;
+ cd->rating = 400;
+ cd->cpumask = cpumask_of(cpu);
+- cd->set_next_ktime = s390_next_ktime;
++ cd->set_next_event = s390_next_event;
+ cd->set_mode = s390_set_mode;
+
+ clockevents_register_device(cd);
+@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
+ return &clocksource_tod;
+ }
+
+-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
+- struct clocksource *clock, u32 mult)
++void update_vsyscall(struct timekeeper *tk)
+ {
+- if (clock != &clocksource_tod)
++ u64 nsecps;
++
++ if (tk->clock != &clocksource_tod)
+ return;
+
+ /* Make userspace gettimeofday spin until we're done. */
+ ++vdso_data->tb_update_count;
+ smp_wmb();
+- vdso_data->xtime_tod_stamp = clock->cycle_last;
+- vdso_data->xtime_clock_sec = wall_time->tv_sec;
+- vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
+- vdso_data->wtom_clock_sec = wtm->tv_sec;
+- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
+- vdso_data->ntp_mult = mult;
++ vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
++ vdso_data->xtime_clock_sec = tk->xtime_sec;
++ vdso_data->xtime_clock_nsec = tk->xtime_nsec;
++ vdso_data->wtom_clock_sec =
++ tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
++ vdso_data->wtom_clock_nsec = tk->xtime_nsec +
++ + (tk->wall_to_monotonic.tv_nsec << tk->shift);
++ nsecps = (u64) NSEC_PER_SEC << tk->shift;
++ while (vdso_data->wtom_clock_nsec >= nsecps) {
++ vdso_data->wtom_clock_nsec -= nsecps;
++ vdso_data->wtom_clock_sec++;
++ }
++ vdso_data->tk_mult = tk->mult;
++ vdso_data->tk_shift = tk->shift;
+ smp_wmb();
+ ++vdso_data->tb_update_count;
+ }
+diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
+index b2224e0b974c..5be8e472f57d 100644
+--- a/arch/s390/kernel/vdso32/clock_gettime.S
++++ b/arch/s390/kernel/vdso32/clock_gettime.S
+@@ -38,25 +38,26 @@ __kernel_clock_gettime:
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,2f
+ ahi %r0,-1
+-2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
++2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ lr %r2,%r0
+- l %r0,__VDSO_NTP_MULT(%r5)
++ l %r0,__VDSO_TK_MULT(%r5)
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 3f
+- a %r0,__VDSO_NTP_MULT(%r5)
++ a %r0,__VDSO_TK_MULT(%r5)
+ 3: alr %r0,%r2
+- srdl %r0,12
+- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
++ al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,4f
+ ahi %r0,1
+-4: l %r2,__VDSO_XTIME_SEC+4(%r5)
+- al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
++4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
+ al %r1,__VDSO_WTOM_NSEC+4(%r5)
+ brc 12,5f
+ ahi %r0,1
+-5: al %r2,__VDSO_WTOM_SEC+4(%r5)
++5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srdl %r0,0(%r2) /* >> tk->shift */
++ l %r2,__VDSO_XTIME_SEC+4(%r5)
++ al %r2,__VDSO_WTOM_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
+ basr %r5,0
+@@ -86,20 +87,21 @@ __kernel_clock_gettime:
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,12f
+ ahi %r0,-1
+-12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
++12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ lr %r2,%r0
+- l %r0,__VDSO_NTP_MULT(%r5)
++ l %r0,__VDSO_TK_MULT(%r5)
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 13f
+- a %r0,__VDSO_NTP_MULT(%r5)
++ a %r0,__VDSO_TK_MULT(%r5)
+ 13: alr %r0,%r2
+- srdl %r0,12
+- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
++ al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,14f
+ ahi %r0,1
+-14: l %r2,__VDSO_XTIME_SEC+4(%r5)
++14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srdl %r0,0(%r2) /* >> tk->shift */
++ l %r2,__VDSO_XTIME_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 11b
+ basr %r5,0
+diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
+index 2d3633175e3b..fd621a950f7c 100644
+--- a/arch/s390/kernel/vdso32/gettimeofday.S
++++ b/arch/s390/kernel/vdso32/gettimeofday.S
+@@ -35,15 +35,14 @@ __kernel_gettimeofday:
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,3f
+ ahi %r0,-1
+-3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
++3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ st %r0,24(%r15)
+- l %r0,__VDSO_NTP_MULT(%r5)
++ l %r0,__VDSO_TK_MULT(%r5)
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 4f
+- a %r0,__VDSO_NTP_MULT(%r5)
++ a %r0,__VDSO_TK_MULT(%r5)
+ 4: al %r0,24(%r15)
+- srdl %r0,12
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,5f
+@@ -51,6 +50,8 @@ __kernel_gettimeofday:
+ 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
++ l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srdl %r0,0(%r4) /* >> tk->shift */
+ l %r4,24(%r15) /* get tv_sec from stack */
+ basr %r5,0
+ 6: ltr %r0,%r0
+diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
+index d46c95ed5f19..0add1072ba30 100644
+--- a/arch/s390/kernel/vdso64/clock_gettime.S
++++ b/arch/s390/kernel/vdso64/clock_gettime.S
+@@ -34,14 +34,15 @@ __kernel_clock_gettime:
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 0b
+ stck 48(%r15) /* Store TOD clock */
++ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
++ alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
+- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+- lg %r0,__VDSO_XTIME_SEC(%r5)
+- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
+- alg %r0,__VDSO_WTOM_SEC(%r5)
++ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
++ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
++ alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
++ srlg %r1,%r1,0(%r2) /* >> tk->shift */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
+ larl %r5,13f
+@@ -62,12 +63,13 @@ __kernel_clock_gettime:
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 5b
+ stck 48(%r15) /* Store TOD clock */
++ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
+- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+- lg %r0,__VDSO_XTIME_SEC(%r5)
++ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
++ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
++ srlg %r1,%r1,0(%r2) /* >> tk->shift */
++ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 5b
+ larl %r5,13f
+diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
+index 36ee674722ec..d0860d1d0ccc 100644
+--- a/arch/s390/kernel/vdso64/gettimeofday.S
++++ b/arch/s390/kernel/vdso64/gettimeofday.S
+@@ -31,12 +31,13 @@ __kernel_gettimeofday:
+ stck 48(%r15) /* Store TOD clock */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
+- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
+- lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
++ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
++ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
++ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
++ lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
++ srlg %r1,%r1,0(%r5) /* >> tk->shift */
+ larl %r5,5f
+ 2: clg %r1,0(%r5)
+ jl 3f
+diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
+index 2c37aadcbc35..32ce71375b21 100644
+--- a/arch/x86/include/asm/kdebug.h
++++ b/arch/x86/include/asm/kdebug.h
+@@ -21,7 +21,7 @@ enum die_val {
+ DIE_NMIUNKNOWN,
+ };
+
+-extern void printk_address(unsigned long address, int reliable);
++extern void printk_address(unsigned long address);
+ extern void die(const char *, struct pt_regs *,long);
+ extern int __must_check __die(const char *, struct pt_regs *, long);
+ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index deb6421c9e69..d9c12d3022a7 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -25,12 +25,17 @@ unsigned int code_bytes = 64;
+ int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
+ static int die_counter;
+
+-void printk_address(unsigned long address, int reliable)
++static void printk_stack_address(unsigned long address, int reliable)
+ {
+ pr_cont(" [<%p>] %s%pB\n",
+ (void *)address, reliable ? "" : "? ", (void *)address);
+ }
+
++void printk_address(unsigned long address)
++{
++ pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
++}
++
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+@@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+ touch_nmi_watchdog();
+ printk(data);
+- printk_address(addr, reliable);
++ printk_stack_address(addr, reliable);
+ }
+
+ static const struct stacktrace_ops print_trace_ops = {
+@@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+ #else
+ /* Executive summary in case the oops scrolled away */
+ printk(KERN_ALERT "RIP ");
+- printk_address(regs->ip, 1);
++ printk_address(regs->ip);
+ printk(" RSP <%016lx>\n", regs->sp);
+ #endif
+ return 0;
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 81ba27679f18..f36bd42d6f0c 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
+ /* This is global to keep gas from relaxing the jumps */
+ ENTRY(early_idt_handler)
+ cld
++
++ cmpl $2,(%esp) # X86_TRAP_NMI
++ je is_nmi # Ignore NMI
++
+ cmpl $2,%ss:early_recursion_flag
+ je hlt_loop
+ incl %ss:early_recursion_flag
+@@ -594,8 +598,9 @@ ex_entry:
+ pop %edx
+ pop %ecx
+ pop %eax
+- addl $8,%esp /* drop vector number and error code */
+ decl %ss:early_recursion_flag
++is_nmi:
++ addl $8,%esp /* drop vector number and error code */
+ iret
+ ENDPROC(early_idt_handler)
+
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index e1aabdb314c8..a468c0a65c42 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -343,6 +343,9 @@ early_idt_handlers:
+ ENTRY(early_idt_handler)
+ cld
+
++ cmpl $2,(%rsp) # X86_TRAP_NMI
++ je is_nmi # Ignore NMI
++
+ cmpl $2,early_recursion_flag(%rip)
+ jz 1f
+ incl early_recursion_flag(%rip)
+@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
+ popq %rdx
+ popq %rcx
+ popq %rax
+- addq $16,%rsp # drop vector number and error code
+ decl early_recursion_flag(%rip)
++is_nmi:
++ addq $16,%rsp # drop vector number and error code
+ INTERRUPT_RETURN
+ ENDPROC(early_idt_handler)
+
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index 5d576ab34403..21935afebe19 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
+
+ void __kernel_fpu_end(void)
+ {
+- if (use_eager_fpu())
+- math_state_restore();
+- else
++ if (use_eager_fpu()) {
++ /*
++ * For eager fpu, most the time, tsk_used_math() is true.
++ * Restore the user math as we are done with the kernel usage.
++ * At few instances during thread exit, signal handling etc,
++ * tsk_used_math() is false. Those few places will take proper
++ * actions, so we don't need to restore the math here.
++ */
++ if (likely(tsk_used_math(current)))
++ math_state_restore();
++ } else {
+ stts();
++ }
+ }
+ EXPORT_SYMBOL(__kernel_fpu_end);
+
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index bb1dc51bab05..8e9fe8dfd37b 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all)
+ unsigned int ds, cs, es;
+
+ printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+- printk_address(regs->ip, 1);
++ printk_address(regs->ip);
+ printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+ regs->sp, regs->flags);
+ printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 04ee1e2e4c02..52dbf1e400dc 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
+ return;
+
+ pci_read_config_dword(nb_ht, 0x60, &val);
+- node = val & 7;
++ node = pcibus_to_node(dev->bus) | (val & 7);
+ /*
+ * Some hardware may return an invalid node ID,
+ * so check it first:
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c0bc80391e40..612c717747dd 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2993,10 +2993,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
+ u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+ /* instruction emulation calls kvm_set_cr8() */
+ r = cr_interception(svm);
+- if (irqchip_in_kernel(svm->vcpu.kvm)) {
+- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++ if (irqchip_in_kernel(svm->vcpu.kvm))
+ return r;
+- }
+ if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+ return r;
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+@@ -3558,6 +3556,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
++ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++
+ if (irr == -1)
+ return;
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index d8b1ff68dbb9..5b90bbcad9f6 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -596,7 +596,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+
+ printk(KERN_CONT " at %p\n", (void *) address);
+ printk(KERN_ALERT "IP:");
+- printk_address(regs->ip, 1);
++ printk_address(regs->ip);
+
+ dump_pagetable(address);
+ }
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index 9515f18898b2..f37dec579712 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -297,6 +297,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ },
+ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad Edge E530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad Edge E530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Acer Aspire V5-573G",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Acer Aspire V5-572G",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad T431s",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "ThinkPad T430",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
++ },
++ },
+
+ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 15986f32009e..3cc0b92e3544 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -70,6 +70,8 @@ enum ec_command {
+ #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
++#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
++ * when trying to clear the EC */
+
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+@@ -123,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
+ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+
+ /* --------------------------------------------------------------------------
+ Transaction Management
+@@ -468,6 +471,29 @@ acpi_handle ec_get_handle(void)
+
+ EXPORT_SYMBOL(ec_get_handle);
+
++static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
++
++/*
++ * Clears stale _Q events that might have accumulated in the EC.
++ * Run with locked ec mutex.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++ int i, status;
++ u8 value = 0;
++
++ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++ status = acpi_ec_query_unlocked(ec, &value);
++ if (status || !value)
++ break;
++ }
++
++ if (unlikely(i == ACPI_EC_CLEAR_MAX))
++ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++ else
++ pr_info("%d stale EC events cleared\n", i);
++}
++
+ void acpi_ec_block_transactions(void)
+ {
+ struct acpi_ec *ec = first_ec;
+@@ -491,6 +517,10 @@ void acpi_ec_unblock_transactions(void)
+ mutex_lock(&ec->mutex);
+ /* Allow transactions to be carried out again */
+ clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
++
++ if (EC_FLAGS_CLEAR_ON_RESUME)
++ acpi_ec_clear(ec);
++
+ mutex_unlock(&ec->mutex);
+ }
+
+@@ -848,6 +878,13 @@ static int acpi_ec_add(struct acpi_device *device)
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
++
++ /* Clear stale _Q events if hardware might require that */
++ if (EC_FLAGS_CLEAR_ON_RESUME) {
++ mutex_lock(&ec->mutex);
++ acpi_ec_clear(ec);
++ mutex_unlock(&ec->mutex);
++ }
+ return ret;
+ }
+
+@@ -949,6 +986,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC during
++ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
++ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control this
++ * behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
++ * however it is very likely that other Samsung models are affected.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++ pr_debug("Detected system needing EC poll on resume.\n");
++ EC_FLAGS_CLEAR_ON_RESUME = 1;
++ return 0;
++}
++
+ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -992,6 +1053,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
++ {
++ ec_clear_on_resume, "Samsung hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ {},
+ };
+
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index b7201fc6f1e1..0bdacc5e26a3 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ memory24 = &ares->data.memory24;
++ if (!memory24->address_length)
++ return false;
+ acpi_dev_get_memresource(res, memory24->minimum,
+ memory24->address_length,
+ memory24->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ memory32 = &ares->data.memory32;
++ if (!memory32->address_length)
++ return false;
+ acpi_dev_get_memresource(res, memory32->minimum,
+ memory32->address_length,
+ memory32->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ fixed_memory32 = &ares->data.fixed_memory32;
++ if (!fixed_memory32->address_length)
++ return false;
+ acpi_dev_get_memresource(res, fixed_memory32->address,
+ fixed_memory32->address_length,
+ fixed_memory32->write_protect);
+@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IO:
+ io = &ares->data.io;
++ if (!io->address_length)
++ return false;
+ acpi_dev_get_ioresource(res, io->minimum,
+ io->address_length,
+ io->io_decode);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ fixed_io = &ares->data.fixed_io;
++ if (!fixed_io->address_length)
++ return false;
+ acpi_dev_get_ioresource(res, fixed_io->address,
+ fixed_io->address_length,
+ ACPI_DECODE_10);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 14df30580e15..99e5158456d8 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -75,6 +75,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
+ return 0;
+ }
+
++static bool acpi_sleep_state_supported(u8 sleep_state)
++{
++ acpi_status status;
++ u8 type_a, type_b;
++
++ status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
++ return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
++ || (acpi_gbl_FADT.sleep_control.address
++ && acpi_gbl_FADT.sleep_status.address));
++}
++
+ #ifdef CONFIG_ACPI_SLEEP
+ static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+
+@@ -608,15 +619,9 @@ static void acpi_sleep_suspend_setup(void)
+ {
+ int i;
+
+- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+- acpi_status status;
+- u8 type_a, type_b;
+-
+- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+- if (ACPI_SUCCESS(status)) {
++ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
++ if (acpi_sleep_state_supported(i))
+ sleep_states[i] = 1;
+- }
+- }
+
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
+@@ -747,11 +752,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
+
+ static void acpi_sleep_hibernate_setup(void)
+ {
+- acpi_status status;
+- u8 type_a, type_b;
+-
+- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+- if (ACPI_FAILURE(status))
++ if (!acpi_sleep_state_supported(ACPI_STATE_S4))
+ return;
+
+ hibernation_set_ops(old_suspend_ordering ?
+@@ -800,8 +801,6 @@ static void acpi_power_off(void)
+
+ int __init acpi_sleep_init(void)
+ {
+- acpi_status status;
+- u8 type_a, type_b;
+ char supported[ACPI_S_STATE_COUNT * 3 + 1];
+ char *pos = supported;
+ int i;
+@@ -816,8 +815,7 @@ int __init acpi_sleep_init(void)
+ acpi_sleep_suspend_setup();
+ acpi_sleep_hibernate_setup();
+
+- status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+- if (ACPI_SUCCESS(status)) {
++ if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
+ sleep_states[ACPI_STATE_S5] = 1;
+ pm_power_off_prepare = acpi_power_off_prepare;
+ pm_power_off = acpi_power_off;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index f3c361b5c5e5..c5d056e974f1 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
++ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+- { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index de4aa409abe2..2c6d5e118ac1 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
+ old->config_rom_retries = 0;
+ fw_notice(card, "rediscovered device %s\n", dev_name(dev));
+
+- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
++ old->workfn = fw_device_update;
+ fw_schedule_device_work(old, 0);
+
+ if (current_node == card->root_node)
+@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++ device->workfn = fw_device_shutdown;
+ fw_schedule_device_work(device, SHUTDOWN_DELAY);
+ } else {
+ fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
+@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
+ dev_name(&device->device), fw_rcode_string(ret));
+ gone:
+ atomic_set(&device->state, FW_DEVICE_GONE);
+- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++ device->workfn = fw_device_shutdown;
+ fw_schedule_device_work(device, SHUTDOWN_DELAY);
+ out:
+ if (node_id == card->root_node->node_id)
+ fw_schedule_bm_work(card, 0);
+ }
+
++static void fw_device_workfn(struct work_struct *work)
++{
++ struct fw_device *device = container_of(to_delayed_work(work),
++ struct fw_device, work);
++ device->workfn(work);
++}
++
+ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ {
+ struct fw_device *device;
+@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ * power-up after getting plugged in. We schedule the
+ * first config rom scan half a second after bus reset.
+ */
+- INIT_DELAYED_WORK(&device->work, fw_device_init);
++ device->workfn = fw_device_init;
++ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
+ fw_schedule_device_work(device, INITIAL_DELAY);
+ break;
+
+@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_RUNNING,
+ FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
++ device->workfn = fw_device_refresh;
+ fw_schedule_device_work(device,
+ device->is_local ? 0 : INITIAL_DELAY);
+ }
+@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ smp_wmb(); /* update node_id before generation */
+ device->generation = card->generation;
+ if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
++ device->workfn = fw_device_update;
+ fw_schedule_device_work(device, 0);
+ }
+ break;
+@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ device = node->data;
+ if (atomic_xchg(&device->state,
+ FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
+- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++ device->workfn = fw_device_shutdown;
+ fw_schedule_device_work(device,
+ list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
+ }
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 6b895986dc22..4af0a7bad7f2 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
+ if (rcode == RCODE_COMPLETE) {
+ fwnet_transmit_packet_done(ptask);
+ } else {
+- fwnet_transmit_packet_failed(ptask);
+-
+ if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
+ dev_err(&ptask->dev->netdev->dev,
+ "fwnet_write_complete failed: %x (skipped %d)\n",
+@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
+
+ errors_skipped = 0;
+ last_rcode = rcode;
+- } else
++ } else {
+ errors_skipped++;
++ }
++ fwnet_transmit_packet_failed(ptask);
+ }
+ }
+
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 6aa8a86cb83b..ee805a57b72d 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
+ #define QUIRK_NO_MSI 0x10
+ #define QUIRK_TI_SLLZ059 0x20
+ #define QUIRK_IR_WAKE 0x40
+-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
+
+ /* In case of multiple matches in ohci_quirks[], only the first one is used. */
+ static const struct {
+@@ -303,10 +302,7 @@ static const struct {
+ QUIRK_BE_HEADERS},
+
+ {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
+- QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
+-
+- {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
+- QUIRK_PHY_LCTRL_TIMEOUT},
++ QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
+ QUIRK_RESET_PACKET},
+@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+ ", disable MSI = " __stringify(QUIRK_NO_MSI)
+ ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
+ ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
+- ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
+ ")");
+
+ #define OHCI_PARAM_DEBUG_AT_AR 1
+@@ -2295,9 +2290,6 @@ static int ohci_enable(struct fw_card *card,
+ * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
+ * cannot actually use the phy at that time. These need tens of
+ * millisecods pause between LPS write and first phy access too.
+- *
+- * But do not wait for 50msec on Agere/LSI cards. Their phy
+- * arbitration state machine may time out during such a long wait.
+ */
+
+ reg_write(ohci, OHCI1394_HCControlSet,
+@@ -2305,11 +2297,8 @@ static int ohci_enable(struct fw_card *card,
+ OHCI1394_HCControl_postedWriteEnable);
+ flush_writes(ohci);
+
+- if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
++ for (lps = 0, i = 0; !lps && i < 3; i++) {
+ msleep(50);
+-
+- for (lps = 0, i = 0; !lps && i < 150; i++) {
+- msleep(1);
+ lps = reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_LPS;
+ }
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 281029daf98c..7aef911fdc71 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
+ */
+ int generation;
+ int retries;
++ work_func_t workfn;
+ struct delayed_work work;
+ bool has_sdev;
+ bool blocked;
+@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
+ /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
+ sbp2_set_busy_timeout(lu);
+
+- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
++ lu->workfn = sbp2_reconnect;
+ sbp2_agent_reset(lu);
+
+ /* This was a re-login. */
+@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
+ * If a bus reset happened, sbp2_update will have requeued
+ * lu->work already. Reset the work from reconnect to login.
+ */
+- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
++ lu->workfn = sbp2_login;
+ }
+
+ static void sbp2_reconnect(struct work_struct *work)
+@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
+ lu->retries++ >= 5) {
+ dev_err(tgt_dev(tgt), "failed to reconnect\n");
+ lu->retries = 0;
+- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
++ lu->workfn = sbp2_login;
+ }
+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
+@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
+ sbp2_conditionally_unblock(lu);
+ }
+
++static void sbp2_lu_workfn(struct work_struct *work)
++{
++ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
++ struct sbp2_logical_unit, work);
++ lu->workfn(work);
++}
++
+ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+ {
+ struct sbp2_logical_unit *lu;
+@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+ lu->blocked = false;
+ ++tgt->dont_block;
+ INIT_LIST_HEAD(&lu->orb_list);
+- INIT_DELAYED_WORK(&lu->work, sbp2_login);
++ lu->workfn = sbp2_login;
++ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
+
+ list_add_tail(&lu->link, &tgt->lu_list);
+ return 0;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 2ad27880cd04..2bef0e4cfda8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -376,7 +376,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
+ void intel_detect_pch(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct pci_dev *pch;
++ struct pci_dev *pch = NULL;
+
+ /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+ * (which really amounts to a PCH but no South Display).
+@@ -397,12 +397,9 @@ void intel_detect_pch(struct drm_device *dev)
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+- while (pch) {
+- struct pci_dev *curr = pch;
++ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+- unsigned short id;
+- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
++ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
+
+ if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+@@ -428,18 +425,16 @@ void intel_detect_pch(struct drm_device *dev)
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(!IS_ULT(dev));
+- } else {
+- goto check_next;
+- }
+- pci_dev_put(pch);
++ } else
++ continue;
++
+ break;
+ }
+-check_next:
+- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
+- pci_dev_put(curr);
+ }
+ if (!pch)
+- DRM_DEBUG_KMS("No PCH found?\n");
++ DRM_DEBUG_KMS("No PCH found.\n");
++
++ pci_dev_put(pch);
+ }
+
+ bool i915_semaphore_is_enabled(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 4148cc85bf7f..4d302f3dec89 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -834,7 +834,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+ {
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+- if (IS_G4X(dev))
++ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev))
+ return 300000;
+@@ -887,8 +887,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ * outputs. We also need to check that the higher clock still fits
+ * within limits.
+ */
+- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
+- && HAS_PCH_SPLIT(dev)) {
++ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
++ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
+ DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
+ desired_bpp = 12*3;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5e891b226acf..7bb7074a131f 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1313,7 +1313,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ }
+ if (is_dp)
+ args.v5.ucLaneNum = dp_lane_count;
+- else if (radeon_encoder->pixel_clock > 165000)
++ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v5.ucLaneNum = 8;
+ else
+ args.v5.ucLaneNum = 4;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 31f5f0e88328..25370ac56b4b 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3517,8 +3517,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
+ {
+ if (enable)
+ WREG32(CP_MEC_CNTL, 0);
+- else
++ else {
+ WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
++ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
++ }
+ udelay(50);
+ }
+
+@@ -6995,26 +6998,7 @@ static int cik_startup(struct radeon_device *rdev)
+
+ cik_mc_program(rdev);
+
+- if (rdev->flags & RADEON_IS_IGP) {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+- r = cik_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+- } else {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+- !rdev->mc_fw) {
+- r = cik_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
++ if (!(rdev->flags & RADEON_IS_IGP)) {
+ r = ci_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+@@ -7327,6 +7311,27 @@ int cik_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (rdev->flags & RADEON_IS_IGP) {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
++ r = cik_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ } else {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
++ !rdev->mc_fw) {
++ r = cik_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ }
++
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index aaf7ffce8b5b..d565f4076a23 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -174,6 +174,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+ }
++ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
++ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+ }
+
+ /**
+@@ -201,6 +203,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+ u32 me_cntl, reg_offset;
+ int i;
+
++ if (enable == false) {
++ cik_sdma_gfx_stop(rdev);
++ cik_sdma_rlc_stop(rdev);
++ }
++
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+@@ -328,10 +335,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
+ if (!rdev->sdma_fw)
+ return -EINVAL;
+
+- /* stop the gfx rings and rlc compute queues */
+- cik_sdma_gfx_stop(rdev);
+- cik_sdma_rlc_stop(rdev);
+-
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+
+@@ -400,9 +403,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
+ */
+ void cik_sdma_fini(struct radeon_device *rdev)
+ {
+- /* stop the gfx rings and rlc compute queues */
+- cik_sdma_gfx_stop(rdev);
+- cik_sdma_rlc_stop(rdev);
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 5f07d1bfbd76..c429bb9b17b6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -5061,26 +5061,11 @@ static int evergreen_startup(struct radeon_device *rdev)
+ evergreen_mc_program(rdev);
+
+ if (ASIC_IS_DCE5(rdev)) {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+- r = ni_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+- } else {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = r600_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+ }
+
+ if (rdev->flags & RADEON_IS_AGP) {
+@@ -5308,6 +5293,24 @@ int evergreen_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (ASIC_IS_DCE5(rdev)) {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++ r = ni_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ } else {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = r600_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ }
++
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
+index 76ada8cfe902..3a03ba37d043 100644
+--- a/drivers/gpu/drm/radeon/evergreen_smc.h
++++ b/drivers/gpu/drm/radeon/evergreen_smc.h
+@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
+
+-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
++#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
+
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index b2dbd48f7f28..474343adf262 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1881,23 +1881,7 @@ static int cayman_startup(struct radeon_device *rdev)
+
+ evergreen_mc_program(rdev);
+
+- if (rdev->flags & RADEON_IS_IGP) {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = ni_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+- } else {
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+- r = ni_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
++ if (!(rdev->flags & RADEON_IS_IGP)) {
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+@@ -2148,6 +2132,24 @@ int cayman_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (rdev->flags & RADEON_IS_IGP) {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = ni_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ } else {
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++ r = ni_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++ }
++
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 67da7e285cde..5af2729f2055 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2726,14 +2726,6 @@ static int r600_startup(struct radeon_device *rdev)
+
+ r600_mc_program(rdev);
+
+- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+- r = r600_init_microcode(rdev);
+- if (r) {
+- DRM_ERROR("Failed to load firmware!\n");
+- return r;
+- }
+- }
+-
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+@@ -2921,6 +2913,14 @@ int r600_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++ r = r600_init_microcode(rdev);
++ if (r) {
++ DRM_ERROR("Failed to load firmware!\n");
++ return r;
++ }
++ }
++
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 71245d6f34a2..84323c943bfc 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -712,6 +712,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
+ DRM_ERROR("Failed initializing VRAM heap.\n");
+ return r;
+ }
++ /* Change the size here instead of the init above so only lpfn is affected */
++ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1584 ++
1585 + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
1586 + RADEON_GEM_DOMAIN_VRAM,
1587 + NULL, &rdev->stollen_vga_memory);
1588 +diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
1589 +index 99dd9d8fcf72..c4960ad71e5e 100644
1590 +--- a/drivers/gpu/drm/radeon/rv770.c
1591 ++++ b/drivers/gpu/drm/radeon/rv770.c
1592 +@@ -1665,14 +1665,6 @@ static int rv770_startup(struct radeon_device *rdev)
1593 +
1594 + rv770_mc_program(rdev);
1595 +
1596 +- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1597 +- r = r600_init_microcode(rdev);
1598 +- if (r) {
1599 +- DRM_ERROR("Failed to load firmware!\n");
1600 +- return r;
1601 +- }
1602 +- }
1603 +-
1604 + if (rdev->flags & RADEON_IS_AGP) {
1605 + rv770_agp_enable(rdev);
1606 + } else {
1607 +@@ -1876,6 +1868,14 @@ int rv770_init(struct radeon_device *rdev)
1608 + if (r)
1609 + return r;
1610 +
1611 ++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1612 ++ r = r600_init_microcode(rdev);
1613 ++ if (r) {
1614 ++ DRM_ERROR("Failed to load firmware!\n");
1615 ++ return r;
1616 ++ }
1617 ++ }
1618 ++
1619 + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1620 + r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1621 +
1622 +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1623 +index 8277ee01a7b4..873e0a608948 100644
1624 +--- a/drivers/gpu/drm/radeon/si.c
1625 ++++ b/drivers/gpu/drm/radeon/si.c
1626 +@@ -6387,15 +6387,6 @@ static int si_startup(struct radeon_device *rdev)
1627 +
1628 + si_mc_program(rdev);
1629 +
1630 +- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1631 +- !rdev->rlc_fw || !rdev->mc_fw) {
1632 +- r = si_init_microcode(rdev);
1633 +- if (r) {
1634 +- DRM_ERROR("Failed to load firmware!\n");
1635 +- return r;
1636 +- }
1637 +- }
1638 +-
1639 + r = si_mc_load_microcode(rdev);
1640 + if (r) {
1641 + DRM_ERROR("Failed to load MC firmware!\n");
1642 +@@ -6663,6 +6654,15 @@ int si_init(struct radeon_device *rdev)
1643 + if (r)
1644 + return r;
1645 +
1646 ++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1647 ++ !rdev->rlc_fw || !rdev->mc_fw) {
1648 ++ r = si_init_microcode(rdev);
1649 ++ if (r) {
1650 ++ DRM_ERROR("Failed to load firmware!\n");
1651 ++ return r;
1652 ++ }
1653 ++ }
1654 ++
1655 + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1656 + ring->ring_obj = NULL;
1657 + r600_ring_init(rdev, ring, 1024 * 1024);
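[Note on the five radeon hunks above (evergreen.c, ni.c, r600.c, rv770.c,
si.c): they apply one pattern. The *_init_microcode() calls, which go through
request_firmware(), move out of the *_startup() paths (which also run on
resume, when the firmware loader may not be usable) into the one-time *_init()
paths. A condensed sketch, where "chip" is a placeholder for each per-ASIC
prefix and not a real symbol:

    /* Sketch: fetch firmware blobs once, at driver init... */
    static int chip_init(struct radeon_device *rdev)
    {
            if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                    int r = chip_init_microcode(rdev); /* request_firmware() */
                    if (r) {
                            DRM_ERROR("Failed to load firmware!\n");
                            return r;
                    }
            }
            /* ... ring setup, then startup ... */
            return 0;
    }

    /* ...so chip_startup(), which is shared with the resume path, only
     * programs the already-cached blobs (e.g. via ni_mc_load_microcode()). */]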
1658 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1659 +index 2332aa1bf93c..83895f2d16c6 100644
1660 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1661 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1662 +@@ -2396,7 +2396,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
1663 + if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
1664 + enable_sq_ramping = false;
1665 +
1666 +- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
1667 ++ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
1668 + enable_sq_ramping = false;
1669 +
1670 + for (i = 0; i < state->performance_level_count; i++) {
1671 +@@ -5409,7 +5409,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
1672 +
1673 + for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
1674 + if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
1675 +- if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
1676 ++ if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
1677 + break;
1678 + mc_reg_table->address[i].s0 =
1679 + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
1680 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1681 +index 729805322883..acd0fe0c80d2 100644
1682 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
1683 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
1684 +@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
1685 +
1686 + moved:
1687 + if (bo->evicted) {
1688 +- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
1689 +- if (ret)
1690 +- pr_err("Can not flush read caches\n");
1691 ++ if (bdev->driver->invalidate_caches) {
1692 ++ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
1693 ++ if (ret)
1694 ++ pr_err("Can not flush read caches\n");
1695 ++ }
1696 + bo->evicted = false;
1697 + }
1698 +
1699 +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
1700 +index ff758eded96f..cd30d98ac510 100644
1701 +--- a/drivers/i2c/busses/Kconfig
1702 ++++ b/drivers/i2c/busses/Kconfig
1703 +@@ -376,7 +376,7 @@ config I2C_CBUS_GPIO
1704 +
1705 + config I2C_CPM
1706 + tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
1707 +- depends on (CPM1 || CPM2) && OF_I2C
1708 ++ depends on CPM1 || CPM2
1709 + help
1710 + This supports the use of the I2C interface on Freescale
1711 + processors with CPM1 or CPM2.
1712 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1713 +index ea7051ee1493..ba93ef85652d 100644
1714 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
1715 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
1716 +@@ -496,8 +496,8 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
1717 + isert_conn->state = ISER_CONN_INIT;
1718 + INIT_LIST_HEAD(&isert_conn->conn_accept_node);
1719 + init_completion(&isert_conn->conn_login_comp);
1720 +- init_waitqueue_head(&isert_conn->conn_wait);
1721 +- init_waitqueue_head(&isert_conn->conn_wait_comp_err);
1722 ++ init_completion(&isert_conn->conn_wait);
1723 ++ init_completion(&isert_conn->conn_wait_comp_err);
1724 + kref_init(&isert_conn->conn_kref);
1725 + kref_get(&isert_conn->conn_kref);
1726 + mutex_init(&isert_conn->conn_mutex);
1727 +@@ -669,11 +669,11 @@ isert_disconnect_work(struct work_struct *work)
1728 +
1729 + pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1730 + mutex_lock(&isert_conn->conn_mutex);
1731 +- isert_conn->state = ISER_CONN_DOWN;
1732 ++ if (isert_conn->state == ISER_CONN_UP)
1733 ++ isert_conn->state = ISER_CONN_TERMINATING;
1734 +
1735 + if (isert_conn->post_recv_buf_count == 0 &&
1736 + atomic_read(&isert_conn->post_send_buf_count) == 0) {
1737 +- pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
1738 + mutex_unlock(&isert_conn->conn_mutex);
1739 + goto wake_up;
1740 + }
1741 +@@ -693,7 +693,7 @@ isert_disconnect_work(struct work_struct *work)
1742 + mutex_unlock(&isert_conn->conn_mutex);
1743 +
1744 + wake_up:
1745 +- wake_up(&isert_conn->conn_wait);
1746 ++ complete(&isert_conn->conn_wait);
1747 + isert_put_conn(isert_conn);
1748 + }
1749 +
1750 +@@ -1427,7 +1427,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1751 + case ISCSI_OP_SCSI_CMD:
1752 + spin_lock_bh(&conn->cmd_lock);
1753 + if (!list_empty(&cmd->i_conn_node))
1754 +- list_del(&cmd->i_conn_node);
1755 ++ list_del_init(&cmd->i_conn_node);
1756 + spin_unlock_bh(&conn->cmd_lock);
1757 +
1758 + if (cmd->data_direction == DMA_TO_DEVICE)
1759 +@@ -1439,7 +1439,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1760 + case ISCSI_OP_SCSI_TMFUNC:
1761 + spin_lock_bh(&conn->cmd_lock);
1762 + if (!list_empty(&cmd->i_conn_node))
1763 +- list_del(&cmd->i_conn_node);
1764 ++ list_del_init(&cmd->i_conn_node);
1765 + spin_unlock_bh(&conn->cmd_lock);
1766 +
1767 + transport_generic_free_cmd(&cmd->se_cmd, 0);
1768 +@@ -1449,7 +1449,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1769 + case ISCSI_OP_TEXT:
1770 + spin_lock_bh(&conn->cmd_lock);
1771 + if (!list_empty(&cmd->i_conn_node))
1772 +- list_del(&cmd->i_conn_node);
1773 ++ list_del_init(&cmd->i_conn_node);
1774 + spin_unlock_bh(&conn->cmd_lock);
1775 +
1776 + /*
1777 +@@ -1512,6 +1512,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1778 + iscsit_stop_dataout_timer(cmd);
1779 + device->unreg_rdma_mem(isert_cmd, isert_conn);
1780 + cmd->write_data_done = wr->cur_rdma_length;
1781 ++ wr->send_wr_num = 0;
1782 +
1783 + pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1784 + spin_lock_bh(&cmd->istate_lock);
1785 +@@ -1552,7 +1553,7 @@ isert_do_control_comp(struct work_struct *work)
1786 + pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1787 + /*
1788 + * Call atomic_dec(&isert_conn->post_send_buf_count)
1789 +- * from isert_free_conn()
1790 ++ * from isert_wait_conn()
1791 + */
1792 + isert_conn->logout_posted = true;
1793 + iscsit_logout_post_handler(cmd, cmd->conn);
1794 +@@ -1576,6 +1577,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1795 + struct ib_device *ib_dev)
1796 + {
1797 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1798 ++ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1799 +
1800 + if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1801 + cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1802 +@@ -1587,7 +1589,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1803 + queue_work(isert_comp_wq, &isert_cmd->comp_work);
1804 + return;
1805 + }
1806 +- atomic_dec(&isert_conn->post_send_buf_count);
1807 ++ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1808 +
1809 + cmd->i_state = ISTATE_SENT_STATUS;
1810 + isert_completion_put(tx_desc, isert_cmd, ib_dev);
1811 +@@ -1625,7 +1627,7 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
1812 + case ISER_IB_RDMA_READ:
1813 + pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1814 +
1815 +- atomic_dec(&isert_conn->post_send_buf_count);
1816 ++ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1817 + isert_completion_rdma_read(tx_desc, isert_cmd);
1818 + break;
1819 + default:
1820 +@@ -1636,31 +1638,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
1821 + }
1822 +
1823 + static void
1824 +-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1825 ++isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1826 + {
1827 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1828 ++ struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1829 ++
1830 ++ if (!isert_cmd)
1831 ++ isert_unmap_tx_desc(tx_desc, ib_dev);
1832 ++ else
1833 ++ isert_completion_put(tx_desc, isert_cmd, ib_dev);
1834 ++}
1835 ++
1836 ++static void
1837 ++isert_cq_rx_comp_err(struct isert_conn *isert_conn)
1838 ++{
1839 ++ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1840 ++ struct iscsi_conn *conn = isert_conn->conn;
1841 +
1842 +- if (tx_desc) {
1843 +- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1844 ++ if (isert_conn->post_recv_buf_count)
1845 ++ return;
1846 +
1847 +- if (!isert_cmd)
1848 +- isert_unmap_tx_desc(tx_desc, ib_dev);
1849 +- else
1850 +- isert_completion_put(tx_desc, isert_cmd, ib_dev);
1851 ++ if (conn->sess) {
1852 ++ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1853 ++ target_wait_for_sess_cmds(conn->sess->se_sess);
1854 + }
1855 +
1856 +- if (isert_conn->post_recv_buf_count == 0 &&
1857 +- atomic_read(&isert_conn->post_send_buf_count) == 0) {
1858 +- pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1859 +- pr_debug("Calling wake_up from isert_cq_comp_err\n");
1860 ++ while (atomic_read(&isert_conn->post_send_buf_count))
1861 ++ msleep(3000);
1862 +
1863 +- mutex_lock(&isert_conn->conn_mutex);
1864 +- if (isert_conn->state != ISER_CONN_DOWN)
1865 +- isert_conn->state = ISER_CONN_TERMINATING;
1866 +- mutex_unlock(&isert_conn->conn_mutex);
1867 ++ mutex_lock(&isert_conn->conn_mutex);
1868 ++ isert_conn->state = ISER_CONN_DOWN;
1869 ++ mutex_unlock(&isert_conn->conn_mutex);
1870 +
1871 +- wake_up(&isert_conn->conn_wait_comp_err);
1872 +- }
1873 ++ complete(&isert_conn->conn_wait_comp_err);
1874 + }
1875 +
1876 + static void
1877 +@@ -1685,8 +1695,11 @@ isert_cq_tx_work(struct work_struct *work)
1878 + pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1879 + pr_debug("TX wc.status: 0x%08x\n", wc.status);
1880 + pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1881 +- atomic_dec(&isert_conn->post_send_buf_count);
1882 +- isert_cq_comp_err(tx_desc, isert_conn);
1883 ++
1884 ++ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
1885 ++ atomic_dec(&isert_conn->post_send_buf_count);
1886 ++ isert_cq_tx_comp_err(tx_desc, isert_conn);
1887 ++ }
1888 + }
1889 + }
1890 +
1891 +@@ -1729,7 +1742,7 @@ isert_cq_rx_work(struct work_struct *work)
1892 + wc.vendor_err);
1893 + }
1894 + isert_conn->post_recv_buf_count--;
1895 +- isert_cq_comp_err(NULL, isert_conn);
1896 ++ isert_cq_rx_comp_err(isert_conn);
1897 + }
1898 + }
1899 +
1900 +@@ -2151,6 +2164,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
1901 +
1902 + if (!fr_desc->valid) {
1903 + memset(&inv_wr, 0, sizeof(inv_wr));
1904 ++ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
1905 + inv_wr.opcode = IB_WR_LOCAL_INV;
1906 + inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
1907 + wr = &inv_wr;
1908 +@@ -2161,6 +2175,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
1909 +
1910 + /* Prepare FASTREG WR */
1911 + memset(&fr_wr, 0, sizeof(fr_wr));
1912 ++ fr_wr.wr_id = ISER_FASTREG_LI_WRID;
1913 + fr_wr.opcode = IB_WR_FAST_REG_MR;
1914 + fr_wr.wr.fast_reg.iova_start =
1915 + fr_desc->data_frpl->page_list[0] + page_off;
1916 +@@ -2325,12 +2340,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1917 + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1918 + isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
1919 +
1920 +- atomic_inc(&isert_conn->post_send_buf_count);
1921 ++ atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1922 +
1923 + rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1924 + if (rc) {
1925 + pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
1926 +- atomic_dec(&isert_conn->post_send_buf_count);
1927 ++ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1928 + }
1929 + pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
1930 + isert_cmd);
1931 +@@ -2358,12 +2373,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
1932 + return rc;
1933 + }
1934 +
1935 +- atomic_inc(&isert_conn->post_send_buf_count);
1936 ++ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
1937 +
1938 + rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1939 + if (rc) {
1940 + pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
1941 +- atomic_dec(&isert_conn->post_send_buf_count);
1942 ++ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1943 + }
1944 + pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
1945 + isert_cmd);
1946 +@@ -2650,22 +2665,11 @@ isert_free_np(struct iscsi_np *np)
1947 + kfree(isert_np);
1948 + }
1949 +
1950 +-static int isert_check_state(struct isert_conn *isert_conn, int state)
1951 +-{
1952 +- int ret;
1953 +-
1954 +- mutex_lock(&isert_conn->conn_mutex);
1955 +- ret = (isert_conn->state == state);
1956 +- mutex_unlock(&isert_conn->conn_mutex);
1957 +-
1958 +- return ret;
1959 +-}
1960 +-
1961 +-static void isert_free_conn(struct iscsi_conn *conn)
1962 ++static void isert_wait_conn(struct iscsi_conn *conn)
1963 + {
1964 + struct isert_conn *isert_conn = conn->context;
1965 +
1966 +- pr_debug("isert_free_conn: Starting \n");
1967 ++ pr_debug("isert_wait_conn: Starting \n");
1968 + /*
1969 + * Decrement post_send_buf_count for special case when called
1970 + * from isert_do_control_comp() -> iscsit_logout_post_handler()
1971 +@@ -2675,38 +2679,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
1972 + atomic_dec(&isert_conn->post_send_buf_count);
1973 +
1974 + if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
1975 +- pr_debug("Calling rdma_disconnect from isert_free_conn\n");
1976 ++ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
1977 + rdma_disconnect(isert_conn->conn_cm_id);
1978 + }
1979 + /*
1980 + * Only wait for conn_wait_comp_err if the isert_conn made it
1981 + * into full feature phase..
1982 + */
1983 +- if (isert_conn->state == ISER_CONN_UP) {
1984 +- pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
1985 +- isert_conn->state);
1986 +- mutex_unlock(&isert_conn->conn_mutex);
1987 +-
1988 +- wait_event(isert_conn->conn_wait_comp_err,
1989 +- (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
1990 +-
1991 +- wait_event(isert_conn->conn_wait,
1992 +- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
1993 +-
1994 +- isert_put_conn(isert_conn);
1995 +- return;
1996 +- }
1997 + if (isert_conn->state == ISER_CONN_INIT) {
1998 + mutex_unlock(&isert_conn->conn_mutex);
1999 +- isert_put_conn(isert_conn);
2000 + return;
2001 + }
2002 +- pr_debug("isert_free_conn: wait_event conn_wait %d\n",
2003 +- isert_conn->state);
2004 ++ if (isert_conn->state == ISER_CONN_UP)
2005 ++ isert_conn->state = ISER_CONN_TERMINATING;
2006 + mutex_unlock(&isert_conn->conn_mutex);
2007 +
2008 +- wait_event(isert_conn->conn_wait,
2009 +- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2010 ++ wait_for_completion(&isert_conn->conn_wait_comp_err);
2011 ++
2012 ++ wait_for_completion(&isert_conn->conn_wait);
2013 ++}
2014 ++
2015 ++static void isert_free_conn(struct iscsi_conn *conn)
2016 ++{
2017 ++ struct isert_conn *isert_conn = conn->context;
2018 +
2019 + isert_put_conn(isert_conn);
2020 + }
2021 +@@ -2719,6 +2714,7 @@ static struct iscsit_transport iser_target_transport = {
2022 + .iscsit_setup_np = isert_setup_np,
2023 + .iscsit_accept_np = isert_accept_np,
2024 + .iscsit_free_np = isert_free_np,
2025 ++ .iscsit_wait_conn = isert_wait_conn,
2026 + .iscsit_free_conn = isert_free_conn,
2027 + .iscsit_get_login_rx = isert_get_login_rx,
2028 + .iscsit_put_login_tx = isert_put_login_tx,
2029 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
2030 +index 631f2090f0b8..52f4bf0d1a0f 100644
2031 +--- a/drivers/infiniband/ulp/isert/ib_isert.h
2032 ++++ b/drivers/infiniband/ulp/isert/ib_isert.h
2033 +@@ -6,6 +6,7 @@
2034 +
2035 + #define ISERT_RDMA_LISTEN_BACKLOG 10
2036 + #define ISCSI_ISER_SG_TABLESIZE 256
2037 ++#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
2038 +
2039 + enum isert_desc_type {
2040 + ISCSI_TX_CONTROL,
2041 +@@ -114,8 +115,8 @@ struct isert_conn {
2042 + struct isert_device *conn_device;
2043 + struct work_struct conn_logout_work;
2044 + struct mutex conn_mutex;
2045 +- wait_queue_head_t conn_wait;
2046 +- wait_queue_head_t conn_wait_comp_err;
2047 ++ struct completion conn_wait;
2048 ++ struct completion conn_wait_comp_err;
2049 + struct kref conn_kref;
2050 + struct list_head conn_frwr_pool;
2051 + int conn_frwr_pool_size;
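[Note on the ib_isert changes above: conn_wait and conn_wait_comp_err are
converted from wait queues polled through isert_check_state() into struct
completion, teardown is split into isert_wait_conn() (blocks) plus
isert_free_conn() (drops the final reference), and the new .iscsit_wait_conn
callback is wired up. The completion lifecycle being adopted, sketched with the
real call sites noted in comments:

    /* Sketch: one-shot completion replacing wait_event + state polling. */
    struct completion done;

    init_completion(&done);      /* setup: isert_connect_request() */
    /* ... later, on the teardown path ... */
    complete(&done);             /* signal: isert_disconnect_work() */
    /* ... while the waiter blocks in ... */
    wait_for_completion(&done);  /* isert_wait_conn() */

A completion latches the signal, so the helper that re-sampled
isert_conn->state under conn_mutex (the removed isert_check_state()) is no
longer needed.]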
2052 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2053 +index ea3e4b4f7e58..6ab68e058a0a 100644
2054 +--- a/drivers/md/dm-cache-target.c
2055 ++++ b/drivers/md/dm-cache-target.c
2056 +@@ -867,12 +867,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
2057 + int r;
2058 + struct dm_io_region o_region, c_region;
2059 + struct cache *cache = mg->cache;
2060 ++ sector_t cblock = from_cblock(mg->cblock);
2061 +
2062 + o_region.bdev = cache->origin_dev->bdev;
2063 + o_region.count = cache->sectors_per_block;
2064 +
2065 + c_region.bdev = cache->cache_dev->bdev;
2066 +- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
2067 ++ c_region.sector = cblock * cache->sectors_per_block;
2068 + c_region.count = cache->sectors_per_block;
2069 +
2070 + if (mg->writeback || mg->demote) {
2071 +@@ -2181,20 +2182,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2072 + bool discarded_block;
2073 + struct dm_bio_prison_cell *cell;
2074 + struct policy_result lookup_result;
2075 +- struct per_bio_data *pb;
2076 ++ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2077 +
2078 +- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
2079 ++ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2080 + /*
2081 + * This can only occur if the io goes to a partial block at
2082 + * the end of the origin device. We don't cache these.
2083 + * Just remap to the origin and carry on.
2084 + */
2085 +- remap_to_origin_clear_discard(cache, bio, block);
2086 ++ remap_to_origin(cache, bio);
2087 + return DM_MAPIO_REMAPPED;
2088 + }
2089 +
2090 +- pb = init_per_bio_data(bio, pb_data_size);
2091 +-
2092 + if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2093 + defer_bio(cache, bio);
2094 + return DM_MAPIO_SUBMITTED;
2095 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
2096 +index afb419e514bf..579b58200bf2 100644
2097 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
2098 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
2099 +@@ -91,6 +91,69 @@ struct block_op {
2100 + dm_block_t block;
2101 + };
2102 +
2103 ++struct bop_ring_buffer {
2104 ++ unsigned begin;
2105 ++ unsigned end;
2106 ++ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
2107 ++};
2108 ++
2109 ++static void brb_init(struct bop_ring_buffer *brb)
2110 ++{
2111 ++ brb->begin = 0;
2112 ++ brb->end = 0;
2113 ++}
2114 ++
2115 ++static bool brb_empty(struct bop_ring_buffer *brb)
2116 ++{
2117 ++ return brb->begin == brb->end;
2118 ++}
2119 ++
2120 ++static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
2121 ++{
2122 ++ unsigned r = old + 1;
2123 ++ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
2124 ++}
2125 ++
2126 ++static int brb_push(struct bop_ring_buffer *brb,
2127 ++ enum block_op_type type, dm_block_t b)
2128 ++{
2129 ++ struct block_op *bop;
2130 ++ unsigned next = brb_next(brb, brb->end);
2131 ++
2132 ++ /*
2133 ++ * We don't allow the last bop to be filled, this way we can
2134 ++ * differentiate between full and empty.
2135 ++ */
2136 ++ if (next == brb->begin)
2137 ++ return -ENOMEM;
2138 ++
2139 ++ bop = brb->bops + brb->end;
2140 ++ bop->type = type;
2141 ++ bop->block = b;
2142 ++
2143 ++ brb->end = next;
2144 ++
2145 ++ return 0;
2146 ++}
2147 ++
2148 ++static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
2149 ++{
2150 ++ struct block_op *bop;
2151 ++
2152 ++ if (brb_empty(brb))
2153 ++ return -ENODATA;
2154 ++
2155 ++ bop = brb->bops + brb->begin;
2156 ++ result->type = bop->type;
2157 ++ result->block = bop->block;
2158 ++
2159 ++ brb->begin = brb_next(brb, brb->begin);
2160 ++
2161 ++ return 0;
2162 ++}
2163 ++
2164 ++/*----------------------------------------------------------------*/
2165 ++
2166 + struct sm_metadata {
2167 + struct dm_space_map sm;
2168 +
2169 +@@ -101,25 +164,20 @@ struct sm_metadata {
2170 +
2171 + unsigned recursion_count;
2172 + unsigned allocated_this_transaction;
2173 +- unsigned nr_uncommitted;
2174 +- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
2175 ++ struct bop_ring_buffer uncommitted;
2176 +
2177 + struct threshold threshold;
2178 + };
2179 +
2180 + static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
2181 + {
2182 +- struct block_op *op;
2183 ++ int r = brb_push(&smm->uncommitted, type, b);
2184 +
2185 +- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
2186 ++ if (r) {
2187 + DMERR("too many recursive allocations");
2188 + return -ENOMEM;
2189 + }
2190 +
2191 +- op = smm->uncommitted + smm->nr_uncommitted++;
2192 +- op->type = type;
2193 +- op->block = b;
2194 +-
2195 + return 0;
2196 + }
2197 +
2198 +@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
2199 + return -ENOMEM;
2200 + }
2201 +
2202 +- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
2203 +- while (smm->nr_uncommitted && !r) {
2204 +- smm->nr_uncommitted--;
2205 +- r = commit_bop(smm, smm->uncommitted +
2206 +- smm->nr_uncommitted);
2207 ++ if (smm->recursion_count == 1) {
2208 ++ while (!brb_empty(&smm->uncommitted)) {
2209 ++ struct block_op bop;
2210 ++
2211 ++ r = brb_pop(&smm->uncommitted, &bop);
2212 ++ if (r) {
2213 ++ DMERR("bug in bop ring buffer");
2214 ++ break;
2215 ++ }
2216 ++
2217 ++ r = commit_bop(smm, &bop);
2218 + if (r)
2219 + break;
2220 + }
2221 +@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
2222 + static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
2223 + uint32_t *result)
2224 + {
2225 +- int r, i;
2226 ++ int r;
2227 ++ unsigned i;
2228 + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
2229 + unsigned adjustment = 0;
2230 +
2231 +@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
2232 + * We may have some uncommitted adjustments to add. This list
2233 + * should always be really short.
2234 + */
2235 +- for (i = 0; i < smm->nr_uncommitted; i++) {
2236 +- struct block_op *op = smm->uncommitted + i;
2237 ++ for (i = smm->uncommitted.begin;
2238 ++ i != smm->uncommitted.end;
2239 ++ i = brb_next(&smm->uncommitted, i)) {
2240 ++ struct block_op *op = smm->uncommitted.bops + i;
2241 +
2242 + if (op->block != b)
2243 + continue;
2244 +@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
2245 + static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
2246 + dm_block_t b, int *result)
2247 + {
2248 +- int r, i, adjustment = 0;
2249 ++ int r, adjustment = 0;
2250 ++ unsigned i;
2251 + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
2252 + uint32_t rc;
2253 +
2254 +@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
2255 + * We may have some uncommitted adjustments to add. This list
2256 + * should always be really short.
2257 + */
2258 +- for (i = 0; i < smm->nr_uncommitted; i++) {
2259 +- struct block_op *op = smm->uncommitted + i;
2260 ++ for (i = smm->uncommitted.begin;
2261 ++ i != smm->uncommitted.end;
2262 ++ i = brb_next(&smm->uncommitted, i)) {
2263 ++
2264 ++ struct block_op *op = smm->uncommitted.bops + i;
2265 +
2266 + if (op->block != b)
2267 + continue;
2268 +@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
2269 + smm->begin = superblock + 1;
2270 + smm->recursion_count = 0;
2271 + smm->allocated_this_transaction = 0;
2272 +- smm->nr_uncommitted = 0;
2273 ++ brb_init(&smm->uncommitted);
2274 + threshold_init(&smm->threshold);
2275 +
2276 + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
2277 +@@ -713,7 +784,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
2278 + smm->begin = 0;
2279 + smm->recursion_count = 0;
2280 + smm->allocated_this_transaction = 0;
2281 +- smm->nr_uncommitted = 0;
2282 ++ brb_init(&smm->uncommitted);
2283 + threshold_init(&smm->threshold);
2284 +
2285 + memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
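[Note on the dm-space-map-metadata rework above: a counted array of pending
block ops becomes a fixed ring buffer. Full/empty disambiguation uses the
classic one-reserved-slot scheme: bops[] holds MAX_RECURSIVE_ALLOCATIONS + 1
entries, but brb_push() refuses to fill the last free slot, so begin == end
always means empty and brb_next(end) == begin always means full. A worked
example, assuming a hypothetical 4-slot buffer (so at most 3 ops queued):

    /* empty:       begin == end              e.g. begin=1, end=1 */
    /* 2 queued:    end == begin + 2 (mod 4)  e.g. begin=1, end=3 */
    /* full, 3 ops: brb_next(end) == begin    e.g. begin=1, end=0 */
    /* brb_push() on a full buffer returns -ENOMEM without writing */

Readers such as sm_metadata_get_count() can then walk the pending ops in order
with the same brb_next() stepping, with no separate count field to keep in
sync.]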
2286 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2287 +index c1c3b132fed5..e381142d636f 100644
2288 +--- a/drivers/net/can/flexcan.c
2289 ++++ b/drivers/net/can/flexcan.c
2290 +@@ -144,6 +144,8 @@
2291 +
2292 + #define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
2293 +
2294 ++#define FLEXCAN_TIMEOUT_US (50)
2295 ++
2296 + /*
2297 + * FLEXCAN hardware feature flags
2298 + *
2299 +@@ -259,6 +261,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
2300 + }
2301 + #endif
2302 +
2303 ++static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
2304 ++{
2305 ++ if (!priv->reg_xceiver)
2306 ++ return 0;
2307 ++
2308 ++ return regulator_enable(priv->reg_xceiver);
2309 ++}
2310 ++
2311 ++static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
2312 ++{
2313 ++ if (!priv->reg_xceiver)
2314 ++ return 0;
2315 ++
2316 ++ return regulator_disable(priv->reg_xceiver);
2317 ++}
2318 ++
2319 + static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
2320 + u32 reg_esr)
2321 + {
2322 +@@ -266,26 +284,42 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
2323 + (reg_esr & FLEXCAN_ESR_ERR_BUS);
2324 + }
2325 +
2326 +-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
2327 ++static int flexcan_chip_enable(struct flexcan_priv *priv)
2328 + {
2329 + struct flexcan_regs __iomem *regs = priv->base;
2330 ++ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
2331 + u32 reg;
2332 +
2333 + reg = flexcan_read(&regs->mcr);
2334 + reg &= ~FLEXCAN_MCR_MDIS;
2335 + flexcan_write(reg, &regs->mcr);
2336 +
2337 +- udelay(10);
2338 ++ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
2339 ++ usleep_range(10, 20);
2340 ++
2341 ++ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
2342 ++ return -ETIMEDOUT;
2343 ++
2344 ++ return 0;
2345 + }
2346 +
2347 +-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
2348 ++static int flexcan_chip_disable(struct flexcan_priv *priv)
2349 + {
2350 + struct flexcan_regs __iomem *regs = priv->base;
2351 ++ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
2352 + u32 reg;
2353 +
2354 + reg = flexcan_read(&regs->mcr);
2355 + reg |= FLEXCAN_MCR_MDIS;
2356 + flexcan_write(reg, &regs->mcr);
2357 ++
2358 ++ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
2359 ++ usleep_range(10, 20);
2360 ++
2361 ++ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
2362 ++ return -ETIMEDOUT;
2363 ++
2364 ++ return 0;
2365 + }
2366 +
2367 + static int flexcan_get_berr_counter(const struct net_device *dev,
2368 +@@ -706,7 +740,9 @@ static int flexcan_chip_start(struct net_device *dev)
2369 + u32 reg_mcr, reg_ctrl;
2370 +
2371 + /* enable module */
2372 +- flexcan_chip_enable(priv);
2373 ++ err = flexcan_chip_enable(priv);
2374 ++ if (err)
2375 ++ return err;
2376 +
2377 + /* soft reset */
2378 + flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
2379 +@@ -785,11 +821,9 @@ static int flexcan_chip_start(struct net_device *dev)
2380 + if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
2381 + flexcan_write(0x0, &regs->rxfgmask);
2382 +
2383 +- if (priv->reg_xceiver) {
2384 +- err = regulator_enable(priv->reg_xceiver);
2385 +- if (err)
2386 +- goto out;
2387 +- }
2388 ++ err = flexcan_transceiver_enable(priv);
2389 ++ if (err)
2390 ++ goto out;
2391 +
2392 + /* synchronize with the can bus */
2393 + reg_mcr = flexcan_read(&regs->mcr);
2394 +@@ -824,16 +858,17 @@ static void flexcan_chip_stop(struct net_device *dev)
2395 + struct flexcan_regs __iomem *regs = priv->base;
2396 + u32 reg;
2397 +
2398 +- /* Disable all interrupts */
2399 +- flexcan_write(0, &regs->imask1);
2400 +-
2401 + /* Disable + halt module */
2402 + reg = flexcan_read(&regs->mcr);
2403 + reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
2404 + flexcan_write(reg, &regs->mcr);
2405 +
2406 +- if (priv->reg_xceiver)
2407 +- regulator_disable(priv->reg_xceiver);
2408 ++ /* Disable all interrupts */
2409 ++ flexcan_write(0, &regs->imask1);
2410 ++ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
2411 ++ &regs->ctrl);
2412 ++
2413 ++ flexcan_transceiver_disable(priv);
2414 + priv->can.state = CAN_STATE_STOPPED;
2415 +
2416 + return;
2417 +@@ -863,7 +898,7 @@ static int flexcan_open(struct net_device *dev)
2418 + /* start chip and queuing */
2419 + err = flexcan_chip_start(dev);
2420 + if (err)
2421 +- goto out_close;
2422 ++ goto out_free_irq;
2423 +
2424 + can_led_event(dev, CAN_LED_EVENT_OPEN);
2425 +
2426 +@@ -872,6 +907,8 @@ static int flexcan_open(struct net_device *dev)
2427 +
2428 + return 0;
2429 +
2430 ++ out_free_irq:
2431 ++ free_irq(dev->irq, dev);
2432 + out_close:
2433 + close_candev(dev);
2434 + out_disable_per:
2435 +@@ -942,12 +979,16 @@ static int register_flexcandev(struct net_device *dev)
2436 + goto out_disable_ipg;
2437 +
2438 + /* select "bus clock", chip must be disabled */
2439 +- flexcan_chip_disable(priv);
2440 ++ err = flexcan_chip_disable(priv);
2441 ++ if (err)
2442 ++ goto out_disable_per;
2443 + reg = flexcan_read(&regs->ctrl);
2444 + reg |= FLEXCAN_CTRL_CLK_SRC;
2445 + flexcan_write(reg, &regs->ctrl);
2446 +
2447 +- flexcan_chip_enable(priv);
2448 ++ err = flexcan_chip_enable(priv);
2449 ++ if (err)
2450 ++ goto out_chip_disable;
2451 +
2452 + /* set freeze, halt and activate FIFO, restrict register access */
2453 + reg = flexcan_read(&regs->mcr);
2454 +@@ -964,14 +1005,15 @@ static int register_flexcandev(struct net_device *dev)
2455 + if (!(reg & FLEXCAN_MCR_FEN)) {
2456 + netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
2457 + err = -ENODEV;
2458 +- goto out_disable_per;
2459 ++ goto out_chip_disable;
2460 + }
2461 +
2462 + err = register_candev(dev);
2463 +
2464 +- out_disable_per:
2465 + /* disable core and turn off clocks */
2466 ++ out_chip_disable:
2467 + flexcan_chip_disable(priv);
2468 ++ out_disable_per:
2469 + clk_disable_unprepare(priv->clk_per);
2470 + out_disable_ipg:
2471 + clk_disable_unprepare(priv->clk_ipg);
2472 +@@ -1101,9 +1143,10 @@ static int flexcan_probe(struct platform_device *pdev)
2473 + static int flexcan_remove(struct platform_device *pdev)
2474 + {
2475 + struct net_device *dev = platform_get_drvdata(pdev);
2476 ++ struct flexcan_priv *priv = netdev_priv(dev);
2477 +
2478 + unregister_flexcandev(dev);
2479 +-
2480 ++ netif_napi_del(&priv->napi);
2481 + free_candev(dev);
2482 +
2483 + return 0;
2484 +@@ -1114,8 +1157,11 @@ static int flexcan_suspend(struct device *device)
2485 + {
2486 + struct net_device *dev = dev_get_drvdata(device);
2487 + struct flexcan_priv *priv = netdev_priv(dev);
2488 ++ int err;
2489 +
2490 +- flexcan_chip_disable(priv);
2491 ++ err = flexcan_chip_disable(priv);
2492 ++ if (err)
2493 ++ return err;
2494 +
2495 + if (netif_running(dev)) {
2496 + netif_stop_queue(dev);
2497 +@@ -1136,9 +1182,7 @@ static int flexcan_resume(struct device *device)
2498 + netif_device_attach(dev);
2499 + netif_start_queue(dev);
2500 + }
2501 +- flexcan_chip_enable(priv);
2502 +-
2503 +- return 0;
2504 ++ return flexcan_chip_enable(priv);
2505 + }
2506 + #endif /* CONFIG_PM_SLEEP */
2507 +
2508 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2509 +index c0acf98d1ea5..14a50a11d72e 100644
2510 +--- a/drivers/net/ethernet/broadcom/tg3.c
2511 ++++ b/drivers/net/ethernet/broadcom/tg3.c
2512 +@@ -6813,8 +6813,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
2513 +
2514 + work_mask |= opaque_key;
2515 +
2516 +- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2517 +- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2518 ++ if (desc->err_vlan & RXD_ERR_MASK) {
2519 + drop_it:
2520 + tg3_recycle_rx(tnapi, tpr, opaque_key,
2521 + desc_idx, *post_ptr);
2522 +diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
2523 +index 70257808aa37..ac50e7c9c2b8 100644
2524 +--- a/drivers/net/ethernet/broadcom/tg3.h
2525 ++++ b/drivers/net/ethernet/broadcom/tg3.h
2526 +@@ -2598,7 +2598,11 @@ struct tg3_rx_buffer_desc {
2527 + #define RXD_ERR_TOO_SMALL 0x00400000
2528 + #define RXD_ERR_NO_RESOURCES 0x00800000
2529 + #define RXD_ERR_HUGE_FRAME 0x01000000
2530 +-#define RXD_ERR_MASK 0xffff0000
2531 ++
2532 ++#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
2533 ++ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
2534 ++ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
2535 ++ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
2536 +
2537 + u32 reserved;
2538 + u32 opaque;
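[Note on the tg3 pair above: the blanket RXD_ERR_MASK of 0xffff0000 covered
every status bit in the upper half-word, which is why tg3_rx() needed the extra
RXD_ERR_ODD_NIBBLE_RCVD_MII comparison. With the mask rebuilt as an explicit OR
of the genuinely fatal bits, the receive check collapses to a single test:

    /* Sketch: enumerated mask, no per-bit exception needed. */
    if (desc->err_vlan & RXD_ERR_MASK) /* only fatal error bits remain */
            goto drop_it;

Benign indications such as the odd-nibble bit now fall outside the mask instead
of being filtered out by a second comparison.]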
2539 +diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
2540 +index 3dd39dcfe36b..a12410381cb1 100644
2541 +--- a/drivers/net/ethernet/sfc/ptp.c
2542 ++++ b/drivers/net/ethernet/sfc/ptp.c
2543 +@@ -1360,6 +1360,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
2544 + struct efx_ptp_data *ptp = efx->ptp_data;
2545 + int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
2546 +
2547 ++ if (!ptp) {
2548 ++ if (net_ratelimit())
2549 ++ netif_warn(efx, drv, efx->net_dev,
2550 ++ "Received PTP event but PTP not set up\n");
2551 ++ return;
2552 ++ }
2553 ++
2554 + if (!ptp->enabled)
2555 + return;
2556 +
2557 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2558 +index 7c8343a4f918..10636cbd3807 100644
2559 +--- a/drivers/net/tun.c
2560 ++++ b/drivers/net/tun.c
2561 +@@ -1650,7 +1650,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2562 + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2563 + NETIF_F_HW_VLAN_STAG_TX;
2564 + dev->features = dev->hw_features;
2565 +- dev->vlan_features = dev->features;
2566 ++ dev->vlan_features = dev->features &
2567 ++ ~(NETIF_F_HW_VLAN_CTAG_TX |
2568 ++ NETIF_F_HW_VLAN_STAG_TX);
2569 +
2570 + INIT_LIST_HEAD(&tun->disabled);
2571 + err = tun_attach(tun, file, false);
2572 +diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
2573 +index 5e2bac650bd8..3ecb2133dee6 100644
2574 +--- a/drivers/net/usb/ax88179_178a.c
2575 ++++ b/drivers/net/usb/ax88179_178a.c
2576 +@@ -1031,20 +1031,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
2577 + dev->mii.phy_id = 0x03;
2578 + dev->mii.supports_gmii = 1;
2579 +
2580 +- if (usb_device_no_sg_constraint(dev->udev))
2581 +- dev->can_dma_sg = 1;
2582 +-
2583 + dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2584 + NETIF_F_RXCSUM;
2585 +
2586 + dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2587 + NETIF_F_RXCSUM;
2588 +
2589 +- if (dev->can_dma_sg) {
2590 +- dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
2591 +- dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
2592 +- }
2593 +-
2594 + /* Enable checksum offload */
2595 + *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
2596 + AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
2597 +diff --git a/drivers/net/veth.c b/drivers/net/veth.c
2598 +index eee1f19ef1e9..61c4044f644e 100644
2599 +--- a/drivers/net/veth.c
2600 ++++ b/drivers/net/veth.c
2601 +@@ -269,7 +269,8 @@ static void veth_setup(struct net_device *dev)
2602 + dev->ethtool_ops = &veth_ethtool_ops;
2603 + dev->features |= NETIF_F_LLTX;
2604 + dev->features |= VETH_FEATURES;
2605 +- dev->vlan_features = dev->features;
2606 ++ dev->vlan_features = dev->features &
2607 ++ ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
2608 + dev->destructor = veth_dev_free;
2609 +
2610 + dev->hw_features = VETH_FEATURES;
2611 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2612 +index 8065066a6230..0232156dade3 100644
2613 +--- a/drivers/net/virtio_net.c
2614 ++++ b/drivers/net/virtio_net.c
2615 +@@ -1621,7 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev)
2616 + /* If we can receive ANY GSO packets, we must allocate large ones. */
2617 + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2618 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2619 +- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
2620 ++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2621 ++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2622 + vi->big_packets = true;
2623 +
2624 + if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2625 +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
2626 +index 7e2788c488ed..55d89390b4bc 100644
2627 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c
2628 ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
2629 +@@ -1760,11 +1760,20 @@ vmxnet3_netpoll(struct net_device *netdev)
2630 + {
2631 + struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2632 +
2633 +- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2634 +- vmxnet3_disable_all_intrs(adapter);
2635 +-
2636 +- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
2637 +- vmxnet3_enable_all_intrs(adapter);
2638 ++ switch (adapter->intr.type) {
2639 ++#ifdef CONFIG_PCI_MSI
2640 ++ case VMXNET3_IT_MSIX: {
2641 ++ int i;
2642 ++ for (i = 0; i < adapter->num_rx_queues; i++)
2643 ++ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2644 ++ break;
2645 ++ }
2646 ++#endif
2647 ++ case VMXNET3_IT_MSI:
2648 ++ default:
2649 ++ vmxnet3_intr(0, adapter->netdev);
2650 ++ break;
2651 ++ }
2652 +
2653 + }
2654 + #endif /* CONFIG_NET_POLL_CONTROLLER */
2655 +diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
2656 +index 092b9d412e7f..1078fbd7bda2 100644
2657 +--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
2658 ++++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
2659 +@@ -56,7 +56,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
2660 + {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
2661 + {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2662 + {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
2663 +- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
2664 ++ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
2665 + {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
2666 + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
2667 + {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
2668 +@@ -95,7 +95,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
2669 + {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
2670 + {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2671 + {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
2672 +- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
2673 ++ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
2674 + {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
2675 + };
2676 +
2677 +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
2678 +index a1ab4ff46818..c2fa0e3490c7 100644
2679 +--- a/drivers/net/wireless/ath/ath9k/recv.c
2680 ++++ b/drivers/net/wireless/ath/ath9k/recv.c
2681 +@@ -730,11 +730,18 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
2682 + return NULL;
2683 +
2684 + /*
2685 +- * mark descriptor as zero-length and set the 'more'
2686 +- * flag to ensure that both buffers get discarded
2687 ++ * Re-check previous descriptor, in case it has been filled
2688 ++ * in the mean time.
2689 + */
2690 +- rs->rs_datalen = 0;
2691 +- rs->rs_more = true;
2692 ++ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
2693 ++ if (ret == -EINPROGRESS) {
2694 ++ /*
2695 ++ * mark descriptor as zero-length and set the 'more'
2696 ++ * flag to ensure that both buffers get discarded
2697 ++ */
2698 ++ rs->rs_datalen = 0;
2699 ++ rs->rs_more = true;
2700 ++ }
2701 + }
2702 +
2703 + list_del(&bf->list);
2704 +@@ -1093,32 +1100,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2705 + struct ath_common *common = ath9k_hw_common(ah);
2706 + struct ieee80211_hdr *hdr;
2707 + bool discard_current = sc->rx.discard_next;
2708 +- int ret = 0;
2709 +
2710 + /*
2711 + * Discard corrupt descriptors which are marked in
2712 + * ath_get_next_rx_buf().
2713 + */
2714 +- sc->rx.discard_next = rx_stats->rs_more;
2715 + if (discard_current)
2716 +- return -EINVAL;
2717 ++ goto corrupt;
2718 ++
2719 ++ sc->rx.discard_next = false;
2720 +
2721 + /*
2722 + * Discard zero-length packets.
2723 + */
2724 + if (!rx_stats->rs_datalen) {
2725 + RX_STAT_INC(rx_len_err);
2726 +- return -EINVAL;
2727 ++ goto corrupt;
2728 + }
2729 +
2730 +- /*
2731 +- * rs_status follows rs_datalen so if rs_datalen is too large
2732 +- * we can take a hint that hardware corrupted it, so ignore
2733 +- * those frames.
2734 +- */
2735 ++ /*
2736 ++ * rs_status follows rs_datalen so if rs_datalen is too large
2737 ++ * we can take a hint that hardware corrupted it, so ignore
2738 ++ * those frames.
2739 ++ */
2740 + if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
2741 + RX_STAT_INC(rx_len_err);
2742 +- return -EINVAL;
2743 ++ goto corrupt;
2744 + }
2745 +
2746 + /* Only use status info from the last fragment */
2747 +@@ -1132,10 +1139,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2748 + * This is different from the other corrupt descriptor
2749 + * condition handled above.
2750 + */
2751 +- if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
2752 +- ret = -EINVAL;
2753 +- goto exit;
2754 +- }
2755 ++ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
2756 ++ goto corrupt;
2757 +
2758 + hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
2759 +
2760 +@@ -1151,18 +1156,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2761 + if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
2762 + RX_STAT_INC(rx_spectral);
2763 +
2764 +- ret = -EINVAL;
2765 +- goto exit;
2766 ++ return -EINVAL;
2767 + }
2768 +
2769 + /*
2770 + * everything but the rate is checked here, the rate check is done
2771 + * separately to avoid doing two lookups for a rate for each frame.
2772 + */
2773 +- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
2774 +- ret = -EINVAL;
2775 +- goto exit;
2776 +- }
2777 ++ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
2778 ++ return -EINVAL;
2779 +
2780 + rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
2781 + if (rx_stats->is_mybeacon) {
2782 +@@ -1173,15 +1175,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2783 + /*
2784 + * This shouldn't happen, but have a safety check anyway.
2785 + */
2786 +- if (WARN_ON(!ah->curchan)) {
2787 +- ret = -EINVAL;
2788 +- goto exit;
2789 +- }
2790 ++ if (WARN_ON(!ah->curchan))
2791 ++ return -EINVAL;
2792 +
2793 +- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
2794 +- ret =-EINVAL;
2795 +- goto exit;
2796 +- }
2797 ++ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
2798 ++ return -EINVAL;
2799 +
2800 + ath9k_process_rssi(common, hw, rx_stats, rx_status);
2801 +
2802 +@@ -1196,9 +1194,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2803 + sc->rx.num_pkts++;
2804 + #endif
2805 +
2806 +-exit:
2807 +- sc->rx.discard_next = false;
2808 +- return ret;
2809 ++ return 0;
2810 ++
2811 ++corrupt:
2812 ++ sc->rx.discard_next = rx_stats->rs_more;
2813 ++ return -EINVAL;
2814 + }
2815 +
2816 + static void ath9k_rx_skb_postprocess(struct ath_common *common,
2817 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2818 +index 7fe6b5923a9c..ba39178a94ab 100644
2819 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2820 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2821 +@@ -1457,14 +1457,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
2822 + for (tidno = 0, tid = &an->tid[tidno];
2823 + tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2824 +
2825 +- if (!tid->sched)
2826 +- continue;
2827 +-
2828 + ac = tid->ac;
2829 + txq = ac->txq;
2830 +
2831 + ath_txq_lock(sc, txq);
2832 +
2833 ++ if (!tid->sched) {
2834 ++ ath_txq_unlock(sc, txq);
2835 ++ continue;
2836 ++ }
2837 ++
2838 + buffered = ath_tid_has_buffered(tid);
2839 +
2840 + tid->sched = false;
2841 +@@ -2199,14 +2201,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2842 + txq->stopped = true;
2843 + }
2844 +
2845 ++ if (txctl->an)
2846 ++ tid = ath_get_skb_tid(sc, txctl->an, skb);
2847 ++
2848 + if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
2849 + ath_txq_unlock(sc, txq);
2850 + txq = sc->tx.uapsdq;
2851 + ath_txq_lock(sc, txq);
2852 + } else if (txctl->an &&
2853 + ieee80211_is_data_present(hdr->frame_control)) {
2854 +- tid = ath_get_skb_tid(sc, txctl->an, skb);
2855 +-
2856 + WARN_ON(tid->ac->txq != txctl->txq);
2857 +
2858 + if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
2859 +diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
2860 +index cae4d3182e33..d6e6405a9b07 100644
2861 +--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
2862 ++++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
2863 +@@ -704,6 +704,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2864 + return ret;
2865 + }
2866 +
2867 ++static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
2868 ++{
2869 ++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
2870 ++ return false;
2871 ++ return true;
2872 ++}
2873 ++
2874 ++static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
2875 ++{
2876 ++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
2877 ++ return false;
2878 ++ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
2879 ++ return true;
2880 ++
2881 ++ /* disabled by default */
2882 ++ return false;
2883 ++}
2884 ++
2885 + static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2886 + struct ieee80211_vif *vif,
2887 + enum ieee80211_ampdu_mlme_action action,
2888 +@@ -725,7 +743,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2889 +
2890 + switch (action) {
2891 + case IEEE80211_AMPDU_RX_START:
2892 +- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
2893 ++ if (!iwl_enable_rx_ampdu(priv->cfg))
2894 + break;
2895 + IWL_DEBUG_HT(priv, "start Rx\n");
2896 + ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
2897 +@@ -737,7 +755,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2898 + case IEEE80211_AMPDU_TX_START:
2899 + if (!priv->trans->ops->txq_enable)
2900 + break;
2901 +- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
2902 ++ if (!iwl_enable_tx_ampdu(priv->cfg))
2903 + break;
2904 + IWL_DEBUG_HT(priv, "start Tx\n");
2905 + ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
2906 +diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
2907 +index c3c13ce96eb0..e800002d6158 100644
2908 +--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
2909 ++++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
2910 +@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
2911 + sizeof(priv->tid_data[sta_id][tid]));
2912 +
2913 + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
2914 ++ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
2915 +
2916 + priv->num_stations--;
2917 +
2918 +diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
2919 +index 1fef5240e6ad..e219e761f48b 100644
2920 +--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
2921 ++++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
2922 +@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
2923 + struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
2924 + struct iwl_ht_agg *agg;
2925 + struct sk_buff_head reclaimed_skbs;
2926 +- struct ieee80211_tx_info *info;
2927 +- struct ieee80211_hdr *hdr;
2928 + struct sk_buff *skb;
2929 + int sta_id;
2930 + int tid;
2931 +@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
2932 + freed = 0;
2933 +
2934 + skb_queue_walk(&reclaimed_skbs, skb) {
2935 +- hdr = (struct ieee80211_hdr *)skb->data;
2936 ++ struct ieee80211_hdr *hdr = (void *)skb->data;
2937 ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2938 +
2939 + if (ieee80211_is_data_qos(hdr->frame_control))
2940 + freed++;
2941 + else
2942 + WARN_ON_ONCE(1);
2943 +
2944 +- info = IEEE80211_SKB_CB(skb);
2945 + iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2946 +
2947 ++ memset(&info->status, 0, sizeof(info->status));
2948 ++ /* Packet was transmitted successfully, failures come as single
2949 ++ * frames because before failing a frame the firmware transmits
2950 ++ * it without aggregation at least once.
2951 ++ */
2952 ++ info->flags |= IEEE80211_TX_STAT_ACK;
2953 ++
2954 + if (freed == 1) {
2955 + /* this is the first skb we deliver in this batch */
2956 + /* put the rate scaling data there */
2957 + info = IEEE80211_SKB_CB(skb);
2958 + memset(&info->status, 0, sizeof(info->status));
2959 +- info->flags |= IEEE80211_TX_STAT_ACK;
2960 + info->flags |= IEEE80211_TX_STAT_AMPDU;
2961 + info->status.ampdu_ack_len = ba_resp->txed_2_done;
2962 + info->status.ampdu_len = ba_resp->txed;
2963 +diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
2964 +index 99e1da3123c9..2cdbd940575e 100644
2965 +--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
2966 ++++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
2967 +@@ -1210,7 +1210,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
2968 + MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2969 + module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
2970 + MODULE_PARM_DESC(11n_disable,
2971 +- "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
2972 ++ "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
2973 + module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
2974 + int, S_IRUGO);
2975 + MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
2976 +diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
2977 +index a1f580c0c6c6..4c6cff4218cb 100644
2978 +--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
2979 ++++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
2980 +@@ -79,9 +79,12 @@ enum iwl_power_level {
2981 + IWL_POWER_NUM
2982 + };
2983 +
2984 +-#define IWL_DISABLE_HT_ALL BIT(0)
2985 +-#define IWL_DISABLE_HT_TXAGG BIT(1)
2986 +-#define IWL_DISABLE_HT_RXAGG BIT(2)
2987 ++enum iwl_disable_11n {
2988 ++ IWL_DISABLE_HT_ALL = BIT(0),
2989 ++ IWL_DISABLE_HT_TXAGG = BIT(1),
2990 ++ IWL_DISABLE_HT_RXAGG = BIT(2),
2991 ++ IWL_ENABLE_HT_TXAGG = BIT(3),
2992 ++};
2993 +
2994 + /**
2995 + * struct iwl_mod_params
2996 +@@ -90,7 +93,7 @@ enum iwl_power_level {
2997 + *
2998 + * @sw_crypto: using hardware encryption, default = 0
2999 + * @disable_11n: disable 11n capabilities, default = 0,
3000 +- * use IWL_DISABLE_HT_* constants
3001 ++ * use IWL_[DIS,EN]ABLE_HT_* constants
3002 + * @amsdu_size_8K: enable 8K amsdu size, default = 0
3003 + * @restart_fw: restart firmware, default = 1
3004 + * @wd_disable: enable stuck queue check, default = 0
3005 +diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3006 +index 9833cdf6177c..5f6fd44e72f1 100644
3007 +--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3008 ++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3009 +@@ -297,6 +297,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
3010 + ieee80211_free_txskb(hw, skb);
3011 + }
3012 +
3013 ++static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
3014 ++{
3015 ++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
3016 ++ return false;
3017 ++ return true;
3018 ++}
3019 ++
3020 ++static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
3021 ++{
3022 ++ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
3023 ++ return false;
3024 ++ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
3025 ++ return true;
3026 ++
3027 ++ /* enabled by default */
3028 ++ return true;
3029 ++}
3030 ++
3031 + static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
3032 + struct ieee80211_vif *vif,
3033 + enum ieee80211_ampdu_mlme_action action,
3034 +@@ -316,7 +334,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
3035 +
3036 + switch (action) {
3037 + case IEEE80211_AMPDU_RX_START:
3038 +- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
3039 ++ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
3040 + ret = -EINVAL;
3041 + break;
3042 + }
3043 +@@ -326,7 +344,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
3044 + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
3045 + break;
3046 + case IEEE80211_AMPDU_TX_START:
3047 +- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
3048 ++ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
3049 + ret = -EINVAL;
3050 + break;
3051 + }
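
The two helpers introduced above give the new IWL_ENABLE_HT_TXAGG bit a defined precedence: an explicit disable bit always wins, the enable bit is consulted only afterwards, and TX aggregation stays on by default. The same decision logic as a compilable stand-alone sketch — the enum values are copied from the hunk (with BIT(n) spelled as 1 << n), everything else is illustrative:

#include <stdbool.h>
#include <assert.h>

enum iwl_disable_11n {
	IWL_DISABLE_HT_ALL   = 1 << 0,
	IWL_DISABLE_HT_TXAGG = 1 << 1,
	IWL_DISABLE_HT_RXAGG = 1 << 2,
	IWL_ENABLE_HT_TXAGG  = 1 << 3,
};

static bool tx_ampdu_enabled(unsigned disable_11n)
{
	if (disable_11n & IWL_DISABLE_HT_TXAGG)
		return false;		/* explicit disable wins */
	if (disable_11n & IWL_ENABLE_HT_TXAGG)
		return true;
	return true;			/* enabled by default */
}

int main(void)
{
	assert(tx_ampdu_enabled(0));
	assert(!tx_ampdu_enabled(IWL_DISABLE_HT_TXAGG | IWL_ENABLE_HT_TXAGG));
	return 0;
}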
3052 +diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
3053 +index b0389279cc1e..c86663ebb493 100644
3054 +--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
3055 ++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
3056 +@@ -152,7 +152,7 @@ enum iwl_power_scheme {
3057 + IWL_POWER_SCHEME_LP
3058 + };
3059 +
3060 +-#define IWL_CONN_MAX_LISTEN_INTERVAL 70
3061 ++#define IWL_CONN_MAX_LISTEN_INTERVAL 10
3062 + #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
3063 + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
3064 + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
3065 +diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
3066 +index e05440d90319..f41add9c8093 100644
3067 +--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
3068 ++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
3069 +@@ -819,16 +819,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
3070 + struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
3071 + struct sk_buff_head reclaimed_skbs;
3072 + struct iwl_mvm_tid_data *tid_data;
3073 +- struct ieee80211_tx_info *info;
3074 + struct ieee80211_sta *sta;
3075 + struct iwl_mvm_sta *mvmsta;
3076 +- struct ieee80211_hdr *hdr;
3077 + struct sk_buff *skb;
3078 + int sta_id, tid, freed;
3079 +-
3080 + /* "flow" corresponds to Tx queue */
3081 + u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
3082 +-
3083 + /* "ssn" is start of block-ack Tx window, corresponds to index
3084 + * (in Tx queue's circular buffer) of first TFD/frame in window */
3085 + u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
3086 +@@ -885,22 +881,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
3087 + freed = 0;
3088 +
3089 + skb_queue_walk(&reclaimed_skbs, skb) {
3090 +- hdr = (struct ieee80211_hdr *)skb->data;
3091 ++ struct ieee80211_hdr *hdr = (void *)skb->data;
3092 ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3093 +
3094 + if (ieee80211_is_data_qos(hdr->frame_control))
3095 + freed++;
3096 + else
3097 + WARN_ON_ONCE(1);
3098 +
3099 +- info = IEEE80211_SKB_CB(skb);
3100 + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
3101 +
3102 ++ memset(&info->status, 0, sizeof(info->status));
3103 ++	/* Packet was transmitted successfully; failures come as single
3104 ++	 * frames because, before failing a frame, the firmware transmits
3105 ++	 * it without aggregation at least once.
3106 ++ */
3107 ++ info->flags |= IEEE80211_TX_STAT_ACK;
3108 ++
3109 + if (freed == 1) {
3110 + /* this is the first skb we deliver in this batch */
3111 + /* put the rate scaling data there */
3112 +- info = IEEE80211_SKB_CB(skb);
3113 +- memset(&info->status, 0, sizeof(info->status));
3114 +- info->flags |= IEEE80211_TX_STAT_ACK;
3115 + info->flags |= IEEE80211_TX_STAT_AMPDU;
3116 + info->status.ampdu_ack_len = ba_notif->txed_2_done;
3117 + info->status.ampdu_len = ba_notif->txed;
3118 +diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
3119 +index 5e0eec4d71c7..5d9a8084665d 100644
3120 +--- a/drivers/net/wireless/mwifiex/11ac.c
3121 ++++ b/drivers/net/wireless/mwifiex/11ac.c
3122 +@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
3123 + vht_cap->header.len =
3124 + cpu_to_le16(sizeof(struct ieee80211_vht_cap));
3125 + memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
3126 +- (u8 *)bss_desc->bcn_vht_cap +
3127 +- sizeof(struct ieee_types_header),
3128 ++ (u8 *)bss_desc->bcn_vht_cap,
3129 + le16_to_cpu(vht_cap->header.len));
3130 +
3131 + mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
3132 +diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
3133 +index 0b803c05cab3..983c10c49658 100644
3134 +--- a/drivers/net/wireless/mwifiex/11n.c
3135 ++++ b/drivers/net/wireless/mwifiex/11n.c
3136 +@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
3137 + ht_cap->header.len =
3138 + cpu_to_le16(sizeof(struct ieee80211_ht_cap));
3139 + memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
3140 +- (u8 *) bss_desc->bcn_ht_cap +
3141 +- sizeof(struct ieee_types_header),
3142 ++ (u8 *)bss_desc->bcn_ht_cap,
3143 + le16_to_cpu(ht_cap->header.len));
3144 +
3145 + mwifiex_fill_cap_info(priv, radio_type, ht_cap);
3146 +diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
3147 +index 52da8ee7599a..cb84edcd794b 100644
3148 +--- a/drivers/net/wireless/mwifiex/pcie.c
3149 ++++ b/drivers/net/wireless/mwifiex/pcie.c
3150 +@@ -1212,6 +1212,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
3151 + rd_index = card->rxbd_rdptr & reg->rx_mask;
3152 + skb_data = card->rx_buf_list[rd_index];
3153 +
3154 ++	/* If skb allocation failed earlier for this Rx packet,
3155 ++	 * rx_buf_list[rd_index] would have been left NULL.
3156 ++ */
3157 ++ if (!skb_data)
3158 ++ return -ENOMEM;
3159 ++
3160 + MWIFIEX_SKB_PACB(skb_data, &buf_pa);
3161 + pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
3162 + PCI_DMA_FROMDEVICE);
3163 +@@ -1526,6 +1532,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
3164 + if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
3165 + mwifiex_process_sleep_confirm_resp(adapter, skb->data,
3166 + skb->len);
3167 ++ mwifiex_pcie_enable_host_int(adapter);
3168 ++ if (mwifiex_write_reg(adapter,
3169 ++ PCIE_CPU_INT_EVENT,
3170 ++ CPU_INTR_SLEEP_CFM_DONE)) {
3171 ++ dev_warn(adapter->dev,
3172 ++ "Write register failed\n");
3173 ++ return -1;
3174 ++ }
3175 + while (reg->sleep_cookie && (count++ < 10) &&
3176 + mwifiex_pcie_ok_to_access_hw(adapter))
3177 + usleep_range(50, 60);
3178 +@@ -1994,23 +2008,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
3179 + adapter->int_status |= pcie_ireg;
3180 + spin_unlock_irqrestore(&adapter->int_lock, flags);
3181 +
3182 +- if (pcie_ireg & HOST_INTR_CMD_DONE) {
3183 +- if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
3184 +- (adapter->ps_state == PS_STATE_SLEEP)) {
3185 +- mwifiex_pcie_enable_host_int(adapter);
3186 +- if (mwifiex_write_reg(adapter,
3187 +- PCIE_CPU_INT_EVENT,
3188 +- CPU_INTR_SLEEP_CFM_DONE)
3189 +- ) {
3190 +- dev_warn(adapter->dev,
3191 +- "Write register failed\n");
3192 +- return;
3193 +-
3194 +- }
3195 +- }
3196 +- } else if (!adapter->pps_uapsd_mode &&
3197 +- adapter->ps_state == PS_STATE_SLEEP &&
3198 +- mwifiex_pcie_ok_to_access_hw(adapter)) {
3199 ++ if (!adapter->pps_uapsd_mode &&
3200 ++ adapter->ps_state == PS_STATE_SLEEP &&
3201 ++ mwifiex_pcie_ok_to_access_hw(adapter)) {
3202 + /* Potentially for PCIe we could get other
3203 + * interrupts like shared. Don't change power
3204 + * state until cookie is set */
3205 +diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
3206 +index ba48e64673d9..a17d4675ddc0 100644
3207 +--- a/drivers/net/wireless/mwifiex/scan.c
3208 ++++ b/drivers/net/wireless/mwifiex/scan.c
3209 +@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
3210 + curr_bss->ht_info_offset);
3211 +
3212 + if (curr_bss->bcn_vht_cap)
3213 +- curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
3214 +- curr_bss->vht_cap_offset);
3215 ++ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
3216 ++ curr_bss->vht_cap_offset);
3217 +
3218 + if (curr_bss->bcn_vht_oper)
3219 +- curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
3220 +- curr_bss->vht_info_offset);
3221 ++ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
3222 ++ curr_bss->vht_info_offset);
3223 +
3224 + if (curr_bss->bcn_bss_co_2040)
3225 + curr_bss->bcn_bss_co_2040 =
3226 +diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
3227 +index 1c70b8d09227..9d0b0c442c95 100644
3228 +--- a/drivers/net/wireless/mwifiex/usb.c
3229 ++++ b/drivers/net/wireless/mwifiex/usb.c
3230 +@@ -512,13 +512,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
3231 + MWIFIEX_BSS_ROLE_ANY),
3232 + MWIFIEX_ASYNC_CMD);
3233 +
3234 +-#ifdef CONFIG_PM
3235 +- /* Resume handler may be called due to remote wakeup,
3236 +- * force to exit suspend anyway
3237 +- */
3238 +- usb_disable_autosuspend(card->udev);
3239 +-#endif /* CONFIG_PM */
3240 +-
3241 + return 0;
3242 + }
3243 +
3244 +@@ -555,7 +548,6 @@ static struct usb_driver mwifiex_usb_driver = {
3245 + .id_table = mwifiex_usb_table,
3246 + .suspend = mwifiex_usb_suspend,
3247 + .resume = mwifiex_usb_resume,
3248 +- .supports_autosuspend = 1,
3249 + };
3250 +
3251 + static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
3252 +diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
3253 +index 95fa3599b407..35f881585962 100644
3254 +--- a/drivers/net/wireless/mwifiex/wmm.c
3255 ++++ b/drivers/net/wireless/mwifiex/wmm.c
3256 +@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
3257 + mwifiex_wmm_delete_all_ralist(priv);
3258 + memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
3259 +
3260 +- if (priv->adapter->if_ops.clean_pcie_ring)
3261 ++ if (priv->adapter->if_ops.clean_pcie_ring &&
3262 ++ !priv->adapter->surprise_removed)
3263 + priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
3264 + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
3265 + }
3266 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3267 +index 863bc4bb4806..9fc3f1f4557b 100644
3268 +--- a/drivers/pci/pci.c
3269 ++++ b/drivers/pci/pci.c
3270 +@@ -1131,6 +1131,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
3271 + return err;
3272 + pci_fixup_device(pci_fixup_enable, dev);
3273 +
3274 ++ if (dev->msi_enabled || dev->msix_enabled)
3275 ++ return 0;
3276 ++
3277 + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
3278 + if (pin) {
3279 + pci_read_config_word(dev, PCI_COMMAND, &cmd);
3280 +@@ -1166,10 +1169,8 @@ static void pci_enable_bridge(struct pci_dev *dev)
3281 + pci_enable_bridge(dev->bus->self);
3282 +
3283 + if (pci_is_enabled(dev)) {
3284 +- if (!dev->is_busmaster) {
3285 +- dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
3286 ++ if (!dev->is_busmaster)
3287 + pci_set_master(dev);
3288 +- }
3289 + return;
3290 + }
3291 +
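
The pci.c hunk returns early from do_pci_enable_device() once MSI or MSI-X is enabled, so the legacy INTx disable bit in PCI_COMMAND is no longer cleared behind the back of a driver that has already switched to message-signalled interrupts. Schematically, with illustrative names standing in for the kernel API:

#include <stdbool.h>
#include <stdint.h>

#define CMD_INTX_DISABLE (1u << 10)	/* stand-in for PCI_COMMAND_INTX_DISABLE */

struct pci_dev_state {
	bool msi_enabled, msix_enabled;
	uint8_t irq_pin;	/* PCI_INTERRUPT_PIN */
	uint16_t command;	/* PCI_COMMAND */
};

static int finish_enable(struct pci_dev_state *dev)
{
	/* with MSI/MSI-X active the INTx line is unused; leave the
	 * INTX_DISABLE bit alone instead of re-enabling legacy IRQs */
	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	if (dev->irq_pin && (dev->command & CMD_INTX_DISABLE))
		dev->command &= ~CMD_INTX_DISABLE;
	return 0;
}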
3292 +diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
3293 +index 6ebf3067bde4..b2dcde123e56 100644
3294 +--- a/drivers/pinctrl/pinctrl-sunxi.c
3295 ++++ b/drivers/pinctrl/pinctrl-sunxi.c
3296 +@@ -14,6 +14,7 @@
3297 + #include <linux/clk.h>
3298 + #include <linux/gpio.h>
3299 + #include <linux/irqdomain.h>
3300 ++#include <linux/irqchip/chained_irq.h>
3301 + #include <linux/module.h>
3302 + #include <linux/of.h>
3303 + #include <linux/of_address.h>
3304 +@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
3305 +
3306 + static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
3307 + {
3308 ++ struct irq_chip *chip = irq_get_chip(irq);
3309 + struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
3310 + const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
3311 +
3312 +@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
3313 + if (reg) {
3314 + int irqoffset;
3315 +
3316 ++ chained_irq_enter(chip, desc);
3317 + for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
3318 + int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
3319 + generic_handle_irq(pin_irq);
3320 + }
3321 ++ chained_irq_exit(chip, desc);
3322 + }
3323 + }
3324 +
3325 +diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
3326 +index 167f3d00c916..66977ebf13b3 100644
3327 +--- a/drivers/pnp/pnpacpi/rsparser.c
3328 ++++ b/drivers/pnp/pnpacpi/rsparser.c
3329 +@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
3330 + struct resource r = {0};
3331 + int i, flags;
3332 +
3333 +- if (acpi_dev_resource_memory(res, &r)
3334 +- || acpi_dev_resource_io(res, &r)
3335 +- || acpi_dev_resource_address_space(res, &r)
3336 ++ if (acpi_dev_resource_address_space(res, &r)
3337 + || acpi_dev_resource_ext_address_space(res, &r)) {
3338 + pnp_add_resource(dev, &r);
3339 + return AE_OK;
3340 +@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
3341 + }
3342 +
3343 + switch (res->type) {
3344 ++ case ACPI_RESOURCE_TYPE_MEMORY24:
3345 ++ case ACPI_RESOURCE_TYPE_MEMORY32:
3346 ++ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
3347 ++ if (acpi_dev_resource_memory(res, &r))
3348 ++ pnp_add_resource(dev, &r);
3349 ++ break;
3350 ++ case ACPI_RESOURCE_TYPE_IO:
3351 ++ case ACPI_RESOURCE_TYPE_FIXED_IO:
3352 ++ if (acpi_dev_resource_io(res, &r))
3353 ++ pnp_add_resource(dev, &r);
3354 ++ break;
3355 + case ACPI_RESOURCE_TYPE_DMA:
3356 + dma = &res->data.dma;
3357 + if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
3358 +diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
3359 +index b4b0d83f9ef6..7061ac0ad428 100644
3360 +--- a/drivers/rapidio/devices/tsi721.h
3361 ++++ b/drivers/rapidio/devices/tsi721.h
3362 +@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
3363 + struct list_head free_list;
3364 + dma_cookie_t completed_cookie;
3365 + struct tasklet_struct tasklet;
3366 ++ bool active;
3367 + };
3368 +
3369 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
3370 +diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
3371 +index 502663f5f7c6..91245f5dbe81 100644
3372 +--- a/drivers/rapidio/devices/tsi721_dma.c
3373 ++++ b/drivers/rapidio/devices/tsi721_dma.c
3374 +@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
3375 + {
3376 + /* Disable BDMA channel interrupts */
3377 + iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
3378 +-
3379 +- tasklet_schedule(&bdma_chan->tasklet);
3380 ++ if (bdma_chan->active)
3381 ++ tasklet_schedule(&bdma_chan->tasklet);
3382 + }
3383 +
3384 + #ifdef CONFIG_PCI_MSI
3385 +@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
3386 + }
3387 + #endif /* CONFIG_PCI_MSI */
3388 +
3389 +- tasklet_enable(&bdma_chan->tasklet);
3390 ++ bdma_chan->active = true;
3391 + tsi721_bdma_interrupt_enable(bdma_chan, 1);
3392 +
3393 + return bdma_chan->bd_num - 1;
3394 +@@ -576,9 +576,7 @@ err_out:
3395 + static void tsi721_free_chan_resources(struct dma_chan *dchan)
3396 + {
3397 + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
3398 +-#ifdef CONFIG_PCI_MSI
3399 + struct tsi721_device *priv = to_tsi721(dchan->device);
3400 +-#endif
3401 + LIST_HEAD(list);
3402 +
3403 + dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
3404 +@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
3405 + BUG_ON(!list_empty(&bdma_chan->active_list));
3406 + BUG_ON(!list_empty(&bdma_chan->queue));
3407 +
3408 +- tasklet_disable(&bdma_chan->tasklet);
3409 ++ tsi721_bdma_interrupt_enable(bdma_chan, 0);
3410 ++ bdma_chan->active = false;
3411 ++
3412 ++#ifdef CONFIG_PCI_MSI
3413 ++ if (priv->flags & TSI721_USING_MSIX) {
3414 ++ synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
3415 ++ bdma_chan->id].vector);
3416 ++ synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
3417 ++ bdma_chan->id].vector);
3418 ++ } else
3419 ++#endif
3420 ++ synchronize_irq(priv->pdev->irq);
3421 ++
3422 ++ tasklet_kill(&bdma_chan->tasklet);
3423 +
3424 + spin_lock_bh(&bdma_chan->lock);
3425 + list_splice_init(&bdma_chan->free_list, &list);
3426 + spin_unlock_bh(&bdma_chan->lock);
3427 +
3428 +- tsi721_bdma_interrupt_enable(bdma_chan, 0);
3429 +-
3430 + #ifdef CONFIG_PCI_MSI
3431 + if (priv->flags & TSI721_USING_MSIX) {
3432 + free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
3433 +@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
3434 + bdma_chan->dchan.cookie = 1;
3435 + bdma_chan->dchan.chan_id = i;
3436 + bdma_chan->id = i;
3437 ++ bdma_chan->active = false;
3438 +
3439 + spin_lock_init(&bdma_chan->lock);
3440 +
3441 +@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
3442 +
3443 + tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
3444 + (unsigned long)bdma_chan);
3445 +- tasklet_disable(&bdma_chan->tasklet);
3446 + list_add_tail(&bdma_chan->dchan.device_node,
3447 + &mport->dma.channels);
3448 + }
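
The tsi721 change trades tasklet_disable()/tasklet_enable() for an "active" flag tested in the interrupt path, and the teardown ordering is what makes it safe: clear the flag so no new tasklet gets scheduled, synchronize_irq() to wait out handlers already running, then tasklet_kill(). A compilable sketch of that ordering, with the kernel primitives abstracted into callbacks (the helper names here are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

struct bdma_chan_state {
	atomic_bool active;
};

/* interrupt path: only schedule deferred work while the channel lives */
static void on_interrupt(struct bdma_chan_state *c,
			 void (*schedule_tasklet)(void))
{
	if (atomic_load(&c->active))
		schedule_tasklet();
}

/* teardown: stop new work first, then drain what is already in flight */
static void quiesce(struct bdma_chan_state *c,
		    void (*sync_irq)(void), void (*kill_tasklet)(void))
{
	atomic_store(&c->active, false);	/* no new scheduling */
	sync_irq();				/* running handlers finish */
	kill_tasklet();				/* now safe to kill */
}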
3449 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
3450 +index a01b8b3b70ca..d97fbf4eb65b 100644
3451 +--- a/drivers/regulator/core.c
3452 ++++ b/drivers/regulator/core.c
3453 +@@ -923,6 +923,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
3454 + return 0;
3455 + }
3456 +
3457 ++static int _regulator_do_enable(struct regulator_dev *rdev);
3458 ++
3459 + /**
3460 + * set_machine_constraints - sets regulator constraints
3461 + * @rdev: regulator source
3462 +@@ -979,10 +981,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
3463 + /* If the constraints say the regulator should be on at this point
3464 + * and we have control then make sure it is enabled.
3465 + */
3466 +- if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
3467 +- ops->enable) {
3468 +- ret = ops->enable(rdev);
3469 +- if (ret < 0) {
3470 ++ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
3471 ++ ret = _regulator_do_enable(rdev);
3472 ++ if (ret < 0 && ret != -EINVAL) {
3473 + rdev_err(rdev, "failed to enable\n");
3474 + goto out;
3475 + }
3476 +@@ -3571,9 +3572,8 @@ int regulator_suspend_finish(void)
3477 + struct regulator_ops *ops = rdev->desc->ops;
3478 +
3479 + mutex_lock(&rdev->mutex);
3480 +- if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
3481 +- ops->enable) {
3482 +- error = ops->enable(rdev);
3483 ++ if (rdev->use_count > 0 || rdev->constraints->always_on) {
3484 ++ error = _regulator_do_enable(rdev);
3485 + if (error)
3486 + ret = error;
3487 + } else {
3488 +diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
3489 +index f93cc32eb818..71e974738014 100644
3490 +--- a/drivers/s390/char/fs3270.c
3491 ++++ b/drivers/s390/char/fs3270.c
3492 +@@ -564,6 +564,7 @@ static void __exit
3493 + fs3270_exit(void)
3494 + {
3495 + raw3270_unregister_notifier(&fs3270_notifier);
3496 ++ device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
3497 + __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
3498 + }
3499 +
3500 +diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
3501 +index 4911310a38f5..22a9bb1abae1 100644
3502 +--- a/drivers/scsi/isci/host.h
3503 ++++ b/drivers/scsi/isci/host.h
3504 +@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
3505 + }
3506 +
3507 + #define for_each_isci_host(id, ihost, pdev) \
3508 +- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
3509 +- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
3510 +- ihost = to_pci_info(pdev)->hosts[++id])
3511 ++ for (id = 0; id < SCI_MAX_CONTROLLERS && \
3512 ++ (ihost = to_pci_info(pdev)->hosts[id]); id++)
3513 +
3514 + static inline void wait_for_start(struct isci_host *ihost)
3515 + {
3516 +diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
3517 +index 85c77f6b802b..ac879745ef80 100644
3518 +--- a/drivers/scsi/isci/port_config.c
3519 ++++ b/drivers/scsi/isci/port_config.c
3520 +@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
3521 + SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
3522 + } else {
3523 + /* the phy is already the part of the port */
3524 +- u32 port_state = iport->sm.current_state_id;
3525 +-
3526 +- /* if the PORT'S state is resetting then the link up is from
3527 +- * port hard reset in this case, we need to tell the port
3528 +- * that link up is recieved
3529 +- */
3530 +- BUG_ON(port_state != SCI_PORT_RESETTING);
3531 + port_agent->phy_ready_mask |= 1 << phy_index;
3532 + sci_port_link_up(iport, iphy);
3533 + }
3534 +diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
3535 +index 0d30ca849e8f..5d6fda72d659 100644
3536 +--- a/drivers/scsi/isci/task.c
3537 ++++ b/drivers/scsi/isci/task.c
3538 +@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
3539 + /* XXX: need to cleanup any ireqs targeting this
3540 + * domain_device
3541 + */
3542 +- ret = TMF_RESP_FUNC_COMPLETE;
3543 ++ ret = -ENODEV;
3544 + goto out;
3545 + }
3546 +
3547 +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
3548 +index 93db74ef3461..43acfce3a435 100644
3549 +--- a/drivers/scsi/qla2xxx/qla_def.h
3550 ++++ b/drivers/scsi/qla2xxx/qla_def.h
3551 +@@ -2993,8 +2993,7 @@ struct qla_hw_data {
3552 + IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
3553 + IS_QLA8044(ha))
3554 + #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3555 +-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
3556 +- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
3557 ++#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
3558 + #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3559 + #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3560 + #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
3561 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3562 +index ff9c86b1a0d8..e32fccd6580c 100644
3563 +--- a/drivers/scsi/qla2xxx/qla_isr.c
3564 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
3565 +@@ -2829,6 +2829,7 @@ static int
3566 + qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3567 + {
3568 + #define MIN_MSIX_COUNT 2
3569 ++#define ATIO_VECTOR 2
3570 + int i, ret;
3571 + struct msix_entry *entries;
3572 + struct qla_msix_entry *qentry;
3573 +@@ -2885,34 +2886,47 @@ msix_failed:
3574 + }
3575 +
3576 + /* Enable MSI-X vectors for the base queue */
3577 +- for (i = 0; i < ha->msix_count; i++) {
3578 ++ for (i = 0; i < 2; i++) {
3579 + qentry = &ha->msix_entries[i];
3580 +- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3581 +- ret = request_irq(qentry->vector,
3582 +- qla83xx_msix_entries[i].handler,
3583 +- 0, qla83xx_msix_entries[i].name, rsp);
3584 +- } else if (IS_P3P_TYPE(ha)) {
3585 ++ if (IS_P3P_TYPE(ha))
3586 + ret = request_irq(qentry->vector,
3587 + qla82xx_msix_entries[i].handler,
3588 + 0, qla82xx_msix_entries[i].name, rsp);
3589 +- } else {
3590 ++ else
3591 + ret = request_irq(qentry->vector,
3592 + msix_entries[i].handler,
3593 + 0, msix_entries[i].name, rsp);
3594 +- }
3595 +- if (ret) {
3596 +- ql_log(ql_log_fatal, vha, 0x00cb,
3597 +- "MSI-X: unable to register handler -- %x/%d.\n",
3598 +- qentry->vector, ret);
3599 +- qla24xx_disable_msix(ha);
3600 +- ha->mqenable = 0;
3601 +- goto msix_out;
3602 +- }
3603 ++ if (ret)
3604 ++ goto msix_register_fail;
3605 + qentry->have_irq = 1;
3606 + qentry->rsp = rsp;
3607 + rsp->msix = qentry;
3608 + }
3609 +
3610 ++ /*
3611 ++	 * If target mode is enabled, also request the vector for the ATIO
3612 ++ * queue.
3613 ++ */
3614 ++ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3615 ++ qentry = &ha->msix_entries[ATIO_VECTOR];
3616 ++ ret = request_irq(qentry->vector,
3617 ++ qla83xx_msix_entries[ATIO_VECTOR].handler,
3618 ++ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
3619 ++ qentry->have_irq = 1;
3620 ++ qentry->rsp = rsp;
3621 ++ rsp->msix = qentry;
3622 ++ }
3623 ++
3624 ++msix_register_fail:
3625 ++ if (ret) {
3626 ++ ql_log(ql_log_fatal, vha, 0x00cb,
3627 ++ "MSI-X: unable to register handler -- %x/%d.\n",
3628 ++ qentry->vector, ret);
3629 ++ qla24xx_disable_msix(ha);
3630 ++ ha->mqenable = 0;
3631 ++ goto msix_out;
3632 ++ }
3633 ++
3634 + /* Enable MSI-X vector for response queue update for queue 0 */
3635 + if (IS_QLA83XX(ha)) {
3636 + if (ha->msixbase && ha->mqiobase &&
3637 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
3638 +index 17d740427240..9969fa1ef7c4 100644
3639 +--- a/drivers/scsi/storvsc_drv.c
3640 ++++ b/drivers/scsi/storvsc_drv.c
3641 +@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
3642 + {
3643 + struct stor_mem_pools *memp = sdevice->hostdata;
3644 +
3645 ++ if (!memp)
3646 ++ return;
3647 ++
3648 + mempool_destroy(memp->request_mempool);
3649 + kmem_cache_destroy(memp->request_pool);
3650 + kfree(memp);
3651 +diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
3652 +index 37bad952ab38..05dd69212e32 100644
3653 +--- a/drivers/spi/spi-ath79.c
3654 ++++ b/drivers/spi/spi-ath79.c
3655 +@@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
3656 +
3657 + flags = GPIOF_DIR_OUT;
3658 + if (spi->mode & SPI_CS_HIGH)
3659 +- flags |= GPIOF_INIT_HIGH;
3660 +- else
3661 + flags |= GPIOF_INIT_LOW;
3662 ++ else
3663 ++ flags |= GPIOF_INIT_HIGH;
3664 +
3665 + status = gpio_request_one(cdata->gpio, flags,
3666 + dev_name(&spi->dev));
3667 +diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
3668 +index cc5b75d10c38..524d112d5369 100644
3669 +--- a/drivers/spi/spi-coldfire-qspi.c
3670 ++++ b/drivers/spi/spi-coldfire-qspi.c
3671 +@@ -539,7 +539,8 @@ static int mcfqspi_resume(struct device *dev)
3672 + #ifdef CONFIG_PM_RUNTIME
3673 + static int mcfqspi_runtime_suspend(struct device *dev)
3674 + {
3675 +- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
3676 ++ struct spi_master *master = dev_get_drvdata(dev);
3677 ++ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
3678 +
3679 + clk_disable(mcfqspi->clk);
3680 +
3681 +@@ -548,7 +549,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
3682 +
3683 + static int mcfqspi_runtime_resume(struct device *dev)
3684 + {
3685 +- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
3686 ++ struct spi_master *master = dev_get_drvdata(dev);
3687 ++ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
3688 +
3689 + clk_enable(mcfqspi->clk);
3690 +
3691 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
3692 +index 4e44575bd87a..f1322343d789 100644
3693 +--- a/drivers/spi/spi-fsl-dspi.c
3694 ++++ b/drivers/spi/spi-fsl-dspi.c
3695 +@@ -421,7 +421,6 @@ static int dspi_suspend(struct device *dev)
3696 +
3697 + static int dspi_resume(struct device *dev)
3698 + {
3699 +-
3700 + struct spi_master *master = dev_get_drvdata(dev);
3701 + struct fsl_dspi *dspi = spi_master_get_devdata(master);
3702 +
3703 +@@ -505,7 +504,7 @@ static int dspi_probe(struct platform_device *pdev)
3704 + clk_prepare_enable(dspi->clk);
3705 +
3706 + init_waitqueue_head(&dspi->waitq);
3707 +- platform_set_drvdata(pdev, dspi);
3708 ++ platform_set_drvdata(pdev, master);
3709 +
3710 + ret = spi_bitbang_start(&dspi->bitbang);
3711 + if (ret != 0) {
3712 +@@ -527,7 +526,8 @@ out_master_put:
3713 +
3714 + static int dspi_remove(struct platform_device *pdev)
3715 + {
3716 +- struct fsl_dspi *dspi = platform_get_drvdata(pdev);
3717 ++ struct spi_master *master = platform_get_drvdata(pdev);
3718 ++ struct fsl_dspi *dspi = spi_master_get_devdata(master);
3719 +
3720 + /* Disconnect from the SPI framework */
3721 + spi_bitbang_stop(&dspi->bitbang);
3722 +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
3723 +index 15323d8bd9cf..941069517423 100644
3724 +--- a/drivers/spi/spi-imx.c
3725 ++++ b/drivers/spi/spi-imx.c
3726 +@@ -892,8 +892,8 @@ static int spi_imx_remove(struct platform_device *pdev)
3727 + spi_bitbang_stop(&spi_imx->bitbang);
3728 +
3729 + writel(0, spi_imx->base + MXC_CSPICTRL);
3730 +- clk_disable_unprepare(spi_imx->clk_ipg);
3731 +- clk_disable_unprepare(spi_imx->clk_per);
3732 ++ clk_unprepare(spi_imx->clk_ipg);
3733 ++ clk_unprepare(spi_imx->clk_per);
3734 + spi_master_put(master);
3735 +
3736 + return 0;
3737 +diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
3738 +index d7ac040e0dc1..d02088f7dc33 100644
3739 +--- a/drivers/staging/zram/zram_drv.c
3740 ++++ b/drivers/staging/zram/zram_drv.c
3741 +@@ -621,6 +621,8 @@ static ssize_t disksize_store(struct device *dev,
3742 +
3743 + disksize = PAGE_ALIGN(disksize);
3744 + meta = zram_meta_alloc(disksize);
3745 ++ if (!meta)
3746 ++ return -ENOMEM;
3747 + down_write(&zram->init_lock);
3748 + if (zram->init_done) {
3749 + up_write(&zram->init_lock);
3750 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3751 +index e12f2aab3c87..b5e574659785 100644
3752 +--- a/drivers/target/iscsi/iscsi_target.c
3753 ++++ b/drivers/target/iscsi/iscsi_target.c
3754 +@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
3755 + spin_unlock_bh(&conn->cmd_lock);
3756 +
3757 + list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
3758 +- list_del(&cmd->i_conn_node);
3759 ++ list_del_init(&cmd->i_conn_node);
3760 + iscsit_free_cmd(cmd, false);
3761 + }
3762 + }
3763 +@@ -3704,7 +3704,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
3764 + break;
3765 + case ISTATE_REMOVE:
3766 + spin_lock_bh(&conn->cmd_lock);
3767 +- list_del(&cmd->i_conn_node);
3768 ++ list_del_init(&cmd->i_conn_node);
3769 + spin_unlock_bh(&conn->cmd_lock);
3770 +
3771 + iscsit_free_cmd(cmd, false);
3772 +@@ -4149,7 +4149,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
3773 + spin_lock_bh(&conn->cmd_lock);
3774 + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
3775 +
3776 +- list_del(&cmd->i_conn_node);
3777 ++ list_del_init(&cmd->i_conn_node);
3778 + spin_unlock_bh(&conn->cmd_lock);
3779 +
3780 + iscsit_increment_maxcmdsn(cmd, sess);
3781 +@@ -4194,6 +4194,10 @@ int iscsit_close_connection(
3782 + iscsit_stop_timers_for_cmds(conn);
3783 + iscsit_stop_nopin_response_timer(conn);
3784 + iscsit_stop_nopin_timer(conn);
3785 ++
3786 ++ if (conn->conn_transport->iscsit_wait_conn)
3787 ++ conn->conn_transport->iscsit_wait_conn(conn);
3788 ++
3789 + iscsit_free_queue_reqs_for_conn(conn);
3790 +
3791 + /*
3792 +diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
3793 +index 33be1fb1df32..4ca8fd2a70db 100644
3794 +--- a/drivers/target/iscsi/iscsi_target_erl2.c
3795 ++++ b/drivers/target/iscsi/iscsi_target_erl2.c
3796 +@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
3797 + list_for_each_entry_safe(cmd, cmd_tmp,
3798 + &cr->conn_recovery_cmd_list, i_conn_node) {
3799 +
3800 +- list_del(&cmd->i_conn_node);
3801 ++ list_del_init(&cmd->i_conn_node);
3802 + cmd->conn = NULL;
3803 + spin_unlock(&cr->conn_recovery_cmd_lock);
3804 + iscsit_free_cmd(cmd, true);
3805 +@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
3806 + list_for_each_entry_safe(cmd, cmd_tmp,
3807 + &cr->conn_recovery_cmd_list, i_conn_node) {
3808 +
3809 +- list_del(&cmd->i_conn_node);
3810 ++ list_del_init(&cmd->i_conn_node);
3811 + cmd->conn = NULL;
3812 + spin_unlock(&cr->conn_recovery_cmd_lock);
3813 + iscsit_free_cmd(cmd, true);
3814 +@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
3815 + }
3816 + cr = cmd->cr;
3817 +
3818 +- list_del(&cmd->i_conn_node);
3819 ++ list_del_init(&cmd->i_conn_node);
3820 + return --cr->cmd_count;
3821 + }
3822 +
3823 +@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
3824 + if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
3825 + continue;
3826 +
3827 +- list_del(&cmd->i_conn_node);
3828 ++ list_del_init(&cmd->i_conn_node);
3829 +
3830 + spin_unlock_bh(&conn->cmd_lock);
3831 + iscsit_free_cmd(cmd, true);
3832 +@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3833 + /*
3834 + * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
3835 + * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
3836 +- * list_del(&cmd->i_conn_node); to release the command to the
3837 ++ * list_del_init(&cmd->i_conn_node); to release the command to the
3838 + * session pool and remove it from the connection's list.
3839 + *
3840 + * Also stop the DataOUT timer, which will be restarted after
3841 +@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3842 + " CID: %hu\n", cmd->iscsi_opcode,
3843 + cmd->init_task_tag, cmd->cmd_sn, conn->cid);
3844 +
3845 +- list_del(&cmd->i_conn_node);
3846 ++ list_del_init(&cmd->i_conn_node);
3847 + spin_unlock_bh(&conn->cmd_lock);
3848 + iscsit_free_cmd(cmd, true);
3849 + spin_lock_bh(&conn->cmd_lock);
3850 +@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3851 + */
3852 + if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
3853 + iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
3854 +- list_del(&cmd->i_conn_node);
3855 ++ list_del_init(&cmd->i_conn_node);
3856 + spin_unlock_bh(&conn->cmd_lock);
3857 + iscsit_free_cmd(cmd, true);
3858 + spin_lock_bh(&conn->cmd_lock);
3859 +@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3860 +
3861 + cmd->sess = conn->sess;
3862 +
3863 +- list_del(&cmd->i_conn_node);
3864 ++ list_del_init(&cmd->i_conn_node);
3865 + spin_unlock_bh(&conn->cmd_lock);
3866 +
3867 + iscsit_free_all_datain_reqs(cmd);
3868 +diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
3869 +index 4faeb47fa5e1..3cf77c0b76b4 100644
3870 +--- a/drivers/target/iscsi/iscsi_target_tpg.c
3871 ++++ b/drivers/target/iscsi/iscsi_target_tpg.c
3872 +@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
3873 + list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3874 +
3875 + spin_lock(&tpg->tpg_state_lock);
3876 +- if (tpg->tpg_state == TPG_STATE_FREE) {
3877 ++ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
3878 + spin_unlock(&tpg->tpg_state_lock);
3879 + continue;
3880 + }
3881 +diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
3882 +index 7722cb9d5a80..72573661a14a 100644
3883 +--- a/drivers/thermal/x86_pkg_temp_thermal.c
3884 ++++ b/drivers/thermal/x86_pkg_temp_thermal.c
3885 +@@ -68,6 +68,10 @@ struct phy_dev_entry {
3886 + struct thermal_zone_device *tzone;
3887 + };
3888 +
3889 ++static const struct thermal_zone_params pkg_temp_tz_params = {
3890 ++ .no_hwmon = true,
3891 ++};
3892 ++
3893 + /* List maintaining number of package instances */
3894 + static LIST_HEAD(phy_dev_list);
3895 + static DEFINE_MUTEX(phy_dev_list_mutex);
3896 +@@ -446,7 +450,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
3897 + thres_count,
3898 + (thres_count == MAX_NUMBER_OF_TRIPS) ?
3899 + 0x03 : 0x01,
3900 +- phy_dev_entry, &tzone_ops, NULL, 0, 0);
3901 ++ phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
3902 + if (IS_ERR(phy_dev_entry->tzone)) {
3903 + err = PTR_ERR(phy_dev_entry->tzone);
3904 + goto err_ret_free;
3905 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
3906 +index 548d1996590f..652438325197 100644
3907 +--- a/drivers/usb/core/config.c
3908 ++++ b/drivers/usb/core/config.c
3909 +@@ -718,6 +718,10 @@ int usb_get_configuration(struct usb_device *dev)
3910 + result = -ENOMEM;
3911 + goto err;
3912 + }
3913 ++
3914 ++ if (dev->quirks & USB_QUIRK_DELAY_INIT)
3915 ++ msleep(100);
3916 ++
3917 + result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
3918 + bigbuffer, length);
3919 + if (result < 0) {
3920 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3921 +index 01fe36273f3b..1053eb651b2f 100644
3922 +--- a/drivers/usb/core/quirks.c
3923 ++++ b/drivers/usb/core/quirks.c
3924 +@@ -46,6 +46,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3925 + /* Microsoft LifeCam-VX700 v2.0 */
3926 + { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
3927 +
3928 ++ /* Logitech HD Pro Webcams C920 and C930e */
3929 ++ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
3930 ++ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
3931 ++
3932 + /* Logitech Quickcam Fusion */
3933 + { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
3934 +
3935 +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
3936 +index 854c2ec7b699..3e86bf4371b3 100644
3937 +--- a/drivers/usb/host/ehci-pci.c
3938 ++++ b/drivers/usb/host/ehci-pci.c
3939 +@@ -58,8 +58,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
3940 + {
3941 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
3942 + struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
3943 +- struct pci_dev *p_smbus;
3944 +- u8 rev;
3945 + u32 temp;
3946 + int retval;
3947 +
3948 +@@ -175,22 +173,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
3949 + /* SB600 and old version of SB700 have a bug in EHCI controller,
3950 + * which causes usb devices lose response in some cases.
3951 + */
3952 +- if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
3953 +- p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
3954 +- PCI_DEVICE_ID_ATI_SBX00_SMBUS,
3955 +- NULL);
3956 +- if (!p_smbus)
3957 +- break;
3958 +- rev = p_smbus->revision;
3959 +- if ((pdev->device == 0x4386) || (rev == 0x3a)
3960 +- || (rev == 0x3b)) {
3961 +- u8 tmp;
3962 +- ehci_info(ehci, "applying AMD SB600/SB700 USB "
3963 +- "freeze workaround\n");
3964 +- pci_read_config_byte(pdev, 0x53, &tmp);
3965 +- pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
3966 +- }
3967 +- pci_dev_put(p_smbus);
3968 ++ if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
3969 ++ usb_amd_hang_symptom_quirk()) {
3970 ++ u8 tmp;
3971 ++ ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
3972 ++ pci_read_config_byte(pdev, 0x53, &tmp);
3973 ++ pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
3974 + }
3975 + break;
3976 + case PCI_VENDOR_ID_NETMOS:
3977 +diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
3978 +index ec337c2bd5e0..659cde1ed1ea 100644
3979 +--- a/drivers/usb/host/ohci-pci.c
3980 ++++ b/drivers/usb/host/ohci-pci.c
3981 +@@ -150,28 +150,16 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
3982 + static int ohci_quirk_amd700(struct usb_hcd *hcd)
3983 + {
3984 + struct ohci_hcd *ohci = hcd_to_ohci(hcd);
3985 +- struct pci_dev *amd_smbus_dev;
3986 +- u8 rev;
3987 +
3988 + if (usb_amd_find_chipset_info())
3989 + ohci->flags |= OHCI_QUIRK_AMD_PLL;
3990 +
3991 +- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
3992 +- PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
3993 +- if (!amd_smbus_dev)
3994 +- return 0;
3995 +-
3996 +- rev = amd_smbus_dev->revision;
3997 +-
3998 + /* SB800 needs pre-fetch fix */
3999 +- if ((rev >= 0x40) && (rev <= 0x4f)) {
4000 ++ if (usb_amd_prefetch_quirk()) {
4001 + ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
4002 + ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
4003 + }
4004 +
4005 +- pci_dev_put(amd_smbus_dev);
4006 +- amd_smbus_dev = NULL;
4007 +-
4008 + return 0;
4009 + }
4010 +
4011 +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
4012 +index 08ef2829a7e2..463156d03140 100644
4013 +--- a/drivers/usb/host/pci-quirks.c
4014 ++++ b/drivers/usb/host/pci-quirks.c
4015 +@@ -79,11 +79,30 @@
4016 + #define USB_INTEL_USB3_PSSEN 0xD8
4017 + #define USB_INTEL_USB3PRM 0xDC
4018 +
4019 ++/*
4020 ++ * amd_chipset_gen values represent the different AMD chipset generations
4021 ++ */
4022 ++enum amd_chipset_gen {
4023 ++ NOT_AMD_CHIPSET = 0,
4024 ++ AMD_CHIPSET_SB600,
4025 ++ AMD_CHIPSET_SB700,
4026 ++ AMD_CHIPSET_SB800,
4027 ++ AMD_CHIPSET_HUDSON2,
4028 ++ AMD_CHIPSET_BOLTON,
4029 ++ AMD_CHIPSET_YANGTZE,
4030 ++ AMD_CHIPSET_UNKNOWN,
4031 ++};
4032 ++
4033 ++struct amd_chipset_type {
4034 ++ enum amd_chipset_gen gen;
4035 ++ u8 rev;
4036 ++};
4037 ++
4038 + static struct amd_chipset_info {
4039 + struct pci_dev *nb_dev;
4040 + struct pci_dev *smbus_dev;
4041 + int nb_type;
4042 +- int sb_type;
4043 ++ struct amd_chipset_type sb_type;
4044 + int isoc_reqs;
4045 + int probe_count;
4046 + int probe_result;
4047 +@@ -91,6 +110,51 @@ static struct amd_chipset_info {
4048 +
4049 + static DEFINE_SPINLOCK(amd_lock);
4050 +
4051 ++/*
4052 ++ * amd_chipset_sb_type_init - initialize amd chipset southbridge type
4053 ++ *
4054 ++ * AMD FCH/SB generation and revision are identified by SMBus controller
4055 ++ * vendor, device and revision IDs.
4056 ++ *
4057 ++ * Returns: 1 if it is an AMD chipset, 0 otherwise.
4058 ++ */
4059 ++int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
4060 ++{
4061 ++ u8 rev = 0;
4062 ++ pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
4063 ++
4064 ++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
4065 ++ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
4066 ++ if (pinfo->smbus_dev) {
4067 ++ rev = pinfo->smbus_dev->revision;
4068 ++ if (rev >= 0x10 && rev <= 0x1f)
4069 ++ pinfo->sb_type.gen = AMD_CHIPSET_SB600;
4070 ++ else if (rev >= 0x30 && rev <= 0x3f)
4071 ++ pinfo->sb_type.gen = AMD_CHIPSET_SB700;
4072 ++ else if (rev >= 0x40 && rev <= 0x4f)
4073 ++ pinfo->sb_type.gen = AMD_CHIPSET_SB800;
4074 ++ } else {
4075 ++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
4076 ++ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
4077 ++
4078 ++ if (!pinfo->smbus_dev) {
4079 ++ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
4080 ++ return 0;
4081 ++ }
4082 ++
4083 ++ rev = pinfo->smbus_dev->revision;
4084 ++ if (rev >= 0x11 && rev <= 0x14)
4085 ++ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
4086 ++ else if (rev >= 0x15 && rev <= 0x18)
4087 ++ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
4088 ++ else if (rev >= 0x39 && rev <= 0x3a)
4089 ++ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
4090 ++ }
4091 ++
4092 ++ pinfo->sb_type.rev = rev;
4093 ++ return 1;
4094 ++}
4095 ++
4096 + void sb800_prefetch(struct device *dev, int on)
4097 + {
4098 + u16 misc;
4099 +@@ -106,7 +170,6 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
4100 +
4101 + int usb_amd_find_chipset_info(void)
4102 + {
4103 +- u8 rev = 0;
4104 + unsigned long flags;
4105 + struct amd_chipset_info info;
4106 + int ret;
4107 +@@ -122,27 +185,17 @@ int usb_amd_find_chipset_info(void)
4108 + memset(&info, 0, sizeof(info));
4109 + spin_unlock_irqrestore(&amd_lock, flags);
4110 +
4111 +- info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
4112 +- if (info.smbus_dev) {
4113 +- rev = info.smbus_dev->revision;
4114 +- if (rev >= 0x40)
4115 +- info.sb_type = 1;
4116 +- else if (rev >= 0x30 && rev <= 0x3b)
4117 +- info.sb_type = 3;
4118 +- } else {
4119 +- info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
4120 +- 0x780b, NULL);
4121 +- if (!info.smbus_dev) {
4122 +- ret = 0;
4123 +- goto commit;
4124 +- }
4125 +-
4126 +- rev = info.smbus_dev->revision;
4127 +- if (rev >= 0x11 && rev <= 0x18)
4128 +- info.sb_type = 2;
4129 ++ if (!amd_chipset_sb_type_init(&info)) {
4130 ++ ret = 0;
4131 ++ goto commit;
4132 + }
4133 +
4134 +- if (info.sb_type == 0) {
4135 ++	/* The chipset generations below don't need the AMD PLL quirk */
4136 ++ if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
4137 ++ info.sb_type.gen == AMD_CHIPSET_SB600 ||
4138 ++ info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
4139 ++ (info.sb_type.gen == AMD_CHIPSET_SB700 &&
4140 ++ info.sb_type.rev > 0x3b)) {
4141 + if (info.smbus_dev) {
4142 + pci_dev_put(info.smbus_dev);
4143 + info.smbus_dev = NULL;
4144 +@@ -197,6 +250,27 @@ commit:
4145 + }
4146 + EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
4147 +
4148 ++bool usb_amd_hang_symptom_quirk(void)
4149 ++{
4150 ++ u8 rev;
4151 ++
4152 ++ usb_amd_find_chipset_info();
4153 ++ rev = amd_chipset.sb_type.rev;
4154 ++ /* SB600 and old version of SB700 have hang symptom bug */
4155 ++ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
4156 ++ (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
4157 ++ rev >= 0x3a && rev <= 0x3b);
4158 ++}
4159 ++EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
4160 ++
4161 ++bool usb_amd_prefetch_quirk(void)
4162 ++{
4163 ++ usb_amd_find_chipset_info();
4164 ++ /* SB800 needs pre-fetch fix */
4165 ++ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
4166 ++}
4167 ++EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
4168 ++
4169 + /*
4170 + * The hardware normally enables the A-link power management feature, which
4171 + * lets the system lower the power consumption in idle states.
4172 +@@ -229,7 +303,9 @@ static void usb_amd_quirk_pll(int disable)
4173 + }
4174 + }
4175 +
4176 +- if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
4177 ++ if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
4178 ++ amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
4179 ++ amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
4180 + outb_p(AB_REG_BAR_LOW, 0xcd6);
4181 + addr_low = inb_p(0xcd7);
4182 + outb_p(AB_REG_BAR_HIGH, 0xcd6);
4183 +@@ -240,7 +316,8 @@ static void usb_amd_quirk_pll(int disable)
4184 + outl_p(0x40, AB_DATA(addr));
4185 + outl_p(0x34, AB_INDX(addr));
4186 + val = inl_p(AB_DATA(addr));
4187 +- } else if (amd_chipset.sb_type == 3) {
4188 ++ } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
4189 ++ amd_chipset.sb_type.rev <= 0x3b) {
4190 + pci_read_config_dword(amd_chipset.smbus_dev,
4191 + AB_REG_BAR_SB700, &addr);
4192 + outl(AX_INDXC, AB_INDX(addr));
4193 +@@ -353,7 +430,7 @@ void usb_amd_dev_put(void)
4194 + amd_chipset.nb_dev = NULL;
4195 + amd_chipset.smbus_dev = NULL;
4196 + amd_chipset.nb_type = 0;
4197 +- amd_chipset.sb_type = 0;
4198 ++ memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
4199 + amd_chipset.isoc_reqs = 0;
4200 + amd_chipset.probe_result = 0;
4201 +
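
amd_chipset_sb_type_init() above maps the SMBus device's revision into a southbridge generation; the two probe branches are really just two revision tables. The same classification, table-driven, as a stand-alone sketch — the ranges are copied from the hunk, everything else is illustrative:

#include <stdint.h>

enum sb_gen { SB_UNKNOWN, SB600, SB700, SB800, HUDSON2, BOLTON, YANGTZE };

struct rev_range { uint8_t lo, hi; enum sb_gen gen; };

/* ATI SBx00 SMBus device */
static const struct rev_range ati_ranges[] = {
	{ 0x10, 0x1f, SB600 },
	{ 0x30, 0x3f, SB700 },
	{ 0x40, 0x4f, SB800 },
};

/* AMD Hudson-2 SMBus device */
static const struct rev_range amd_ranges[] = {
	{ 0x11, 0x14, HUDSON2 },
	{ 0x15, 0x18, BOLTON },
	{ 0x39, 0x3a, YANGTZE },
};

static enum sb_gen classify(const struct rev_range *tbl, int n, uint8_t rev)
{
	for (int i = 0; i < n; i++)
		if (rev >= tbl[i].lo && rev <= tbl[i].hi)
			return tbl[i].gen;
	return SB_UNKNOWN;
}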
4202 +diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
4203 +index ed6700d00fe6..638e88f7a28b 100644
4204 +--- a/drivers/usb/host/pci-quirks.h
4205 ++++ b/drivers/usb/host/pci-quirks.h
4206 +@@ -5,6 +5,8 @@
4207 + void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
4208 + int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
4209 + int usb_amd_find_chipset_info(void);
4210 ++bool usb_amd_hang_symptom_quirk(void);
4211 ++bool usb_amd_prefetch_quirk(void);
4212 + void usb_amd_dev_put(void);
4213 + void usb_amd_quirk_pll_disable(void);
4214 + void usb_amd_quirk_pll_enable(void);
4215 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4216 +index 82fb34183a7f..f178f762b543 100644
4217 +--- a/drivers/usb/host/xhci.c
4218 ++++ b/drivers/usb/host/xhci.c
4219 +@@ -4730,6 +4730,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4220 + /* Accept arbitrarily long scatter-gather lists */
4221 + hcd->self.sg_tablesize = ~0;
4222 +
4223 ++ /* support to build packet from discontinuous buffers */
4224 ++ hcd->self.no_sg_constraint = 1;
4225 ++
4226 + /* XHCI controllers don't stop the ep queue on short packets :| */
4227 + hcd->self.no_stop_on_short = 1;
4228 +
4229 +@@ -4754,14 +4757,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4230 + /* xHCI private pointer was set in xhci_pci_probe for the second
4231 + * registered roothub.
4232 + */
4233 +- xhci = hcd_to_xhci(hcd);
4234 +- /*
4235 +- * Support arbitrarily aligned sg-list entries on hosts without
4236 +- * TD fragment rules (which are currently unsupported).
4237 +- */
4238 +- if (xhci->hci_version < 0x100)
4239 +- hcd->self.no_sg_constraint = 1;
4240 +-
4241 + return 0;
4242 + }
4243 +
4244 +@@ -4788,9 +4783,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4245 + if (xhci->hci_version > 0x96)
4246 + xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4247 +
4248 +- if (xhci->hci_version < 0x100)
4249 +- hcd->self.no_sg_constraint = 1;
4250 +-
4251 + /* Make sure the HC is halted. */
4252 + retval = xhci_halt(xhci);
4253 + if (retval)
4254 +diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
4255 +index 7f9ff75d0db2..fcb950031246 100644
4256 +--- a/drivers/video/efifb.c
4257 ++++ b/drivers/video/efifb.c
4258 +@@ -108,8 +108,8 @@ static int efifb_setup(char *options)
4259 + if (!*this_opt) continue;
4260 +
4261 + for (i = 0; i < M_UNKNOWN; i++) {
4262 +- if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
4263 +- efifb_dmi_list[i].base != 0) {
4264 ++ if (efifb_dmi_list[i].base != 0 &&
4265 ++ !strcmp(this_opt, efifb_dmi_list[i].optname)) {
4266 + screen_info.lfb_base = efifb_dmi_list[i].base;
4267 + screen_info.lfb_linelength = efifb_dmi_list[i].stride;
4268 + screen_info.lfb_width = efifb_dmi_list[i].width;
4269 +diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
4270 +index 76273c1d26a6..b5ee393e2e8d 100644
4271 +--- a/fs/bio-integrity.c
4272 ++++ b/fs/bio-integrity.c
4273 +@@ -316,7 +316,7 @@ static void bio_integrity_generate(struct bio *bio)
4274 + bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
4275 + bix.sector_size = bi->sector_size;
4276 +
4277 +- bio_for_each_segment(bv, bio, i) {
4278 ++ bio_for_each_segment_all(bv, bio, i) {
4279 + void *kaddr = kmap_atomic(bv->bv_page);
4280 + bix.data_buf = kaddr + bv->bv_offset;
4281 + bix.data_size = bv->bv_len;
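
The bio-integrity switch above matters because bio_for_each_segment() starts at the bio's current index (bi_idx), which may already have advanced by the time integrity data is generated, whereas bio_for_each_segment_all() walks every vector from 0. A stand-alone sketch of the difference, with simplified structures rather than the block-layer API:

struct vec { void *buf; unsigned len; };
struct bio_like { struct vec *vecs; int cnt; int idx; /* bi_idx */ };

/* walks only the not-yet-consumed tail, like bio_for_each_segment() */
#define for_each_segment(v, b, i) \
	for (i = (b)->idx; i < (b)->cnt && ((v) = &(b)->vecs[i]); i++)

/* walks every vector from the start, like bio_for_each_segment_all() */
#define for_each_segment_all(v, b, i) \
	for (i = 0; i < (b)->cnt && ((v) = &(b)->vecs[i]); i++)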
4282 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
4283 +index 810c28fb8c3c..d76c9744c774 100644
4284 +--- a/fs/eventpoll.c
4285 ++++ b/fs/eventpoll.c
4286 +@@ -41,6 +41,7 @@
4287 + #include <linux/proc_fs.h>
4288 + #include <linux/seq_file.h>
4289 + #include <linux/compat.h>
4290 ++#include <linux/rculist.h>
4291 +
4292 + /*
4293 + * LOCKING:
4294 +@@ -133,8 +134,12 @@ struct nested_calls {
4295 + * of these on a server and we do not want this to take another cache line.
4296 + */
4297 + struct epitem {
4298 +- /* RB tree node used to link this structure to the eventpoll RB tree */
4299 +- struct rb_node rbn;
4300 ++ union {
4301 ++ /* RB tree node links this structure to the eventpoll RB tree */
4302 ++ struct rb_node rbn;
4303 ++ /* Used to free the struct epitem */
4304 ++ struct rcu_head rcu;
4305 ++ };
4306 +
4307 + /* List header used to link this structure to the eventpoll ready list */
4308 + struct list_head rdllink;
4309 +@@ -580,14 +585,14 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
4310 + * @sproc: Pointer to the scan callback.
4311 + * @priv: Private opaque data passed to the @sproc callback.
4312 + * @depth: The current depth of recursive f_op->poll calls.
4313 ++ * @ep_locked: caller already holds ep->mtx
4314 + *
4315 + * Returns: The same integer error code returned by the @sproc callback.
4316 + */
4317 + static int ep_scan_ready_list(struct eventpoll *ep,
4318 + int (*sproc)(struct eventpoll *,
4319 + struct list_head *, void *),
4320 +- void *priv,
4321 +- int depth)
4322 ++ void *priv, int depth, bool ep_locked)
4323 + {
4324 + int error, pwake = 0;
4325 + unsigned long flags;
4326 +@@ -598,7 +603,9 @@ static int ep_scan_ready_list(struct eventpoll *ep,
4327 + * We need to lock this because we could be hit by
4328 + * eventpoll_release_file() and epoll_ctl().
4329 + */
4330 +- mutex_lock_nested(&ep->mtx, depth);
4331 ++
4332 ++ if (!ep_locked)
4333 ++ mutex_lock_nested(&ep->mtx, depth);
4334 +
4335 + /*
4336 + * Steal the ready list, and re-init the original one to the
4337 +@@ -662,7 +669,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,
4338 + }
4339 + spin_unlock_irqrestore(&ep->lock, flags);
4340 +
4341 +- mutex_unlock(&ep->mtx);
4342 ++ if (!ep_locked)
4343 ++ mutex_unlock(&ep->mtx);
4344 +
4345 + /* We have to call this outside the lock */
4346 + if (pwake)
4347 +@@ -671,6 +679,12 @@ static int ep_scan_ready_list(struct eventpoll *ep,
4348 + return error;
4349 + }
4350 +
4351 ++static void epi_rcu_free(struct rcu_head *head)
4352 ++{
4353 ++ struct epitem *epi = container_of(head, struct epitem, rcu);
4354 ++ kmem_cache_free(epi_cache, epi);
4355 ++}
4356 ++
4357 + /*
4358 + * Removes a "struct epitem" from the eventpoll RB tree and deallocates
4359 + * all the associated resources. Must be called with "mtx" held.
4360 +@@ -692,8 +706,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
4361 +
4362 + /* Remove the current item from the list of epoll hooks */
4363 + spin_lock(&file->f_lock);
4364 +- if (ep_is_linked(&epi->fllink))
4365 +- list_del_init(&epi->fllink);
4366 ++ list_del_rcu(&epi->fllink);
4367 + spin_unlock(&file->f_lock);
4368 +
4369 + rb_erase(&epi->rbn, &ep->rbr);
4370 +@@ -704,9 +717,14 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
4371 + spin_unlock_irqrestore(&ep->lock, flags);
4372 +
4373 + wakeup_source_unregister(ep_wakeup_source(epi));
4374 +-
4375 +- /* At this point it is safe to free the eventpoll item */
4376 +- kmem_cache_free(epi_cache, epi);
4377 ++ /*
4378 ++ * At this point it is safe to free the eventpoll item. Use the union
4379 ++ * field epi->rcu, since we are trying to minimize the size of
4380 ++ * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
4381 ++ * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
4382 ++ * use of the rbn field.
4383 ++ */
4384 ++ call_rcu(&epi->rcu, epi_rcu_free);
4385 +
4386 + atomic_long_dec(&ep->user->epoll_watches);
4387 +
4388 +@@ -807,15 +825,34 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
4389 + return 0;
4390 + }
4391 +
4392 ++static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
4393 ++ poll_table *pt);
4394 ++
4395 ++struct readyevents_arg {
4396 ++ struct eventpoll *ep;
4397 ++ bool locked;
4398 ++};
4399 ++
4400 + static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
4401 + {
4402 +- return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
4403 ++ struct readyevents_arg *arg = priv;
4404 ++
4405 ++ return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
4406 ++ call_nests + 1, arg->locked);
4407 + }
4408 +
4409 + static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
4410 + {
4411 + int pollflags;
4412 + struct eventpoll *ep = file->private_data;
4413 ++ struct readyevents_arg arg;
4414 ++
4415 ++ /*
4416 ++ * During ep_insert() we already hold the ep->mtx for the tfile.
4417 ++ * Prevent re-aquisition.
4418 ++ */
4419 ++ arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
4420 ++ arg.ep = ep;
4421 +
4422 + /* Insert inside our poll wait queue */
4423 + poll_wait(file, &ep->poll_wait, wait);
4424 +@@ -827,7 +864,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
4425 + * could re-enter here.
4426 + */
4427 + pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
4428 +- ep_poll_readyevents_proc, ep, ep, current);
4429 ++ ep_poll_readyevents_proc, &arg, ep, current);
4430 +
4431 + return pollflags != -1 ? pollflags : 0;
4432 + }
4433 +@@ -872,7 +909,6 @@ static const struct file_operations eventpoll_fops = {
4434 + */
4435 + void eventpoll_release_file(struct file *file)
4436 + {
4437 +- struct list_head *lsthead = &file->f_ep_links;
4438 + struct eventpoll *ep;
4439 + struct epitem *epi;
4440 +
4441 +@@ -890,17 +926,12 @@ void eventpoll_release_file(struct file *file)
4442 + * Besides, ep_remove() acquires the lock, so we can't hold it here.
4443 + */
4444 + mutex_lock(&epmutex);
4445 +-
4446 +- while (!list_empty(lsthead)) {
4447 +- epi = list_first_entry(lsthead, struct epitem, fllink);
4448 +-
4449 ++ list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
4450 + ep = epi->ep;
4451 +- list_del_init(&epi->fllink);
4452 + mutex_lock_nested(&ep->mtx, 0);
4453 + ep_remove(ep, epi);
4454 + mutex_unlock(&ep->mtx);
4455 + }
4456 +-
4457 + mutex_unlock(&epmutex);
4458 + }
4459 +
4460 +@@ -1138,7 +1169,9 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
4461 + struct file *child_file;
4462 + struct epitem *epi;
4463 +
4464 +- list_for_each_entry(epi, &file->f_ep_links, fllink) {
4465 ++ /* CTL_DEL can remove links here, but that can't increase our count */
4466 ++ rcu_read_lock();
4467 ++ list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
4468 + child_file = epi->ep->file;
4469 + if (is_file_epoll(child_file)) {
4470 + if (list_empty(&child_file->f_ep_links)) {
4471 +@@ -1160,6 +1193,7 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
4472 + "file is not an ep!\n");
4473 + }
4474 + }
4475 ++ rcu_read_unlock();
4476 + return error;
4477 + }
4478 +
4479 +@@ -1231,7 +1265,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
4480 + * Must be called with "mtx" held.
4481 + */
4482 + static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4483 +- struct file *tfile, int fd)
4484 ++ struct file *tfile, int fd, int full_check)
4485 + {
4486 + int error, revents, pwake = 0;
4487 + unsigned long flags;
4488 +@@ -1286,7 +1320,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4489 +
4490 + /* Add the current item to the list of active epoll hook for this file */
4491 + spin_lock(&tfile->f_lock);
4492 +- list_add_tail(&epi->fllink, &tfile->f_ep_links);
4493 ++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
4494 + spin_unlock(&tfile->f_lock);
4495 +
4496 + /*
4497 +@@ -1297,7 +1331,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4498 +
4499 + /* now check if we've created too many backpaths */
4500 + error = -EINVAL;
4501 +- if (reverse_path_check())
4502 ++ if (full_check && reverse_path_check())
4503 + goto error_remove_epi;
4504 +
4505 + /* We have to drop the new item inside our item list to keep track of it */
4506 +@@ -1327,8 +1361,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4507 +
4508 + error_remove_epi:
4509 + spin_lock(&tfile->f_lock);
4510 +- if (ep_is_linked(&epi->fllink))
4511 +- list_del_init(&epi->fllink);
4512 ++ list_del_rcu(&epi->fllink);
4513 + spin_unlock(&tfile->f_lock);
4514 +
4515 + rb_erase(&epi->rbn, &ep->rbr);
4516 +@@ -1521,7 +1554,7 @@ static int ep_send_events(struct eventpoll *ep,
4517 + esed.maxevents = maxevents;
4518 + esed.events = events;
4519 +
4520 +- return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
4521 ++ return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
4522 + }
4523 +
4524 + static inline struct timespec ep_set_mstimeout(long ms)
4525 +@@ -1791,11 +1824,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4526 + struct epoll_event __user *, event)
4527 + {
4528 + int error;
4529 +- int did_lock_epmutex = 0;
4530 ++ int full_check = 0;
4531 + struct fd f, tf;
4532 + struct eventpoll *ep;
4533 + struct epitem *epi;
4534 + struct epoll_event epds;
4535 ++ struct eventpoll *tep = NULL;
4536 +
4537 + error = -EFAULT;
4538 + if (ep_op_has_event(op) &&
4539 +@@ -1844,27 +1878,37 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4540 + * and hang them on the tfile_check_list, so we can check that we
4541 + * haven't created too many possible wakeup paths.
4542 + *
4543 +- * We need to hold the epmutex across both ep_insert and ep_remove
4544 +- * b/c we want to make sure we are looking at a coherent view of
4545 +- * epoll network.
4546 ++ * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
4547 ++ * the epoll file descriptor is attaching directly to a wakeup source,
4548 ++ * unless the epoll file descriptor is nested. The purpose of taking the
4549 ++ * 'epmutex' on add is to prevent complex topologies such as loops and
4550 ++ * deep wakeup paths from forming in parallel through multiple
4551 ++ * EPOLL_CTL_ADD operations.
4552 + */
4553 +- if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
4554 +- mutex_lock(&epmutex);
4555 +- did_lock_epmutex = 1;
4556 +- }
4557 ++ mutex_lock_nested(&ep->mtx, 0);
4558 + if (op == EPOLL_CTL_ADD) {
4559 +- if (is_file_epoll(tf.file)) {
4560 +- error = -ELOOP;
4561 +- if (ep_loop_check(ep, tf.file) != 0) {
4562 +- clear_tfile_check_list();
4563 +- goto error_tgt_fput;
4564 ++ if (!list_empty(&f.file->f_ep_links) ||
4565 ++ is_file_epoll(tf.file)) {
4566 ++ full_check = 1;
4567 ++ mutex_unlock(&ep->mtx);
4568 ++ mutex_lock(&epmutex);
4569 ++ if (is_file_epoll(tf.file)) {
4570 ++ error = -ELOOP;
4571 ++ if (ep_loop_check(ep, tf.file) != 0) {
4572 ++ clear_tfile_check_list();
4573 ++ goto error_tgt_fput;
4574 ++ }
4575 ++ } else
4576 ++ list_add(&tf.file->f_tfile_llink,
4577 ++ &tfile_check_list);
4578 ++ mutex_lock_nested(&ep->mtx, 0);
4579 ++ if (is_file_epoll(tf.file)) {
4580 ++ tep = tf.file->private_data;
4581 ++ mutex_lock_nested(&tep->mtx, 1);
4582 + }
4583 +- } else
4584 +- list_add(&tf.file->f_tfile_llink, &tfile_check_list);
4585 ++ }
4586 + }
4587 +
4588 +- mutex_lock_nested(&ep->mtx, 0);
4589 +-
4590 + /*
4591 + * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
4592 + * above, we can be sure to be able to use the item looked up by
4593 +@@ -1877,10 +1921,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4594 + case EPOLL_CTL_ADD:
4595 + if (!epi) {
4596 + epds.events |= POLLERR | POLLHUP;
4597 +- error = ep_insert(ep, &epds, tf.file, fd);
4598 ++ error = ep_insert(ep, &epds, tf.file, fd, full_check);
4599 + } else
4600 + error = -EEXIST;
4601 +- clear_tfile_check_list();
4602 ++ if (full_check)
4603 ++ clear_tfile_check_list();
4604 + break;
4605 + case EPOLL_CTL_DEL:
4606 + if (epi)
4607 +@@ -1896,10 +1941,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4608 + error = -ENOENT;
4609 + break;
4610 + }
4611 ++ if (tep != NULL)
4612 ++ mutex_unlock(&tep->mtx);
4613 + mutex_unlock(&ep->mtx);
4614 +
4615 + error_tgt_fput:
4616 +- if (did_lock_epmutex)
4617 ++ if (full_check)
4618 + mutex_unlock(&epmutex);
4619 +
4620 + fdput(tf);
4621 +diff --git a/fs/namei.c b/fs/namei.c
4622 +index 23ac50f4ee40..187cacf1c83c 100644
4623 +--- a/fs/namei.c
4624 ++++ b/fs/namei.c
4625 +@@ -3924,6 +3924,7 @@ retry:
4626 + out_dput:
4627 + done_path_create(&new_path, new_dentry);
4628 + if (retry_estale(error, how)) {
4629 ++ path_put(&old_path);
4630 + how |= LOOKUP_REVAL;
4631 + goto retry;
4632 + }
4633 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
4634 +index ef792f29f831..5d8ccecf5f5c 100644
4635 +--- a/fs/nfs/delegation.c
4636 ++++ b/fs/nfs/delegation.c
4637 +@@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
4638 +
4639 + rcu_read_lock();
4640 + delegation = rcu_dereference(NFS_I(inode)->delegation);
4641 ++ if (delegation == NULL)
4642 ++ goto out_enoent;
4643 +
4644 +- if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
4645 +- rcu_read_unlock();
4646 +- return -ENOENT;
4647 +- }
4648 ++ if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
4649 ++ goto out_enoent;
4650 + nfs_mark_return_delegation(server, delegation);
4651 + rcu_read_unlock();
4652 +
4653 + nfs_delegation_run_state_manager(clp);
4654 + return 0;
4655 ++out_enoent:
4656 ++ rcu_read_unlock();
4657 ++ return -ENOENT;
4658 + }
4659 +
4660 + static struct inode *
4661 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4662 +index d3d7766f55e3..a53651743d4d 100644
4663 +--- a/fs/nfs/nfs4proc.c
4664 ++++ b/fs/nfs/nfs4proc.c
4665 +@@ -3972,8 +3972,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4666 + {
4667 + nfs4_stateid current_stateid;
4668 +
4669 +- if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
4670 +- return false;
4671 ++ /* If the current stateid represents a lost lock, then exit */
4672 ++ if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4673 ++ return true;
4674 + return nfs4_stateid_match(stateid, &current_stateid);
4675 + }
4676 +
4677 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
4678 +index d71903c6068b..f07941160515 100644
4679 +--- a/fs/ocfs2/file.c
4680 ++++ b/fs/ocfs2/file.c
4681 +@@ -2371,8 +2371,8 @@ out_dio:
4682 +
4683 + if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
4684 + ((file->f_flags & O_DIRECT) && !direct_io)) {
4685 +- ret = filemap_fdatawrite_range(file->f_mapping, pos,
4686 +- pos + count - 1);
4687 ++ ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
4688 ++ *ppos + count - 1);
4689 + if (ret < 0)
4690 + written = ret;
4691 +
4692 +@@ -2385,8 +2385,8 @@ out_dio:
4693 + }
4694 +
4695 + if (!ret)
4696 +- ret = filemap_fdatawait_range(file->f_mapping, pos,
4697 +- pos + count - 1);
4698 ++ ret = filemap_fdatawait_range(file->f_mapping, *ppos,
4699 ++ *ppos + count - 1);
4700 + }
4701 +
4702 + /*
4703 +diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
4704 +index aaa50611ec66..d7b5108789e2 100644
4705 +--- a/fs/ocfs2/quota_global.c
4706 ++++ b/fs/ocfs2/quota_global.c
4707 +@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
4708 + */
4709 + if (status < 0)
4710 + mlog_errno(status);
4711 ++ /*
4712 ++ * Clear dq_off so that we search for the structure in quota file next
4713 ++ * time we acquire it. The structure might be deleted and reallocated
4714 ++ * elsewhere by another node while our dquot structure is on freelist.
4715 ++ */
4716 ++ dquot->dq_off = 0;
4717 + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
4718 + out_trans:
4719 + ocfs2_commit_trans(osb, handle);
4720 +@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
4721 + status = ocfs2_lock_global_qf(info, 1);
4722 + if (status < 0)
4723 + goto out;
4724 +- if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
4725 +- status = ocfs2_qinfo_lock(info, 0);
4726 +- if (status < 0)
4727 +- goto out_dq;
4728 +- status = qtree_read_dquot(&info->dqi_gi, dquot);
4729 +- ocfs2_qinfo_unlock(info, 0);
4730 +- if (status < 0)
4731 +- goto out_dq;
4732 +- }
4733 +- set_bit(DQ_READ_B, &dquot->dq_flags);
4734 ++ status = ocfs2_qinfo_lock(info, 0);
4735 ++ if (status < 0)
4736 ++ goto out_dq;
4737 ++ /*
4738 ++ * We always want to read dquot structure from disk because we don't
4739 ++ * know what happened with it while it was on freelist.
4740 ++ */
4741 ++ status = qtree_read_dquot(&info->dqi_gi, dquot);
4742 ++ ocfs2_qinfo_unlock(info, 0);
4743 ++ if (status < 0)
4744 ++ goto out_dq;
4745 +
4746 + OCFS2_DQUOT(dquot)->dq_use_count++;
4747 + OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
4748 +diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
4749 +index 2e4344be3b96..2001862bf2b1 100644
4750 +--- a/fs/ocfs2/quota_local.c
4751 ++++ b/fs/ocfs2/quota_local.c
4752 +@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
4753 + ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
4754 +
4755 + out:
4756 +- /* Clear the read bit so that next time someone uses this
4757 +- * dquot he reads fresh info from disk and allocates local
4758 +- * dquot structure */
4759 +- clear_bit(DQ_READ_B, &dquot->dq_flags);
4760 + return status;
4761 + }
4762 +
4763 +diff --git a/fs/proc/base.c b/fs/proc/base.c
4764 +index 1485e38daaa3..c35eaa404933 100644
4765 +--- a/fs/proc/base.c
4766 ++++ b/fs/proc/base.c
4767 +@@ -1813,6 +1813,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
4768 + if (rc)
4769 + goto out_mmput;
4770 +
4771 ++ rc = -ENOENT;
4772 + down_read(&mm->mmap_sem);
4773 + vma = find_exact_vma(mm, vm_start, vm_end);
4774 + if (vma && vma->vm_file) {
4775 +diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
4776 +index 7c2e030e72f1..a12f6ed91c84 100644
4777 +--- a/include/linux/blktrace_api.h
4778 ++++ b/include/linux/blktrace_api.h
4779 +@@ -5,6 +5,7 @@
4780 + #include <linux/relay.h>
4781 + #include <linux/compat.h>
4782 + #include <uapi/linux/blktrace_api.h>
4783 ++#include <linux/list.h>
4784 +
4785 + #if defined(CONFIG_BLK_DEV_IO_TRACE)
4786 +
4787 +@@ -23,6 +24,7 @@ struct blk_trace {
4788 + struct dentry *dir;
4789 + struct dentry *dropped_file;
4790 + struct dentry *msg_file;
4791 ++ struct list_head running_list;
4792 + atomic_t dropped;
4793 + };
4794 +
4795 +diff --git a/include/linux/firewire.h b/include/linux/firewire.h
4796 +index 5d7782e42b8f..c3683bdf28fe 100644
4797 +--- a/include/linux/firewire.h
4798 ++++ b/include/linux/firewire.h
4799 +@@ -200,6 +200,7 @@ struct fw_device {
4800 + unsigned irmc:1;
4801 + unsigned bc_implemented:2;
4802 +
4803 ++ work_func_t workfn;
4804 + struct delayed_work work;
4805 + struct fw_attribute_group attribute_group;
4806 + };
4807 +diff --git a/include/linux/mm.h b/include/linux/mm.h
4808 +index fed08c0c543b..648bcb007eba 100644
4809 +--- a/include/linux/mm.h
4810 ++++ b/include/linux/mm.h
4811 +@@ -161,7 +161,7 @@ extern unsigned int kobjsize(const void *objp);
4812 + * Special vmas that are non-mergable, non-mlock()able.
4813 + * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
4814 + */
4815 +-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
4816 ++#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
4817 +
4818 + /*
4819 + * mapping from the currently active vm_flags protection bits (the
4820 +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
4821 +index ebeab360d851..0ecc46e7af3d 100644
4822 +--- a/include/linux/tracepoint.h
4823 ++++ b/include/linux/tracepoint.h
4824 +@@ -60,6 +60,12 @@ struct tp_module {
4825 + unsigned int num_tracepoints;
4826 + struct tracepoint * const *tracepoints_ptrs;
4827 + };
4828 ++bool trace_module_has_bad_taint(struct module *mod);
4829 ++#else
4830 ++static inline bool trace_module_has_bad_taint(struct module *mod)
4831 ++{
4832 ++ return false;
4833 ++}
4834 + #endif /* CONFIG_MODULES */
4835 +
4836 + struct tracepoint_iter {
4837 +diff --git a/include/net/tcp.h b/include/net/tcp.h
4838 +index b1aa324c5e65..51dcc6faa561 100644
4839 +--- a/include/net/tcp.h
4840 ++++ b/include/net/tcp.h
4841 +@@ -482,6 +482,24 @@ extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4842 + extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
4843 + struct ip_options *opt);
4844 + #ifdef CONFIG_SYN_COOKIES
4845 ++#include <linux/ktime.h>
4846 ++
4847 ++/* Syncookies use a monotonic timer which increments every 64 seconds.
4848 ++ * This counter is used both as a hash input and partially encoded into
4849 ++ * the cookie value. A cookie is only validated further if the delta
4850 ++ * between the current counter value and the encoded one is less than this,
4851 ++ * i.e. a sent cookie is valid for at most 128 seconds (or less if
4852 ++ * the counter advances immediately after a cookie is generated).
4853 ++ */
4854 ++#define MAX_SYNCOOKIE_AGE 2
4855 ++
4856 ++static inline u32 tcp_cookie_time(void)
4857 ++{
4858 ++ struct timespec now;
4859 ++ getnstimeofday(&now);
4860 ++ return now.tv_sec >> 6; /* 64 seconds granularity */
4861 ++}
4862 ++
4863 + extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
4864 + const struct tcphdr *th, u16 *mssp);
4865 + extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
4866 +@@ -1303,7 +1321,8 @@ struct tcp_fastopen_request {
4867 + /* Fast Open cookie. Size 0 means a cookie request */
4868 + struct tcp_fastopen_cookie cookie;
4869 + struct msghdr *data; /* data in MSG_FASTOPEN */
4870 +- u16 copied; /* queued in tcp_connect() */
4871 ++ size_t size;
4872 ++ int copied; /* queued in tcp_connect() */
4873 + };
4874 + void tcp_free_fastopen_req(struct tcp_sock *tp);
4875 +
4876 +diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
4877 +index a12589c4ee92..361bd0f04018 100644
4878 +--- a/include/target/iscsi/iscsi_transport.h
4879 ++++ b/include/target/iscsi/iscsi_transport.h
4880 +@@ -12,6 +12,7 @@ struct iscsit_transport {
4881 + int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
4882 + int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
4883 + void (*iscsit_free_np)(struct iscsi_np *);
4884 ++ void (*iscsit_wait_conn)(struct iscsi_conn *);
4885 + void (*iscsit_free_conn)(struct iscsi_conn *);
4886 + int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
4887 + int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
4888 +diff --git a/ipc/msg.c b/ipc/msg.c
4889 +index 558aa91186b6..52770bfde2a5 100644
4890 +--- a/ipc/msg.c
4891 ++++ b/ipc/msg.c
4892 +@@ -885,6 +885,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
4893 + return -EINVAL;
4894 +
4895 + if (msgflg & MSG_COPY) {
4896 ++ if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
4897 ++ return -EINVAL;
4898 + copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
4899 + if (IS_ERR(copy))
4900 + return PTR_ERR(copy);
4901 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4902 +index 4772034b4b17..5ae9f950e024 100644
4903 +--- a/kernel/cpuset.c
4904 ++++ b/kernel/cpuset.c
4905 +@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
4906 + * Temporarily set tasks mems_allowed to target nodes of migration,
4907 + * so that the migration code can allocate pages on these nodes.
4908 + *
4909 +- * Call holding cpuset_mutex, so current's cpuset won't change
4910 +- * during this call, as manage_mutex holds off any cpuset_attach()
4911 +- * calls. Therefore we don't need to take task_lock around the
4912 +- * call to guarantee_online_mems(), as we know no one is changing
4913 +- * our task's cpuset.
4914 +- *
4915 + * While the mm_struct we are migrating is typically from some
4916 + * other task, the task_struct mems_allowed that we are hacking
4917 + * is for our current task, which must allocate new pages for that
4918 +@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
4919 +
4920 + do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
4921 +
4922 ++ rcu_read_lock();
4923 + mems_cs = effective_nodemask_cpuset(task_cs(tsk));
4924 + guarantee_online_mems(mems_cs, &tsk->mems_allowed);
4925 ++ rcu_read_unlock();
4926 + }
4927 +
4928 + /*
4929 +@@ -2511,9 +2507,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
4930 +
4931 + task_lock(current);
4932 + cs = nearest_hardwall_ancestor(task_cs(current));
4933 ++ allowed = node_isset(node, cs->mems_allowed);
4934 + task_unlock(current);
4935 +
4936 +- allowed = node_isset(node, cs->mems_allowed);
4937 + mutex_unlock(&callback_mutex);
4938 + return allowed;
4939 + }
4940 +diff --git a/kernel/futex.c b/kernel/futex.c
4941 +index 221a58fc62f7..231754863a87 100644
4942 +--- a/kernel/futex.c
4943 ++++ b/kernel/futex.c
4944 +@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
4945 + return -EINVAL;
4946 + address -= key->both.offset;
4947 +
4948 ++ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
4949 ++ return -EFAULT;
4950 ++
4951 + /*
4952 + * PROCESS_PRIVATE futexes are fast.
4953 + * As the mm cannot disappear under us and the 'key' only needs
4954 +@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
4955 + * but access_ok() should be faster than find_vma()
4956 + */
4957 + if (!fshared) {
4958 +- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
4959 +- return -EFAULT;
4960 + key->private.mm = mm;
4961 + key->private.address = address;
4962 + get_futex_key_refs(key);
4963 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4964 +index 3e59f951d42f..4c84746a840b 100644
4965 +--- a/kernel/irq/manage.c
4966 ++++ b/kernel/irq/manage.c
4967 +@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
4968 +
4969 + static void wake_threads_waitq(struct irq_desc *desc)
4970 + {
4971 +- if (atomic_dec_and_test(&desc->threads_active) &&
4972 +- waitqueue_active(&desc->wait_for_threads))
4973 ++ if (atomic_dec_and_test(&desc->threads_active))
4974 + wake_up(&desc->wait_for_threads);
4975 + }
4976 +
4977 +diff --git a/kernel/rcutree.h b/kernel/rcutree.h
4978 +index 5f97eab602cd..52be957c9fe2 100644
4979 +--- a/kernel/rcutree.h
4980 ++++ b/kernel/rcutree.h
4981 +@@ -104,6 +104,8 @@ struct rcu_dynticks {
4982 + /* idle-period nonlazy_posted snapshot. */
4983 + unsigned long last_accelerate;
4984 + /* Last jiffy CBs were accelerated. */
4985 ++ unsigned long last_advance_all;
4986 ++ /* Last jiffy CBs were all advanced. */
4987 + int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
4988 + #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
4989 + };
4990 +diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
4991 +index 130c97b027f2..511e6b47c594 100644
4992 +--- a/kernel/rcutree_plugin.h
4993 ++++ b/kernel/rcutree_plugin.h
4994 +@@ -1627,20 +1627,26 @@ module_param(rcu_idle_gp_delay, int, 0644);
4995 + static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
4996 + module_param(rcu_idle_lazy_gp_delay, int, 0644);
4997 +
4998 +-extern int tick_nohz_enabled;
4999 ++extern int tick_nohz_active;
5000 +
5001 + /*
5002 +- * Try to advance callbacks for all flavors of RCU on the current CPU.
5003 +- * Afterwards, if there are any callbacks ready for immediate invocation,
5004 +- * return true.
5005 ++ * Try to advance callbacks for all flavors of RCU on the current CPU, but
5006 ++ * only if it has been a while since the last time we did so. Afterwards,
5007 ++ * if there are any callbacks ready for immediate invocation, return true.
5008 + */
5009 + static bool rcu_try_advance_all_cbs(void)
5010 + {
5011 + bool cbs_ready = false;
5012 + struct rcu_data *rdp;
5013 ++ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
5014 + struct rcu_node *rnp;
5015 + struct rcu_state *rsp;
5016 +
5017 ++ /* Exit early if we advanced recently. */
5018 ++ if (jiffies == rdtp->last_advance_all)
5019 ++ return 0;
5020 ++ rdtp->last_advance_all = jiffies;
5021 ++
5022 + for_each_rcu_flavor(rsp) {
5023 + rdp = this_cpu_ptr(rsp->rda);
5024 + rnp = rdp->mynode;
5025 +@@ -1718,7 +1724,7 @@ static void rcu_prepare_for_idle(int cpu)
5026 + int tne;
5027 +
5028 + /* Handle nohz enablement switches conservatively. */
5029 +- tne = ACCESS_ONCE(tick_nohz_enabled);
5030 ++ tne = ACCESS_ONCE(tick_nohz_active);
5031 + if (tne != rdtp->tick_nohz_enabled_snap) {
5032 + if (rcu_cpu_has_callbacks(cpu, NULL))
5033 + invoke_rcu_core(); /* force nohz to see update. */
5034 +@@ -1739,6 +1745,8 @@ static void rcu_prepare_for_idle(int cpu)
5035 + */
5036 + if (rdtp->all_lazy &&
5037 + rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
5038 ++ rdtp->all_lazy = false;
5039 ++ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
5040 + invoke_rcu_core();
5041 + return;
5042 + }
5043 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5044 +index ceae65e69a64..a494ace683e3 100644
5045 +--- a/kernel/sched/core.c
5046 ++++ b/kernel/sched/core.c
5047 +@@ -5119,10 +5119,13 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5048 + DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5049 + DEFINE_PER_CPU(int, sd_llc_size);
5050 + DEFINE_PER_CPU(int, sd_llc_id);
5051 ++DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5052 ++DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5053 +
5054 + static void update_top_cache_domain(int cpu)
5055 + {
5056 + struct sched_domain *sd;
5057 ++ struct sched_domain *busy_sd = NULL;
5058 + int id = cpu;
5059 + int size = 1;
5060 +
5061 +@@ -5130,11 +5133,16 @@ static void update_top_cache_domain(int cpu)
5062 + if (sd) {
5063 + id = cpumask_first(sched_domain_span(sd));
5064 + size = cpumask_weight(sched_domain_span(sd));
5065 ++ busy_sd = sd->parent; /* sd_busy */
5066 + }
5067 ++ rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
5068 +
5069 + rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5070 + per_cpu(sd_llc_size, cpu) = size;
5071 + per_cpu(sd_llc_id, cpu) = id;
5072 ++
5073 ++ sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5074 ++ rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
5075 + }
5076 +
5077 + /*
5078 +@@ -5325,6 +5333,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5079 + * die on a /0 trap.
5080 + */
5081 + sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
5082 ++ sg->sgp->power_orig = sg->sgp->power;
5083 +
5084 + /*
5085 + * Make sure the first group of this domain contains the
5086 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
5087 +index 411732334906..790e2fc808da 100644
5088 +--- a/kernel/sched/fair.c
5089 ++++ b/kernel/sched/fair.c
5090 +@@ -5598,16 +5598,16 @@ static inline void nohz_balance_exit_idle(int cpu)
5091 + static inline void set_cpu_sd_state_busy(void)
5092 + {
5093 + struct sched_domain *sd;
5094 ++ int cpu = smp_processor_id();
5095 +
5096 + rcu_read_lock();
5097 +- sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5098 ++ sd = rcu_dereference(per_cpu(sd_busy, cpu));
5099 +
5100 + if (!sd || !sd->nohz_idle)
5101 + goto unlock;
5102 + sd->nohz_idle = 0;
5103 +
5104 +- for (; sd; sd = sd->parent)
5105 +- atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5106 ++ atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5107 + unlock:
5108 + rcu_read_unlock();
5109 + }
5110 +@@ -5615,16 +5615,16 @@ unlock:
5111 + void set_cpu_sd_state_idle(void)
5112 + {
5113 + struct sched_domain *sd;
5114 ++ int cpu = smp_processor_id();
5115 +
5116 + rcu_read_lock();
5117 +- sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5118 ++ sd = rcu_dereference(per_cpu(sd_busy, cpu));
5119 +
5120 + if (!sd || sd->nohz_idle)
5121 + goto unlock;
5122 + sd->nohz_idle = 1;
5123 +
5124 +- for (; sd; sd = sd->parent)
5125 +- atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5126 ++ atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5127 + unlock:
5128 + rcu_read_unlock();
5129 + }
5130 +@@ -5807,6 +5807,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
5131 + {
5132 + unsigned long now = jiffies;
5133 + struct sched_domain *sd;
5134 ++ struct sched_group_power *sgp;
5135 ++ int nr_busy;
5136 +
5137 + if (unlikely(idle_cpu(cpu)))
5138 + return 0;
5139 +@@ -5832,22 +5834,22 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
5140 + goto need_kick;
5141 +
5142 + rcu_read_lock();
5143 +- for_each_domain(cpu, sd) {
5144 +- struct sched_group *sg = sd->groups;
5145 +- struct sched_group_power *sgp = sg->sgp;
5146 +- int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5147 ++ sd = rcu_dereference(per_cpu(sd_busy, cpu));
5148 +
5149 +- if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5150 +- goto need_kick_unlock;
5151 ++ if (sd) {
5152 ++ sgp = sd->groups->sgp;
5153 ++ nr_busy = atomic_read(&sgp->nr_busy_cpus);
5154 +
5155 +- if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5156 +- && (cpumask_first_and(nohz.idle_cpus_mask,
5157 +- sched_domain_span(sd)) < cpu))
5158 ++ if (nr_busy > 1)
5159 + goto need_kick_unlock;
5160 +-
5161 +- if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5162 +- break;
5163 + }
5164 ++
5165 ++ sd = rcu_dereference(per_cpu(sd_asym, cpu));
5166 ++
5167 ++ if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
5168 ++ sched_domain_span(sd)) < cpu))
5169 ++ goto need_kick_unlock;
5170 ++
5171 + rcu_read_unlock();
5172 + return 0;
5173 +
5174 +@@ -6013,15 +6015,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
5175 + struct cfs_rq *cfs_rq = cfs_rq_of(se);
5176 +
5177 + /*
5178 +- * Ensure the task's vruntime is normalized, so that when its
5179 ++ * Ensure the task's vruntime is normalized, so that when it's
5180 + * switched back to the fair class the enqueue_entity(.flags=0) will
5181 + * do the right thing.
5182 + *
5183 +- * If it was on_rq, then the dequeue_entity(.flags=0) will already
5184 +- * have normalized the vruntime, if it was !on_rq, then only when
5185 ++ * If it's on_rq, then the dequeue_entity(.flags=0) will already
5186 ++ * have normalized the vruntime, if it's !on_rq, then only when
5187 + * the task is sleeping will it still have non-normalized vruntime.
5188 + */
5189 +- if (!se->on_rq && p->state != TASK_RUNNING) {
5190 ++ if (!p->on_rq && p->state != TASK_RUNNING) {
5191 + /*
5192 + * Fix up our vruntime so that the current sleep doesn't
5193 + * cause 'unlimited' sleep bonus.
5194 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
5195 +index 417b1b3fd7e9..ff04e1a06412 100644
5196 +--- a/kernel/sched/rt.c
5197 ++++ b/kernel/sched/rt.c
5198 +@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
5199 + * if we should look at the mask. It would be a shame
5200 + * if we looked at the mask, but the mask was not
5201 + * updated yet.
5202 ++ *
5203 ++ * Matched by the barrier in pull_rt_task().
5204 + */
5205 +- wmb();
5206 ++ smp_wmb();
5207 + atomic_inc(&rq->rd->rto_count);
5208 + }
5209 +
5210 +@@ -1227,8 +1229,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
5211 + */
5212 + if (curr && unlikely(rt_task(curr)) &&
5213 + (curr->nr_cpus_allowed < 2 ||
5214 +- curr->prio <= p->prio) &&
5215 +- (p->nr_cpus_allowed > 1)) {
5216 ++ curr->prio <= p->prio)) {
5217 + int target = find_lowest_rq(p);
5218 +
5219 + if (target != -1)
5220 +@@ -1644,6 +1645,12 @@ static int pull_rt_task(struct rq *this_rq)
5221 + if (likely(!rt_overloaded(this_rq)))
5222 + return 0;
5223 +
5224 ++ /*
5225 ++ * Match the barrier from rt_set_overload(); this guarantees that if we
5226 ++ * see overloaded we must also see the rto_mask bit.
5227 ++ */
5228 ++ smp_rmb();
5229 ++
5230 + for_each_cpu(cpu, this_rq->rd->rto_mask) {
5231 + if (this_cpu == cpu)
5232 + continue;
5233 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
5234 +index a6208afd80e7..4f310592b1ba 100644
5235 +--- a/kernel/sched/sched.h
5236 ++++ b/kernel/sched/sched.h
5237 +@@ -596,6 +596,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
5238 + DECLARE_PER_CPU(struct sched_domain *, sd_llc);
5239 + DECLARE_PER_CPU(int, sd_llc_size);
5240 + DECLARE_PER_CPU(int, sd_llc_id);
5241 ++DECLARE_PER_CPU(struct sched_domain *, sd_busy);
5242 ++DECLARE_PER_CPU(struct sched_domain *, sd_asym);
5243 +
5244 + struct sched_group_power {
5245 + atomic_t ref;
5246 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
5247 +index 3612fc77f834..ea20f7d1ac2c 100644
5248 +--- a/kernel/time/tick-sched.c
5249 ++++ b/kernel/time/tick-sched.c
5250 +@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
5251 + /*
5252 + * NO HZ enabled ?
5253 + */
5254 +-int tick_nohz_enabled __read_mostly = 1;
5255 +-
5256 ++static int tick_nohz_enabled __read_mostly = 1;
5257 ++int tick_nohz_active __read_mostly;
5258 + /*
5259 + * Enable / Disable tickless mode
5260 + */
5261 +@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
5262 + struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
5263 + ktime_t now, idle;
5264 +
5265 +- if (!tick_nohz_enabled)
5266 ++ if (!tick_nohz_active)
5267 + return -1;
5268 +
5269 + now = ktime_get();
5270 +@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
5271 + struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
5272 + ktime_t now, iowait;
5273 +
5274 +- if (!tick_nohz_enabled)
5275 ++ if (!tick_nohz_active)
5276 + return -1;
5277 +
5278 + now = ktime_get();
5279 +@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
5280 + return false;
5281 + }
5282 +
5283 +- if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
5284 ++ if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
5285 ++ ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
5286 + return false;
5287 ++ }
5288 +
5289 + if (need_resched())
5290 + return false;
5291 +@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
5292 + local_irq_disable();
5293 +
5294 + ts = &__get_cpu_var(tick_cpu_sched);
5295 +- /*
5296 +- * set ts->inidle unconditionally. even if the system did not
5297 +- * switch to nohz mode the cpu frequency governers rely on the
5298 +- * update of the idle time accounting in tick_nohz_start_idle().
5299 +- */
5300 + ts->inidle = 1;
5301 + __tick_nohz_idle_enter(ts);
5302 +
5303 +@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
5304 + struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
5305 + ktime_t next;
5306 +
5307 +- if (!tick_nohz_enabled)
5308 ++ if (!tick_nohz_active)
5309 + return;
5310 +
5311 + local_irq_disable();
5312 +@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
5313 + local_irq_enable();
5314 + return;
5315 + }
5316 +-
5317 ++ tick_nohz_active = 1;
5318 + ts->nohz_mode = NOHZ_MODE_LOWRES;
5319 +
5320 + /*
5321 +@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
5322 + }
5323 +
5324 + #ifdef CONFIG_NO_HZ_COMMON
5325 +- if (tick_nohz_enabled)
5326 ++ if (tick_nohz_enabled) {
5327 + ts->nohz_mode = NOHZ_MODE_HIGHRES;
5328 ++ tick_nohz_active = 1;
5329 ++ }
5330 + #endif
5331 + }
5332 + #endif /* HIGH_RES_TIMERS */
5333 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5334 +index b8b8560bfb95..7f727b34280d 100644
5335 +--- a/kernel/trace/blktrace.c
5336 ++++ b/kernel/trace/blktrace.c
5337 +@@ -26,6 +26,7 @@
5338 + #include <linux/export.h>
5339 + #include <linux/time.h>
5340 + #include <linux/uaccess.h>
5341 ++#include <linux/list.h>
5342 +
5343 + #include <trace/events/block.h>
5344 +
5345 +@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
5346 + static struct trace_array *blk_tr;
5347 + static bool blk_tracer_enabled __read_mostly;
5348 +
5349 ++static LIST_HEAD(running_trace_list);
5350 ++static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
5351 ++
5352 + /* Select an alternative, minimalistic output than the original one */
5353 + #define TRACE_BLK_OPT_CLASSIC 0x1
5354 +
5355 +@@ -107,10 +111,18 @@ record_it:
5356 + * Send out a notify for this process, if we haven't done so since a trace
5357 + * started
5358 + */
5359 +-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
5360 ++static void trace_note_tsk(struct task_struct *tsk)
5361 + {
5362 ++ unsigned long flags;
5363 ++ struct blk_trace *bt;
5364 ++
5365 + tsk->btrace_seq = blktrace_seq;
5366 +- trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
5367 ++ spin_lock_irqsave(&running_trace_lock, flags);
5368 ++ list_for_each_entry(bt, &running_trace_list, running_list) {
5369 ++ trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
5370 ++ sizeof(tsk->comm));
5371 ++ }
5372 ++ spin_unlock_irqrestore(&running_trace_lock, flags);
5373 + }
5374 +
5375 + static void trace_note_time(struct blk_trace *bt)
5376 +@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
5377 + goto record_it;
5378 + }
5379 +
5380 ++ if (unlikely(tsk->btrace_seq != blktrace_seq))
5381 ++ trace_note_tsk(tsk);
5382 ++
5383 + /*
5384 + * A word about the locking here - we disable interrupts to reserve
5385 + * some space in the relay per-cpu buffer, to prevent an irq
5386 + * from coming in and stepping on our toes.
5387 + */
5388 + local_irq_save(flags);
5389 +-
5390 +- if (unlikely(tsk->btrace_seq != blktrace_seq))
5391 +- trace_note_tsk(bt, tsk);
5392 +-
5393 + t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
5394 + if (t) {
5395 + sequence = per_cpu_ptr(bt->sequence, cpu);
5396 +@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
5397 + bt->dir = dir;
5398 + bt->dev = dev;
5399 + atomic_set(&bt->dropped, 0);
5400 ++ INIT_LIST_HEAD(&bt->running_list);
5401 +
5402 + ret = -EIO;
5403 + bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
5404 +@@ -601,6 +613,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
5405 + blktrace_seq++;
5406 + smp_mb();
5407 + bt->trace_state = Blktrace_running;
5408 ++ spin_lock_irq(&running_trace_lock);
5409 ++ list_add(&bt->running_list, &running_trace_list);
5410 ++ spin_unlock_irq(&running_trace_lock);
5411 +
5412 + trace_note_time(bt);
5413 + ret = 0;
5414 +@@ -608,6 +623,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
5415 + } else {
5416 + if (bt->trace_state == Blktrace_running) {
5417 + bt->trace_state = Blktrace_stopped;
5418 ++ spin_lock_irq(&running_trace_lock);
5419 ++ list_del_init(&bt->running_list);
5420 ++ spin_unlock_irq(&running_trace_lock);
5421 + relay_flush(bt->rchan);
5422 + ret = 0;
5423 + }
5424 +@@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
5425 + if (atomic_dec_and_test(&blk_probes_ref))
5426 + blk_unregister_tracepoints();
5427 +
5428 ++ spin_lock_irq(&running_trace_lock);
5429 ++ list_del(&bt->running_list);
5430 ++ spin_unlock_irq(&running_trace_lock);
5431 + blk_trace_free(bt);
5432 + return 0;
5433 + }
5434 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
5435 +index 368a4d50cc30..b03b1f897b5e 100644
5436 +--- a/kernel/trace/trace_events.c
5437 ++++ b/kernel/trace/trace_events.c
5438 +@@ -1763,6 +1763,16 @@ static void trace_module_add_events(struct module *mod)
5439 + {
5440 + struct ftrace_event_call **call, **start, **end;
5441 +
5442 ++ if (!mod->num_trace_events)
5443 ++ return;
5444 ++
5445 ++ /* Don't add infrastructure for mods without tracepoints */
5446 ++ if (trace_module_has_bad_taint(mod)) {
5447 ++ pr_err("%s: module has bad taint, not creating trace events\n",
5448 ++ mod->name);
5449 ++ return;
5450 ++ }
5451 ++
5452 + start = mod->trace_events;
5453 + end = mod->trace_events + mod->num_trace_events;
5454 +
5455 +diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
5456 +index 29f26540e9c9..031cc5655a51 100644
5457 +--- a/kernel/tracepoint.c
5458 ++++ b/kernel/tracepoint.c
5459 +@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
5460 + EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
5461 +
5462 + #ifdef CONFIG_MODULES
5463 ++bool trace_module_has_bad_taint(struct module *mod)
5464 ++{
5465 ++ return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
5466 ++}
5467 ++
5468 + static int tracepoint_module_coming(struct module *mod)
5469 + {
5470 + struct tp_module *tp_mod, *iter;
5471 +@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
5472 + * module headers (for forced load), to make sure we don't cause a crash.
5473 + * Staging and out-of-tree GPL modules are fine.
5474 + */
5475 +- if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
5476 ++ if (trace_module_has_bad_taint(mod))
5477 + return 0;
5478 + mutex_lock(&tracepoints_mutex);
5479 + tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
5480 +diff --git a/lib/show_mem.c b/lib/show_mem.c
5481 +index b7c72311ad0c..5847a4921b8e 100644
5482 +--- a/lib/show_mem.c
5483 ++++ b/lib/show_mem.c
5484 +@@ -12,8 +12,7 @@
5485 + void show_mem(unsigned int filter)
5486 + {
5487 + pg_data_t *pgdat;
5488 +- unsigned long total = 0, reserved = 0, shared = 0,
5489 +- nonshared = 0, highmem = 0;
5490 ++ unsigned long total = 0, reserved = 0, highmem = 0;
5491 +
5492 + printk("Mem-Info:\n");
5493 + show_free_areas(filter);
5494 +@@ -22,43 +21,27 @@ void show_mem(unsigned int filter)
5495 + return;
5496 +
5497 + for_each_online_pgdat(pgdat) {
5498 +- unsigned long i, flags;
5499 ++ unsigned long flags;
5500 ++ int zoneid;
5501 +
5502 + pgdat_resize_lock(pgdat, &flags);
5503 +- for (i = 0; i < pgdat->node_spanned_pages; i++) {
5504 +- struct page *page;
5505 +- unsigned long pfn = pgdat->node_start_pfn + i;
5506 +-
5507 +- if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
5508 +- touch_nmi_watchdog();
5509 +-
5510 +- if (!pfn_valid(pfn))
5511 ++ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
5512 ++ struct zone *zone = &pgdat->node_zones[zoneid];
5513 ++ if (!populated_zone(zone))
5514 + continue;
5515 +
5516 +- page = pfn_to_page(pfn);
5517 +-
5518 +- if (PageHighMem(page))
5519 +- highmem++;
5520 ++ total += zone->present_pages;
5521 ++ reserved = zone->present_pages - zone->managed_pages;
5522 +
5523 +- if (PageReserved(page))
5524 +- reserved++;
5525 +- else if (page_count(page) == 1)
5526 +- nonshared++;
5527 +- else if (page_count(page) > 1)
5528 +- shared += page_count(page) - 1;
5529 +-
5530 +- total++;
5531 ++ if (is_highmem_idx(zoneid))
5532 ++ highmem += zone->present_pages;
5533 + }
5534 + pgdat_resize_unlock(pgdat, &flags);
5535 + }
5536 +
5537 + printk("%lu pages RAM\n", total);
5538 +-#ifdef CONFIG_HIGHMEM
5539 +- printk("%lu pages HighMem\n", highmem);
5540 +-#endif
5541 ++ printk("%lu pages HighMem/MovableOnly\n", highmem);
5542 + printk("%lu pages reserved\n", reserved);
5543 +- printk("%lu pages shared\n", shared);
5544 +- printk("%lu pages non-shared\n", nonshared);
5545 + #ifdef CONFIG_QUICKLIST
5546 + printk("%lu pages in pagetable cache\n",
5547 + quicklist_total_size());
5548 +diff --git a/mm/compaction.c b/mm/compaction.c
5549 +index 74ad00908c79..d2c6751879dc 100644
5550 +--- a/mm/compaction.c
5551 ++++ b/mm/compaction.c
5552 +@@ -252,7 +252,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5553 + {
5554 + int nr_scanned = 0, total_isolated = 0;
5555 + struct page *cursor, *valid_page = NULL;
5556 +- unsigned long nr_strict_required = end_pfn - blockpfn;
5557 + unsigned long flags;
5558 + bool locked = false;
5559 +
5560 +@@ -265,11 +264,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5561 +
5562 + nr_scanned++;
5563 + if (!pfn_valid_within(blockpfn))
5564 +- continue;
5565 ++ goto isolate_fail;
5566 ++
5567 + if (!valid_page)
5568 + valid_page = page;
5569 + if (!PageBuddy(page))
5570 +- continue;
5571 ++ goto isolate_fail;
5572 +
5573 + /*
5574 + * The zone lock must be held to isolate freepages.
5575 +@@ -290,12 +290,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5576 +
5577 + /* Recheck this is a buddy page under lock */
5578 + if (!PageBuddy(page))
5579 +- continue;
5580 ++ goto isolate_fail;
5581 +
5582 + /* Found a free page, break it into order-0 pages */
5583 + isolated = split_free_page(page);
5584 +- if (!isolated && strict)
5585 +- break;
5586 + total_isolated += isolated;
5587 + for (i = 0; i < isolated; i++) {
5588 + list_add(&page->lru, freelist);
5589 +@@ -306,7 +304,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5590 + if (isolated) {
5591 + blockpfn += isolated - 1;
5592 + cursor += isolated - 1;
5593 ++ continue;
5594 + }
5595 ++
5596 ++isolate_fail:
5597 ++ if (strict)
5598 ++ break;
5599 ++ else
5600 ++ continue;
5601 ++
5602 + }
5603 +
5604 + trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
5605 +@@ -316,7 +322,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5606 + * pages requested were isolated. If there were any failures, 0 is
5607 + * returned and CMA will fail.
5608 + */
5609 +- if (strict && nr_strict_required > total_isolated)
5610 ++ if (strict && blockpfn < end_pfn)
5611 + total_isolated = 0;
5612 +
5613 + if (locked)
5614 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5615 +index dd7789ce7572..389973fd6bb7 100644
5616 +--- a/mm/huge_memory.c
5617 ++++ b/mm/huge_memory.c
5618 +@@ -1897,7 +1897,7 @@ out:
5619 + return ret;
5620 + }
5621 +
5622 +-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
5623 ++#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
5624 +
5625 + int hugepage_madvise(struct vm_area_struct *vma,
5626 + unsigned long *vm_flags, int advice)
5627 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5628 +index 8e7adcba8176..15429b92ff98 100644
5629 +--- a/mm/memcontrol.c
5630 ++++ b/mm/memcontrol.c
5631 +@@ -1089,8 +1089,8 @@ skip_node:
5632 + * skipping css reference should be safe.
5633 + */
5634 + if (next_css) {
5635 +- if ((next_css->flags & CSS_ONLINE) &&
5636 +- (next_css == &root->css || css_tryget(next_css)))
5637 ++ if ((next_css == &root->css) ||
5638 ++ ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
5639 + return mem_cgroup_from_css(next_css);
5640 +
5641 + prev_css = next_css;
5642 +@@ -6346,11 +6346,24 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
5643 + static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5644 + {
5645 + struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5646 ++ struct cgroup_subsys_state *iter;
5647 +
5648 + kmem_cgroup_css_offline(memcg);
5649 +
5650 + mem_cgroup_invalidate_reclaim_iterators(memcg);
5651 +- mem_cgroup_reparent_charges(memcg);
5652 ++
5653 ++ /*
5654 ++ * This requires that offlining is serialized. Right now that is
5655 ++ * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
5656 ++ */
5657 ++ rcu_read_lock();
5658 ++ css_for_each_descendant_post(iter, css) {
5659 ++ rcu_read_unlock();
5660 ++ mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
5661 ++ rcu_read_lock();
5662 ++ }
5663 ++ rcu_read_unlock();
5664 ++
5665 + mem_cgroup_destroy_all_caches(memcg);
5666 + vmpressure_cleanup(&memcg->vmpressure);
5667 + }
5668 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5669 +index 317ea747d2cd..06f847933eeb 100644
5670 +--- a/mm/page_alloc.c
5671 ++++ b/mm/page_alloc.c
5672 +@@ -1217,6 +1217,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
5673 + }
5674 + local_irq_restore(flags);
5675 + }
5676 ++static bool gfp_thisnode_allocation(gfp_t gfp_mask)
5677 ++{
5678 ++ return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
5679 ++}
5680 ++#else
5681 ++static bool gfp_thisnode_allocation(gfp_t gfp_mask)
5682 ++{
5683 ++ return false;
5684 ++}
5685 + #endif
5686 +
5687 + /*
5688 +@@ -1553,7 +1562,13 @@ again:
5689 + get_pageblock_migratetype(page));
5690 + }
5691 +
5692 +- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
5693 ++ /*
5694 ++ * NOTE: GFP_THISNODE allocations do not partake in the kswapd
5695 ++ * aging protocol, so they can't be fair.
5696 ++ */
5697 ++ if (!gfp_thisnode_allocation(gfp_flags))
5698 ++ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
5699 ++
5700 + __count_zone_vm_events(PGALLOC, zone, 1 << order);
5701 + zone_statistics(preferred_zone, zone, gfp_flags);
5702 + local_irq_restore(flags);
5703 +@@ -1925,8 +1940,12 @@ zonelist_scan:
5704 + * ultimately fall back to remote zones that do not
5705 + * partake in the fairness round-robin cycle of this
5706 + * zonelist.
5707 ++ *
5708 ++ * NOTE: GFP_THISNODE allocations do not partake in
5709 ++ * the kswapd aging protocol, so they can't be fair.
5710 + */
5711 +- if (alloc_flags & ALLOC_WMARK_LOW) {
5712 ++ if ((alloc_flags & ALLOC_WMARK_LOW) &&
5713 ++ !gfp_thisnode_allocation(gfp_mask)) {
5714 + if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
5715 + continue;
5716 + if (!zone_local(preferred_zone, zone))
5717 +@@ -2492,8 +2511,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5718 + * allowed per node queues are empty and that nodes are
5719 + * over allocated.
5720 + */
5721 +- if (IS_ENABLED(CONFIG_NUMA) &&
5722 +- (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
5723 ++ if (gfp_thisnode_allocation(gfp_mask))
5724 + goto nopage;
5725 +
5726 + restart:
5727 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
5728 +index 11af243bf92f..467e3e071832 100644
5729 +--- a/net/core/neighbour.c
5730 ++++ b/net/core/neighbour.c
5731 +@@ -764,9 +764,6 @@ static void neigh_periodic_work(struct work_struct *work)
5732 + nht = rcu_dereference_protected(tbl->nht,
5733 + lockdep_is_held(&tbl->lock));
5734 +
5735 +- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
5736 +- goto out;
5737 +-
5738 + /*
5739 + * periodically recompute ReachableTime from random function
5740 + */
5741 +@@ -779,6 +776,9 @@ static void neigh_periodic_work(struct work_struct *work)
5742 + neigh_rand_reach_time(p->base_reachable_time);
5743 + }
5744 +
5745 ++ if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
5746 ++ goto out;
5747 ++
5748 + for (i = 0 ; i < (1 << nht->hash_shift); i++) {
5749 + np = &nht->hash_buckets[i];
5750 +
5751 +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
5752 +index c31e3ad98ef2..ba22cc3a5a53 100644
5753 +--- a/net/ipv4/ip_tunnel_core.c
5754 ++++ b/net/ipv4/ip_tunnel_core.c
5755 +@@ -109,7 +109,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
5756 + secpath_reset(skb);
5757 + if (!skb->l4_rxhash)
5758 + skb->rxhash = 0;
5759 +- skb_dst_drop(skb);
5760 + skb->vlan_tci = 0;
5761 + skb_set_queue_mapping(skb, 0);
5762 + skb->pkt_type = PACKET_HOST;
5763 +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
5764 +index 14a15c49129d..15e024105f91 100644
5765 +--- a/net/ipv4/syncookies.c
5766 ++++ b/net/ipv4/syncookies.c
5767 +@@ -89,8 +89,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
5768 +
5769 +
5770 + static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
5771 +- __be16 dport, __u32 sseq, __u32 count,
5772 +- __u32 data)
5773 ++ __be16 dport, __u32 sseq, __u32 data)
5774 + {
5775 + /*
5776 + * Compute the secure sequence number.
5777 +@@ -102,7 +101,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
5778 + * As an extra hack, we add a small "data" value that encodes the
5779 + * MSS into the second hash value.
5780 + */
5781 +-
5782 ++ u32 count = tcp_cookie_time();
5783 + return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
5784 + sseq + (count << COOKIEBITS) +
5785 + ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
5786 +@@ -114,22 +113,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
5787 + * If the syncookie is bad, the data returned will be out of
5788 + * range. This must be checked by the caller.
5789 + *
5790 +- * The count value used to generate the cookie must be within
5791 +- * "maxdiff" if the current (passed-in) "count". The return value
5792 +- * is (__u32)-1 if this test fails.
5793 ++ * The count value used to generate the cookie must be less than
5794 ++ * MAX_SYNCOOKIE_AGE minutes in the past.
5795 ++ * The return value is (__u32)-1 if this test fails.
5796 + */
5797 + static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
5798 +- __be16 sport, __be16 dport, __u32 sseq,
5799 +- __u32 count, __u32 maxdiff)
5800 ++ __be16 sport, __be16 dport, __u32 sseq)
5801 + {
5802 +- __u32 diff;
5803 ++ u32 diff, count = tcp_cookie_time();
5804 +
5805 + /* Strip away the layers from the cookie */
5806 + cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
5807 +
5808 + /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
5809 + diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
5810 +- if (diff >= maxdiff)
5811 ++ if (diff >= MAX_SYNCOOKIE_AGE)
5812 + return (__u32)-1;
5813 +
5814 + return (cookie -
5815 +@@ -138,22 +136,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
5816 + }
5817 +
5818 + /*
5819 +- * MSS Values are taken from the 2009 paper
5820 +- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
5821 +- * - values 1440 to 1460 accounted for 80% of observed mss values
5822 +- * - values outside the 536-1460 range are rare (<0.2%).
5823 ++ * MSS Values are chosen based on the 2011 paper
5824 ++ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
5825 ++ * Values ..
5826 ++ * .. lower than 536 are rare (< 0.2%)
5827 ++ * .. between 537 and 1299 account for less than 1.5% of observed values
5828 ++ * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
5829 ++ * .. exceeding 1460 are very rare (< 0.04%)
5830 + *
5831 +- * Table must be sorted.
5832 ++ * 1460 is the single most frequently announced mss value (30 to 46% depending
5833 ++ * on monitor location). Table must be sorted.
5834 + */
5835 + static __u16 const msstab[] = {
5836 +- 64,
5837 +- 512,
5838 + 536,
5839 +- 1024,
5840 +- 1440,
5841 ++ 1300,
5842 ++ 1440, /* 1440, 1452: PPPoE */
5843 + 1460,
5844 +- 4312,
5845 +- 8960,
5846 + };
5847 +
5848 + /*
5849 +@@ -173,7 +171,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
5850 +
5851 + return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
5852 + th->source, th->dest, ntohl(th->seq),
5853 +- jiffies / (HZ * 60), mssind);
5854 ++ mssind);
5855 + }
5856 + EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
5857 +
5858 +@@ -189,13 +187,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
5859 + }
5860 +
5861 + /*
5862 +- * This (misnamed) value is the age of syncookie which is permitted.
5863 +- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
5864 +- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
5865 +- * backoff) to compute at runtime so it's currently hardcoded here.
5866 +- */
5867 +-#define COUNTER_TRIES 4
5868 +-/*
5869 + * Check if an ack sequence number is a valid syncookie.
5870 + * Return the decoded mss if it is, or 0 if not.
5871 + */
5872 +@@ -204,9 +195,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
5873 + {
5874 + __u32 seq = ntohl(th->seq) - 1;
5875 + __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
5876 +- th->source, th->dest, seq,
5877 +- jiffies / (HZ * 60),
5878 +- COUNTER_TRIES);
5879 ++ th->source, th->dest, seq);
5880 +
5881 + return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
5882 + }
5883 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5884 +index be5246e1d5b6..531ab5721d79 100644
5885 +--- a/net/ipv4/tcp.c
5886 ++++ b/net/ipv4/tcp.c
5887 +@@ -1000,7 +1000,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
5888 + }
5889 + }
5890 +
5891 +-static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
5892 ++static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
5893 ++ int *copied, size_t size)
5894 + {
5895 + struct tcp_sock *tp = tcp_sk(sk);
5896 + int err, flags;
5897 +@@ -1015,11 +1016,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
5898 + if (unlikely(tp->fastopen_req == NULL))
5899 + return -ENOBUFS;
5900 + tp->fastopen_req->data = msg;
5901 ++ tp->fastopen_req->size = size;
5902 +
5903 + flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
5904 + err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
5905 + msg->msg_namelen, flags);
5906 +- *size = tp->fastopen_req->copied;
5907 ++ *copied = tp->fastopen_req->copied;
5908 + tcp_free_fastopen_req(tp);
5909 + return err;
5910 + }
5911 +@@ -1039,7 +1041,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5912 +
5913 + flags = msg->msg_flags;
5914 + if (flags & MSG_FASTOPEN) {
5915 +- err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
5916 ++ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
5917 + if (err == -EINPROGRESS && copied_syn > 0)
5918 + goto out;
5919 + else if (err)
5920 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5921 +index fb8227a8c004..e088932bcfae 100644
5922 +--- a/net/ipv4/tcp_output.c
5923 ++++ b/net/ipv4/tcp_output.c
5924 +@@ -2902,7 +2902,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
5925 + space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
5926 + MAX_TCP_OPTION_SPACE;
5927 +
5928 +- syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
5929 ++ space = min_t(size_t, space, fo->size);
5930 ++
5931 ++ /* limit to order-0 allocations */
5932 ++ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
5933 ++
5934 ++ syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
5935 + sk->sk_allocation);
5936 + if (syn_data == NULL)
5937 + goto fallback;
5938 +diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
5939 +index 140748debc4a..8af3eb57f438 100644
5940 +--- a/net/ipv6/exthdrs_core.c
5941 ++++ b/net/ipv6/exthdrs_core.c
5942 +@@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
5943 + found = (nexthdr == target);
5944 +
5945 + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
5946 +- if (target < 0)
5947 ++ if (target < 0 || found)
5948 + break;
5949 + return -ENOENT;
5950 + }
5951 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
5952 +index bf63ac8a49b9..d703218a653b 100644
5953 +--- a/net/ipv6/syncookies.c
5954 ++++ b/net/ipv6/syncookies.c
5955 +@@ -24,26 +24,21 @@
5956 + #define COOKIEBITS 24 /* Upper bits store count */
5957 + #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
5958 +
5959 +-/* Table must be sorted. */
5960 ++/* RFC 2460, Section 8.3:
5961 ++ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
5962 ++ *
5963 ++ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
5964 ++ * using higher values than ipv4 tcp syncookies.
5965 ++ * The other values are chosen based on ethernet (1500 and 9k MTU), plus
5966 ++ * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
5967 ++ */
5968 + static __u16 const msstab[] = {
5969 +- 64,
5970 +- 512,
5971 +- 536,
5972 +- 1280 - 60,
5973 ++ 1280 - 60, /* IPV6_MIN_MTU - 60 */
5974 + 1480 - 60,
5975 + 1500 - 60,
5976 +- 4460 - 60,
5977 + 9000 - 60,
5978 + };
5979 +
5980 +-/*
5981 +- * This (misnamed) value is the age of syncookie which is permitted.
5982 +- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
5983 +- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
5984 +- * backoff) to compute at runtime so it's currently hardcoded here.
5985 +- */
5986 +-#define COUNTER_TRIES 4
5987 +-
5988 + static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
5989 + struct request_sock *req,
5990 + struct dst_entry *dst)
5991 +@@ -86,8 +81,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
5992 + static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
5993 + const struct in6_addr *daddr,
5994 + __be16 sport, __be16 dport, __u32 sseq,
5995 +- __u32 count, __u32 data)
5996 ++ __u32 data)
5997 + {
5998 ++ u32 count = tcp_cookie_time();
5999 + return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
6000 + sseq + (count << COOKIEBITS) +
6001 + ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
6002 +@@ -96,15 +92,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
6003 +
6004 + static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
6005 + const struct in6_addr *daddr, __be16 sport,
6006 +- __be16 dport, __u32 sseq, __u32 count,
6007 +- __u32 maxdiff)
6008 ++ __be16 dport, __u32 sseq)
6009 + {
6010 +- __u32 diff;
6011 ++ __u32 diff, count = tcp_cookie_time();
6012 +
6013 + cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
6014 +
6015 + diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
6016 +- if (diff >= maxdiff)
6017 ++ if (diff >= MAX_SYNCOOKIE_AGE)
6018 + return (__u32)-1;
6019 +
6020 + return (cookie -
6021 +@@ -125,8 +120,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
6022 + *mssp = msstab[mssind];
6023 +
6024 + return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
6025 +- th->dest, ntohl(th->seq),
6026 +- jiffies / (HZ * 60), mssind);
6027 ++ th->dest, ntohl(th->seq), mssind);
6028 + }
6029 + EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
6030 +
6031 +@@ -146,8 +140,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
6032 + {
6033 + __u32 seq = ntohl(th->seq) - 1;
6034 + __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
6035 +- th->source, th->dest, seq,
6036 +- jiffies / (HZ * 60), COUNTER_TRIES);
6037 ++ th->source, th->dest, seq);
6038 +
6039 + return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
6040 + }
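
With the counter now taken from tcp_cookie_time() inside the helpers, cookie validity is governed by the shared MAX_SYNCOOKIE_AGE limit (two 60-second periods in mainline) rather than the local COUNTER_TRIES constant. A userspace sketch of the age test, assuming those mainline values:

    #include <stdint.h>

    #define COOKIEBITS 24
    #define MAX_SYNCOOKIE_AGE 2   /* counter periods, i.e. 2 x 60 s */

    /* The counter lives in the top 32 - COOKIEBITS bits of the cookie;
     * the subtraction is masked to that width so wraparound is handled. */
    static int cookie_expired(uint32_t cookie, uint32_t now_count)
    {
        uint32_t diff = (now_count - (cookie >> COOKIEBITS)) &
                        ((uint32_t)-1 >> COOKIEBITS);

        return diff >= MAX_SYNCOOKIE_AGE;
    }
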
6041 +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
6042 +index 06556d6e1a4d..ab4569df9cef 100644
6043 +--- a/net/ipv6/udp_offload.c
6044 ++++ b/net/ipv6/udp_offload.c
6045 +@@ -111,7 +111,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
6046 + fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
6047 + fptr->nexthdr = nexthdr;
6048 + fptr->reserved = 0;
6049 +- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
6050 ++ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
6051 +
6052 + /* Fragment the skb. ipv6 header and the remaining fields of the
6053 + * fragment header are updated in ipv6_gso_segment()
6054 +diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
6055 +index 22290a929b94..641f43219a48 100644
6056 +--- a/net/mac80211/mesh_ps.c
6057 ++++ b/net/mac80211/mesh_ps.c
6058 +@@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
6059 + sdata->vif.addr);
6060 + nullfunc->frame_control = fc;
6061 + nullfunc->duration_id = 0;
6062 ++ nullfunc->seq_ctrl = 0;
6063 + /* no address resolution for this frame -> set addr 1 immediately */
6064 + memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
6065 + memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
6066 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
6067 +index 86e4ad56b573..8d7f4abe65ba 100644
6068 +--- a/net/mac80211/mlme.c
6069 ++++ b/net/mac80211/mlme.c
6070 +@@ -282,6 +282,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
6071 + switch (vht_oper->chan_width) {
6072 + case IEEE80211_VHT_CHANWIDTH_USE_HT:
6073 + vht_chandef.width = chandef->width;
6074 ++ vht_chandef.center_freq1 = chandef->center_freq1;
6075 + break;
6076 + case IEEE80211_VHT_CHANWIDTH_80MHZ:
6077 + vht_chandef.width = NL80211_CHAN_WIDTH_80;
6078 +@@ -331,6 +332,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
6079 + ret = 0;
6080 +
6081 + out:
6082 ++ /*
6083 ++ * When tracking the current AP, don't do any further checks if the
6084 ++ * new chandef is identical to the one we're currently using for the
6085 ++ * connection. This keeps us from playing ping-pong with regulatory,
6086 ++ * without it the following can happen (for example):
6087 ++ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
6088 ++ * - AP advertises regdom US
6089 ++ * - CRDA loads regdom US with 80 MHz prohibited (old database)
6090 ++ * - the code below detects an unsupported channel, downgrades, and
6091 ++ * we disconnect from the AP in the caller
6092 ++ * - disconnect causes CRDA to reload world regdomain and the game
6093 ++ * starts anew.
6094 ++ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
6095 ++ *
6096 ++ * It seems possible that there are still scenarios with CSA or real
6097 ++ * bandwidth changes where this could happen, but those cases are
6098 ++ * less common and wouldn't completely prevent using the AP.
6099 ++ */
6100 ++ if (tracking &&
6101 ++ cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
6102 ++ return ret;
6103 ++
6104 + /* don't print the message below for VHT mismatch if VHT is disabled */
6105 + if (ret & IEEE80211_STA_DISABLE_VHT)
6106 + vht_chandef = *chandef;
6107 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
6108 +index aeb967a0aeed..db41c190e76d 100644
6109 +--- a/net/mac80211/sta_info.c
6110 ++++ b/net/mac80211/sta_info.c
6111 +@@ -340,6 +340,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
6112 + return NULL;
6113 +
6114 + spin_lock_init(&sta->lock);
6115 ++ spin_lock_init(&sta->ps_lock);
6116 + INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
6117 + INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
6118 + mutex_init(&sta->ampdu_mlme.mtx);
6119 +@@ -1049,6 +1050,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
6120 +
6121 + skb_queue_head_init(&pending);
6122 +
6123 ++ /* sync with ieee80211_tx_h_unicast_ps_buf */
6124 ++ spin_lock(&sta->ps_lock);
6125 + /* Send all buffered frames to the station */
6126 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
6127 + int count = skb_queue_len(&pending), tmp;
6128 +@@ -1068,6 +1071,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
6129 + }
6130 +
6131 + ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
6132 ++ spin_unlock(&sta->ps_lock);
6133 +
6134 + local->total_ps_buffered -= buffered;
6135 +
6136 +@@ -1114,6 +1118,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
6137 + memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
6138 + memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
6139 + memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
6140 ++ nullfunc->seq_ctrl = 0;
6141 +
6142 + skb->priority = tid;
6143 + skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
6144 +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
6145 +index 4208dbd5861f..492d59cbf289 100644
6146 +--- a/net/mac80211/sta_info.h
6147 ++++ b/net/mac80211/sta_info.h
6148 +@@ -245,6 +245,7 @@ struct sta_ampdu_mlme {
6149 + * @drv_unblock_wk: used for driver PS unblocking
6150 + * @listen_interval: listen interval of this station, when we're acting as AP
6151 + * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
6152 ++ * @ps_lock: used for powersave-related locking (when mac80211 is the AP)
6153 + * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
6154 + * when it leaves power saving state or polls
6155 + * @tx_filtered: buffers (per AC) of frames we already tried to
6156 +@@ -328,10 +329,8 @@ struct sta_info {
6157 + /* use the accessors defined below */
6158 + unsigned long _flags;
6159 +
6160 +- /*
6161 +- * STA powersave frame queues, no more than the internal
6162 +- * locking required.
6163 +- */
6164 ++ /* STA powersave lock and frame queues */
6165 ++ spinlock_t ps_lock;
6166 + struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
6167 + struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
6168 + unsigned long driver_buffered_tids;
6169 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
6170 +index 81dca92176c7..d6a47e76efff 100644
6171 +--- a/net/mac80211/tx.c
6172 ++++ b/net/mac80211/tx.c
6173 +@@ -477,6 +477,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
6174 + sta->sta.addr, sta->sta.aid, ac);
6175 + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
6176 + purge_old_ps_buffers(tx->local);
6177 ++
6178 ++ /* sync with ieee80211_sta_ps_deliver_wakeup */
6179 ++ spin_lock(&sta->ps_lock);
6180 ++ /*
6181 ++ * STA woke up in the meantime and all the frames on ps_tx_buf have
6182 ++ * been queued to the pending queue. No reordering can happen, go
6183 ++ * ahead and Tx the packet.
6184 ++ */
6185 ++ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
6186 ++ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
6187 ++ spin_unlock(&sta->ps_lock);
6188 ++ return TX_CONTINUE;
6189 ++ }
6190 ++
6191 + if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
6192 + struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
6193 + ps_dbg(tx->sdata,
6194 +@@ -490,6 +504,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
6195 + info->control.vif = &tx->sdata->vif;
6196 + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
6197 + skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
6198 ++ spin_unlock(&sta->ps_lock);
6199 +
6200 + if (!timer_pending(&local->sta_cleanup))
6201 + mod_timer(&local->sta_cleanup,
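
Taken together, the sta_info.c and tx.c hunks close a window where a frame could still be appended to ps_tx_buf after the wakeup path had flushed it to the pending queue, reordering unicast traffic. A simplified pthread model of the locking discipline (illustrative names; the real code uses a spinlock and per-AC queues):

    #include <pthread.h>
    #include <stdbool.h>

    struct sta_model {
        pthread_mutex_t ps_lock;
        bool asleep;            /* WLAN_STA_PS_STA / _PS_DRIVER stand-in */
    };

    /* Tx path: re-check the sleep state under ps_lock and enqueue to the
     * PS buffer while still holding it, or bail out and send directly. */
    static bool ps_buffer_frame(struct sta_model *sta)
    {
        pthread_mutex_lock(&sta->ps_lock);
        if (!sta->asleep) {
            pthread_mutex_unlock(&sta->ps_lock);
            return false;       /* TX_CONTINUE: transmit immediately */
        }
        /* ... skb_queue_tail(&ps_tx_buf[ac], skb) would go here ... */
        pthread_mutex_unlock(&sta->ps_lock);
        return true;
    }

    /* Wakeup path: flip the state and drain the buffer under the same lock. */
    static void ps_deliver_wakeup(struct sta_model *sta)
    {
        pthread_mutex_lock(&sta->ps_lock);
        sta->asleep = false;
        /* ... move ps_tx_buf contents to the pending queue here ... */
        pthread_mutex_unlock(&sta->ps_lock);
    }
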
6202 +diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
6203 +index afba19cb6f87..a282fddf8b00 100644
6204 +--- a/net/mac80211/wme.c
6205 ++++ b/net/mac80211/wme.c
6206 +@@ -153,6 +153,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
6207 + return IEEE80211_AC_BE;
6208 + }
6209 +
6210 ++ if (skb->protocol == sdata->control_port_protocol) {
6211 ++ skb->priority = 7;
6212 ++ return ieee80211_downgrade_queue(sdata, skb);
6213 ++ }
6214 ++
6215 + /* use the data classifier to determine what 802.1d tag the
6216 + * data frame has */
6217 + skb->priority = cfg80211_classify8021d(skb);
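
Forcing priority 7 means control-port frames (normally EAPOL, sdata->control_port_protocol) start in the voice access category before any downgrade, so key handshakes are not starved behind bulk data. For reference, a sketch of the usual 802.1d-to-AC mapping that ieee80211_downgrade_queue() starts from (AC values as in mac80211, VO=0 ... BK=3):

    /* index = 802.1d priority, value = access category */
    static const int ieee802_1d_to_ac_sketch[8] = {
        2, /* 0 -> AC_BE */
        3, /* 1 -> AC_BK */
        3, /* 2 -> AC_BK */
        2, /* 3 -> AC_BE */
        1, /* 4 -> AC_VI */
        1, /* 5 -> AC_VI */
        0, /* 6 -> AC_VO */
        0, /* 7 -> AC_VO */
    };
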
6218 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
6219 +index dfe3f36ff2aa..56ebe71cfe13 100644
6220 +--- a/net/sctp/sm_statefuns.c
6221 ++++ b/net/sctp/sm_statefuns.c
6222 +@@ -759,6 +759,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
6223 + struct sctp_chunk auth;
6224 + sctp_ierror_t ret;
6225 +
6226 ++ /* Make sure that we and the peer are AUTH capable */
6227 ++ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
6228 ++ kfree_skb(chunk->auth_chunk);
6229 ++ sctp_association_free(new_asoc);
6230 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
6231 ++ }
6232 ++
6233 + /* set-up our fake chunk so that we can process it */
6234 + auth.skb = chunk->auth_chunk;
6235 + auth.asoc = chunk->asoc;
6236 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
6237 +index 83a1daa642bb..1d034825fcc3 100644
6238 +--- a/net/sunrpc/xprtsock.c
6239 ++++ b/net/sunrpc/xprtsock.c
6240 +@@ -853,6 +853,8 @@ static void xs_close(struct rpc_xprt *xprt)
6241 +
6242 + dprintk("RPC: xs_close xprt %p\n", xprt);
6243 +
6244 ++ cancel_delayed_work_sync(&transport->connect_worker);
6245 ++
6246 + xs_reset_transport(transport);
6247 + xprt->reestablish_timeout = 0;
6248 +
6249 +@@ -887,12 +889,8 @@ static void xs_local_destroy(struct rpc_xprt *xprt)
6250 + */
6251 + static void xs_destroy(struct rpc_xprt *xprt)
6252 + {
6253 +- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
6254 +-
6255 + dprintk("RPC: xs_destroy xprt %p\n", xprt);
6256 +
6257 +- cancel_delayed_work_sync(&transport->connect_worker);
6258 +-
6259 + xs_local_destroy(xprt);
6260 + }
6261 +
6262 +@@ -1834,6 +1832,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
6263 + }
6264 + #endif
6265 +
6266 ++static void xs_dummy_setup_socket(struct work_struct *work)
6267 ++{
6268 ++}
6269 ++
6270 + static struct socket *xs_create_sock(struct rpc_xprt *xprt,
6271 + struct sock_xprt *transport, int family, int type, int protocol)
6272 + {
6273 +@@ -2673,6 +2675,9 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
6274 + xprt->ops = &xs_local_ops;
6275 + xprt->timeout = &xs_local_default_timeout;
6276 +
6277 ++ INIT_DELAYED_WORK(&transport->connect_worker,
6278 ++ xs_dummy_setup_socket);
6279 ++
6280 + switch (sun->sun_family) {
6281 + case AF_LOCAL:
6282 + if (sun->sun_path[0] != '/') {
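
xs_close() now cancels connect_worker unconditionally, but AF_LOCAL transports never scheduled one, so the setup path installs a no-op handler to make the work struct always valid. The general pattern, as a sketch with illustrative names:

    #include <linux/workqueue.h>

    static void dummy_setup(struct work_struct *work)
    {
        /* never scheduled; exists so cancellation is always safe */
    }

    struct my_transport {
        struct delayed_work connect_worker;
    };

    static void my_transport_init(struct my_transport *t)
    {
        INIT_DELAYED_WORK(&t->connect_worker, dummy_setup);
    }

    static void my_transport_close(struct my_transport *t)
    {
        /* safe even if the worker was never queued, because it was
         * initialized above */
        cancel_delayed_work_sync(&t->connect_worker);
    }
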
6283 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6284 +index a427623ee574..d7c1ac621a90 100644
6285 +--- a/net/unix/af_unix.c
6286 ++++ b/net/unix/af_unix.c
6287 +@@ -161,9 +161,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
6288 +
6289 + static inline unsigned int unix_hash_fold(__wsum n)
6290 + {
6291 +- unsigned int hash = (__force unsigned int)n;
6292 ++ unsigned int hash = (__force unsigned int)csum_fold(n);
6293 +
6294 +- hash ^= hash>>16;
6295 + hash ^= hash>>8;
6296 + return hash&(UNIX_HASH_SIZE-1);
6297 + }
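
csum_fold() reduces the 32-bit partial checksum to its canonical 16-bit form (adding the carries back in, then complementing) before the final xor-fold, so the upper half of the wsum still contributes once the >>16 step is dropped. A userspace sketch of the corrected fold (UNIX_HASH_SIZE is 256 in this kernel):

    #include <stdint.h>

    static uint16_t csum_fold_sketch(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);  /* absorb the carry */
        return (uint16_t)~csum;
    }

    #define UNIX_HASH_SIZE 256

    static unsigned int unix_hash_fold_sketch(uint32_t n)
    {
        unsigned int hash = csum_fold_sketch(n);

        hash ^= hash >> 8;
        return hash & (UNIX_HASH_SIZE - 1);
    }
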
6298 +diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
6299 +index 2906d520eea7..3be02b680268 100644
6300 +--- a/net/xfrm/xfrm_ipcomp.c
6301 ++++ b/net/xfrm/xfrm_ipcomp.c
6302 +@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
6303 + const int plen = skb->len;
6304 + int dlen = IPCOMP_SCRATCH_SIZE;
6305 + u8 *start = skb->data;
6306 +- const int cpu = get_cpu();
6307 +- u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
6308 +- struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
6309 ++ struct crypto_comp *tfm;
6310 ++ u8 *scratch;
6311 + int err;
6312 +
6313 + local_bh_disable();
6314 ++ scratch = *this_cpu_ptr(ipcomp_scratches);
6315 ++ tfm = *this_cpu_ptr(ipcd->tfms);
6316 + err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
6317 +- local_bh_enable();
6318 + if (err)
6319 + goto out;
6320 +
6321 +@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
6322 + }
6323 +
6324 + memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
6325 +- put_cpu();
6326 ++ local_bh_enable();
6327 +
6328 + pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
6329 + return 0;
6330 +
6331 + out:
6332 +- put_cpu();
6333 ++ local_bh_enable();
6334 + return err;
6335 + }
6336 +
6337 +diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
6338 +index 30f119b1d1ec..820313a04d49 100644
6339 +--- a/security/selinux/ss/ebitmap.c
6340 ++++ b/security/selinux/ss/ebitmap.c
6341 +@@ -213,7 +213,12 @@ netlbl_import_failure:
6342 + }
6343 + #endif /* CONFIG_NETLABEL */
6344 +
6345 +-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
6346 ++/*
6347 ++ * Check to see if all the bits set in e2 are also set in e1. Optionally,
6348 ++ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
6349 ++ * last_e2bit.
6350 ++ */
6351 ++int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
6352 + {
6353 + struct ebitmap_node *n1, *n2;
6354 + int i;
6355 +@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
6356 +
6357 + n1 = e1->node;
6358 + n2 = e2->node;
6359 ++
6360 + while (n1 && n2 && (n1->startbit <= n2->startbit)) {
6361 + if (n1->startbit < n2->startbit) {
6362 + n1 = n1->next;
6363 + continue;
6364 + }
6365 +- for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
6366 ++ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
6367 ++ i--; /* Skip trailing NULL map entries */
6368 ++ if (last_e2bit && (i >= 0)) {
6369 ++ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
6370 ++ __fls(n2->maps[i]);
6371 ++ if (lastsetbit > last_e2bit)
6372 ++ return 0;
6373 ++ }
6374 ++
6375 ++ while (i >= 0) {
6376 + if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
6377 + return 0;
6378 ++ i--;
6379 + }
6380 +
6381 + n1 = n1->next;
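
The new last_e2bit argument lets callers reject bitmaps whose highest set bit exceeds a limit (mls.c below passes p->p_cats.nprim) without walking every bit: the absolute position comes from the last non-zero map word plus __fls(). A userspace sketch of that computation (using GCC/Clang's __builtin_clzl as a stand-in for __fls()):

    #include <stdint.h>

    /* __fls(): bit index of the most significant set bit (x must be
     * non-zero). */
    static inline int fls_sketch(unsigned long x)
    {
        return (int)(sizeof(unsigned long) * 8 - 1) - __builtin_clzl(x);
    }

    /* Highest absolute bit set in a node: the node's start bit, plus the
     * offset of the last non-zero unit, plus the top bit within it. */
    static uint32_t last_set_bit(uint32_t startbit, int unit,
                                 unsigned long map, uint32_t unit_size)
    {
        return startbit + (uint32_t)unit * unit_size
               + (uint32_t)fls_sketch(map);
    }
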
6382 +diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
6383 +index 922f8afa89dd..712c8a7b8e8b 100644
6384 +--- a/security/selinux/ss/ebitmap.h
6385 ++++ b/security/selinux/ss/ebitmap.h
6386 +@@ -16,7 +16,13 @@
6387 +
6388 + #include <net/netlabel.h>
6389 +
6390 +-#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
6391 ++#ifdef CONFIG_64BIT
6392 ++#define EBITMAP_NODE_SIZE 64
6393 ++#else
6394 ++#define EBITMAP_NODE_SIZE 32
6395 ++#endif
6396 ++
6397 ++#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
6398 + / sizeof(unsigned long))
6399 + #define EBITMAP_UNIT_SIZE BITS_PER_LONG
6400 + #define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
6401 +@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
6402 +
6403 + int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
6404 + int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
6405 +-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
6406 ++int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
6407 + int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
6408 + int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
6409 + void ebitmap_destroy(struct ebitmap *e);
6410 +diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
6411 +index 40de8d3f208e..c85bc1ec040c 100644
6412 +--- a/security/selinux/ss/mls.c
6413 ++++ b/security/selinux/ss/mls.c
6414 +@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
6415 + int mls_level_isvalid(struct policydb *p, struct mls_level *l)
6416 + {
6417 + struct level_datum *levdatum;
6418 +- struct ebitmap_node *node;
6419 +- int i;
6420 +
6421 + if (!l->sens || l->sens > p->p_levels.nprim)
6422 + return 0;
6423 +@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
6424 + if (!levdatum)
6425 + return 0;
6426 +
6427 +- ebitmap_for_each_positive_bit(&l->cat, node, i) {
6428 +- if (i > p->p_cats.nprim)
6429 +- return 0;
6430 +- if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
6431 +- /*
6432 +- * Category may not be associated with
6433 +- * sensitivity.
6434 +- */
6435 +- return 0;
6436 +- }
6437 +- }
6438 +-
6439 +- return 1;
6440 ++ /*
6441 ++	 * Return 1 iff all the bits set in l->cat are also set in
6442 ++ * levdatum->level->cat and no bit in l->cat is larger than
6443 ++ * p->p_cats.nprim.
6444 ++ */
6445 ++ return ebitmap_contains(&levdatum->level->cat, &l->cat,
6446 ++ p->p_cats.nprim);
6447 + }
6448 +
6449 + int mls_range_isvalid(struct policydb *p, struct mls_range *r)
6450 +diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
6451 +index 03bed52a8052..e93648774137 100644
6452 +--- a/security/selinux/ss/mls_types.h
6453 ++++ b/security/selinux/ss/mls_types.h
6454 +@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
6455 + static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
6456 + {
6457 + return ((l1->sens >= l2->sens) &&
6458 +- ebitmap_contains(&l1->cat, &l2->cat));
6459 ++ ebitmap_contains(&l1->cat, &l2->cat, 0));
6460 + }
6461 +
6462 + #define mls_level_incomp(l1, l2) \
6463 +diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
6464 +index d0d7ac1e99d2..f62356c2f54c 100644
6465 +--- a/sound/pci/hda/hda_eld.c
6466 ++++ b/sound/pci/hda/hda_eld.c
6467 +@@ -478,10 +478,9 @@ static void hdmi_print_sad_info(int i, struct cea_sad *a,
6468 + snd_iprintf(buffer, "sad%d_profile\t\t%d\n", i, a->profile);
6469 + }
6470 +
6471 +-static void hdmi_print_eld_info(struct snd_info_entry *entry,
6472 +- struct snd_info_buffer *buffer)
6473 ++void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
6474 ++ struct snd_info_buffer *buffer)
6475 + {
6476 +- struct hdmi_eld *eld = entry->private_data;
6477 + struct parsed_hdmi_eld *e = &eld->info;
6478 + char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
6479 + int i;
6480 +@@ -500,13 +499,10 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
6481 + [4 ... 7] = "reserved"
6482 + };
6483 +
6484 +- mutex_lock(&eld->lock);
6485 + snd_iprintf(buffer, "monitor_present\t\t%d\n", eld->monitor_present);
6486 + snd_iprintf(buffer, "eld_valid\t\t%d\n", eld->eld_valid);
6487 +- if (!eld->eld_valid) {
6488 +- mutex_unlock(&eld->lock);
6489 ++ if (!eld->eld_valid)
6490 + return;
6491 +- }
6492 + snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
6493 + snd_iprintf(buffer, "connection_type\t\t%s\n",
6494 + eld_connection_type_names[e->conn_type]);
6495 +@@ -528,13 +524,11 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
6496 +
6497 + for (i = 0; i < e->sad_count; i++)
6498 + hdmi_print_sad_info(i, e->sad + i, buffer);
6499 +- mutex_unlock(&eld->lock);
6500 + }
6501 +
6502 +-static void hdmi_write_eld_info(struct snd_info_entry *entry,
6503 +- struct snd_info_buffer *buffer)
6504 ++void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
6505 ++ struct snd_info_buffer *buffer)
6506 + {
6507 +- struct hdmi_eld *eld = entry->private_data;
6508 + struct parsed_hdmi_eld *e = &eld->info;
6509 + char line[64];
6510 + char name[64];
6511 +@@ -542,7 +536,6 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
6512 + long long val;
6513 + unsigned int n;
6514 +
6515 +- mutex_lock(&eld->lock);
6516 + while (!snd_info_get_line(buffer, line, sizeof(line))) {
6517 + if (sscanf(line, "%s %llx", name, &val) != 2)
6518 + continue;
6519 +@@ -594,38 +587,7 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
6520 + e->sad_count = n + 1;
6521 + }
6522 + }
6523 +- mutex_unlock(&eld->lock);
6524 +-}
6525 +-
6526 +-
6527 +-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
6528 +- int index)
6529 +-{
6530 +- char name[32];
6531 +- struct snd_info_entry *entry;
6532 +- int err;
6533 +-
6534 +- snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
6535 +- err = snd_card_proc_new(codec->bus->card, name, &entry);
6536 +- if (err < 0)
6537 +- return err;
6538 +-
6539 +- snd_info_set_text_ops(entry, eld, hdmi_print_eld_info);
6540 +- entry->c.text.write = hdmi_write_eld_info;
6541 +- entry->mode |= S_IWUSR;
6542 +- eld->proc_entry = entry;
6543 +-
6544 +- return 0;
6545 +-}
6546 +-
6547 +-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
6548 +-{
6549 +- if (!codec->bus->shutdown && eld->proc_entry) {
6550 +- snd_device_free(codec->bus->card, eld->proc_entry);
6551 +- eld->proc_entry = NULL;
6552 +- }
6553 + }
6554 +-
6555 + #endif /* CONFIG_PROC_FS */
6556 +
6557 + /* update PCM info based on ELD */
6558 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6559 +index f7e76619f7c9..ccf5eb6b3d37 100644
6560 +--- a/sound/pci/hda/hda_intel.c
6561 ++++ b/sound/pci/hda/hda_intel.c
6562 +@@ -169,6 +169,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
6563 + "{Intel, PPT},"
6564 + "{Intel, LPT},"
6565 + "{Intel, LPT_LP},"
6566 ++ "{Intel, WPT_LP},"
6567 + "{Intel, HPT},"
6568 + "{Intel, PBG},"
6569 + "{Intel, SCH},"
6570 +@@ -568,6 +569,7 @@ enum {
6571 + AZX_DRIVER_ICH,
6572 + AZX_DRIVER_PCH,
6573 + AZX_DRIVER_SCH,
6574 ++ AZX_DRIVER_HDMI,
6575 + AZX_DRIVER_ATI,
6576 + AZX_DRIVER_ATIHDMI,
6577 + AZX_DRIVER_ATIHDMI_NS,
6578 +@@ -647,6 +649,7 @@ static char *driver_short_names[] = {
6579 + [AZX_DRIVER_ICH] = "HDA Intel",
6580 + [AZX_DRIVER_PCH] = "HDA Intel PCH",
6581 + [AZX_DRIVER_SCH] = "HDA Intel MID",
6582 ++ [AZX_DRIVER_HDMI] = "HDA Intel HDMI",
6583 + [AZX_DRIVER_ATI] = "HDA ATI SB",
6584 + [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
6585 + [AZX_DRIVER_ATIHDMI_NS] = "HDA ATI HDMI",
6586 +@@ -3994,13 +3997,16 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
6587 + /* Lynx Point-LP */
6588 + { PCI_DEVICE(0x8086, 0x9c21),
6589 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6590 ++ /* Wildcat Point-LP */
6591 ++ { PCI_DEVICE(0x8086, 0x9ca0),
6592 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6593 + /* Haswell */
6594 + { PCI_DEVICE(0x8086, 0x0a0c),
6595 +- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
6596 ++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6597 + { PCI_DEVICE(0x8086, 0x0c0c),
6598 +- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
6599 ++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6600 + { PCI_DEVICE(0x8086, 0x0d0c),
6601 +- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
6602 ++ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6603 + /* 5 Series/3400 */
6604 + { PCI_DEVICE(0x8086, 0x3b56),
6605 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
6606 +@@ -4080,6 +4086,22 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
6607 + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6608 + { PCI_DEVICE(0x1002, 0xaa48),
6609 + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6610 ++ { PCI_DEVICE(0x1002, 0xaa50),
6611 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6612 ++ { PCI_DEVICE(0x1002, 0xaa58),
6613 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6614 ++ { PCI_DEVICE(0x1002, 0xaa60),
6615 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6616 ++ { PCI_DEVICE(0x1002, 0xaa68),
6617 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6618 ++ { PCI_DEVICE(0x1002, 0xaa80),
6619 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6620 ++ { PCI_DEVICE(0x1002, 0xaa88),
6621 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6622 ++ { PCI_DEVICE(0x1002, 0xaa90),
6623 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6624 ++ { PCI_DEVICE(0x1002, 0xaa98),
6625 ++ .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6626 + { PCI_DEVICE(0x1002, 0x9902),
6627 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
6628 + { PCI_DEVICE(0x1002, 0xaaa0),
6629 +diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
6630 +index 2e7493ef8ee0..040d93324f32 100644
6631 +--- a/sound/pci/hda/hda_local.h
6632 ++++ b/sound/pci/hda/hda_local.h
6633 +@@ -751,10 +751,6 @@ struct hdmi_eld {
6634 + int eld_size;
6635 + char eld_buffer[ELD_MAX_SIZE];
6636 + struct parsed_hdmi_eld info;
6637 +- struct mutex lock;
6638 +-#ifdef CONFIG_PROC_FS
6639 +- struct snd_info_entry *proc_entry;
6640 +-#endif
6641 + };
6642 +
6643 + int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid);
6644 +@@ -767,20 +763,10 @@ void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
6645 + struct hda_pcm_stream *hinfo);
6646 +
6647 + #ifdef CONFIG_PROC_FS
6648 +-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
6649 +- int index);
6650 +-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld);
6651 +-#else
6652 +-static inline int snd_hda_eld_proc_new(struct hda_codec *codec,
6653 +- struct hdmi_eld *eld,
6654 +- int index)
6655 +-{
6656 +- return 0;
6657 +-}
6658 +-static inline void snd_hda_eld_proc_free(struct hda_codec *codec,
6659 +- struct hdmi_eld *eld)
6660 +-{
6661 +-}
6662 ++void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
6663 ++ struct snd_info_buffer *buffer);
6664 ++void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
6665 ++ struct snd_info_buffer *buffer);
6666 + #endif
6667 +
6668 + #define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
6669 +diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
6670 +index 7fc15814c618..41ebdd8812b1 100644
6671 +--- a/sound/pci/hda/patch_analog.c
6672 ++++ b/sound/pci/hda/patch_analog.c
6673 +@@ -1085,6 +1085,7 @@ static int patch_ad1884(struct hda_codec *codec)
6674 + spec = codec->spec;
6675 +
6676 + spec->gen.mixer_nid = 0x20;
6677 ++ spec->gen.mixer_merge_nid = 0x21;
6678 + spec->gen.beep_nid = 0x10;
6679 + set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
6680 +
6681 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
6682 +index 417e0fc2d119..adb374babd18 100644
6683 +--- a/sound/pci/hda/patch_hdmi.c
6684 ++++ b/sound/pci/hda/patch_hdmi.c
6685 +@@ -45,6 +45,7 @@ module_param(static_hdmi_pcm, bool, 0644);
6686 + MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
6687 +
6688 + #define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
6689 ++#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
6690 +
6691 + struct hdmi_spec_per_cvt {
6692 + hda_nid_t cvt_nid;
6693 +@@ -63,9 +64,11 @@ struct hdmi_spec_per_pin {
6694 + hda_nid_t pin_nid;
6695 + int num_mux_nids;
6696 + hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
6697 ++ hda_nid_t cvt_nid;
6698 +
6699 + struct hda_codec *codec;
6700 + struct hdmi_eld sink_eld;
6701 ++ struct mutex lock;
6702 + struct delayed_work work;
6703 + struct snd_kcontrol *eld_ctl;
6704 + int repoll_count;
6705 +@@ -75,6 +78,9 @@ struct hdmi_spec_per_pin {
6706 + bool chmap_set; /* channel-map override by ALSA API? */
6707 + unsigned char chmap[8]; /* ALSA API channel-map */
6708 + char pcm_name[8]; /* filled in build_pcm callbacks */
6709 ++#ifdef CONFIG_PROC_FS
6710 ++ struct snd_info_entry *proc_entry;
6711 ++#endif
6712 + };
6713 +
6714 + struct hdmi_spec {
6715 +@@ -351,17 +357,19 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
6716 + {
6717 + struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
6718 + struct hdmi_spec *spec = codec->spec;
6719 ++ struct hdmi_spec_per_pin *per_pin;
6720 + struct hdmi_eld *eld;
6721 + int pin_idx;
6722 +
6723 + uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
6724 +
6725 + pin_idx = kcontrol->private_value;
6726 +- eld = &get_pin(spec, pin_idx)->sink_eld;
6727 ++ per_pin = get_pin(spec, pin_idx);
6728 ++ eld = &per_pin->sink_eld;
6729 +
6730 +- mutex_lock(&eld->lock);
6731 ++ mutex_lock(&per_pin->lock);
6732 + uinfo->count = eld->eld_valid ? eld->eld_size : 0;
6733 +- mutex_unlock(&eld->lock);
6734 ++ mutex_unlock(&per_pin->lock);
6735 +
6736 + return 0;
6737 + }
6738 +@@ -371,15 +379,17 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
6739 + {
6740 + struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
6741 + struct hdmi_spec *spec = codec->spec;
6742 ++ struct hdmi_spec_per_pin *per_pin;
6743 + struct hdmi_eld *eld;
6744 + int pin_idx;
6745 +
6746 + pin_idx = kcontrol->private_value;
6747 +- eld = &get_pin(spec, pin_idx)->sink_eld;
6748 ++ per_pin = get_pin(spec, pin_idx);
6749 ++ eld = &per_pin->sink_eld;
6750 +
6751 +- mutex_lock(&eld->lock);
6752 ++ mutex_lock(&per_pin->lock);
6753 + if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
6754 +- mutex_unlock(&eld->lock);
6755 ++ mutex_unlock(&per_pin->lock);
6756 + snd_BUG();
6757 + return -EINVAL;
6758 + }
6759 +@@ -389,7 +399,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
6760 + if (eld->eld_valid)
6761 + memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
6762 + eld->eld_size);
6763 +- mutex_unlock(&eld->lock);
6764 ++ mutex_unlock(&per_pin->lock);
6765 +
6766 + return 0;
6767 + }
6768 +@@ -490,6 +500,68 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
6769 + AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
6770 + }
6771 +
6772 ++/*
6773 ++ * ELD proc files
6774 ++ */
6775 ++
6776 ++#ifdef CONFIG_PROC_FS
6777 ++static void print_eld_info(struct snd_info_entry *entry,
6778 ++ struct snd_info_buffer *buffer)
6779 ++{
6780 ++ struct hdmi_spec_per_pin *per_pin = entry->private_data;
6781 ++
6782 ++ mutex_lock(&per_pin->lock);
6783 ++ snd_hdmi_print_eld_info(&per_pin->sink_eld, buffer);
6784 ++ mutex_unlock(&per_pin->lock);
6785 ++}
6786 ++
6787 ++static void write_eld_info(struct snd_info_entry *entry,
6788 ++ struct snd_info_buffer *buffer)
6789 ++{
6790 ++ struct hdmi_spec_per_pin *per_pin = entry->private_data;
6791 ++
6792 ++ mutex_lock(&per_pin->lock);
6793 ++ snd_hdmi_write_eld_info(&per_pin->sink_eld, buffer);
6794 ++ mutex_unlock(&per_pin->lock);
6795 ++}
6796 ++
6797 ++static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
6798 ++{
6799 ++ char name[32];
6800 ++ struct hda_codec *codec = per_pin->codec;
6801 ++ struct snd_info_entry *entry;
6802 ++ int err;
6803 ++
6804 ++ snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
6805 ++ err = snd_card_proc_new(codec->bus->card, name, &entry);
6806 ++ if (err < 0)
6807 ++ return err;
6808 ++
6809 ++ snd_info_set_text_ops(entry, per_pin, print_eld_info);
6810 ++ entry->c.text.write = write_eld_info;
6811 ++ entry->mode |= S_IWUSR;
6812 ++ per_pin->proc_entry = entry;
6813 ++
6814 ++ return 0;
6815 ++}
6816 ++
6817 ++static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
6818 ++{
6819 ++ if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
6820 ++ snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
6821 ++ per_pin->proc_entry = NULL;
6822 ++ }
6823 ++}
6824 ++#else
6825 ++static inline int eld_proc_new(struct hdmi_spec_per_pin *per_pin,
6826 ++ int index)
6827 ++{
6828 ++ return 0;
6829 ++}
6830 ++static inline void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
6831 ++{
6832 ++}
6833 ++#endif
6834 +
6835 + /*
6836 + * Channel mapping routines
6837 +@@ -608,25 +680,35 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
6838 + bool non_pcm,
6839 + int ca)
6840 + {
6841 ++ struct cea_channel_speaker_allocation *ch_alloc;
6842 + int i;
6843 + int err;
6844 + int order;
6845 + int non_pcm_mapping[8];
6846 +
6847 + order = get_channel_allocation_order(ca);
6848 ++ ch_alloc = &channel_allocations[order];
6849 +
6850 + if (hdmi_channel_mapping[ca][1] == 0) {
6851 +- for (i = 0; i < channel_allocations[order].channels; i++)
6852 +- hdmi_channel_mapping[ca][i] = i | (i << 4);
6853 +- for (; i < 8; i++)
6854 +- hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
6855 ++ int hdmi_slot = 0;
6856 ++ /* fill actual channel mappings in ALSA channel (i) order */
6857 ++ for (i = 0; i < ch_alloc->channels; i++) {
6858 ++ while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
6859 ++ hdmi_slot++; /* skip zero slots */
6860 ++
6861 ++ hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
6862 ++ }
6863 ++ /* fill the rest of the slots with ALSA channel 0xf */
6864 ++ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++)
6865 ++ if (!ch_alloc->speakers[7 - hdmi_slot])
6866 ++ hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
6867 + }
6868 +
6869 + if (non_pcm) {
6870 +- for (i = 0; i < channel_allocations[order].channels; i++)
6871 +- non_pcm_mapping[i] = i | (i << 4);
6872 ++ for (i = 0; i < ch_alloc->channels; i++)
6873 ++ non_pcm_mapping[i] = (i << 4) | i;
6874 + for (; i < 8; i++)
6875 +- non_pcm_mapping[i] = 0xf | (i << 4);
6876 ++ non_pcm_mapping[i] = (0xf << 4) | i;
6877 + }
6878 +
6879 + for (i = 0; i < 8; i++) {
6880 +@@ -639,25 +721,31 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
6881 + break;
6882 + }
6883 + }
6884 +-
6885 +- hdmi_debug_channel_mapping(codec, pin_nid);
6886 + }
6887 +
6888 + struct channel_map_table {
6889 + unsigned char map; /* ALSA API channel map position */
6890 +- unsigned char cea_slot; /* CEA slot value */
6891 + int spk_mask; /* speaker position bit mask */
6892 + };
6893 +
6894 + static struct channel_map_table map_tables[] = {
6895 +- { SNDRV_CHMAP_FL, 0x00, FL },
6896 +- { SNDRV_CHMAP_FR, 0x01, FR },
6897 +- { SNDRV_CHMAP_RL, 0x04, RL },
6898 +- { SNDRV_CHMAP_RR, 0x05, RR },
6899 +- { SNDRV_CHMAP_LFE, 0x02, LFE },
6900 +- { SNDRV_CHMAP_FC, 0x03, FC },
6901 +- { SNDRV_CHMAP_RLC, 0x06, RLC },
6902 +- { SNDRV_CHMAP_RRC, 0x07, RRC },
6903 ++ { SNDRV_CHMAP_FL, FL },
6904 ++ { SNDRV_CHMAP_FR, FR },
6905 ++ { SNDRV_CHMAP_RL, RL },
6906 ++ { SNDRV_CHMAP_RR, RR },
6907 ++ { SNDRV_CHMAP_LFE, LFE },
6908 ++ { SNDRV_CHMAP_FC, FC },
6909 ++ { SNDRV_CHMAP_RLC, RLC },
6910 ++ { SNDRV_CHMAP_RRC, RRC },
6911 ++ { SNDRV_CHMAP_RC, RC },
6912 ++ { SNDRV_CHMAP_FLC, FLC },
6913 ++ { SNDRV_CHMAP_FRC, FRC },
6914 ++ { SNDRV_CHMAP_FLH, FLH },
6915 ++ { SNDRV_CHMAP_FRH, FRH },
6916 ++ { SNDRV_CHMAP_FLW, FLW },
6917 ++ { SNDRV_CHMAP_FRW, FRW },
6918 ++ { SNDRV_CHMAP_TC, TC },
6919 ++ { SNDRV_CHMAP_FCH, FCH },
6920 + {} /* terminator */
6921 + };
6922 +
6923 +@@ -673,25 +761,19 @@ static int to_spk_mask(unsigned char c)
6924 + }
6925 +
6926 + /* from ALSA API channel position to CEA slot */
6927 +-static int to_cea_slot(unsigned char c)
6928 ++static int to_cea_slot(int ordered_ca, unsigned char pos)
6929 + {
6930 +- struct channel_map_table *t = map_tables;
6931 +- for (; t->map; t++) {
6932 +- if (t->map == c)
6933 +- return t->cea_slot;
6934 +- }
6935 +- return 0x0f;
6936 +-}
6937 ++ int mask = to_spk_mask(pos);
6938 ++ int i;
6939 +
6940 +-/* from CEA slot to ALSA API channel position */
6941 +-static int from_cea_slot(unsigned char c)
6942 +-{
6943 +- struct channel_map_table *t = map_tables;
6944 +- for (; t->map; t++) {
6945 +- if (t->cea_slot == c)
6946 +- return t->map;
6947 ++ if (mask) {
6948 ++ for (i = 0; i < 8; i++) {
6949 ++ if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
6950 ++ return i;
6951 ++ }
6952 + }
6953 +- return 0;
6954 ++
6955 ++ return -1;
6956 + }
6957 +
6958 + /* from speaker bit mask to ALSA API channel position */
6959 +@@ -705,6 +787,14 @@ static int spk_to_chmap(int spk)
6960 + return 0;
6961 + }
6962 +
6963 ++/* from CEA slot to ALSA API channel position */
6964 ++static int from_cea_slot(int ordered_ca, unsigned char slot)
6965 ++{
6966 ++ int mask = channel_allocations[ordered_ca].speakers[7 - slot];
6967 ++
6968 ++ return spk_to_chmap(mask);
6969 ++}
6970 ++
6971 + /* get the CA index corresponding to the given ALSA API channel map */
6972 + static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
6973 + {
6974 +@@ -731,16 +821,27 @@ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
6975 + /* set up the channel slots for the given ALSA API channel map */
6976 + static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
6977 + hda_nid_t pin_nid,
6978 +- int chs, unsigned char *map)
6979 ++ int chs, unsigned char *map,
6980 ++ int ca)
6981 + {
6982 +- int i;
6983 +- for (i = 0; i < 8; i++) {
6984 ++ int ordered_ca = get_channel_allocation_order(ca);
6985 ++ int alsa_pos, hdmi_slot;
6986 ++ int assignments[8] = {[0 ... 7] = 0xf};
6987 ++
6988 ++ for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
6989 ++
6990 ++ hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
6991 ++
6992 ++ if (hdmi_slot < 0)
6993 ++ continue; /* unassigned channel */
6994 ++
6995 ++ assignments[hdmi_slot] = alsa_pos;
6996 ++ }
6997 ++
6998 ++ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
6999 + int val, err;
7000 +- if (i < chs)
7001 +- val = to_cea_slot(map[i]);
7002 +- else
7003 +- val = 0xf;
7004 +- val |= (i << 4);
7005 ++
7006 ++ val = (assignments[hdmi_slot] << 4) | hdmi_slot;
7007 + err = snd_hda_codec_write(codec, pin_nid, 0,
7008 + AC_VERB_SET_HDMI_CHAN_SLOT, val);
7009 + if (err)
7010 +@@ -756,7 +857,7 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
7011 + int ordered_ca = get_channel_allocation_order(ca);
7012 + for (i = 0; i < 8; i++) {
7013 + if (i < channel_allocations[ordered_ca].channels)
7014 +- map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
7015 ++ map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
7016 + else
7017 + map[i] = 0;
7018 + }
7019 +@@ -769,11 +870,13 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
7020 + {
7021 + if (!non_pcm && chmap_set) {
7022 + hdmi_manual_setup_channel_mapping(codec, pin_nid,
7023 +- channels, map);
7024 ++ channels, map, ca);
7025 + } else {
7026 + hdmi_std_setup_channel_mapping(codec, pin_nid, non_pcm, ca);
7027 + hdmi_setup_fake_chmap(map, ca);
7028 + }
7029 ++
7030 ++ hdmi_debug_channel_mapping(codec, pin_nid);
7031 + }
7032 +
7033 + /*
7034 +@@ -903,8 +1006,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7035 + {
7036 + hda_nid_t pin_nid = per_pin->pin_nid;
7037 + int channels = per_pin->channels;
7038 ++ int active_channels;
7039 + struct hdmi_eld *eld;
7040 +- int ca;
7041 ++ int ca, ordered_ca;
7042 + union audio_infoframe ai;
7043 +
7044 + if (!channels)
7045 +@@ -926,6 +1030,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7046 + if (ca < 0)
7047 + ca = 0;
7048 +
7049 ++ ordered_ca = get_channel_allocation_order(ca);
7050 ++ active_channels = channel_allocations[ordered_ca].channels;
7051 ++
7052 ++ hdmi_set_channel_count(codec, per_pin->cvt_nid, active_channels);
7053 ++
7054 + memset(&ai, 0, sizeof(ai));
7055 + if (eld->info.conn_type == 0) { /* HDMI */
7056 + struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
7057 +@@ -933,7 +1042,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7058 + hdmi_ai->type = 0x84;
7059 + hdmi_ai->ver = 0x01;
7060 + hdmi_ai->len = 0x0a;
7061 +- hdmi_ai->CC02_CT47 = channels - 1;
7062 ++ hdmi_ai->CC02_CT47 = active_channels - 1;
7063 + hdmi_ai->CA = ca;
7064 + hdmi_checksum_audio_infoframe(hdmi_ai);
7065 + } else if (eld->info.conn_type == 1) { /* DisplayPort */
7066 +@@ -942,7 +1051,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7067 + dp_ai->type = 0x84;
7068 + dp_ai->len = 0x1b;
7069 + dp_ai->ver = 0x11 << 2;
7070 +- dp_ai->CC02_CT47 = channels - 1;
7071 ++ dp_ai->CC02_CT47 = active_channels - 1;
7072 + dp_ai->CA = ca;
7073 + } else {
7074 + snd_printd("HDMI: unknown connection type at pin %d\n",
7075 +@@ -966,9 +1075,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7076 + if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
7077 + sizeof(ai))) {
7078 + snd_printdd("hdmi_setup_audio_infoframe: "
7079 +- "pin=%d channels=%d\n",
7080 ++ "pin=%d channels=%d ca=0x%02x\n",
7081 + pin_nid,
7082 +- channels);
7083 ++ active_channels, ca);
7084 + hdmi_stop_infoframe_trans(codec, pin_nid);
7085 + hdmi_fill_audio_infoframe(codec, pin_nid,
7086 + ai.bytes, sizeof(ai));
7087 +@@ -983,7 +1092,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7088 + * Unsolicited events
7089 + */
7090 +
7091 +-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
7092 ++static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
7093 +
7094 + static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
7095 + {
7096 +@@ -1009,8 +1118,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
7097 + if (pin_idx < 0)
7098 + return;
7099 +
7100 +- hdmi_present_sense(get_pin(spec, pin_idx), 1);
7101 +- snd_hda_jack_report_sync(codec);
7102 ++ if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
7103 ++ snd_hda_jack_report_sync(codec);
7104 + }
7105 +
7106 + static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
7107 +@@ -1160,7 +1269,16 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
7108 + return 0;
7109 + }
7110 +
7111 +-static void haswell_config_cvts(struct hda_codec *codec,
7112 ++/* Intel HDMI workaround to fix audio routing issue:
7113 ++ * For some Intel display codecs, pins share the same connection list.
7114 ++ * So a conveter can be selected by multiple pins and playback on any of these
7115 ++ * pins will generate sound on the external display, because audio flows from
7116 ++ * the same converter to the display pipeline. Also muting one pin may make
7117 ++ * other pins have no sound output.
7118 ++ * So this function assures that an assigned converter for a pin is not selected
7119 ++ * by any other pins.
7120 ++ */
7121 ++static void intel_not_share_assigned_cvt(struct hda_codec *codec,
7122 + hda_nid_t pin_nid, int mux_idx)
7123 + {
7124 + struct hdmi_spec *spec = codec->spec;
7125 +@@ -1231,6 +1349,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
7126 + per_cvt = get_cvt(spec, cvt_idx);
7127 + /* Claim converter */
7128 + per_cvt->assigned = 1;
7129 ++ per_pin->cvt_nid = per_cvt->cvt_nid;
7130 + hinfo->nid = per_cvt->cvt_nid;
7131 +
7132 + snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
7133 +@@ -1238,8 +1357,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
7134 + mux_idx);
7135 +
7136 + /* configure unused pins to choose other converters */
7137 +- if (is_haswell(codec))
7138 +- haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
7139 ++ if (is_haswell(codec) || is_valleyview(codec))
7140 ++ intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
7141 +
7142 + snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
7143 +
7144 +@@ -1297,7 +1416,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
7145 + return 0;
7146 + }
7147 +
7148 +-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7149 ++static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7150 + {
7151 + struct hda_codec *codec = per_pin->codec;
7152 + struct hdmi_spec *spec = codec->spec;
7153 +@@ -1312,10 +1431,15 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7154 + * specification worked this way. Hence, we just ignore the data in
7155 + * the unsolicited response to avoid custom WARs.
7156 + */
7157 +- int present = snd_hda_pin_sense(codec, pin_nid);
7158 ++ int present;
7159 + bool update_eld = false;
7160 + bool eld_changed = false;
7161 ++ bool ret;
7162 +
7163 ++ snd_hda_power_up(codec);
7164 ++ present = snd_hda_pin_sense(codec, pin_nid);
7165 ++
7166 ++ mutex_lock(&per_pin->lock);
7167 + pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
7168 + if (pin_eld->monitor_present)
7169 + eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
7170 +@@ -1345,11 +1469,10 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7171 + queue_delayed_work(codec->bus->workq,
7172 + &per_pin->work,
7173 + msecs_to_jiffies(300));
7174 +- return;
7175 ++ goto unlock;
7176 + }
7177 + }
7178 +
7179 +- mutex_lock(&pin_eld->lock);
7180 + if (pin_eld->eld_valid && !eld->eld_valid) {
7181 + update_eld = true;
7182 + eld_changed = true;
7183 +@@ -1374,12 +1497,19 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7184 + hdmi_setup_audio_infoframe(codec, per_pin,
7185 + per_pin->non_pcm);
7186 + }
7187 +- mutex_unlock(&pin_eld->lock);
7188 +
7189 + if (eld_changed)
7190 + snd_ctl_notify(codec->bus->card,
7191 + SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
7192 + &per_pin->eld_ctl->id);
7193 ++ unlock:
7194 ++ if ((codec->vendor_id & 0xffff0000) == 0x10020000)
7195 ++		ret = true; /* AMD codecs create ELD by themselves */
7196 ++ else
7197 ++ ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
7198 ++ mutex_unlock(&per_pin->lock);
7199 ++ snd_hda_power_down(codec);
7200 ++ return ret;
7201 + }
7202 +
7203 + static void hdmi_repoll_eld(struct work_struct *work)
7204 +@@ -1390,7 +1520,8 @@ static void hdmi_repoll_eld(struct work_struct *work)
7205 + if (per_pin->repoll_count++ > 6)
7206 + per_pin->repoll_count = 0;
7207 +
7208 +- hdmi_present_sense(per_pin, per_pin->repoll_count);
7209 ++ if (hdmi_present_sense(per_pin, per_pin->repoll_count))
7210 ++ snd_hda_jack_report_sync(per_pin->codec);
7211 + }
7212 +
7213 + static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
7214 +@@ -1551,12 +1682,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
7215 + int pinctl;
7216 +
7217 + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
7218 ++ mutex_lock(&per_pin->lock);
7219 + per_pin->channels = substream->runtime->channels;
7220 + per_pin->setup = true;
7221 +
7222 +- hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
7223 +-
7224 + hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
7225 ++ mutex_unlock(&per_pin->lock);
7226 +
7227 + if (spec->dyn_pin_out) {
7228 + pinctl = snd_hda_codec_read(codec, pin_nid, 0,
7229 +@@ -1611,11 +1742,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
7230 + }
7231 +
7232 + snd_hda_spdif_ctls_unassign(codec, pin_idx);
7233 ++
7234 ++ mutex_lock(&per_pin->lock);
7235 + per_pin->chmap_set = false;
7236 + memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
7237 +
7238 + per_pin->setup = false;
7239 + per_pin->channels = 0;
7240 ++ mutex_unlock(&per_pin->lock);
7241 + }
7242 +
7243 + return 0;
7244 +@@ -1650,8 +1784,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
7245 + struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
7246 + struct hda_codec *codec = info->private_data;
7247 + struct hdmi_spec *spec = codec->spec;
7248 +- const unsigned int valid_mask =
7249 +- FL | FR | RL | RR | LFE | FC | RLC | RRC;
7250 + unsigned int __user *dst;
7251 + int chs, count = 0;
7252 +
7253 +@@ -1669,8 +1801,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
7254 + int chs_bytes = chs * 4;
7255 + if (cap->channels != chs)
7256 + continue;
7257 +- if (cap->spk_mask & ~valid_mask)
7258 +- continue;
7259 + if (size < 8)
7260 + return -ENOMEM;
7261 + if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
7262 +@@ -1748,10 +1878,12 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
7263 + ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
7264 + if (ca < 0)
7265 + return -EINVAL;
7266 ++ mutex_lock(&per_pin->lock);
7267 + per_pin->chmap_set = true;
7268 + memcpy(per_pin->chmap, chmap, sizeof(chmap));
7269 + if (prepared)
7270 + hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
7271 ++ mutex_unlock(&per_pin->lock);
7272 +
7273 + return 0;
7274 + }
7275 +@@ -1868,12 +2000,11 @@ static int generic_hdmi_init_per_pins(struct hda_codec *codec)
7276 +
7277 + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
7278 + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
7279 +- struct hdmi_eld *eld = &per_pin->sink_eld;
7280 +
7281 + per_pin->codec = codec;
7282 +- mutex_init(&eld->lock);
7283 ++ mutex_init(&per_pin->lock);
7284 + INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
7285 +- snd_hda_eld_proc_new(codec, eld, pin_idx);
7286 ++ eld_proc_new(per_pin, pin_idx);
7287 + }
7288 + return 0;
7289 + }
7290 +@@ -1914,10 +2045,9 @@ static void generic_hdmi_free(struct hda_codec *codec)
7291 +
7292 + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
7293 + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
7294 +- struct hdmi_eld *eld = &per_pin->sink_eld;
7295 +
7296 + cancel_delayed_work(&per_pin->work);
7297 +- snd_hda_eld_proc_free(codec, eld);
7298 ++ eld_proc_free(per_pin);
7299 + }
7300 +
7301 + flush_workqueue(codec->bus->workq);
7302 +@@ -2717,6 +2847,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
7303 + { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
7304 + { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
7305 + { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
7306 ++{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
7307 + { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
7308 + {} /* terminator */
7309 + };
7310 +@@ -2771,6 +2902,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
7311 + MODULE_ALIAS("snd-hda-codec-id:80862806");
7312 + MODULE_ALIAS("snd-hda-codec-id:80862807");
7313 + MODULE_ALIAS("snd-hda-codec-id:80862880");
7314 ++MODULE_ALIAS("snd-hda-codec-id:80862882");
7315 + MODULE_ALIAS("snd-hda-codec-id:808629fb");
7316 +
7317 + MODULE_LICENSE("GPL");
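
Much of the patch_hdmi.c churn above replaces the fixed cea_slot table with lookups through each channel allocation's speakers[] array, which stores HDMI slot 0 at index 7. A condensed model of the translation (hypothetical struct and one sample allocation; speaker bits as in CEA-861):

    #define FL  (1 << 0)   /* front left */
    #define FR  (1 << 1)   /* front right */
    #define LFE (1 << 2)
    #define FC  (1 << 3)   /* front center */

    struct cea_alloc_model {
        int speakers[8];   /* index 7 holds HDMI slot 0 */
    };

    /* CA 0x03: FL FR LFE FC occupying slots 0..3 */
    static const struct cea_alloc_model ca03 = {
        .speakers = { 0, 0, 0, 0, FC, LFE, FR, FL },
    };

    /* Mirror of to_cea_slot(): find the slot carrying a speaker mask. */
    static int to_cea_slot_model(const struct cea_alloc_model *a, int mask)
    {
        int i;

        for (i = 0; i < 8; i++)
            if (a->speakers[7 - i] == mask)
                return i;
        return -1;          /* unassigned channel */
    }
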
7318 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7319 +index 1be437f533a6..deddee9c1565 100644
7320 +--- a/sound/pci/hda/patch_realtek.c
7321 ++++ b/sound/pci/hda/patch_realtek.c
7322 +@@ -3464,6 +3464,19 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
7323 + alc_fixup_headset_mode(codec, fix, action);
7324 + }
7325 +
7326 ++static void alc_no_shutup(struct hda_codec *codec)
7327 ++{
7328 ++}
7329 ++
7330 ++static void alc_fixup_no_shutup(struct hda_codec *codec,
7331 ++ const struct hda_fixup *fix, int action)
7332 ++{
7333 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
7334 ++ struct alc_spec *spec = codec->spec;
7335 ++ spec->shutup = alc_no_shutup;
7336 ++ }
7337 ++}
7338 ++
7339 + static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
7340 + const struct hda_fixup *fix, int action)
7341 + {
7342 +@@ -3674,6 +3687,7 @@ enum {
7343 + ALC269_FIXUP_HP_GPIO_LED,
7344 + ALC269_FIXUP_INV_DMIC,
7345 + ALC269_FIXUP_LENOVO_DOCK,
7346 ++ ALC269_FIXUP_NO_SHUTUP,
7347 + ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
7348 + ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
7349 + ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
7350 +@@ -3840,6 +3854,10 @@ static const struct hda_fixup alc269_fixups[] = {
7351 + .type = HDA_FIXUP_FUNC,
7352 + .v.func = alc_fixup_inv_dmic_0x12,
7353 + },
7354 ++ [ALC269_FIXUP_NO_SHUTUP] = {
7355 ++ .type = HDA_FIXUP_FUNC,
7356 ++ .v.func = alc_fixup_no_shutup,
7357 ++ },
7358 + [ALC269_FIXUP_LENOVO_DOCK] = {
7359 + .type = HDA_FIXUP_PINS,
7360 + .v.pins = (const struct hda_pintbl[]) {
7361 +@@ -4000,6 +4018,7 @@ static const struct hda_fixup alc269_fixups[] = {
7362 + };
7363 +
7364 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7365 ++ SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
7366 + SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
7367 + SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
7368 + SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
7369 +@@ -4089,6 +4108,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7370 + SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7371 + SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7372 + SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7373 ++ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
7374 + SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7375 + SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
7376 + SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7377 +diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
7378 +index 77acd790ea47..eb7ad7706205 100644
7379 +--- a/sound/pci/oxygen/xonar_dg.c
7380 ++++ b/sound/pci/oxygen/xonar_dg.c
7381 +@@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
7382 + oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
7383 + data->output_sel == 1 ? GPIO_HP_REAR : 0,
7384 + GPIO_HP_REAR);
7385 ++ oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
7386 ++ data->output_sel == 0 ?
7387 ++ OXYGEN_PLAY_MUTE01 :
7388 ++ OXYGEN_PLAY_MUTE23 |
7389 ++ OXYGEN_PLAY_MUTE45 |
7390 ++ OXYGEN_PLAY_MUTE67,
7391 ++ OXYGEN_PLAY_MUTE01 |
7392 ++ OXYGEN_PLAY_MUTE23 |
7393 ++ OXYGEN_PLAY_MUTE45 |
7394 ++ OXYGEN_PLAY_MUTE67);
7395 + }
7396 + mutex_unlock(&chip->mutex);
7397 + return changed;
7398 +@@ -596,7 +606,7 @@ struct oxygen_model model_xonar_dg = {
7399 + .model_data_size = sizeof(struct dg),
7400 + .device_config = PLAYBACK_0_TO_I2S |
7401 + PLAYBACK_1_TO_SPDIF |
7402 +- CAPTURE_0_FROM_I2S_2 |
7403 ++ CAPTURE_0_FROM_I2S_1 |
7404 + CAPTURE_1_FROM_SPDIF,
7405 + .dac_channels_pcm = 6,
7406 + .dac_channels_mixer = 0,
7407 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
7408 +index 95558ef4a7a0..be4db47cb2d9 100644
7409 +--- a/sound/usb/mixer.c
7410 ++++ b/sound/usb/mixer.c
7411 +@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
7412 + }
7413 + break;
7414 +
7415 ++ case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
7416 + case USB_ID(0x046d, 0x0808):
7417 + case USB_ID(0x046d, 0x0809):
7418 + case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */