From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: /
Date: Wed, 29 Apr 2015 17:04:19
Message-Id: 1430327035.138693596fe518aaba934dd7f85b3b27c043084b.mpagano@gentoo
1 commit: 138693596fe518aaba934dd7f85b3b27c043084b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 29 17:03:55 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 29 17:03:55 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=13869359
7
8 Linux patch 3.14.40
9
10 0000_README | 4 +
11 1039_linux-3.14.40.patch | 2462 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2466 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index e03878f..f33de99 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -198,6 +198,10 @@ Patch: 1038_linux-3.14.39.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.14.39
21
22 +Patch: 1039_linux-3.14.40.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.14.40
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1039_linux-3.14.40.patch b/1039_linux-3.14.40.patch
31 new file mode 100644
32 index 0000000..550549b
33 --- /dev/null
34 +++ b/1039_linux-3.14.40.patch
35 @@ -0,0 +1,2462 @@
36 +diff --git a/Makefile b/Makefile
37 +index b40845e11b84..070e0ebb9231 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 3
42 + PATCHLEVEL = 14
43 +-SUBLEVEL = 39
44 ++SUBLEVEL = 40
45 + EXTRAVERSION =
46 + NAME = Remembering Coco
47 +
48 +diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
49 +index 98838a05ba6d..9d0ac091a52a 100644
50 +--- a/arch/alpha/mm/fault.c
51 ++++ b/arch/alpha/mm/fault.c
52 +@@ -156,6 +156,8 @@ retry:
53 + if (unlikely(fault & VM_FAULT_ERROR)) {
54 + if (fault & VM_FAULT_OOM)
55 + goto out_of_memory;
56 ++ else if (fault & VM_FAULT_SIGSEGV)
57 ++ goto bad_area;
58 + else if (fault & VM_FAULT_SIGBUS)
59 + goto do_sigbus;
60 + BUG();
61 +diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
62 +index 9c69552350c4..01e18b58dfa4 100644
63 +--- a/arch/arc/mm/fault.c
64 ++++ b/arch/arc/mm/fault.c
65 +@@ -162,6 +162,8 @@ good_area:
66 + /* TBD: switch to pagefault_out_of_memory() */
67 + if (fault & VM_FAULT_OOM)
68 + goto out_of_memory;
69 ++ else if (fault & VM_FAULT_SIGSEGV)
70 ++ goto bad_area;
71 + else if (fault & VM_FAULT_SIGBUS)
72 + goto do_sigbus;
73 +
74 +diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
75 +index 626989fec4d3..9fd61c72a33a 100644
76 +--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
77 ++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
78 +@@ -43,7 +43,7 @@
79 + #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
80 + #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
81 + #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
82 +-#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
83 ++#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
84 + #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
85 + #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
86 + #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
87 +@@ -72,6 +72,7 @@
88 + #define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
89 + #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
90 + #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
91 ++#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
92 + #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
93 + #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
94 + #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
95 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
96 +index 85c60adc8b60..06e0bc0f8b00 100644
97 +--- a/arch/arm/include/asm/pgtable-3level.h
98 ++++ b/arch/arm/include/asm/pgtable-3level.h
99 +@@ -79,18 +79,19 @@
100 + #define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
101 + #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
102 + #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
103 +-#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
104 + #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
105 + #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
106 + #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
107 +-#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
108 +-#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
109 ++#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
110 ++#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
111 + #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
112 ++#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
113 +
114 +-#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
115 +-#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
116 +-#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
117 +-#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
118 ++#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
119 ++#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
120 ++#define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
121 ++#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
122 ++#define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
123 +
124 + /*
125 + * To be used in assembly code with the upper page attributes.
126 +@@ -207,27 +208,32 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
127 + #define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
128 + #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
129 +
130 +-#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
131 ++#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
132 ++ : !!(pmd_val(pmd) & (val)))
133 ++#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
134 ++
135 ++#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
136 +
137 + #define __HAVE_ARCH_PMD_WRITE
138 +-#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
139 ++#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
140 ++#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
141 +
142 + #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
143 + #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
144 +
145 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
146 +-#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
147 +-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
148 ++#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
149 ++#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
150 + #endif
151 +
152 + #define PMD_BIT_FUNC(fn,op) \
153 + static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
154 +
155 +-PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
156 ++PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
157 + PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
158 +-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
159 +-PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
160 +-PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
161 ++PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
162 ++PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
163 ++PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
164 + PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
165 +
166 + #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
167 +@@ -241,8 +247,8 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
168 +
169 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
170 + {
171 +- const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
172 +- PMD_SECT_VALID | PMD_SECT_NONE;
173 ++ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
174 ++ L_PMD_SECT_VALID | L_PMD_SECT_NONE;
175 + pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
176 + return pmd;
177 + }
178 +@@ -253,8 +259,13 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
179 + BUG_ON(addr >= TASK_SIZE);
180 +
181 + /* create a faulting entry if PROT_NONE protected */
182 +- if (pmd_val(pmd) & PMD_SECT_NONE)
183 +- pmd_val(pmd) &= ~PMD_SECT_VALID;
184 ++ if (pmd_val(pmd) & L_PMD_SECT_NONE)
185 ++ pmd_val(pmd) &= ~L_PMD_SECT_VALID;
186 ++
187 ++ if (pmd_write(pmd) && pmd_dirty(pmd))
188 ++ pmd_val(pmd) &= ~PMD_SECT_AP2;
189 ++ else
190 ++ pmd_val(pmd) |= PMD_SECT_AP2;
191 +
192 + *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
193 + flush_pmd_entry(pmdp);
194 +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
195 +index 7d59b524f2af..89dba131703b 100644
196 +--- a/arch/arm/include/asm/pgtable.h
197 ++++ b/arch/arm/include/asm/pgtable.h
198 +@@ -214,12 +214,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
199 +
200 + #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
201 +
202 ++#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
203 ++ : !!(pte_val(pte) & (val)))
204 ++#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
205 ++
206 + #define pte_none(pte) (!pte_val(pte))
207 +-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
208 +-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
209 +-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
210 +-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
211 +-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
212 ++#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
213 ++#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
214 ++#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
215 ++#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
216 ++#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
217 + #define pte_special(pte) (0)
218 +
219 + #define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
220 +diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
221 +index 22e3ad63500c..eb81123a845d 100644
222 +--- a/arch/arm/mm/proc-v7-3level.S
223 ++++ b/arch/arm/mm/proc-v7-3level.S
224 +@@ -86,8 +86,13 @@ ENTRY(cpu_v7_set_pte_ext)
225 + tst rh, #1 << (57 - 32) @ L_PTE_NONE
226 + bicne rl, #L_PTE_VALID
227 + bne 1f
228 +- tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
229 +- orreq rl, #L_PTE_RDONLY
230 ++
231 ++ eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
232 ++ @ test for !L_PTE_DIRTY || L_PTE_RDONLY
233 ++ tst ip, #1 << (55 - 32) | 1 << (58 - 32)
234 ++ orrne rl, #PTE_AP2
235 ++ biceq rl, #PTE_AP2
236 ++
237 + 1: strd r2, r3, [r0]
238 + ALT_SMP(W(nop))
239 + ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
240 +diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
241 +index 0eca93327195..d223a8b57c1e 100644
242 +--- a/arch/avr32/mm/fault.c
243 ++++ b/arch/avr32/mm/fault.c
244 +@@ -142,6 +142,8 @@ good_area:
245 + if (unlikely(fault & VM_FAULT_ERROR)) {
246 + if (fault & VM_FAULT_OOM)
247 + goto out_of_memory;
248 ++ else if (fault & VM_FAULT_SIGSEGV)
249 ++ goto bad_area;
250 + else if (fault & VM_FAULT_SIGBUS)
251 + goto do_sigbus;
252 + BUG();
253 +diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
254 +index 1790f22e71a2..2686a7aa8ec8 100644
255 +--- a/arch/cris/mm/fault.c
256 ++++ b/arch/cris/mm/fault.c
257 +@@ -176,6 +176,8 @@ retry:
258 + if (unlikely(fault & VM_FAULT_ERROR)) {
259 + if (fault & VM_FAULT_OOM)
260 + goto out_of_memory;
261 ++ else if (fault & VM_FAULT_SIGSEGV)
262 ++ goto bad_area;
263 + else if (fault & VM_FAULT_SIGBUS)
264 + goto do_sigbus;
265 + BUG();
266 +diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
267 +index 9a66372fc7c7..ec4917ddf678 100644
268 +--- a/arch/frv/mm/fault.c
269 ++++ b/arch/frv/mm/fault.c
270 +@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
271 + if (unlikely(fault & VM_FAULT_ERROR)) {
272 + if (fault & VM_FAULT_OOM)
273 + goto out_of_memory;
274 ++ else if (fault & VM_FAULT_SIGSEGV)
275 ++ goto bad_area;
276 + else if (fault & VM_FAULT_SIGBUS)
277 + goto do_sigbus;
278 + BUG();
279 +diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
280 +index 7225dad87094..ba5ba7accd0d 100644
281 +--- a/arch/ia64/mm/fault.c
282 ++++ b/arch/ia64/mm/fault.c
283 +@@ -172,6 +172,8 @@ retry:
284 + */
285 + if (fault & VM_FAULT_OOM) {
286 + goto out_of_memory;
287 ++ } else if (fault & VM_FAULT_SIGSEGV) {
288 ++ goto bad_area;
289 + } else if (fault & VM_FAULT_SIGBUS) {
290 + signal = SIGBUS;
291 + goto bad_area;
292 +diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
293 +index e9c6a8014bd6..e3d4d4890104 100644
294 +--- a/arch/m32r/mm/fault.c
295 ++++ b/arch/m32r/mm/fault.c
296 +@@ -200,6 +200,8 @@ good_area:
297 + if (unlikely(fault & VM_FAULT_ERROR)) {
298 + if (fault & VM_FAULT_OOM)
299 + goto out_of_memory;
300 ++ else if (fault & VM_FAULT_SIGSEGV)
301 ++ goto bad_area;
302 + else if (fault & VM_FAULT_SIGBUS)
303 + goto do_sigbus;
304 + BUG();
305 +diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
306 +index 2bd7487440c4..b2f04aee46ec 100644
307 +--- a/arch/m68k/mm/fault.c
308 ++++ b/arch/m68k/mm/fault.c
309 +@@ -145,6 +145,8 @@ good_area:
310 + if (unlikely(fault & VM_FAULT_ERROR)) {
311 + if (fault & VM_FAULT_OOM)
312 + goto out_of_memory;
313 ++ else if (fault & VM_FAULT_SIGSEGV)
314 ++ goto map_err;
315 + else if (fault & VM_FAULT_SIGBUS)
316 + goto bus_err;
317 + BUG();
318 +diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
319 +index 332680e5ebf2..2de5dc695a87 100644
320 +--- a/arch/metag/mm/fault.c
321 ++++ b/arch/metag/mm/fault.c
322 +@@ -141,6 +141,8 @@ good_area:
323 + if (unlikely(fault & VM_FAULT_ERROR)) {
324 + if (fault & VM_FAULT_OOM)
325 + goto out_of_memory;
326 ++ else if (fault & VM_FAULT_SIGSEGV)
327 ++ goto bad_area;
328 + else if (fault & VM_FAULT_SIGBUS)
329 + goto do_sigbus;
330 + BUG();
331 +diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
332 +index fa4cf52aa7a6..d46a5ebb7570 100644
333 +--- a/arch/microblaze/mm/fault.c
334 ++++ b/arch/microblaze/mm/fault.c
335 +@@ -224,6 +224,8 @@ good_area:
336 + if (unlikely(fault & VM_FAULT_ERROR)) {
337 + if (fault & VM_FAULT_OOM)
338 + goto out_of_memory;
339 ++ else if (fault & VM_FAULT_SIGSEGV)
340 ++ goto bad_area;
341 + else if (fault & VM_FAULT_SIGBUS)
342 + goto do_sigbus;
343 + BUG();
344 +diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
345 +index becc42bb1849..70ab5d664332 100644
346 +--- a/arch/mips/mm/fault.c
347 ++++ b/arch/mips/mm/fault.c
348 +@@ -158,6 +158,8 @@ good_area:
349 + if (unlikely(fault & VM_FAULT_ERROR)) {
350 + if (fault & VM_FAULT_OOM)
351 + goto out_of_memory;
352 ++ else if (fault & VM_FAULT_SIGSEGV)
353 ++ goto bad_area;
354 + else if (fault & VM_FAULT_SIGBUS)
355 + goto do_sigbus;
356 + BUG();
357 +diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
358 +index 3516cbdf1ee9..0c2cc5d39c8e 100644
359 +--- a/arch/mn10300/mm/fault.c
360 ++++ b/arch/mn10300/mm/fault.c
361 +@@ -262,6 +262,8 @@ good_area:
362 + if (unlikely(fault & VM_FAULT_ERROR)) {
363 + if (fault & VM_FAULT_OOM)
364 + goto out_of_memory;
365 ++ else if (fault & VM_FAULT_SIGSEGV)
366 ++ goto bad_area;
367 + else if (fault & VM_FAULT_SIGBUS)
368 + goto do_sigbus;
369 + BUG();
370 +diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
371 +index 0703acf7d327..230ac20ae794 100644
372 +--- a/arch/openrisc/mm/fault.c
373 ++++ b/arch/openrisc/mm/fault.c
374 +@@ -171,6 +171,8 @@ good_area:
375 + if (unlikely(fault & VM_FAULT_ERROR)) {
376 + if (fault & VM_FAULT_OOM)
377 + goto out_of_memory;
378 ++ else if (fault & VM_FAULT_SIGSEGV)
379 ++ goto bad_area;
380 + else if (fault & VM_FAULT_SIGBUS)
381 + goto do_sigbus;
382 + BUG();
383 +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
384 +index d72197f0ddb8..d27e38874e81 100644
385 +--- a/arch/parisc/mm/fault.c
386 ++++ b/arch/parisc/mm/fault.c
387 +@@ -256,6 +256,8 @@ good_area:
388 + */
389 + if (fault & VM_FAULT_OOM)
390 + goto out_of_memory;
391 ++ else if (fault & VM_FAULT_SIGSEGV)
392 ++ goto bad_area;
393 + else if (fault & VM_FAULT_SIGBUS)
394 + goto bad_area;
395 + BUG();
396 +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
397 +index 51ab9e7e6c39..010fabf3828c 100644
398 +--- a/arch/powerpc/mm/fault.c
399 ++++ b/arch/powerpc/mm/fault.c
400 +@@ -432,6 +432,8 @@ good_area:
401 + */
402 + fault = handle_mm_fault(mm, vma, address, flags);
403 + if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
404 ++ if (fault & VM_FAULT_SIGSEGV)
405 ++ goto bad_area;
406 + rc = mm_fault_error(regs, address, fault);
407 + if (rc >= MM_FAULT_RETURN)
408 + goto bail;
409 +diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
410 +index 641e7273d75a..62f3e4e48a0b 100644
411 +--- a/arch/powerpc/platforms/cell/spu_fault.c
412 ++++ b/arch/powerpc/platforms/cell/spu_fault.c
413 +@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
414 + if (*flt & VM_FAULT_OOM) {
415 + ret = -ENOMEM;
416 + goto out_unlock;
417 +- } else if (*flt & VM_FAULT_SIGBUS) {
418 ++ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
419 + ret = -EFAULT;
420 + goto out_unlock;
421 + }
422 +diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
423 +index 87ba7cf99cd7..65d633f20d37 100644
424 +--- a/arch/powerpc/platforms/cell/spufs/inode.c
425 ++++ b/arch/powerpc/platforms/cell/spufs/inode.c
426 +@@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir)
427 + struct dentry *dentry, *tmp;
428 +
429 + mutex_lock(&dir->d_inode->i_mutex);
430 +- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
431 ++ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
432 + spin_lock(&dentry->d_lock);
433 + if (!(d_unhashed(dentry)) && dentry->d_inode) {
434 + dget_dlock(dentry);
435 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
436 +index d95265b2719f..8e95432cc3b2 100644
437 +--- a/arch/s390/mm/fault.c
438 ++++ b/arch/s390/mm/fault.c
439 +@@ -239,6 +239,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
440 + do_no_context(regs);
441 + else
442 + pagefault_out_of_memory();
443 ++ } else if (fault & VM_FAULT_SIGSEGV) {
444 ++ /* Kernel mode? Handle exceptions or die */
445 ++ if (!user_mode(regs))
446 ++ do_no_context(regs);
447 ++ else
448 ++ do_sigsegv(regs, SEGV_MAPERR);
449 + } else if (fault & VM_FAULT_SIGBUS) {
450 + /* Kernel mode? Handle exceptions or die */
451 + if (!user_mode(regs))
452 +diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
453 +index 52238983527d..6860beb2a280 100644
454 +--- a/arch/score/mm/fault.c
455 ++++ b/arch/score/mm/fault.c
456 +@@ -114,6 +114,8 @@ good_area:
457 + if (unlikely(fault & VM_FAULT_ERROR)) {
458 + if (fault & VM_FAULT_OOM)
459 + goto out_of_memory;
460 ++ else if (fault & VM_FAULT_SIGSEGV)
461 ++ goto bad_area;
462 + else if (fault & VM_FAULT_SIGBUS)
463 + goto do_sigbus;
464 + BUG();
465 +diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
466 +index 541dc6101508..a58fec9b55e0 100644
467 +--- a/arch/sh/mm/fault.c
468 ++++ b/arch/sh/mm/fault.c
469 +@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
470 + } else {
471 + if (fault & VM_FAULT_SIGBUS)
472 + do_sigbus(regs, error_code, address);
473 ++ else if (fault & VM_FAULT_SIGSEGV)
474 ++ bad_area(regs, error_code, address);
475 + else
476 + BUG();
477 + }
478 +diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
479 +index 59dbd4645725..163c78712110 100644
480 +--- a/arch/sparc/mm/fault_32.c
481 ++++ b/arch/sparc/mm/fault_32.c
482 +@@ -252,6 +252,8 @@ good_area:
483 + if (unlikely(fault & VM_FAULT_ERROR)) {
484 + if (fault & VM_FAULT_OOM)
485 + goto out_of_memory;
486 ++ else if (fault & VM_FAULT_SIGSEGV)
487 ++ goto bad_area;
488 + else if (fault & VM_FAULT_SIGBUS)
489 + goto do_sigbus;
490 + BUG();
491 +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
492 +index 45a413e4380a..0d6de79105b6 100644
493 +--- a/arch/sparc/mm/fault_64.c
494 ++++ b/arch/sparc/mm/fault_64.c
495 +@@ -448,6 +448,8 @@ good_area:
496 + if (unlikely(fault & VM_FAULT_ERROR)) {
497 + if (fault & VM_FAULT_OOM)
498 + goto out_of_memory;
499 ++ else if (fault & VM_FAULT_SIGSEGV)
500 ++ goto bad_area;
501 + else if (fault & VM_FAULT_SIGBUS)
502 + goto do_sigbus;
503 + BUG();
504 +diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
505 +index 6c0571216a9d..c6d2a76d91a8 100644
506 +--- a/arch/tile/mm/fault.c
507 ++++ b/arch/tile/mm/fault.c
508 +@@ -444,6 +444,8 @@ good_area:
509 + if (unlikely(fault & VM_FAULT_ERROR)) {
510 + if (fault & VM_FAULT_OOM)
511 + goto out_of_memory;
512 ++ else if (fault & VM_FAULT_SIGSEGV)
513 ++ goto bad_area;
514 + else if (fault & VM_FAULT_SIGBUS)
515 + goto do_sigbus;
516 + BUG();
517 +diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
518 +index 974b87474a99..53b832033d9b 100644
519 +--- a/arch/um/kernel/trap.c
520 ++++ b/arch/um/kernel/trap.c
521 +@@ -80,6 +80,8 @@ good_area:
522 + if (unlikely(fault & VM_FAULT_ERROR)) {
523 + if (fault & VM_FAULT_OOM) {
524 + goto out_of_memory;
525 ++ } else if (fault & VM_FAULT_SIGSEGV) {
526 ++ goto out;
527 + } else if (fault & VM_FAULT_SIGBUS) {
528 + err = -EACCES;
529 + goto out;
530 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
531 +index 09651d4a9038..cf1eeeafdfa3 100644
532 +--- a/arch/x86/kvm/emulate.c
533 ++++ b/arch/x86/kvm/emulate.c
534 +@@ -2258,7 +2258,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
535 + * Not recognized on AMD in compat mode (but is recognized in legacy
536 + * mode).
537 + */
538 +- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
539 ++ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
540 + && !vendor_intel(ctxt))
541 + return emulate_ud(ctxt);
542 +
543 +@@ -2271,25 +2271,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
544 + setup_syscalls_segments(ctxt, &cs, &ss);
545 +
546 + ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
547 +- switch (ctxt->mode) {
548 +- case X86EMUL_MODE_PROT32:
549 +- if ((msr_data & 0xfffc) == 0x0)
550 +- return emulate_gp(ctxt, 0);
551 +- break;
552 +- case X86EMUL_MODE_PROT64:
553 +- if (msr_data == 0x0)
554 +- return emulate_gp(ctxt, 0);
555 +- break;
556 +- default:
557 +- break;
558 +- }
559 ++ if ((msr_data & 0xfffc) == 0x0)
560 ++ return emulate_gp(ctxt, 0);
561 +
562 + ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
563 +- cs_sel = (u16)msr_data;
564 +- cs_sel &= ~SELECTOR_RPL_MASK;
565 ++ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
566 + ss_sel = cs_sel + 8;
567 +- ss_sel &= ~SELECTOR_RPL_MASK;
568 +- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
569 ++ if (efer & EFER_LMA) {
570 + cs.d = 0;
571 + cs.l = 1;
572 + }
573 +@@ -2298,10 +2286,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
574 + ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
575 +
576 + ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
577 +- ctxt->_eip = msr_data;
578 ++ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
579 +
580 + ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
581 +- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
582 ++ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
583 ++ (u32)msr_data;
584 +
585 + return X86EMUL_CONTINUE;
586 + }
587 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
588 +index a10c8c792161..ebc551c82605 100644
589 +--- a/arch/x86/mm/fault.c
590 ++++ b/arch/x86/mm/fault.c
591 +@@ -833,11 +833,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
592 + unsigned int fault)
593 + {
594 + struct task_struct *tsk = current;
595 +- struct mm_struct *mm = tsk->mm;
596 + int code = BUS_ADRERR;
597 +
598 +- up_read(&mm->mmap_sem);
599 +-
600 + /* Kernel mode? Handle exceptions or die: */
601 + if (!(error_code & PF_USER)) {
602 + no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
603 +@@ -868,7 +865,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
604 + unsigned long address, unsigned int fault)
605 + {
606 + if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
607 +- up_read(&current->mm->mmap_sem);
608 + no_context(regs, error_code, address, 0, 0);
609 + return;
610 + }
611 +@@ -876,14 +872,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
612 + if (fault & VM_FAULT_OOM) {
613 + /* Kernel mode? Handle exceptions or die: */
614 + if (!(error_code & PF_USER)) {
615 +- up_read(&current->mm->mmap_sem);
616 + no_context(regs, error_code, address,
617 + SIGSEGV, SEGV_MAPERR);
618 + return;
619 + }
620 +
621 +- up_read(&current->mm->mmap_sem);
622 +-
623 + /*
624 + * We ran out of memory, call the OOM killer, and return the
625 + * userspace (which will retry the fault, or kill us if we got
626 +@@ -894,6 +887,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
627 + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
628 + VM_FAULT_HWPOISON_LARGE))
629 + do_sigbus(regs, error_code, address, fault);
630 ++ else if (fault & VM_FAULT_SIGSEGV)
631 ++ bad_area_nosemaphore(regs, error_code, address);
632 + else
633 + BUG();
634 + }
635 +@@ -1216,6 +1211,7 @@ good_area:
636 + return;
637 +
638 + if (unlikely(fault & VM_FAULT_ERROR)) {
639 ++ up_read(&mm->mmap_sem);
640 + mm_fault_error(regs, error_code, address, fault);
641 + return;
642 + }
643 +diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
644 +index b57c4f91f487..9e3571a6535c 100644
645 +--- a/arch/xtensa/mm/fault.c
646 ++++ b/arch/xtensa/mm/fault.c
647 +@@ -117,6 +117,8 @@ good_area:
648 + if (unlikely(fault & VM_FAULT_ERROR)) {
649 + if (fault & VM_FAULT_OOM)
650 + goto out_of_memory;
651 ++ else if (fault & VM_FAULT_SIGSEGV)
652 ++ goto bad_area;
653 + else if (fault & VM_FAULT_SIGBUS)
654 + goto do_sigbus;
655 + BUG();
656 +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
657 +index f667e37394da..5afe556a70f8 100644
658 +--- a/drivers/bluetooth/ath3k.c
659 ++++ b/drivers/bluetooth/ath3k.c
660 +@@ -62,51 +62,59 @@ static const struct usb_device_id ath3k_table[] = {
661 + { USB_DEVICE(0x0CF3, 0x3000) },
662 +
663 + /* Atheros AR3011 with sflash firmware*/
664 ++ { USB_DEVICE(0x0489, 0xE027) },
665 ++ { USB_DEVICE(0x0489, 0xE03D) },
666 ++ { USB_DEVICE(0x0930, 0x0215) },
667 + { USB_DEVICE(0x0CF3, 0x3002) },
668 + { USB_DEVICE(0x0CF3, 0xE019) },
669 + { USB_DEVICE(0x13d3, 0x3304) },
670 +- { USB_DEVICE(0x0930, 0x0215) },
671 +- { USB_DEVICE(0x0489, 0xE03D) },
672 +- { USB_DEVICE(0x0489, 0xE027) },
673 +
674 + /* Atheros AR9285 Malbec with sflash firmware */
675 + { USB_DEVICE(0x03F0, 0x311D) },
676 +
677 + /* Atheros AR3012 with sflash firmware*/
678 +- { USB_DEVICE(0x0CF3, 0x0036) },
679 +- { USB_DEVICE(0x0CF3, 0x3004) },
680 +- { USB_DEVICE(0x0CF3, 0x3008) },
681 +- { USB_DEVICE(0x0CF3, 0x311D) },
682 +- { USB_DEVICE(0x0CF3, 0x817a) },
683 +- { USB_DEVICE(0x13d3, 0x3375) },
684 ++ { USB_DEVICE(0x0489, 0xe04d) },
685 ++ { USB_DEVICE(0x0489, 0xe04e) },
686 ++ { USB_DEVICE(0x0489, 0xe057) },
687 ++ { USB_DEVICE(0x0489, 0xe056) },
688 ++ { USB_DEVICE(0x0489, 0xe05f) },
689 ++ { USB_DEVICE(0x0489, 0xe078) },
690 ++ { USB_DEVICE(0x04c5, 0x1330) },
691 + { USB_DEVICE(0x04CA, 0x3004) },
692 + { USB_DEVICE(0x04CA, 0x3005) },
693 + { USB_DEVICE(0x04CA, 0x3006) },
694 + { USB_DEVICE(0x04CA, 0x3007) },
695 + { USB_DEVICE(0x04CA, 0x3008) },
696 + { USB_DEVICE(0x04CA, 0x300b) },
697 +- { USB_DEVICE(0x13d3, 0x3362) },
698 +- { USB_DEVICE(0x0CF3, 0xE004) },
699 +- { USB_DEVICE(0x0CF3, 0xE005) },
700 ++ { USB_DEVICE(0x04CA, 0x3010) },
701 + { USB_DEVICE(0x0930, 0x0219) },
702 + { USB_DEVICE(0x0930, 0x0220) },
703 +- { USB_DEVICE(0x0489, 0xe057) },
704 +- { USB_DEVICE(0x13d3, 0x3393) },
705 +- { USB_DEVICE(0x0489, 0xe04e) },
706 +- { USB_DEVICE(0x0489, 0xe056) },
707 +- { USB_DEVICE(0x0489, 0xe04d) },
708 +- { USB_DEVICE(0x04c5, 0x1330) },
709 +- { USB_DEVICE(0x13d3, 0x3402) },
710 ++ { USB_DEVICE(0x0930, 0x0227) },
711 ++ { USB_DEVICE(0x0b05, 0x17d0) },
712 ++ { USB_DEVICE(0x0CF3, 0x0036) },
713 ++ { USB_DEVICE(0x0CF3, 0x3004) },
714 ++ { USB_DEVICE(0x0CF3, 0x3008) },
715 ++ { USB_DEVICE(0x0CF3, 0x311D) },
716 ++ { USB_DEVICE(0x0CF3, 0x311E) },
717 ++ { USB_DEVICE(0x0CF3, 0x311F) },
718 + { USB_DEVICE(0x0cf3, 0x3121) },
719 ++ { USB_DEVICE(0x0CF3, 0x817a) },
720 + { USB_DEVICE(0x0cf3, 0xe003) },
721 +- { USB_DEVICE(0x0489, 0xe05f) },
722 ++ { USB_DEVICE(0x0CF3, 0xE004) },
723 ++ { USB_DEVICE(0x0CF3, 0xE005) },
724 ++ { USB_DEVICE(0x13d3, 0x3362) },
725 ++ { USB_DEVICE(0x13d3, 0x3375) },
726 ++ { USB_DEVICE(0x13d3, 0x3393) },
727 ++ { USB_DEVICE(0x13d3, 0x3402) },
728 ++ { USB_DEVICE(0x13d3, 0x3408) },
729 ++ { USB_DEVICE(0x13d3, 0x3432) },
730 +
731 + /* Atheros AR5BBU12 with sflash firmware */
732 + { USB_DEVICE(0x0489, 0xE02C) },
733 +
734 + /* Atheros AR5BBU22 with sflash firmware */
735 +- { USB_DEVICE(0x0489, 0xE03C) },
736 + { USB_DEVICE(0x0489, 0xE036) },
737 ++ { USB_DEVICE(0x0489, 0xE03C) },
738 +
739 + { } /* Terminating entry */
740 + };
741 +@@ -119,37 +127,45 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
742 + static const struct usb_device_id ath3k_blist_tbl[] = {
743 +
744 + /* Atheros AR3012 with sflash firmware*/
745 +- { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
746 +- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
747 +- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
748 +- { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
749 +- { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
750 +- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
751 ++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
752 ++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
753 ++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
754 ++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
755 ++ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
756 ++ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
757 ++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
758 + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
759 + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
760 + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
761 + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
762 + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
763 + { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
764 +- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
765 +- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
766 +- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
767 ++ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
768 + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
769 + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
770 +- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
771 +- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
772 +- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
773 +- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
774 +- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
775 +- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
776 +- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
777 ++ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
778 ++ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
779 ++ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
780 ++ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
781 ++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
782 ++ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
783 ++ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
784 ++ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
785 + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
786 ++ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
787 ++ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
788 ++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
789 + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
790 +- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
791 ++ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
792 ++ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
793 ++ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
794 ++ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
795 ++ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
796 ++ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
797 +
798 + /* Atheros AR5BBU22 with sflash firmware */
799 +- { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
800 + { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
801 ++ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
802 +
803 + { } /* Terminating entry */
804 + };
805 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
806 +index e00c3f84a4cf..03b331798e16 100644
807 +--- a/drivers/bluetooth/btusb.c
808 ++++ b/drivers/bluetooth/btusb.c
809 +@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
810 + #define BTUSB_WRONG_SCO_MTU 0x40
811 + #define BTUSB_ATH3012 0x80
812 + #define BTUSB_INTEL 0x100
813 ++#define BTUSB_INTEL_BOOT 0x200
814 +
815 + static const struct usb_device_id btusb_table[] = {
816 + /* Generic Bluetooth USB device */
817 +@@ -101,21 +102,31 @@ static const struct usb_device_id btusb_table[] = {
818 + { USB_DEVICE(0x0c10, 0x0000) },
819 +
820 + /* Broadcom BCM20702A0 */
821 ++ { USB_DEVICE(0x0489, 0xe042) },
822 ++ { USB_DEVICE(0x04ca, 0x2003) },
823 + { USB_DEVICE(0x0b05, 0x17b5) },
824 + { USB_DEVICE(0x0b05, 0x17cb) },
825 +- { USB_DEVICE(0x04ca, 0x2003) },
826 +- { USB_DEVICE(0x0489, 0xe042) },
827 + { USB_DEVICE(0x413c, 0x8197) },
828 +
829 + /* Foxconn - Hon Hai */
830 + { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
831 +
832 +- /*Broadcom devices with vendor specific id */
833 ++ /* Broadcom devices with vendor specific id */
834 + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
835 +
836 ++ /* ASUSTek Computer - Broadcom based */
837 ++ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
838 ++
839 + /* Belkin F8065bf - Broadcom based */
840 + { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
841 +
842 ++ /* IMC Networks - Broadcom based */
843 ++ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
844 ++
845 ++ /* Intel Bluetooth USB Bootloader (RAM module) */
846 ++ { USB_DEVICE(0x8087, 0x0a5a),
847 ++ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
848 ++
849 + { } /* Terminating entry */
850 + };
851 +
852 +@@ -129,56 +140,64 @@ static const struct usb_device_id blacklist_table[] = {
853 + { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
854 +
855 + /* Atheros 3011 with sflash firmware */
856 ++ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
857 ++ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
858 ++ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
859 + { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
860 + { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
861 + { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
862 +- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
863 +- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
864 +- { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
865 +
866 + /* Atheros AR9285 Malbec with sflash firmware */
867 + { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
868 +
869 + /* Atheros 3012 with sflash firmware */
870 +- { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
871 +- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
872 +- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
873 +- { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
874 +- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
875 +- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
876 ++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
877 ++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
878 ++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
879 ++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
880 ++ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
881 ++ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
882 ++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
883 + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
884 + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
885 + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
886 + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
887 + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
888 + { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
889 +- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
890 +- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
891 +- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
892 ++ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
893 + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
894 + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
895 +- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
896 +- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
897 +- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
898 +- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
899 +- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
900 +- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
901 +- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
902 ++ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
903 ++ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
904 ++ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
905 ++ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
906 ++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
907 ++ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
908 ++ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
909 ++ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
910 + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
911 ++ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
912 + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
913 +- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
914 ++ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
915 ++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
916 ++ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
917 ++ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
918 ++ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
919 ++ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
920 ++ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
921 ++ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
922 +
923 + /* Atheros AR5BBU12 with sflash firmware */
924 + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
925 +
926 + /* Atheros AR5BBU12 with sflash firmware */
927 +- { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
928 + { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
929 ++ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
930 +
931 + /* Broadcom BCM2035 */
932 +- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
933 +- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
934 + { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
935 ++ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
936 ++ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
937 +
938 + /* Broadcom BCM2045 */
939 + { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
940 +@@ -1491,6 +1510,9 @@ static int btusb_probe(struct usb_interface *intf,
941 + if (id->driver_info & BTUSB_INTEL)
942 + hdev->setup = btusb_setup_intel;
943 +
944 ++ if (id->driver_info & BTUSB_INTEL_BOOT)
945 ++ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
946 ++
947 + /* Interface numbers are hardcoded in the specification */
948 + data->isoc = usb_ifnum_to_if(data->udev, 1);
949 +
950 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
951 +index 54e2abe671f7..c611bcc01f7e 100644
952 +--- a/drivers/edac/sb_edac.c
953 ++++ b/drivers/edac/sb_edac.c
954 +@@ -285,8 +285,9 @@ static const u32 correrrthrsld[] = {
955 + * sbridge structs
956 + */
957 +
958 +-#define NUM_CHANNELS 4
959 +-#define MAX_DIMMS 3 /* Max DIMMS per channel */
960 ++#define NUM_CHANNELS 4
961 ++#define MAX_DIMMS 3 /* Max DIMMS per channel */
962 ++#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
963 +
964 + enum type {
965 + SANDY_BRIDGE,
966 +@@ -1750,6 +1751,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
967 +
968 + /* FIXME: need support for channel mask */
969 +
970 ++ if (channel == CHANNEL_UNSPECIFIED)
971 ++ channel = -1;
972 ++
973 + /* Call the helper to output message */
974 + edac_mc_handle_error(tp_event, mci, core_err_cnt,
975 + m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
976 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
977 +index dcde56057fe1..3177498f3eab 100644
978 +--- a/drivers/net/bonding/bond_3ad.c
979 ++++ b/drivers/net/bonding/bond_3ad.c
980 +@@ -2479,7 +2479,7 @@ out:
981 + return NETDEV_TX_OK;
982 + err_free:
983 + /* no suitable interface, frame not sent */
984 +- kfree_skb(skb);
985 ++ dev_kfree_skb_any(skb);
986 + goto out;
987 + }
988 +
989 +diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
990 +index e8f133e926aa..c67bbc9c36dc 100644
991 +--- a/drivers/net/bonding/bond_alb.c
992 ++++ b/drivers/net/bonding/bond_alb.c
993 +@@ -1479,7 +1479,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
994 + }
995 +
996 + /* no suitable interface, frame not sent */
997 +- kfree_skb(skb);
998 ++ dev_kfree_skb_any(skb);
999 + out:
1000 + return NETDEV_TX_OK;
1001 + }
1002 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1003 +index 15379824d77d..32b0e7055b1e 100644
1004 +--- a/drivers/net/bonding/bond_main.c
1005 ++++ b/drivers/net/bonding/bond_main.c
1006 +@@ -3568,7 +3568,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
1007 + }
1008 + }
1009 + /* no slave that can tx has been found */
1010 +- kfree_skb(skb);
1011 ++ dev_kfree_skb_any(skb);
1012 + }
1013 +
1014 + /**
1015 +@@ -3650,7 +3650,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
1016 + if (slave)
1017 + bond_dev_queue_xmit(bond, skb, slave->dev);
1018 + else
1019 +- kfree_skb(skb);
1020 ++ dev_kfree_skb_any(skb);
1021 +
1022 + return NETDEV_TX_OK;
1023 + }
1024 +@@ -3698,7 +3698,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
1025 + if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
1026 + bond_dev_queue_xmit(bond, skb, slave->dev);
1027 + else
1028 +- kfree_skb(skb);
1029 ++ dev_kfree_skb_any(skb);
1030 +
1031 + return NETDEV_TX_OK;
1032 + }
1033 +@@ -3785,7 +3785,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
1034 + pr_err("%s: Error: Unknown bonding mode %d\n",
1035 + dev->name, bond->params.mode);
1036 + WARN_ON_ONCE(1);
1037 +- kfree_skb(skb);
1038 ++ dev_kfree_skb_any(skb);
1039 + return NETDEV_TX_OK;
1040 + }
1041 + }
1042 +@@ -3806,7 +3806,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
1043 + if (bond_has_slaves(bond))
1044 + ret = __bond_start_xmit(skb, dev);
1045 + else
1046 +- kfree_skb(skb);
1047 ++ dev_kfree_skb_any(skb);
1048 + rcu_read_unlock();
1049 +
1050 + return ret;
1051 +diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
1052 +index 6c9e1c9bdeb8..0c8a16866603 100644
1053 +--- a/drivers/net/ethernet/broadcom/bnx2.c
1054 ++++ b/drivers/net/ethernet/broadcom/bnx2.c
1055 +@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
1056 + sw_cons = BNX2_NEXT_TX_BD(sw_cons);
1057 +
1058 + tx_bytes += skb->len;
1059 +- dev_kfree_skb(skb);
1060 ++ dev_kfree_skb_any(skb);
1061 + tx_pkt++;
1062 + if (tx_pkt == budget)
1063 + break;
1064 +@@ -6640,7 +6640,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
1065 +
1066 + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
1067 + if (dma_mapping_error(&bp->pdev->dev, mapping)) {
1068 +- dev_kfree_skb(skb);
1069 ++ dev_kfree_skb_any(skb);
1070 + return NETDEV_TX_OK;
1071 + }
1072 +
1073 +@@ -6733,7 +6733,7 @@ dma_error:
1074 + PCI_DMA_TODEVICE);
1075 + }
1076 +
1077 +- dev_kfree_skb(skb);
1078 ++ dev_kfree_skb_any(skb);
1079 + return NETDEV_TX_OK;
1080 + }
1081 +
1082 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1083 +index 82061139b215..bc65dc85a622 100644
1084 +--- a/drivers/net/ethernet/broadcom/tg3.c
1085 ++++ b/drivers/net/ethernet/broadcom/tg3.c
1086 +@@ -6593,7 +6593,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
1087 + pkts_compl++;
1088 + bytes_compl += skb->len;
1089 +
1090 +- dev_kfree_skb(skb);
1091 ++ dev_kfree_skb_any(skb);
1092 +
1093 + if (unlikely(tx_bug)) {
1094 + tg3_tx_recover(tp);
1095 +@@ -6925,7 +6925,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
1096 + if (len > (tp->dev->mtu + ETH_HLEN) &&
1097 + skb->protocol != htons(ETH_P_8021Q) &&
1098 + skb->protocol != htons(ETH_P_8021AD)) {
1099 +- dev_kfree_skb(skb);
1100 ++ dev_kfree_skb_any(skb);
1101 + goto drop_it_no_recycle;
1102 + }
1103 +
1104 +@@ -7808,7 +7808,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
1105 + PCI_DMA_TODEVICE);
1106 + /* Make sure the mapping succeeded */
1107 + if (pci_dma_mapping_error(tp->pdev, new_addr)) {
1108 +- dev_kfree_skb(new_skb);
1109 ++ dev_kfree_skb_any(new_skb);
1110 + ret = -1;
1111 + } else {
1112 + u32 save_entry = *entry;
1113 +@@ -7823,13 +7823,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
1114 + new_skb->len, base_flags,
1115 + mss, vlan)) {
1116 + tg3_tx_skb_unmap(tnapi, save_entry, -1);
1117 +- dev_kfree_skb(new_skb);
1118 ++ dev_kfree_skb_any(new_skb);
1119 + ret = -1;
1120 + }
1121 + }
1122 + }
1123 +
1124 +- dev_kfree_skb(skb);
1125 ++ dev_kfree_skb_any(skb);
1126 + *pskb = new_skb;
1127 + return ret;
1128 + }
1129 +@@ -7872,7 +7872,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
1130 + } while (segs);
1131 +
1132 + tg3_tso_bug_end:
1133 +- dev_kfree_skb(skb);
1134 ++ dev_kfree_skb_any(skb);
1135 +
1136 + return NETDEV_TX_OK;
1137 + }
1138 +@@ -8110,7 +8110,7 @@ dma_error:
1139 + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
1140 + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
1141 + drop:
1142 +- dev_kfree_skb(skb);
1143 ++ dev_kfree_skb_any(skb);
1144 + drop_nofree:
1145 + tp->tx_dropped++;
1146 + return NETDEV_TX_OK;
1147 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1148 +index 80bfa0391913..075e7e7abea9 100644
1149 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
1150 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
1151 +@@ -1883,7 +1883,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
1152 + queue_tail_inc(txq);
1153 + } while (cur_index != last_index);
1154 +
1155 +- kfree_skb(sent_skb);
1156 ++ dev_kfree_skb_any(sent_skb);
1157 + return num_wrbs;
1158 + }
1159 +
1160 +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
1161 +index ad5a5aadc7e1..70eb4d27b4fa 100644
1162 +--- a/drivers/net/ethernet/freescale/gianfar.c
1163 ++++ b/drivers/net/ethernet/freescale/gianfar.c
1164 +@@ -2152,13 +2152,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1165 + skb_new = skb_realloc_headroom(skb, fcb_len);
1166 + if (!skb_new) {
1167 + dev->stats.tx_errors++;
1168 +- kfree_skb(skb);
1169 ++ dev_kfree_skb_any(skb);
1170 + return NETDEV_TX_OK;
1171 + }
1172 +
1173 + if (skb->sk)
1174 + skb_set_owner_w(skb_new, skb->sk);
1175 +- consume_skb(skb);
1176 ++ dev_consume_skb_any(skb);
1177 + skb = skb_new;
1178 + }
1179 +
1180 +diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
1181 +index 57e390cbe6d0..f42c201f727f 100644
1182 +--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
1183 ++++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
1184 +@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1185 + int tso;
1186 +
1187 + if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1188 +- dev_kfree_skb(skb);
1189 ++ dev_kfree_skb_any(skb);
1190 + return NETDEV_TX_OK;
1191 + }
1192 +
1193 + if (skb->len <= 0) {
1194 +- dev_kfree_skb(skb);
1195 ++ dev_kfree_skb_any(skb);
1196 + return NETDEV_TX_OK;
1197 + }
1198 +
1199 +@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1200 +
1201 + tso = ixgb_tso(adapter, skb);
1202 + if (tso < 0) {
1203 +- dev_kfree_skb(skb);
1204 ++ dev_kfree_skb_any(skb);
1205 + return NETDEV_TX_OK;
1206 + }
1207 +
1208 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1209 +index 2f83f3489fdb..8be0f3e1e8e9 100644
1210 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1211 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1212 +@@ -2497,13 +2497,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1213 + netif_carrier_off(dev);
1214 + mlx4_en_set_default_moderation(priv);
1215 +
1216 +- err = register_netdev(dev);
1217 +- if (err) {
1218 +- en_err(priv, "Netdev registration failed for port %d\n", port);
1219 +- goto out;
1220 +- }
1221 +- priv->registered = 1;
1222 +-
1223 + en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
1224 + en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1225 +
1226 +@@ -2543,6 +2536,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1227 + queue_delayed_work(mdev->workqueue, &priv->service_task,
1228 + SERVICE_TASK_DELAY);
1229 +
1230 ++ err = register_netdev(dev);
1231 ++ if (err) {
1232 ++ en_err(priv, "Netdev registration failed for port %d\n", port);
1233 ++ goto out;
1234 ++ }
1235 ++
1236 ++ priv->registered = 1;
1237 ++
1238 + return 0;
1239 +
1240 + out:
1241 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1242 +index 13457032d15f..019a04a31384 100644
1243 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1244 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1245 +@@ -325,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
1246 + }
1247 + }
1248 + }
1249 +- dev_kfree_skb(skb);
1250 ++ dev_kfree_skb_any(skb);
1251 + return tx_info->nr_txbb;
1252 + }
1253 +
1254 +diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
1255 +index 737c1a881f78..a3c1daa7ad5c 100644
1256 +--- a/drivers/net/ethernet/realtek/8139cp.c
1257 ++++ b/drivers/net/ethernet/realtek/8139cp.c
1258 +@@ -899,7 +899,7 @@ out_unlock:
1259 +
1260 + return NETDEV_TX_OK;
1261 + out_dma_error:
1262 +- kfree_skb(skb);
1263 ++ dev_kfree_skb_any(skb);
1264 + cp->dev->stats.tx_dropped++;
1265 + goto out_unlock;
1266 + }
1267 +diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
1268 +index da5972eefdd2..8cb2f357026e 100644
1269 +--- a/drivers/net/ethernet/realtek/8139too.c
1270 ++++ b/drivers/net/ethernet/realtek/8139too.c
1271 +@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
1272 + if (len < ETH_ZLEN)
1273 + memset(tp->tx_buf[entry], 0, ETH_ZLEN);
1274 + skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
1275 +- dev_kfree_skb(skb);
1276 ++ dev_kfree_skb_any(skb);
1277 + } else {
1278 +- dev_kfree_skb(skb);
1279 ++ dev_kfree_skb_any(skb);
1280 + dev->stats.tx_dropped++;
1281 + return NETDEV_TX_OK;
1282 + }
1283 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1284 +index 3ff7bc3e7a23..90c14d16f261 100644
1285 +--- a/drivers/net/ethernet/realtek/r8169.c
1286 ++++ b/drivers/net/ethernet/realtek/r8169.c
1287 +@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
1288 + tp->TxDescArray + entry);
1289 + if (skb) {
1290 + tp->dev->stats.tx_dropped++;
1291 +- dev_kfree_skb(skb);
1292 ++ dev_kfree_skb_any(skb);
1293 + tx_skb->skb = NULL;
1294 + }
1295 + }
1296 +@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
1297 + err_dma_1:
1298 + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
1299 + err_dma_0:
1300 +- dev_kfree_skb(skb);
1301 ++ dev_kfree_skb_any(skb);
1302 + err_update_stats:
1303 + dev->stats.tx_dropped++;
1304 + return NETDEV_TX_OK;
1305 +@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
1306 + tp->tx_stats.packets++;
1307 + tp->tx_stats.bytes += tx_skb->skb->len;
1308 + u64_stats_update_end(&tp->tx_stats.syncp);
1309 +- dev_kfree_skb(tx_skb->skb);
1310 ++ dev_kfree_skb_any(tx_skb->skb);
1311 + tx_skb->skb = NULL;
1312 + }
1313 + dirty_tx++;
1314 +diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
1315 +index cbd663ed030c..19405ed56cab 100644
1316 +--- a/drivers/staging/lustre/lustre/llite/dcache.c
1317 ++++ b/drivers/staging/lustre/lustre/llite/dcache.c
1318 +@@ -278,7 +278,7 @@ void ll_invalidate_aliases(struct inode *inode)
1319 + inode->i_ino, inode->i_generation, inode);
1320 +
1321 + ll_lock_dcache(inode);
1322 +- ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
1323 ++ ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
1324 + CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
1325 + "inode %p flags %d\n", dentry->d_name.len,
1326 + dentry->d_name.name, dentry, dentry->d_parent,
1327 +diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
1328 +index 6cfdb9e4b74b..5ae562ea95f7 100644
1329 +--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
1330 ++++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
1331 +@@ -678,7 +678,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
1332 + return;
1333 +
1334 + list_for_each(tmp, &dentry->d_subdirs) {
1335 +- struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
1336 ++ struct dentry *d = list_entry(tmp, struct dentry, d_child);
1337 + lustre_dump_dentry(d, recur - 1);
1338 + }
1339 + }
1340 +diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
1341 +index fc8d264f6c9a..8e9a9e95b5cc 100644
1342 +--- a/drivers/staging/lustre/lustre/llite/namei.c
1343 ++++ b/drivers/staging/lustre/lustre/llite/namei.c
1344 +@@ -175,14 +175,14 @@ static void ll_invalidate_negative_children(struct inode *dir)
1345 + struct ll_d_hlist_node *p;
1346 +
1347 + ll_lock_dcache(dir);
1348 +- ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) {
1349 ++ ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
1350 + spin_lock(&dentry->d_lock);
1351 + if (!list_empty(&dentry->d_subdirs)) {
1352 + struct dentry *child;
1353 +
1354 + list_for_each_entry_safe(child, tmp_subdir,
1355 + &dentry->d_subdirs,
1356 +- d_u.d_child) {
1357 ++ d_child) {
1358 + if (child->d_inode == NULL)
1359 + d_lustre_invalidate(child, 1);
1360 + }
1361 +@@ -364,7 +364,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
1362 + discon_alias = invalid_alias = NULL;
1363 +
1364 + ll_lock_dcache(inode);
1365 +- ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
1366 ++ ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
1367 + LASSERT(alias != dentry);
1368 +
1369 + spin_lock(&alias->d_lock);
1370 +@@ -953,7 +953,7 @@ static void ll_get_child_fid(struct inode * dir, struct qstr *name,
1371 + {
1372 + struct dentry *parent, *child;
1373 +
1374 +- parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias);
1375 ++ parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_u.d_alias);
1376 + child = d_lookup(parent, name);
1377 + if (child) {
1378 + if (child->d_inode)
1379 +diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
1380 +index 93cbfbb7e7f7..6096771e2400 100644
1381 +--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
1382 ++++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
1383 +@@ -642,7 +642,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
1384 + return 0;
1385 + }
1386 +
1387 +- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
1388 ++ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
1389 + CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
1390 + return -EFAULT;
1391 + }
1392 +diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
1393 +index d9a43674cb94..9cca0ea4e479 100644
1394 +--- a/fs/affs/amigaffs.c
1395 ++++ b/fs/affs/amigaffs.c
1396 +@@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
1397 + {
1398 + struct dentry *dentry;
1399 + spin_lock(&inode->i_lock);
1400 +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1401 ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1402 + if (entry_ino == (u32)(long)dentry->d_fsdata) {
1403 + dentry->d_fsdata = (void *)inode->i_ino;
1404 + break;
1405 +diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
1406 +index 394e90b02c5e..edb46e67d5ca 100644
1407 +--- a/fs/autofs4/expire.c
1408 ++++ b/fs/autofs4/expire.c
1409 +@@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
1410 + spin_lock(&root->d_lock);
1411 +
1412 + if (prev)
1413 +- next = prev->d_u.d_child.next;
1414 ++ next = prev->d_child.next;
1415 + else {
1416 + prev = dget_dlock(root);
1417 + next = prev->d_subdirs.next;
1418 +@@ -105,13 +105,13 @@ cont:
1419 + return NULL;
1420 + }
1421 +
1422 +- q = list_entry(next, struct dentry, d_u.d_child);
1423 ++ q = list_entry(next, struct dentry, d_child);
1424 +
1425 + spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
1426 + /* Already gone or negative dentry (under construction) - try next */
1427 + if (!d_count(q) || !simple_positive(q)) {
1428 + spin_unlock(&q->d_lock);
1429 +- next = q->d_u.d_child.next;
1430 ++ next = q->d_child.next;
1431 + goto cont;
1432 + }
1433 + dget_dlock(q);
1434 +@@ -161,13 +161,13 @@ again:
1435 + goto relock;
1436 + }
1437 + spin_unlock(&p->d_lock);
1438 +- next = p->d_u.d_child.next;
1439 ++ next = p->d_child.next;
1440 + p = parent;
1441 + if (next != &parent->d_subdirs)
1442 + break;
1443 + }
1444 + }
1445 +- ret = list_entry(next, struct dentry, d_u.d_child);
1446 ++ ret = list_entry(next, struct dentry, d_child);
1447 +
1448 + spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
1449 + /* Negative dentry - try next */
1450 +@@ -461,7 +461,7 @@ found:
1451 + spin_lock(&sbi->lookup_lock);
1452 + spin_lock(&expired->d_parent->d_lock);
1453 + spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
1454 +- list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
1455 ++ list_move(&expired->d_parent->d_subdirs, &expired->d_child);
1456 + spin_unlock(&expired->d_lock);
1457 + spin_unlock(&expired->d_parent->d_lock);
1458 + spin_unlock(&sbi->lookup_lock);
1459 +diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
1460 +index cc87c1abac97..9e016e6fb582 100644
1461 +--- a/fs/autofs4/root.c
1462 ++++ b/fs/autofs4/root.c
1463 +@@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
1464 + /* only consider parents below dentrys in the root */
1465 + if (IS_ROOT(parent->d_parent))
1466 + return;
1467 +- d_child = &dentry->d_u.d_child;
1468 ++ d_child = &dentry->d_child;
1469 + /* Set parent managed if it's becoming empty */
1470 + if (d_child->next == &parent->d_subdirs &&
1471 + d_child->prev == &parent->d_subdirs)
1472 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
1473 +index 5e0982aa7000..18e14cf8f223 100644
1474 +--- a/fs/ceph/dir.c
1475 ++++ b/fs/ceph/dir.c
1476 +@@ -111,7 +111,7 @@ static int fpos_cmp(loff_t l, loff_t r)
1477 + /*
1478 + * When possible, we try to satisfy a readdir by peeking at the
1479 + * dcache. We make this work by carefully ordering dentries on
1480 +- * d_u.d_child when we initially get results back from the MDS, and
1481 ++ * d_child when we initially get results back from the MDS, and
1482 + * falling back to a "normal" sync readdir if any dentries in the dir
1483 + * are dropped.
1484 + *
1485 +@@ -146,11 +146,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
1486 + p = parent->d_subdirs.prev;
1487 + dout(" initial p %p/%p\n", p->prev, p->next);
1488 + } else {
1489 +- p = last->d_u.d_child.prev;
1490 ++ p = last->d_child.prev;
1491 + }
1492 +
1493 + more:
1494 +- dentry = list_entry(p, struct dentry, d_u.d_child);
1495 ++ dentry = list_entry(p, struct dentry, d_child);
1496 + di = ceph_dentry(dentry);
1497 + while (1) {
1498 + dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
1499 +@@ -172,7 +172,7 @@ more:
1500 + !dentry->d_inode ? " null" : "");
1501 + spin_unlock(&dentry->d_lock);
1502 + p = p->prev;
1503 +- dentry = list_entry(p, struct dentry, d_u.d_child);
1504 ++ dentry = list_entry(p, struct dentry, d_child);
1505 + di = ceph_dentry(dentry);
1506 + }
1507 +
1508 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
1509 +index 6471f9c83428..ee24490ee925 100644
1510 +--- a/fs/ceph/inode.c
1511 ++++ b/fs/ceph/inode.c
1512 +@@ -1289,7 +1289,7 @@ retry_lookup:
1513 + /* reorder parent's d_subdirs */
1514 + spin_lock(&parent->d_lock);
1515 + spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
1516 +- list_move(&dn->d_u.d_child, &parent->d_subdirs);
1517 ++ list_move(&dn->d_child, &parent->d_subdirs);
1518 + spin_unlock(&dn->d_lock);
1519 + spin_unlock(&parent->d_lock);
1520 + }
1521 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
1522 +index f2ddcf7ac9c3..7ee427e16f3b 100644
1523 +--- a/fs/cifs/inode.c
1524 ++++ b/fs/cifs/inode.c
1525 +@@ -883,7 +883,7 @@ inode_has_hashed_dentries(struct inode *inode)
1526 + struct dentry *dentry;
1527 +
1528 + spin_lock(&inode->i_lock);
1529 +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1530 ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1531 + if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
1532 + spin_unlock(&inode->i_lock);
1533 + return true;
1534 +diff --git a/fs/coda/cache.c b/fs/coda/cache.c
1535 +index 1da168c61d35..9bc1147a6c5d 100644
1536 +--- a/fs/coda/cache.c
1537 ++++ b/fs/coda/cache.c
1538 +@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
1539 + struct dentry *de;
1540 +
1541 + spin_lock(&parent->d_lock);
1542 +- list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
1543 ++ list_for_each_entry(de, &parent->d_subdirs, d_child) {
1544 + /* don't know what to do with negative dentries */
1545 + if (de->d_inode )
1546 + coda_flag_inode(de->d_inode, flag);
1547 +diff --git a/fs/dcache.c b/fs/dcache.c
1548 +index 436612777203..c345f5f2b508 100644
1549 +--- a/fs/dcache.c
1550 ++++ b/fs/dcache.c
1551 +@@ -44,7 +44,7 @@
1552 + /*
1553 + * Usage:
1554 + * dcache->d_inode->i_lock protects:
1555 +- * - i_dentry, d_alias, d_inode of aliases
1556 ++ * - i_dentry, d_u.d_alias, d_inode of aliases
1557 + * dcache_hash_bucket lock protects:
1558 + * - the dcache hash table
1559 + * s_anon bl list spinlock protects:
1560 +@@ -59,7 +59,7 @@
1561 + * - d_unhashed()
1562 + * - d_parent and d_subdirs
1563 + * - childrens' d_child and d_parent
1564 +- * - d_alias, d_inode
1565 ++ * - d_u.d_alias, d_inode
1566 + *
1567 + * Ordering:
1568 + * dentry->d_inode->i_lock
1569 +@@ -239,7 +239,6 @@ static void __d_free(struct rcu_head *head)
1570 + {
1571 + struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
1572 +
1573 +- WARN_ON(!hlist_unhashed(&dentry->d_alias));
1574 + if (dname_external(dentry))
1575 + kfree(dentry->d_name.name);
1576 + kmem_cache_free(dentry_cache, dentry);
1577 +@@ -250,6 +249,7 @@ static void __d_free(struct rcu_head *head)
1578 + */
1579 + static void d_free(struct dentry *dentry)
1580 + {
1581 ++ WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
1582 + BUG_ON((int)dentry->d_lockref.count > 0);
1583 + this_cpu_dec(nr_dentry);
1584 + if (dentry->d_op && dentry->d_op->d_release)
1585 +@@ -288,7 +288,7 @@ static void dentry_iput(struct dentry * dentry)
1586 + struct inode *inode = dentry->d_inode;
1587 + if (inode) {
1588 + dentry->d_inode = NULL;
1589 +- hlist_del_init(&dentry->d_alias);
1590 ++ hlist_del_init(&dentry->d_u.d_alias);
1591 + spin_unlock(&dentry->d_lock);
1592 + spin_unlock(&inode->i_lock);
1593 + if (!inode->i_nlink)
1594 +@@ -313,7 +313,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
1595 + struct inode *inode = dentry->d_inode;
1596 + __d_clear_type(dentry);
1597 + dentry->d_inode = NULL;
1598 +- hlist_del_init(&dentry->d_alias);
1599 ++ hlist_del_init(&dentry->d_u.d_alias);
1600 + dentry_rcuwalk_barrier(dentry);
1601 + spin_unlock(&dentry->d_lock);
1602 + spin_unlock(&inode->i_lock);
1603 +@@ -435,7 +435,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
1604 + __releases(parent->d_lock)
1605 + __releases(dentry->d_inode->i_lock)
1606 + {
1607 +- list_del(&dentry->d_u.d_child);
1608 ++ list_del(&dentry->d_child);
1609 + /*
1610 + * Inform d_walk() that we are no longer attached to the
1611 + * dentry tree
1612 +@@ -737,7 +737,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
1613 +
1614 + again:
1615 + discon_alias = NULL;
1616 +- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1617 ++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1618 + spin_lock(&alias->d_lock);
1619 + if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
1620 + if (IS_ROOT(alias) &&
1621 +@@ -790,7 +790,7 @@ void d_prune_aliases(struct inode *inode)
1622 + struct dentry *dentry;
1623 + restart:
1624 + spin_lock(&inode->i_lock);
1625 +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1626 ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1627 + spin_lock(&dentry->d_lock);
1628 + if (!dentry->d_lockref.count) {
1629 + /*
1630 +@@ -1091,7 +1091,7 @@ repeat:
1631 + resume:
1632 + while (next != &this_parent->d_subdirs) {
1633 + struct list_head *tmp = next;
1634 +- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1635 ++ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1636 + next = tmp->next;
1637 +
1638 + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1639 +@@ -1143,7 +1143,7 @@ resume:
1640 + goto rename_retry;
1641 + }
1642 + rcu_read_unlock();
1643 +- next = child->d_u.d_child.next;
1644 ++ next = child->d_child.next;
1645 + goto resume;
1646 + }
1647 + if (need_seqretry(&rename_lock, seq)) {
1648 +@@ -1524,8 +1524,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1649 + INIT_HLIST_BL_NODE(&dentry->d_hash);
1650 + INIT_LIST_HEAD(&dentry->d_lru);
1651 + INIT_LIST_HEAD(&dentry->d_subdirs);
1652 +- INIT_HLIST_NODE(&dentry->d_alias);
1653 +- INIT_LIST_HEAD(&dentry->d_u.d_child);
1654 ++ INIT_HLIST_NODE(&dentry->d_u.d_alias);
1655 ++ INIT_LIST_HEAD(&dentry->d_child);
1656 + d_set_d_op(dentry, dentry->d_sb->s_d_op);
1657 +
1658 + this_cpu_inc(nr_dentry);
1659 +@@ -1555,7 +1555,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1660 + */
1661 + __dget_dlock(parent);
1662 + dentry->d_parent = parent;
1663 +- list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1664 ++ list_add(&dentry->d_child, &parent->d_subdirs);
1665 + spin_unlock(&parent->d_lock);
1666 +
1667 + return dentry;
1668 +@@ -1648,7 +1648,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1669 + spin_lock(&dentry->d_lock);
1670 + __d_set_type(dentry, add_flags);
1671 + if (inode)
1672 +- hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1673 ++ hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1674 + dentry->d_inode = inode;
1675 + dentry_rcuwalk_barrier(dentry);
1676 + spin_unlock(&dentry->d_lock);
1677 +@@ -1672,7 +1672,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1678 +
1679 + void d_instantiate(struct dentry *entry, struct inode * inode)
1680 + {
1681 +- BUG_ON(!hlist_unhashed(&entry->d_alias));
1682 ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1683 + if (inode)
1684 + spin_lock(&inode->i_lock);
1685 + __d_instantiate(entry, inode);
1686 +@@ -1711,7 +1711,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
1687 + return NULL;
1688 + }
1689 +
1690 +- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1691 ++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1692 + /*
1693 + * Don't need alias->d_lock here, because aliases with
1694 + * d_parent == entry->d_parent are not subject to name or
1695 +@@ -1737,7 +1737,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1696 + {
1697 + struct dentry *result;
1698 +
1699 +- BUG_ON(!hlist_unhashed(&entry->d_alias));
1700 ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1701 +
1702 + if (inode)
1703 + spin_lock(&inode->i_lock);
1704 +@@ -1768,7 +1768,7 @@ EXPORT_SYMBOL(d_instantiate_unique);
1705 + */
1706 + int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1707 + {
1708 +- BUG_ON(!hlist_unhashed(&entry->d_alias));
1709 ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1710 +
1711 + spin_lock(&inode->i_lock);
1712 + if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1713 +@@ -1807,7 +1807,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
1714 +
1715 + if (hlist_empty(&inode->i_dentry))
1716 + return NULL;
1717 +- alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1718 ++ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1719 + __dget(alias);
1720 + return alias;
1721 + }
1722 +@@ -1884,7 +1884,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1723 + spin_lock(&tmp->d_lock);
1724 + tmp->d_inode = inode;
1725 + tmp->d_flags |= add_flags;
1726 +- hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1727 ++ hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1728 + hlist_bl_lock(&tmp->d_sb->s_anon);
1729 + hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1730 + hlist_bl_unlock(&tmp->d_sb->s_anon);
1731 +@@ -2327,7 +2327,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
1732 + struct dentry *child;
1733 +
1734 + spin_lock(&dparent->d_lock);
1735 +- list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
1736 ++ list_for_each_entry(child, &dparent->d_subdirs, d_child) {
1737 + if (dentry == child) {
1738 + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1739 + __dget_dlock(dentry);
1740 +@@ -2574,8 +2574,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
1741 + /* Unhash the target: dput() will then get rid of it */
1742 + __d_drop(target);
1743 +
1744 +- list_del(&dentry->d_u.d_child);
1745 +- list_del(&target->d_u.d_child);
1746 ++ list_del(&dentry->d_child);
1747 ++ list_del(&target->d_child);
1748 +
1749 + /* Switch the names.. */
1750 + switch_names(dentry, target);
1751 +@@ -2585,15 +2585,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
1752 + if (IS_ROOT(dentry)) {
1753 + dentry->d_parent = target->d_parent;
1754 + target->d_parent = target;
1755 +- INIT_LIST_HEAD(&target->d_u.d_child);
1756 ++ INIT_LIST_HEAD(&target->d_child);
1757 + } else {
1758 + swap(dentry->d_parent, target->d_parent);
1759 +
1760 + /* And add them back to the (new) parent lists */
1761 +- list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
1762 ++ list_add(&target->d_child, &target->d_parent->d_subdirs);
1763 + }
1764 +
1765 +- list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
1766 ++ list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
1767 +
1768 + write_seqcount_end(&target->d_seq);
1769 + write_seqcount_end(&dentry->d_seq);
1770 +@@ -2700,9 +2700,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
1771 + swap(dentry->d_name.hash, anon->d_name.hash);
1772 +
1773 + dentry->d_parent = dentry;
1774 +- list_del_init(&dentry->d_u.d_child);
1775 ++ list_del_init(&dentry->d_child);
1776 + anon->d_parent = dparent;
1777 +- list_move(&anon->d_u.d_child, &dparent->d_subdirs);
1778 ++ list_move(&anon->d_child, &dparent->d_subdirs);
1779 +
1780 + write_seqcount_end(&dentry->d_seq);
1781 + write_seqcount_end(&anon->d_seq);
1782 +@@ -3333,7 +3333,7 @@ void d_tmpfile(struct dentry *dentry, struct inode *inode)
1783 + {
1784 + inode_dec_link_count(inode);
1785 + BUG_ON(dentry->d_name.name != dentry->d_iname ||
1786 +- !hlist_unhashed(&dentry->d_alias) ||
1787 ++ !hlist_unhashed(&dentry->d_u.d_alias) ||
1788 + !d_unlinked(dentry));
1789 + spin_lock(&dentry->d_parent->d_lock);
1790 + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1791 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1792 +index 1ff8fe5dab0d..4a9f0e0c6644 100644
1793 +--- a/fs/debugfs/inode.c
1794 ++++ b/fs/debugfs/inode.c
1795 +@@ -552,7 +552,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
1796 + * use the d_u.d_child as the rcu head and corrupt this list.
1797 + */
1798 + spin_lock(&parent->d_lock);
1799 +- list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
1800 ++ list_for_each_entry(child, &parent->d_subdirs, d_child) {
1801 + if (!debugfs_positive(child))
1802 + continue;
1803 +
1804 +diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
1805 +index 48a359dd286e..831d4f057e15 100644
1806 +--- a/fs/exportfs/expfs.c
1807 ++++ b/fs/exportfs/expfs.c
1808 +@@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result,
1809 +
1810 + inode = result->d_inode;
1811 + spin_lock(&inode->i_lock);
1812 +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1813 ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1814 + dget(dentry);
1815 + spin_unlock(&inode->i_lock);
1816 + if (toput)
1817 +diff --git a/fs/libfs.c b/fs/libfs.c
1818 +index a1844244246f..868c0b70a30e 100644
1819 +--- a/fs/libfs.c
1820 ++++ b/fs/libfs.c
1821 +@@ -113,18 +113,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
1822 +
1823 + spin_lock(&dentry->d_lock);
1824 + /* d_lock not required for cursor */
1825 +- list_del(&cursor->d_u.d_child);
1826 ++ list_del(&cursor->d_child);
1827 + p = dentry->d_subdirs.next;
1828 + while (n && p != &dentry->d_subdirs) {
1829 + struct dentry *next;
1830 +- next = list_entry(p, struct dentry, d_u.d_child);
1831 ++ next = list_entry(p, struct dentry, d_child);
1832 + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
1833 + if (simple_positive(next))
1834 + n--;
1835 + spin_unlock(&next->d_lock);
1836 + p = p->next;
1837 + }
1838 +- list_add_tail(&cursor->d_u.d_child, p);
1839 ++ list_add_tail(&cursor->d_child, p);
1840 + spin_unlock(&dentry->d_lock);
1841 + }
1842 + }
1843 +@@ -149,7 +149,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
1844 + {
1845 + struct dentry *dentry = file->f_path.dentry;
1846 + struct dentry *cursor = file->private_data;
1847 +- struct list_head *p, *q = &cursor->d_u.d_child;
1848 ++ struct list_head *p, *q = &cursor->d_child;
1849 +
1850 + if (!dir_emit_dots(file, ctx))
1851 + return 0;
1852 +@@ -158,7 +158,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
1853 + list_move(q, &dentry->d_subdirs);
1854 +
1855 + for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
1856 +- struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
1857 ++ struct dentry *next = list_entry(p, struct dentry, d_child);
1858 + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
1859 + if (!simple_positive(next)) {
1860 + spin_unlock(&next->d_lock);
1861 +@@ -286,7 +286,7 @@ int simple_empty(struct dentry *dentry)
1862 + int ret = 0;
1863 +
1864 + spin_lock(&dentry->d_lock);
1865 +- list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
1866 ++ list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1867 + spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
1868 + if (simple_positive(child)) {
1869 + spin_unlock(&child->d_lock);
1870 +diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
1871 +index c320ac52353e..dc9747d6a4d0 100644
1872 +--- a/fs/ncpfs/dir.c
1873 ++++ b/fs/ncpfs/dir.c
1874 +@@ -406,7 +406,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
1875 + spin_lock(&parent->d_lock);
1876 + next = parent->d_subdirs.next;
1877 + while (next != &parent->d_subdirs) {
1878 +- dent = list_entry(next, struct dentry, d_u.d_child);
1879 ++ dent = list_entry(next, struct dentry, d_child);
1880 + if ((unsigned long)dent->d_fsdata == fpos) {
1881 + if (dent->d_inode)
1882 + dget(dent);
1883 +diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
1884 +index 32c06587351a..6d5e7c56c79d 100644
1885 +--- a/fs/ncpfs/ncplib_kernel.h
1886 ++++ b/fs/ncpfs/ncplib_kernel.h
1887 +@@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent)
1888 + spin_lock(&parent->d_lock);
1889 + next = parent->d_subdirs.next;
1890 + while (next != &parent->d_subdirs) {
1891 +- dentry = list_entry(next, struct dentry, d_u.d_child);
1892 ++ dentry = list_entry(next, struct dentry, d_child);
1893 +
1894 + if (dentry->d_fsdata == NULL)
1895 + ncp_age_dentry(server, dentry);
1896 +@@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
1897 + spin_lock(&parent->d_lock);
1898 + next = parent->d_subdirs.next;
1899 + while (next != &parent->d_subdirs) {
1900 +- dentry = list_entry(next, struct dentry, d_u.d_child);
1901 ++ dentry = list_entry(next, struct dentry, d_child);
1902 + dentry->d_fsdata = NULL;
1903 + ncp_age_dentry(server, dentry);
1904 + next = next->next;
1905 +diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
1906 +index 66984a9aafaa..5b8ab0e444f9 100644
1907 +--- a/fs/nfs/getroot.c
1908 ++++ b/fs/nfs/getroot.c
1909 +@@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
1910 + */
1911 + spin_lock(&sb->s_root->d_inode->i_lock);
1912 + spin_lock(&sb->s_root->d_lock);
1913 +- hlist_del_init(&sb->s_root->d_alias);
1914 ++ hlist_del_init(&sb->s_root->d_u.d_alias);
1915 + spin_unlock(&sb->s_root->d_lock);
1916 + spin_unlock(&sb->s_root->d_inode->i_lock);
1917 + }
1918 +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
1919 +index 9d3e9c50066a..700129940c6e 100644
1920 +--- a/fs/notify/fsnotify.c
1921 ++++ b/fs/notify/fsnotify.c
1922 +@@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
1923 + spin_lock(&inode->i_lock);
1924 + /* run all of the dentries associated with this inode. Since this is a
1925 + * directory, there damn well better only be one item on this list */
1926 +- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1927 ++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1928 + struct dentry *child;
1929 +
1930 + /* run all of the children of the original inode and fix their
1931 + * d_flags to indicate parental interest (their parent is the
1932 + * original inode) */
1933 + spin_lock(&alias->d_lock);
1934 +- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
1935 ++ list_for_each_entry(child, &alias->d_subdirs, d_child) {
1936 + if (!child->d_inode)
1937 + continue;
1938 +
1939 +diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
1940 +index 0d3a97d2d5f6..116748502bae 100644
1941 +--- a/fs/ocfs2/dcache.c
1942 ++++ b/fs/ocfs2/dcache.c
1943 +@@ -173,7 +173,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
1944 + struct dentry *dentry;
1945 +
1946 + spin_lock(&inode->i_lock);
1947 +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
1948 ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1949 + spin_lock(&dentry->d_lock);
1950 + if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
1951 + trace_ocfs2_find_local_alias(dentry->d_name.len,
1952 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1953 +index c2546717fc2b..eaa7374305a3 100644
1954 +--- a/fs/proc/task_mmu.c
1955 ++++ b/fs/proc/task_mmu.c
1956 +@@ -993,9 +993,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
1957 + struct vm_area_struct *vma;
1958 + struct pagemapread *pm = walk->private;
1959 + spinlock_t *ptl;
1960 +- pte_t *pte;
1961 ++ pte_t *pte, *orig_pte;
1962 + int err = 0;
1963 +- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
1964 +
1965 + /* find the first VMA at or above 'addr' */
1966 + vma = find_vma(walk->mm, addr);
1967 +@@ -1009,6 +1008,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
1968 +
1969 + for (; addr != end; addr += PAGE_SIZE) {
1970 + unsigned long offset;
1971 ++ pagemap_entry_t pme;
1972 +
1973 + offset = (addr & ~PAGEMAP_WALK_MASK) >>
1974 + PAGE_SHIFT;
1975 +@@ -1023,32 +1023,55 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
1976 +
1977 + if (pmd_trans_unstable(pmd))
1978 + return 0;
1979 +- for (; addr != end; addr += PAGE_SIZE) {
1980 +- int flags2;
1981 +-
1982 +- /* check to see if we've left 'vma' behind
1983 +- * and need a new, higher one */
1984 +- if (vma && (addr >= vma->vm_end)) {
1985 +- vma = find_vma(walk->mm, addr);
1986 +- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1987 +- flags2 = __PM_SOFT_DIRTY;
1988 +- else
1989 +- flags2 = 0;
1990 +- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
1991 ++
1992 ++ while (1) {
1993 ++ /* End of address space hole, which we mark as non-present. */
1994 ++ unsigned long hole_end;
1995 ++
1996 ++ if (vma)
1997 ++ hole_end = min(end, vma->vm_start);
1998 ++ else
1999 ++ hole_end = end;
2000 ++
2001 ++ for (; addr < hole_end; addr += PAGE_SIZE) {
2002 ++ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
2003 ++
2004 ++ err = add_to_pagemap(addr, &pme, pm);
2005 ++ if (err)
2006 ++ return err;
2007 + }
2008 +
2009 +- /* check that 'vma' actually covers this address,
2010 +- * and that it isn't a huge page vma */
2011 +- if (vma && (vma->vm_start <= addr) &&
2012 +- !is_vm_hugetlb_page(vma)) {
2013 +- pte = pte_offset_map(pmd, addr);
2014 ++ if (!vma || vma->vm_start >= end)
2015 ++ break;
2016 ++ /*
2017 ++ * We can't possibly be in a hugetlb VMA. In general,
2018 ++ * for a mm_walk with a pmd_entry and a hugetlb_entry,
2019 ++ * the pmd_entry can only be called on addresses in a
2020 ++ * hugetlb if the walk starts in a non-hugetlb VMA and
2021 ++ * spans a hugepage VMA. Since pagemap_read walks are
2022 ++ * PMD-sized and PMD-aligned, this will never be true.
2023 ++ */
2024 ++ BUG_ON(is_vm_hugetlb_page(vma));
2025 ++
2026 ++ /* Addresses in the VMA. */
2027 ++ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2028 ++ for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
2029 ++ pagemap_entry_t pme;
2030 ++
2031 + pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
2032 +- /* unmap before userspace copy */
2033 +- pte_unmap(pte);
2034 ++ err = add_to_pagemap(addr, &pme, pm);
2035 ++ if (err)
2036 ++ break;
2037 + }
2038 +- err = add_to_pagemap(addr, &pme, pm);
2039 ++ pte_unmap_unlock(orig_pte, ptl);
2040 ++
2041 + if (err)
2042 + return err;
2043 ++
2044 ++ if (addr == end)
2045 ++ break;
2046 ++
2047 ++ vma = find_vma(walk->mm, addr);
2048 + }
2049 +
2050 + cond_resched();
2051 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
2052 +index 3b50cac7ccb3..0f0eb1c1e676 100644
2053 +--- a/include/linux/dcache.h
2054 ++++ b/include/linux/dcache.h
2055 +@@ -124,15 +124,15 @@ struct dentry {
2056 + void *d_fsdata; /* fs-specific data */
2057 +
2058 + struct list_head d_lru; /* LRU list */
2059 ++ struct list_head d_child; /* child of parent list */
2060 ++ struct list_head d_subdirs; /* our children */
2061 + /*
2062 +- * d_child and d_rcu can share memory
2063 ++ * d_alias and d_rcu can share memory
2064 + */
2065 + union {
2066 +- struct list_head d_child; /* child of parent list */
2067 ++ struct hlist_node d_alias; /* inode alias list */
2068 + struct rcu_head d_rcu;
2069 + } d_u;
2070 +- struct list_head d_subdirs; /* our children */
2071 +- struct hlist_node d_alias; /* inode alias list */
2072 + };
2073 +
2074 + /*
2075 +diff --git a/include/linux/mm.h b/include/linux/mm.h
2076 +index 46b8ab56b9db..a7b311dfa742 100644
2077 +--- a/include/linux/mm.h
2078 ++++ b/include/linux/mm.h
2079 +@@ -1009,6 +1009,7 @@ static inline int page_mapped(struct page *page)
2080 + #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
2081 + #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
2082 + #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
2083 ++#define VM_FAULT_SIGSEGV 0x0040
2084 +
2085 + #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
2086 + #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
2087 +@@ -1017,8 +1018,9 @@ static inline int page_mapped(struct page *page)
2088 +
2089 + #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
2090 +
2091 +-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
2092 +- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
2093 ++#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
2094 ++ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
2095 ++ VM_FAULT_FALLBACK)
2096 +
2097 + /* Encode hstate index for a hwpoisoned large page */
2098 + #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
2099 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2100 +index 911718fa92ed..bf46cc813451 100644
2101 +--- a/include/linux/netdevice.h
2102 ++++ b/include/linux/netdevice.h
2103 +@@ -1880,6 +1880,12 @@ void netdev_freemem(struct net_device *dev);
2104 + void synchronize_net(void);
2105 + int init_dummy_netdev(struct net_device *dev);
2106 +
2107 ++DECLARE_PER_CPU(int, xmit_recursion);
2108 ++static inline int dev_recursion_level(void)
2109 ++{
2110 ++ return this_cpu_read(xmit_recursion);
2111 ++}
2112 ++
2113 + struct net_device *dev_get_by_index(struct net *net, int ifindex);
2114 + struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2115 + struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2116 +diff --git a/include/linux/sched.h b/include/linux/sched.h
2117 +index 218b058060f1..91fe6a38b307 100644
2118 +--- a/include/linux/sched.h
2119 ++++ b/include/linux/sched.h
2120 +@@ -1695,7 +1695,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2121 + }
2122 +
2123 +
2124 +-static int pid_alive(const struct task_struct *p);
2125 ++static inline int pid_alive(const struct task_struct *p);
2126 + static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2127 + {
2128 + pid_t pid = 0;
2129 +diff --git a/include/net/ip.h b/include/net/ip.h
2130 +index 3446cdd29608..5128fa7a8302 100644
2131 +--- a/include/net/ip.h
2132 ++++ b/include/net/ip.h
2133 +@@ -407,22 +407,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
2134 +
2135 + #endif
2136 +
2137 +-static inline int sk_mc_loop(struct sock *sk)
2138 +-{
2139 +- if (!sk)
2140 +- return 1;
2141 +- switch (sk->sk_family) {
2142 +- case AF_INET:
2143 +- return inet_sk(sk)->mc_loop;
2144 +-#if IS_ENABLED(CONFIG_IPV6)
2145 +- case AF_INET6:
2146 +- return inet6_sk(sk)->mc_loop;
2147 +-#endif
2148 +- }
2149 +- WARN_ON(1);
2150 +- return 1;
2151 +-}
2152 +-
2153 + bool ip_call_ra_chain(struct sk_buff *skb);
2154 +
2155 + /*
2156 +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
2157 +index 2e74c6cfa612..ee2d53ae62fe 100644
2158 +--- a/include/net/ip6_route.h
2159 ++++ b/include/net/ip6_route.h
2160 +@@ -168,7 +168,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
2161 +
2162 + static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
2163 + {
2164 +- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
2165 ++ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
2166 ++ inet6_sk(skb->sk) : NULL;
2167 +
2168 + return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
2169 + skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
2170 +diff --git a/include/net/sock.h b/include/net/sock.h
2171 +index f66b2b19a6e4..0c79a740e97d 100644
2172 +--- a/include/net/sock.h
2173 ++++ b/include/net/sock.h
2174 +@@ -1815,6 +1815,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2175 +
2176 + struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2177 +
2178 ++bool sk_mc_loop(struct sock *sk);
2179 ++
2180 + static inline bool sk_can_gso(const struct sock *sk)
2181 + {
2182 + return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2183 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2184 +index 550e2050d778..18711f326260 100644
2185 +--- a/kernel/cgroup.c
2186 ++++ b/kernel/cgroup.c
2187 +@@ -971,7 +971,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
2188 + parent = dentry->d_parent;
2189 + spin_lock(&parent->d_lock);
2190 + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2191 +- list_del_init(&dentry->d_u.d_child);
2192 ++ list_del_init(&dentry->d_child);
2193 + spin_unlock(&dentry->d_lock);
2194 + spin_unlock(&parent->d_lock);
2195 + remove_dir(dentry);
2196 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2197 +index 813b021379f5..a2d62b3b90c7 100644
2198 +--- a/kernel/trace/trace.c
2199 ++++ b/kernel/trace/trace.c
2200 +@@ -6158,7 +6158,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
2201 + int ret;
2202 +
2203 + /* Paranoid: Make sure the parent is the "instances" directory */
2204 +- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
2205 ++ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
2206 + if (WARN_ON_ONCE(parent != trace_instance_dir))
2207 + return -ENOENT;
2208 +
2209 +@@ -6185,7 +6185,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
2210 + int ret;
2211 +
2212 + /* Paranoid: Make sure the parent is the "instances" directory */
2213 +- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
2214 ++ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
2215 + if (WARN_ON_ONCE(parent != trace_instance_dir))
2216 + return -ENOENT;
2217 +
2218 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2219 +index e4c4efc4ba0d..c6646a58d23e 100644
2220 +--- a/kernel/trace/trace_events.c
2221 ++++ b/kernel/trace/trace_events.c
2222 +@@ -428,7 +428,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
2223 +
2224 + if (dir) {
2225 + spin_lock(&dir->d_lock); /* probably unneeded */
2226 +- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
2227 ++ list_for_each_entry(child, &dir->d_subdirs, d_child) {
2228 + if (child->d_inode) /* probably unneeded */
2229 + child->d_inode->i_private = NULL;
2230 + }
2231 +diff --git a/mm/ksm.c b/mm/ksm.c
2232 +index 68710e80994a..5e706e391a02 100644
2233 +--- a/mm/ksm.c
2234 ++++ b/mm/ksm.c
2235 +@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
2236 + else
2237 + ret = VM_FAULT_WRITE;
2238 + put_page(page);
2239 +- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
2240 ++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
2241 + /*
2242 + * We must loop because handle_mm_fault() may back out if there's
2243 + * any difficulty e.g. if pte accessed bit gets updated concurrently.
2244 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2245 +index a98c7fce470a..ffc7bf0458fb 100644
2246 +--- a/mm/memory-failure.c
2247 ++++ b/mm/memory-failure.c
2248 +@@ -1645,8 +1645,6 @@ static int __soft_offline_page(struct page *page, int flags)
2249 + * setting PG_hwpoison.
2250 + */
2251 + if (!is_free_buddy_page(page))
2252 +- lru_add_drain_all();
2253 +- if (!is_free_buddy_page(page))
2254 + drain_all_pages();
2255 + SetPageHWPoison(page);
2256 + if (!is_free_buddy_page(page))
2257 +diff --git a/mm/memory.c b/mm/memory.c
2258 +index 102af096cbc5..749e1c68d490 100644
2259 +--- a/mm/memory.c
2260 ++++ b/mm/memory.c
2261 +@@ -1836,7 +1836,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
2262 + else
2263 + return -EFAULT;
2264 + }
2265 +- if (ret & VM_FAULT_SIGBUS)
2266 ++ if (ret & (VM_FAULT_SIGBUS |
2267 ++ VM_FAULT_SIGSEGV))
2268 + return i ? i : -EFAULT;
2269 + BUG();
2270 + }
2271 +@@ -1946,7 +1947,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
2272 + return -ENOMEM;
2273 + if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2274 + return -EHWPOISON;
2275 +- if (ret & VM_FAULT_SIGBUS)
2276 ++ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2277 + return -EFAULT;
2278 + BUG();
2279 + }
2280 +@@ -3235,7 +3236,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2281 +
2282 + /* Check if we need to add a guard page to the stack */
2283 + if (check_stack_guard_page(vma, address) < 0)
2284 +- return VM_FAULT_SIGBUS;
2285 ++ return VM_FAULT_SIGSEGV;
2286 +
2287 + /* Use the zero-page for reads */
2288 + if (!(flags & FAULT_FLAG_WRITE)) {
2289 +diff --git a/net/core/dev.c b/net/core/dev.c
2290 +index f6d8d7fe29ab..73abbd77d72c 100644
2291 +--- a/net/core/dev.c
2292 ++++ b/net/core/dev.c
2293 +@@ -2775,7 +2775,9 @@ static void skb_update_prio(struct sk_buff *skb)
2294 + #define skb_update_prio(skb)
2295 + #endif
2296 +
2297 +-static DEFINE_PER_CPU(int, xmit_recursion);
2298 ++DEFINE_PER_CPU(int, xmit_recursion);
2299 ++EXPORT_SYMBOL(xmit_recursion);
2300 ++
2301 + #define RECURSION_LIMIT 10
2302 +
2303 + /**
2304 +diff --git a/net/core/sock.c b/net/core/sock.c
2305 +index c8069561bdb7..650dd58ebd05 100644
2306 +--- a/net/core/sock.c
2307 ++++ b/net/core/sock.c
2308 +@@ -659,6 +659,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
2309 + sock_reset_flag(sk, bit);
2310 + }
2311 +
2312 ++bool sk_mc_loop(struct sock *sk)
2313 ++{
2314 ++ if (dev_recursion_level())
2315 ++ return false;
2316 ++ if (!sk)
2317 ++ return true;
2318 ++ switch (sk->sk_family) {
2319 ++ case AF_INET:
2320 ++ return inet_sk(sk)->mc_loop;
2321 ++#if IS_ENABLED(CONFIG_IPV6)
2322 ++ case AF_INET6:
2323 ++ return inet6_sk(sk)->mc_loop;
2324 ++#endif
2325 ++ }
2326 ++ WARN_ON(1);
2327 ++ return true;
2328 ++}
2329 ++EXPORT_SYMBOL(sk_mc_loop);
2330 ++
2331 + /*
2332 + * This is meant for all protocols to use and covers goings on
2333 + * at the socket level. Everything here is generic.
2334 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2335 +index 22917918fa80..9fbd69efa999 100644
2336 +--- a/net/ipv4/tcp_input.c
2337 ++++ b/net/ipv4/tcp_input.c
2338 +@@ -3064,10 +3064,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
2339 + if (seq_rtt < 0) {
2340 + seq_rtt = ca_seq_rtt;
2341 + }
2342 +- if (!(sacked & TCPCB_SACKED_ACKED))
2343 ++ if (!(sacked & TCPCB_SACKED_ACKED)) {
2344 + reord = min(pkts_acked, reord);
2345 +- if (!after(scb->end_seq, tp->high_seq))
2346 +- flag |= FLAG_ORIG_SACK_ACKED;
2347 ++ if (!after(scb->end_seq, tp->high_seq))
2348 ++ flag |= FLAG_ORIG_SACK_ACKED;
2349 ++ }
2350 + }
2351 +
2352 + if (sacked & TCPCB_SACKED_ACKED)
2353 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2354 +index b7effad5a58c..e2f8bd0d35ed 100644
2355 +--- a/net/ipv4/tcp_ipv4.c
2356 ++++ b/net/ipv4/tcp_ipv4.c
2357 +@@ -1875,7 +1875,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
2358 + skb->sk = sk;
2359 + skb->destructor = sock_edemux;
2360 + if (sk->sk_state != TCP_TIME_WAIT) {
2361 +- struct dst_entry *dst = sk->sk_rx_dst;
2362 ++ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
2363 +
2364 + if (dst)
2365 + dst = dst_check(dst, 0);
2366 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2367 +index 96f64e59d70c..8c70c73da347 100644
2368 +--- a/net/ipv4/tcp_output.c
2369 ++++ b/net/ipv4/tcp_output.c
2370 +@@ -2796,6 +2796,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2371 + }
2372 + #endif
2373 +
2374 ++ /* Do not fool tcpdump (if any), clean our debris */
2375 ++ skb->tstamp.tv64 = 0;
2376 + return skb;
2377 + }
2378 + EXPORT_SYMBOL(tcp_make_synack);
2379 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2380 +index d7907ecf0b75..066d0b03f2b8 100644
2381 +--- a/net/ipv6/ip6_output.c
2382 ++++ b/net/ipv6/ip6_output.c
2383 +@@ -555,7 +555,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
2384 + {
2385 + struct sk_buff *frag;
2386 + struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
2387 +- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
2388 ++ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
2389 ++ inet6_sk(skb->sk) : NULL;
2390 + struct ipv6hdr *tmp_hdr;
2391 + struct frag_hdr *fh;
2392 + unsigned int mtu, hlen, left, len;
2393 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
2394 +index 09a22f4f36c9..bcd65186b497 100644
2395 +--- a/net/ipv6/ndisc.c
2396 ++++ b/net/ipv6/ndisc.c
2397 +@@ -1193,7 +1193,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
2398 + if (rt)
2399 + rt6_set_expires(rt, jiffies + (HZ * lifetime));
2400 + if (ra_msg->icmph.icmp6_hop_limit) {
2401 +- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
2402 ++ /* Only set hop_limit on the interface if it is higher than
2403 ++ * the current hop_limit.
2404 ++ */
2405 ++ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
2406 ++ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
2407 ++ } else {
2408 ++ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
2409 ++ }
2410 + if (rt)
2411 + dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
2412 + ra_msg->icmph.icmp6_hop_limit);
2413 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2414 +index a4f890dd223a..9d4332dba8ea 100644
2415 +--- a/net/ipv6/tcp_ipv6.c
2416 ++++ b/net/ipv6/tcp_ipv6.c
2417 +@@ -1633,7 +1633,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
2418 + skb->sk = sk;
2419 + skb->destructor = sock_edemux;
2420 + if (sk->sk_state != TCP_TIME_WAIT) {
2421 +- struct dst_entry *dst = sk->sk_rx_dst;
2422 ++ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
2423 +
2424 + if (dst)
2425 + dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
2426 +diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
2427 +index d25f29377648..957c1db66652 100644
2428 +--- a/net/netfilter/nf_conntrack_proto_generic.c
2429 ++++ b/net/netfilter/nf_conntrack_proto_generic.c
2430 +@@ -14,6 +14,30 @@
2431 +
2432 + static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
2433 +
2434 ++static bool nf_generic_should_process(u8 proto)
2435 ++{
2436 ++ switch (proto) {
2437 ++#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
2438 ++ case IPPROTO_SCTP:
2439 ++ return false;
2440 ++#endif
2441 ++#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
2442 ++ case IPPROTO_DCCP:
2443 ++ return false;
2444 ++#endif
2445 ++#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
2446 ++ case IPPROTO_GRE:
2447 ++ return false;
2448 ++#endif
2449 ++#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
2450 ++ case IPPROTO_UDPLITE:
2451 ++ return false;
2452 ++#endif
2453 ++ default:
2454 ++ return true;
2455 ++ }
2456 ++}
2457 ++
2458 + static inline struct nf_generic_net *generic_pernet(struct net *net)
2459 + {
2460 + return &net->ct.nf_ct_proto.generic;
2461 +@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
2462 + static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
2463 + unsigned int dataoff, unsigned int *timeouts)
2464 + {
2465 +- return true;
2466 ++ return nf_generic_should_process(nf_ct_protonum(ct));
2467 + }
2468 +
2469 + #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
2470 +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
2471 +index 6c4cbd97a673..fc68bf6e4889 100644
2472 +--- a/security/selinux/selinuxfs.c
2473 ++++ b/security/selinux/selinuxfs.c
2474 +@@ -1200,7 +1200,7 @@ static void sel_remove_entries(struct dentry *de)
2475 + spin_lock(&de->d_lock);
2476 + node = de->d_subdirs.next;
2477 + while (node != &de->d_subdirs) {
2478 +- struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
2479 ++ struct dentry *d = list_entry(node, struct dentry, d_child);
2480 +
2481 + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
2482 + list_del_init(node);
2483 +@@ -1674,12 +1674,12 @@ static void sel_remove_classes(void)
2484 +
2485 + list_for_each(class_node, &class_dir->d_subdirs) {
2486 + struct dentry *class_subdir = list_entry(class_node,
2487 +- struct dentry, d_u.d_child);
2488 ++ struct dentry, d_child);
2489 + struct list_head *class_subdir_node;
2490 +
2491 + list_for_each(class_subdir_node, &class_subdir->d_subdirs) {
2492 + struct dentry *d = list_entry(class_subdir_node,
2493 +- struct dentry, d_u.d_child);
2494 ++ struct dentry, d_child);
2495 +
2496 + if (d->d_inode)
2497 + if (d->d_inode->i_mode & S_IFDIR)