commit: bd9aa661274bf85773d07bc63b11f0ada2ac9eb2
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jul 1 11:32:32 2014 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jul 1 11:32:32 2014 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=bd9aa661

Linux patch 3.15.3

---
 0000_README             |    4 +
 1002_linux-3.15.3.patch | 5807 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5811 insertions(+)

diff --git a/0000_README b/0000_README
index 58bb467..5171464 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-3.15.2.patch
 From: http://www.kernel.org
 Desc: Linux 3.15.2
 
+Patch: 1002_linux-3.15.3.patch
+From: http://www.kernel.org
+Desc: Linux 3.15.3
+
 Patch: 1700_enable-thinkpad-micled.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc: Enable mic mute led in thinkpads
 
diff --git a/1002_linux-3.15.3.patch b/1002_linux-3.15.3.patch
new file mode 100644
index 0000000..1d380d5
--- /dev/null
+++ b/1002_linux-3.15.3.patch
@@ -0,0 +1,5807 @@
+diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
+index 550068466605..6ae89a9edf2a 100644
+--- a/Documentation/vm/hwpoison.txt
++++ b/Documentation/vm/hwpoison.txt
+@@ -84,6 +84,11 @@ PR_MCE_KILL
+ PR_MCE_KILL_EARLY: Early kill
+ PR_MCE_KILL_LATE: Late kill
+ PR_MCE_KILL_DEFAULT: Use system global default
++ Note that if you want to have a dedicated thread which handles
++ the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should
++ call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise,
++ the SIGBUS is sent to the main thread.
++
+ PR_MCE_KILL_GET
+ return current mode
+
+diff --git a/Makefile b/Makefile
+index 475e0853a2f4..2e37d8b0bb96 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 15
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Shuffling Zombie Juror
+
+diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+index 5d42feb31049..178382ca594f 100644
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -25,7 +25,7 @@
+
+ memory {
+ device_type = "memory";
+- reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
++ reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
+ };
+
+ soc {
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index af4e8c8a5422..6582c4adc182 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
+ return trace->nr_entries >= trace->max_entries;
+ }
+
+-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++/* This must be noinline to so that our skip calculation works correctly */
++static noinline void __save_stack_trace(struct task_struct *tsk,
++ struct stack_trace *trace, unsigned int nosched)
+ {
+ struct stack_trace_data data;
+ struct stackframe frame;
+
+ data.trace = trace;
+ data.skip = trace->skip;
++ data.no_sched_functions = nosched;
+
+ if (tsk != current) {
+ #ifdef CONFIG_SMP
+@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ return;
+ #else
+- data.no_sched_functions = 1;
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ frame.lr = 0; /* recovered from the stack */
+@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ } else {
+ register unsigned long current_sp asm ("sp");
+
+- data.no_sched_functions = 0;
++ /* We don't want this function nor the caller */
++ data.skip += 2;
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+- frame.pc = (unsigned long)save_stack_trace_tsk;
++ frame.pc = (unsigned long)__save_stack_trace;
+ }
+
+ walk_stackframe(&frame, save_trace, &data);
+@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ }
+
++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++{
++ __save_stack_trace(tsk, trace, 1);
++}
++
+ void save_stack_trace(struct stack_trace *trace)
+ {
+- save_stack_trace_tsk(current, trace);
++ __save_stack_trace(current, trace, 0);
+ }
+ EXPORT_SYMBOL_GPL(save_stack_trace);
+ #endif
+diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
+index 65d2acb31498..5b45d266d83e 100644
+--- a/arch/arm/mach-omap1/board-h2.c
++++ b/arch/arm/mach-omap1/board-h2.c
+@@ -346,7 +346,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
+ /* usb1 has a Mini-AB port and external isp1301 transceiver */
+ .otg = 2,
+
+-#ifdef CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
+ /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
+ #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
+index 816ecd13f81e..bfed4f928663 100644
+--- a/arch/arm/mach-omap1/board-h3.c
++++ b/arch/arm/mach-omap1/board-h3.c
+@@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
+ /* usb1 has a Mini-AB port and external isp1301 transceiver */
+ .otg = 2,
+
+-#ifdef CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
+ #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+ /* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
+diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
+index bd5f02e9c354..c49ce83cc1eb 100644
+--- a/arch/arm/mach-omap1/board-innovator.c
++++ b/arch/arm/mach-omap1/board-innovator.c
+@@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
+ /* usb1 has a Mini-AB port and external isp1301 transceiver */
+ .otg = 2,
+
+-#ifdef CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
+ /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
+ #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index 3a0262156e93..7436d4cf6596 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -283,7 +283,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
+ * be used, with a NONSTANDARD gender-bending cable/dongle, as
+ * a peripheral.
+ */
+-#ifdef CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ .register_dev = 1,
+ .hmc_mode = 0,
+ #else
+diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
+index 4349e82debfe..17cd39360afe 100644
+--- a/arch/arm/mach-omap2/gpmc-nand.c
++++ b/arch/arm/mach-omap2/gpmc-nand.c
+@@ -46,7 +46,7 @@ static struct platform_device gpmc_nand_device = {
+ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
+ {
+ /* platforms which support all ECC schemes */
+- if (soc_is_am33xx() || cpu_is_omap44xx() ||
++ if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
+ soc_is_omap54xx() || soc_is_dra7xx())
+ return 1;
+
+diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
+index 54ee6163c181..66781bf34077 100644
+--- a/arch/arm/mm/hugetlbpage.c
++++ b/arch/arm/mm/hugetlbpage.c
+@@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
+ {
+ return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+ }
+-
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
+index 01a719e18bb0..22e3ad63500c 100644
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm)
+ mov pc, lr
+ ENDPROC(cpu_v7_switch_mm)
+
++#ifdef __ARMEB__
++#define rl r3
++#define rh r2
++#else
++#define rl r2
++#define rh r3
++#endif
++
+ /*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+@@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm)
+ */
+ ENTRY(cpu_v7_set_pte_ext)
+ #ifdef CONFIG_MMU
+- tst r2, #L_PTE_VALID
++ tst rl, #L_PTE_VALID
+ beq 1f
+- tst r3, #1 << (57 - 32) @ L_PTE_NONE
+- bicne r2, #L_PTE_VALID
++ tst rh, #1 << (57 - 32) @ L_PTE_NONE
++ bicne rl, #L_PTE_VALID
+ bne 1f
+- tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
+- orreq r2, #L_PTE_RDONLY
++ tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
++ orreq rl, #L_PTE_RDONLY
+ 1: strd r2, r3, [r0]
+ ALT_SMP(W(nop))
+ ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
+diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
+index 83f71b3004a8..f06a9c2d399e 100644
+--- a/arch/arm64/include/asm/Kbuild
++++ b/arch/arm64/include/asm/Kbuild
+@@ -30,7 +30,6 @@ generic-y += msgbuf.h
+ generic-y += mutex.h
+ generic-y += pci.h
+ generic-y += poll.h
+-generic-y += posix_types.h
+ generic-y += preempt.h
+ generic-y += resource.h
+ generic-y += rwsem.h
+diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
+index 3a4572ec3273..dc82e52acdb3 100644
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -26,8 +26,6 @@
+ #include <xen/xen.h>
+ #include <asm/xen/hypervisor.h>
+
+-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+-
+ #define DMA_ERROR_CODE (~(dma_addr_t)0)
+ extern struct dma_map_ops *dma_ops;
+ extern struct dma_map_ops coherent_swiotlb_dma_ops;
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 7b1c67a0b485..d123f0eea332 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -253,7 +253,7 @@ static inline pmd_t pte_pmd(pte_t pte)
+ #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+ #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+ #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
+
+ #define __HAVE_ARCH_PMD_WRITE
+ #define pmd_write(pmd) pte_write(pmd_pte(pmd))
+diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h
+new file mode 100644
+index 000000000000..7985ff60ca3f
+--- /dev/null
++++ b/arch/arm64/include/uapi/asm/posix_types.h
+@@ -0,0 +1,10 @@
++#ifndef __ASM_POSIX_TYPES_H
++#define __ASM_POSIX_TYPES_H
++
++typedef unsigned short __kernel_old_uid_t;
++typedef unsigned short __kernel_old_gid_t;
++#define __kernel_old_uid_t __kernel_old_uid_t
++
++#include <asm-generic/posix_types.h>
++
++#endif /* __ASM_POSIX_TYPES_H */
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 6a8928bba03c..7a50b86464cc 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -650,11 +650,16 @@ static int compat_gpr_get(struct task_struct *target,
+ reg = task_pt_regs(target)->regs[idx];
+ }
+
+- ret = copy_to_user(ubuf, &reg, sizeof(reg));
+- if (ret)
+- break;
+-
+- ubuf += sizeof(reg);
++ if (kbuf) {
++ memcpy(kbuf, &reg, sizeof(reg));
++ kbuf += sizeof(reg);
++ } else {
++ ret = copy_to_user(ubuf, &reg, sizeof(reg));
++ if (ret)
++ break;
++
++ ubuf += sizeof(reg);
++ }
+ }
+
+ return ret;
+@@ -684,11 +689,16 @@ static int compat_gpr_set(struct task_struct *target,
+ unsigned int idx = start + i;
+ compat_ulong_t reg;
+
+- ret = copy_from_user(&reg, ubuf, sizeof(reg));
+- if (ret)
+- return ret;
++ if (kbuf) {
++ memcpy(&reg, kbuf, sizeof(reg));
++ kbuf += sizeof(reg);
++ } else {
++ ret = copy_from_user(&reg, ubuf, sizeof(reg));
++ if (ret)
++ return ret;
+
+- ubuf += sizeof(reg);
++ ubuf += sizeof(reg);
++ }
+
+ switch (idx) {
+ case 15:
+@@ -821,6 +831,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
+ compat_ulong_t val)
+ {
+ int ret;
++ mm_segment_t old_fs = get_fs();
+
+ if (off & 3 || off >= COMPAT_USER_SZ)
+ return -EIO;
+@@ -828,10 +839,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
+ if (off >= sizeof(compat_elf_gregset_t))
+ return 0;
+
++ set_fs(KERNEL_DS);
+ ret = copy_regset_from_user(tsk, &user_aarch32_view,
+ REGSET_COMPAT_GPR, off,
+ sizeof(compat_ulong_t),
+ &val);
++ set_fs(old_fs);
++
+ return ret;
+ }
+
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 31eb959e9aa8..023747bf4dd7 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
+ #endif
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+-
+ static __init int setup_hugepagesz(char *opt)
+ {
+ unsigned long ps = memparse(opt, &opt);
+diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
+index 1a871b78e570..344387a55406 100644
+--- a/arch/ia64/hp/common/sba_iommu.c
++++ b/arch/ia64/hp/common/sba_iommu.c
+@@ -242,7 +242,7 @@ struct ioc {
+ struct pci_dev *sac_only_dev;
+ };
+
+-static struct ioc *ioc_list;
++static struct ioc *ioc_list, *ioc_found;
+ static int reserve_sba_gart = 1;
+
+ static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
+@@ -1809,20 +1809,13 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
+ { SX2000_IOC_ID, "sx2000", NULL },
+ };
+
+-static struct ioc *
+-ioc_init(unsigned long hpa, void *handle)
++static void ioc_init(unsigned long hpa, struct ioc *ioc)
+ {
+- struct ioc *ioc;
+ struct ioc_iommu *info;
+
+- ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+- if (!ioc)
+- return NULL;
+-
+ ioc->next = ioc_list;
+ ioc_list = ioc;
+
+- ioc->handle = handle;
+ ioc->ioc_hpa = ioremap(hpa, 0x1000);
+
+ ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
+@@ -1863,8 +1856,6 @@ ioc_init(unsigned long hpa, void *handle)
+ "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
+ ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
+ hpa, ioc->iov_size >> 20, ioc->ibase);
+-
+- return ioc;
+ }
+
+
+@@ -2031,22 +2022,21 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
+ #endif
+ }
+
+-static int
+-acpi_sba_ioc_add(struct acpi_device *device,
+- const struct acpi_device_id *not_used)
++static void acpi_sba_ioc_add(struct ioc *ioc)
+ {
+- struct ioc *ioc;
++ acpi_handle handle = ioc->handle;
+ acpi_status status;
+ u64 hpa, length;
+ struct acpi_device_info *adi;
+
+- status = hp_acpi_csr_space(device->handle, &hpa, &length);
++ ioc_found = ioc->next;
++ status = hp_acpi_csr_space(handle, &hpa, &length);
+ if (ACPI_FAILURE(status))
+- return 1;
++ goto err;
+
+- status = acpi_get_object_info(device->handle, &adi);
++ status = acpi_get_object_info(handle, &adi);
+ if (ACPI_FAILURE(status))
+- return 1;
++ goto err;
+
+ /*
+ * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
+@@ -2067,13 +2057,13 @@ acpi_sba_ioc_add(struct acpi_device *device,
+ if (!iovp_shift)
+ iovp_shift = 12;
+
+- ioc = ioc_init(hpa, device->handle);
+- if (!ioc)
+- return 1;
+-
++ ioc_init(hpa, ioc);
+ /* setup NUMA node association */
+- sba_map_ioc_to_node(ioc, device->handle);
+- return 0;
++ sba_map_ioc_to_node(ioc, handle);
++ return;
++
++ err:
++ kfree(ioc);
+ }
+
+ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
+@@ -2081,9 +2071,26 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
+ {"HWP0004", 0},
+ {"", 0},
+ };
++
++static int acpi_sba_ioc_attach(struct acpi_device *device,
++ const struct acpi_device_id *not_used)
++{
++ struct ioc *ioc;
++
++ ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
++ if (!ioc)
++ return -ENOMEM;
++
++ ioc->next = ioc_found;
++ ioc_found = ioc;
++ ioc->handle = device->handle;
++ return 1;
++}
++
++
+ static struct acpi_scan_handler acpi_sba_ioc_handler = {
+ .ids = hp_ioc_iommu_device_ids,
+- .attach = acpi_sba_ioc_add,
++ .attach = acpi_sba_ioc_attach,
+ };
+
+ static int __init acpi_sba_ioc_init_acpi(void)
+@@ -2118,9 +2125,12 @@ sba_init(void)
+ #endif
+
+ /*
+- * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
++ * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
+ * routine, but that only happens if acpi_scan_init() has already run.
+ */
++ while (ioc_found)
++ acpi_sba_ioc_add(ioc_found);
++
+ if (!ioc_list) {
+ #ifdef CONFIG_IA64_GENERIC
+ /*
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 68232db98baa..76069c18ee42 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
+ return 0;
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 0;
+-}
+-
+ struct page *
+ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+ {
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 042431509b56..3c52fa6d0f8e 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
+ return 0;
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+ {
+diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
+index 77e0ae036e7c..4ec8ee10d371 100644
+--- a/arch/mips/mm/hugetlbpage.c
++++ b/arch/mips/mm/hugetlbpage.c
+@@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
+ return (pud_val(pud) & _PAGE_HUGE) != 0;
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+-
+ struct page *
+ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index eb923654ba80..7e70ae968e5f 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
+ */
+ return ((pgd_val(pgd) & 0x3) != 0x0);
+ }
+-
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+ #else
+ int pmd_huge(pmd_t pmd)
+ {
+@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
+ {
+ return 0;
+ }
+-
+-int pmd_huge_support(void)
+-{
+- return 0;
+-}
+ #endif
+
+ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index bbf8141408cd..2bed4f02a558 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -142,9 +142,9 @@ struct _lowcore {
+ __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
+
+ /* Interrupt response block */
+- __u8 irb[64]; /* 0x0300 */
++ __u8 irb[96]; /* 0x0300 */
+
+- __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
++ __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+@@ -288,12 +288,13 @@ struct _lowcore {
+ __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
+
+ /* Interrupt response block. */
+- __u8 irb[64]; /* 0x0400 */
++ __u8 irb[96]; /* 0x0400 */
++ __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */
+
+ /* Per cpu primary space access list */
+- __u32 paste[16]; /* 0x0440 */
++ __u32 paste[16]; /* 0x0480 */
+
+- __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
++ __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 386d37a228bb..0931b110c826 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -226,7 +226,7 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->wtom_clock_sec =
+ tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+- + (tk->wall_to_monotonic.tv_nsec << tk->shift);
++ + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
+ nsecps = (u64) NSEC_PER_SEC << tk->shift;
+ while (vdso_data->wtom_clock_nsec >= nsecps) {
+ vdso_data->wtom_clock_nsec -= nsecps;
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index 0727a55d87d9..0ff66a7e29bb 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -220,11 +220,6 @@ int pud_huge(pud_t pud)
+ return 0;
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmdp, int write)
+ {
+diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
+index 0d676a41081e..d7762349ea48 100644
+--- a/arch/sh/mm/hugetlbpage.c
++++ b/arch/sh/mm/hugetlbpage.c
+@@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
+ return 0;
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 0;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+ {
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 9bd9ce80bf77..d329537739c6 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
+ return 0;
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 0;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+ {
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 0cb3bbaa580c..e514899e1100 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
+ return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
+ }
+
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+ {
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 25d2c6f7325e..6b8b429c832f 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1871,6 +1871,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ def_bool y
+ depends on X86_64 || X86_PAE
+
++config ARCH_ENABLE_HUGEPAGE_MIGRATION
++ def_bool y
++ depends on X86_64 && HUGETLB_PAGE && MIGRATION
++
+ menu "Power management and ACPI options"
+
+ config ARCH_HIBERNATION_HEADER
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index a2a4f4697889..6491353cc9aa 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -431,9 +431,10 @@ sysenter_past_esp:
+ jnz sysenter_audit
+ sysenter_do_call:
+ cmpl $(NR_syscalls), %eax
+- jae syscall_badsys
++ jae sysenter_badsys
+ call *sys_call_table(,%eax,4)
+ movl %eax,PT_EAX(%esp)
++sysenter_after_call:
+ LOCKDEP_SYS_EXIT
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ TRACE_IRQS_OFF
+@@ -551,11 +552,6 @@ ENTRY(iret_exc)
+
+ CFI_RESTORE_STATE
+ ldt_ss:
+- larl PT_OLDSS(%esp), %eax
+- jnz restore_nocheck
+- testl $0x00400000, %eax # returning to 32bit stack?
+- jnz restore_nocheck # allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+ /*
+ * The kernel can't run on a non-flat stack if paravirt mode
+@@ -688,7 +684,12 @@ END(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+- jmp resume_userspace
++ jmp syscall_exit
++END(syscall_badsys)
++
++sysenter_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp sysenter_after_call
+ END(syscall_badsys)
+ CFI_ENDPROC
+ /*
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 8c9f647ff9e1..8b977ebf9388 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ {
+ return NULL;
+ }
+-
+-int pmd_huge_support(void)
+-{
+- return 0;
+-}
+ #else
+
+ struct page *
+@@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
+ {
+ return !!(pud_val(pud) & _PAGE_PSE);
+ }
+-
+-int pmd_huge_support(void)
+-{
+- return 1;
+-}
+ #endif
+
+ #ifdef CONFIG_HUGETLB_PAGE
+diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
+index 04376ac3d9ef..ec255a1646d2 100644
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203 common sched_setaffinity sys_sched_setaffinity
+ 204 common sched_getaffinity sys_sched_getaffinity
+ 205 64 set_thread_area
+-206 common io_setup sys_io_setup
++206 64 io_setup sys_io_setup
+ 207 common io_destroy sys_io_destroy
+ 208 common io_getevents sys_io_getevents
+-209 common io_submit sys_io_submit
++209 64 io_submit sys_io_submit
+ 210 common io_cancel sys_io_cancel
+ 211 64 get_thread_area
+ 212 common lookup_dcookie sys_lookup_dcookie
+@@ -359,3 +359,5 @@
+ 540 x32 process_vm_writev compat_sys_process_vm_writev
+ 541 x32 setsockopt compat_sys_setsockopt
+ 542 x32 getsockopt compat_sys_getsockopt
++543 x32 io_setup compat_sys_io_setup
++544 x32 io_submit compat_sys_io_submit
+diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
+index 77219336c7e0..6dc54b3c28b0 100644
+--- a/drivers/acpi/acpica/utstring.c
++++ b/drivers/acpi/acpica/utstring.c
+@@ -353,7 +353,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
+ }
+
+ acpi_os_printf("\"");
+- for (i = 0; string[i] && (i < max_length); i++) {
++ for (i = 0; (i < max_length) && string[i]; i++) {
+
+ /* Escape sequences */
+
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index cf925c4f36b7..ed9fca0250fa 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -52,6 +52,12 @@ struct proc_dir_entry *acpi_root_dir;
+ EXPORT_SYMBOL(acpi_root_dir);
+
+ #ifdef CONFIG_X86
++#ifdef CONFIG_ACPI_CUSTOM_DSDT
++static inline int set_copy_dsdt(const struct dmi_system_id *id)
++{
++ return 0;
++}
++#else
+ static int set_copy_dsdt(const struct dmi_system_id *id)
+ {
+ printk(KERN_NOTICE "%s detected - "
+@@ -59,6 +65,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
+ acpi_gbl_copy_dsdt_locally = 1;
+ return 0;
+ }
++#endif
+
+ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ /*
+diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
+index bba526148583..07c8c5a5ee95 100644
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -30,6 +30,7 @@
+ #include <linux/types.h>
+ #include <linux/hardirq.h>
+ #include <linux/acpi.h>
++#include <linux/dynamic_debug.h>
+
+ #include "internal.h"
+
+@@ -457,6 +458,24 @@ acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+ EXPORT_SYMBOL(acpi_evaluate_ost);
+
+ /**
++ * acpi_handle_path: Return the object path of handle
++ *
++ * Caller must free the returned buffer
++ */
++static char *acpi_handle_path(acpi_handle handle)
++{
++ struct acpi_buffer buffer = {
++ .length = ACPI_ALLOCATE_BUFFER,
++ .pointer = NULL
++ };
++
++ if (in_interrupt() ||
++ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
++ return NULL;
++ return buffer.pointer;
++}
++
++/**
+ * acpi_handle_printk: Print message with ACPI prefix and object path
+ *
+ * This function is called through acpi_handle_<level> macros and prints
+@@ -469,29 +488,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
+ {
+ struct va_format vaf;
+ va_list args;
+- struct acpi_buffer buffer = {
+- .length = ACPI_ALLOCATE_BUFFER,
+- .pointer = NULL
+- };
+ const char *path;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+- if (in_interrupt() ||
+- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
+- path = "<n/a>";
+- else
+- path = buffer.pointer;
+-
+- printk("%sACPI: %s: %pV", level, path, &vaf);
++ path = acpi_handle_path(handle);
++ printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf);
+
+ va_end(args);
+- kfree(buffer.pointer);
++ kfree(path);
+ }
+ EXPORT_SYMBOL(acpi_handle_printk);
+
++#if defined(CONFIG_DYNAMIC_DEBUG)
++/**
++ * __acpi_handle_debug: pr_debug with ACPI prefix and object path
++ *
++ * This function is called through acpi_handle_debug macro and debug
++ * prints a message with ACPI prefix and object path. This function
++ * acquires the global namespace mutex to obtain an object path. In
++ * interrupt context, it shows the object path as <n/a>.
++ */
++void
++__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle,
++ const char *fmt, ...)
++{
++ struct va_format vaf;
++ va_list args;
++ const char *path;
++
++ va_start(args, fmt);
++ vaf.fmt = fmt;
++ vaf.va = &args;
++
++ path = acpi_handle_path(handle);
++ __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf);
++
++ va_end(args);
++ kfree(path);
++}
++EXPORT_SYMBOL(__acpi_handle_debug);
++#endif
++
+ /**
+ * acpi_has_method: Check whether @handle has a method named @name
+ * @handle: ACPI device handle
+diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
+index 25538675d59e..c539d70b97ab 100644
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -734,11 +734,9 @@ int of_init_opp_table(struct device *dev)
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+- if (dev_pm_opp_add(dev, freq, volt)) {
++ if (dev_pm_opp_add(dev, freq, volt))
+ dev_warn(dev, "%s: Failed to add OPP %ld\n",
+ __func__, freq);
+- continue;
+- }
+ nr -= 2;
+ }
+
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index cb9b1f8326c3..31e5bc1351b4 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -159,6 +159,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+ unsigned int num;
+ const bool last = (req->cmd_flags & REQ_END) != 0;
+ int err;
++ bool notify = false;
+
+ BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
+
+@@ -211,10 +212,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+
+- if (last)
+- virtqueue_kick(vblk->vq);
+-
++ if (last && virtqueue_kick_prepare(vblk->vq))
++ notify = true;
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
++
++ if (notify)
++ virtqueue_notify(vblk->vq);
+ return BLK_MQ_RQ_QUEUE_OK;
+ }
+
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 9849b5233bf4..48eccb350180 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -572,10 +572,10 @@ static void zram_bio_discard(struct zram *zram, u32 index,
+ * skipping this logical block is appropriate here.
+ */
+ if (offset) {
+- if (n < offset)
++ if (n <= (PAGE_SIZE - offset))
+ return;
+
+- n -= offset;
++ n -= (PAGE_SIZE - offset);
+ index++;
+ }
+
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index f1fbf4f1e5be..e00f8f5b5c8e 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
+
+ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ {
+- struct tty_struct *tty = hu->tty;
+- struct hci_dev *hdev = hu->hdev;
+- struct sk_buff *skb;
+-
+ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ return 0;
+@@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
+
+ BT_DBG("");
+
++ schedule_work(&hu->write_work);
++
++ return 0;
++}
++
++static void hci_uart_write_work(struct work_struct *work)
++{
++ struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
++ struct tty_struct *tty = hu->tty;
++ struct hci_dev *hdev = hu->hdev;
++ struct sk_buff *skb;
++
++ /* REVISIT: should we cope with bad skbs or ->write() returning
++ * and error value ?
++ */
++
+ restart:
+ clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+
+@@ -153,7 +165,6 @@ restart:
+ goto restart;
+
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+- return 0;
+ }
+
+ static void hci_uart_init_work(struct work_struct *work)
+@@ -282,6 +293,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
+ tty->receive_room = 65536;
+
+ INIT_WORK(&hu->init_ready, hci_uart_init_work);
++ INIT_WORK(&hu->write_work, hci_uart_write_work);
+
+ spin_lock_init(&hu->rx_lock);
+
+@@ -319,6 +331,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ if (hdev)
+ hci_uart_close(hdev);
+
++ cancel_work_sync(&hu->write_work);
++
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (hdev) {
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index fffa61ff5cb1..12df101ca942 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -68,6 +68,7 @@ struct hci_uart {
+ unsigned long hdev_flags;
+
+ struct work_struct init_ready;
++ struct work_struct write_work;
+
+ struct hci_uart_proto *proto;
+ void *priv;
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index 974321a2508d..14790304b84b 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -345,7 +345,6 @@ out:
+ free_irq(apbs[i].irq, &dummy);
+ iounmap(apbs[i].RamIO);
+ }
+- pci_disable_device(dev);
+ return ret;
+ }
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 102c50d38902..2b6e4cd8de8e 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -979,7 +979,6 @@ static void push_to_pool(struct work_struct *work)
+ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ int reserved)
+ {
+- int have_bytes;
+ int entropy_count, orig;
+ size_t ibytes;
+
+@@ -988,17 +987,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ /* Can we pull enough? */
+ retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+ ibytes = nbytes;
+ /* If limited, never pull more than available */
+- if (r->limit)
+- ibytes = min_t(size_t, ibytes, have_bytes - reserved);
++ if (r->limit) {
++ int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
++
++ if ((have_bytes -= reserved) < 0)
++ have_bytes = 0;
++ ibytes = min_t(size_t, ibytes, have_bytes);
++ }
+ if (ibytes < min)
+ ibytes = 0;
+- if (have_bytes >= ibytes + reserved)
+- entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+- else
+- entropy_count = reserved << (ENTROPY_SHIFT + 3);
++ if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
++ entropy_count = 0;
+
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
+index 3846941801b8..5c948c9625d2 100644
+--- a/drivers/extcon/extcon-max14577.c
++++ b/drivers/extcon/extcon-max14577.c
+@@ -650,7 +650,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
+ unsigned int virq = 0;
+
+ virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
+- if (!virq)
++ if (virq <= 0)
+ return -EINVAL;
+ muic_irq->virq = virq;
+
+@@ -710,13 +710,8 @@ static int max14577_muic_probe(struct platform_device *pdev)
+ * driver should notify cable state to upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
+- ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
++ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ delay_jiffies);
+- if (ret < 0) {
+- dev_err(&pdev->dev,
+- "failed to schedule delayed work for cable detect\n");
+- goto err_extcon;
+- }
+
+ return ret;
+
+diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
+index da268fbc901b..4657a91acf56 100644
+--- a/drivers/extcon/extcon-max77693.c
++++ b/drivers/extcon/extcon-max77693.c
+@@ -1193,7 +1193,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
+
+
+ /* Initialize MUIC register by using platform data or default data */
+- if (pdata->muic_data) {
++ if (pdata && pdata->muic_data) {
+ init_data = pdata->muic_data->init_data;
+ num_init_data = pdata->muic_data->num_init_data;
+ } else {
+@@ -1226,7 +1226,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
+ = init_data[i].data;
+ }
+
+- if (pdata->muic_data) {
++ if (pdata && pdata->muic_data) {
+ struct max77693_muic_platform_data *muic_pdata
+ = pdata->muic_data;
+
+diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
+index 6a00464658c5..5e1b88cecb76 100644
+--- a/drivers/extcon/extcon-max8997.c
++++ b/drivers/extcon/extcon-max8997.c
+@@ -715,7 +715,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
+ goto err_irq;
+ }
+
+- if (pdata->muic_pdata) {
++ if (pdata && pdata->muic_pdata) {
+ struct max8997_muic_platform_data *muic_pdata
+ = pdata->muic_pdata;
+
+diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
+index 4b9dc836dcf9..e992abc5ef26 100644
+--- a/drivers/firmware/efi/efi-pstore.c
++++ b/drivers/firmware/efi/efi-pstore.c
+@@ -40,7 +40,7 @@ struct pstore_read_data {
+ static inline u64 generic_id(unsigned long timestamp,
+ unsigned int part, int count)
+ {
+- return (timestamp * 100 + part) * 1000 + count;
++ return ((u64) timestamp * 100 + part) * 1000 + count;
+ }
+
+ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 2bdae61c0ac0..12c663e86ca1 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -984,6 +984,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
+ if (enable) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = true;
++ /* disable this for now */
++#if 0
+ if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+ else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+@@ -993,6 +995,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+ else
++#endif
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 1b65ae2433cd..a4ad270e8261 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -812,7 +812,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
+ (rdev->pm.dpm.hd != hd)) {
+ rdev->pm.dpm.sd = sd;
+ rdev->pm.dpm.hd = hd;
+- streams_changed = true;
++ /* disable this for now */
++ /*streams_changed = true;*/
+ }
+ }
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index da52279de939..a5c7927c9bd2 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -842,7 +842,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ * ->numbered being checked, which may not always be the case when
+ * drivers go to access report values.
+ */
+- report = hid->report_enum[type].report_id_hash[id];
++ if (id == 0) {
++ /*
++ * Validating on id 0 means we should examine the first
++ * report in the list.
++ */
++ report = list_entry(
++ hid->report_enum[type].report_list.next,
++ struct hid_report, list);
++ } else {
++ report = hid->report_enum[type].report_id_hash[id];
++ }
+ if (!report) {
+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
+ return NULL;
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 2e2d903db838..8d44a4060634 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -41,11 +41,11 @@
+ #include "iscsi_iser.h"
+
+ /* Register user buffer memory and initialize passive rdma
+- * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * dto descriptor. Data size is stored in
++ * task->data[ISER_DIR_IN].data_len, Protection size
++ * os stored in task->prot[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
+- unsigned int edtl)
++static int iser_prepare_read_cmd(struct iscsi_task *task)
+
+ {
+ struct iscsi_iser_task *iser_task = task->dd_data;
+@@ -73,14 +73,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+ return err;
+ }
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
+- iser_err("Total data length: %ld, less than EDTL: "
+- "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->ib_conn);
+- return -EINVAL;
+- }
+-
+ err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+@@ -100,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+ }
+
+ /* Register user buffer memory and initialize passive rdma
+- * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * dto descriptor. Data size is stored in
++ * task->data[ISER_DIR_OUT].data_len, Protection size
++ * is stored at task->prot[ISER_DIR_OUT].data_len
+ */
+ static int
+ iser_prepare_write_cmd(struct iscsi_task *task,
+@@ -135,14 +128,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+ return err;
+ }
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
+- iser_err("Total data length: %ld, less than EDTL: %d, "
+- "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
+- return -EINVAL;
+- }
+-
+ err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+@@ -417,11 +402,12 @@ int iser_send_command(struct iscsi_conn *conn,
+ if (scsi_prot_sg_count(sc)) {
+ prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->size = scsi_prot_sg_count(sc);
+- prot_buf->data_len = sc->prot_sdb->length;
++ prot_buf->data_len = data_buf->data_len >>
++ ilog2(sc->device->sector_size) * 8;
+ }
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(task);
+ if (err)
+ goto send_command_error;
+ }
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b9d647468b99..d4c7928a0f36 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+
+ pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+ if (pi_support && !device->pi_capable) {
+- pr_err("Protection information requested but not supported\n");
+- ret = -EINVAL;
++ pr_err("Protection information requested but not supported, "
++ "rejecting connect request\n");
++ ret = rdma_reject(cma_id, NULL, 0);
+ goto out_mr;
+ }
+
+@@ -787,14 +788,12 @@ isert_disconnect_work(struct work_struct *work)
+ isert_put_conn(isert_conn);
+ return;
+ }
+- if (!isert_conn->logout_posted) {
+- pr_debug("Calling rdma_disconnect for !logout_posted from"
+- " isert_disconnect_work\n");
++
++ if (isert_conn->disconnect) {
++ /* Send DREQ/DREP towards our initiator */
+ rdma_disconnect(isert_conn->conn_cm_id);
+- mutex_unlock(&isert_conn->conn_mutex);
+- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+- goto wake_up;
+ }
++
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ wake_up:
+@@ -803,10 +802,11 @@ wake_up:
+ }
+
+ static void
+-isert_disconnected_handler(struct rdma_cm_id *cma_id)
++isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+ {
+ struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+
++ isert_conn->disconnect = disconnect;
+ INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+ schedule_work(&isert_conn->conn_logout_work);
+ }
+@@ -815,29 +815,28 @@ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+ int ret = 0;
++ bool disconnect = false;
+
+ pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+ event->event, event->status, cma_id->context, cma_id);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+- pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
+ ret = isert_connect_request(cma_id, event);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+- pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
+ isert_connected_handler(cma_id);
+ break;
+- case RDMA_CM_EVENT_DISCONNECTED:
+- pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
+- isert_disconnected_handler(cma_id);
+- break;
+- case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
++ case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
++ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
++ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
++ disconnect = true;
++ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
++ isert_disconnected_handler(cma_id, disconnect);
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ default:
+- pr_err("Unknown RDMA CMA event: %d\n", event->event);
++ pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ break;
+ }
+
+@@ -1054,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ }
+ if (!login->login_failed) {
+ if (login->login_complete) {
+- if (isert_conn->conn_device->use_fastreg) {
++ if (!conn->sess->sess_ops->SessionType &&
++ isert_conn->conn_device->use_fastreg) {
++ /* Normal Session and fastreg is used */
+ u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+ ret = isert_conn_create_fastreg_pool(isert_conn,
+@@ -1824,11 +1825,8 @@ isert_do_control_comp(struct work_struct *work)
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+- /*
+- * Call atomic_dec(&isert_conn->post_send_buf_count)
+- * from isert_wait_conn()
+- */
+- isert_conn->logout_posted = true;
++
++ atomic_dec(&isert_conn->post_send_buf_count);
+ iscsit_logout_post_handler(cmd, cmd->conn);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+@@ -2034,6 +2032,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
++ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
++
+ complete(&isert_conn->conn_wait_comp_err);
+ }
+
+@@ -2320,7 +2320,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+ int rc;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+- rc = iscsit_build_text_rsp(cmd, conn, hdr);
++ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
+ if (rc < 0)
+ return rc;
+
+@@ -3156,9 +3156,14 @@ accept_wait:
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+- pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
++ pr_debug("np_thread_state %d for isert_accept_np\n",
++ np->np_thread_state);
++ /**
++ * No point in stalling here when np_thread
++ * is in state RESET/SHUTDOWN/EXIT - bail
++ **/
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+@@ -3208,15 +3213,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ struct isert_conn *isert_conn = conn->context;
+
+ pr_debug("isert_wait_conn: Starting \n");
+- /*
+- * Decrement post_send_buf_count for special case when called
+- * from isert_do_control_comp() -> iscsit_logout_post_handler()
+- */
+- mutex_lock(&isert_conn->conn_mutex);
+- if (isert_conn->logout_posted)
+- atomic_dec(&isert_conn->post_send_buf_count);
+
+- if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
++ mutex_lock(&isert_conn->conn_mutex);
++ if (isert_conn->conn_cm_id) {
+ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+ rdma_disconnect(isert_conn->conn_cm_id);
+ }
+@@ -3293,6 +3292,7 @@ destroy_rx_wq:
+
+ static void __exit isert_exit(void)
+ {
++ flush_scheduled_work();
+ destroy_workqueue(isert_comp_wq);
+ destroy_workqueue(isert_rx_wq);
+ iscsit_unregister_transport(&iser_target_transport);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index da6612e68000..04f51f7bf614 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -116,7 +116,6 @@ struct isert_device;
+
+ struct isert_conn {
+ enum iser_conn_state state;
+- bool logout_posted;
+ int post_recv_buf_count
1543 |
+ atomic_t post_send_buf_count; |
1544 |
+ u32 responder_resources; |
1545 |
+@@ -151,6 +150,7 @@ struct isert_conn { |
1546 |
+ #define ISERT_COMP_BATCH_COUNT 8 |
1547 |
+ int conn_comp_batch; |
1548 |
+ struct llist_head conn_comp_llist; |
1549 |
++ bool disconnect; |
1550 |
+ }; |
1551 |
+ |
1552 |
+ #define ISERT_MAX_CQ 64 |
1553 |
+diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c |
1554 |
+index e1863dbf4edc..7a9b98bc208b 100644 |
1555 |
+--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c |
1556 |
++++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c |
1557 |
+@@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream) |
1558 |
+ |
1559 |
+ /* Instruct the CX2341[56] to start sending packets */ |
1560 |
+ snd_ivtv_lock(itvsc); |
1561 |
++ |
1562 |
++ if (ivtv_init_on_first_open(itv)) { |
1563 |
++ snd_ivtv_unlock(itvsc); |
1564 |
++ return -ENXIO; |
1565 |
++ } |
1566 |
++ |
1567 |
+ s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM]; |
1568 |
+ |
1569 |
+ v4l2_fh_init(&item.fh, s->vdev); |
1570 |
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c |
1571 |
+index eb472b5b26a0..40396e8b16a8 100644 |
1572 |
+--- a/drivers/media/pci/saa7134/saa7134-video.c |
1573 |
++++ b/drivers/media/pci/saa7134/saa7134-video.c |
1574 |
+@@ -1243,6 +1243,7 @@ static int video_release(struct file *file) |
1575 |
+ videobuf_streamoff(&dev->cap); |
1576 |
+ res_free(dev, fh, RESOURCE_VIDEO); |
1577 |
+ videobuf_mmap_free(&dev->cap); |
1578 |
++ INIT_LIST_HEAD(&dev->cap.stream); |
1579 |
+ } |
1580 |
+ if (dev->cap.read_buf) { |
1581 |
+ buffer_release(&dev->cap, dev->cap.read_buf); |
1582 |
+@@ -1254,6 +1255,7 @@ static int video_release(struct file *file) |
1583 |
+ videobuf_stop(&dev->vbi); |
1584 |
+ res_free(dev, fh, RESOURCE_VBI); |
1585 |
+ videobuf_mmap_free(&dev->vbi); |
1586 |
++ INIT_LIST_HEAD(&dev->vbi.stream); |
1587 |
+ } |
1588 |
+ |
1589 |
+ /* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/ |
1590 |
+@@ -1987,17 +1989,12 @@ int saa7134_streamoff(struct file *file, void *priv, |
1591 |
+ enum v4l2_buf_type type) |
1592 |
+ { |
1593 |
+ struct saa7134_dev *dev = video_drvdata(file); |
1594 |
+- int err; |
1595 |
+ int res = saa7134_resource(file); |
1596 |
+ |
1597 |
+ if (res != RESOURCE_EMPRESS) |
1598 |
+ pm_qos_remove_request(&dev->qos_request); |
1599 |
+ |
1600 |
+- err = videobuf_streamoff(saa7134_queue(file)); |
1601 |
+- if (err < 0) |
1602 |
+- return err; |
1603 |
+- res_free(dev, priv, res); |
1604 |
+- return 0; |
1605 |
++ return videobuf_streamoff(saa7134_queue(file)); |
1606 |
+ } |
1607 |
+ EXPORT_SYMBOL_GPL(saa7134_streamoff); |
1608 |
+ |
1609 |
+diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c |
1610 |
+index 128b73b6cce2..5476dce3ad29 100644 |
1611 |
+--- a/drivers/media/platform/exynos4-is/fimc-is.c |
1612 |
++++ b/drivers/media/platform/exynos4-is/fimc-is.c |
1613 |
+@@ -367,6 +367,9 @@ static void fimc_is_free_cpu_memory(struct fimc_is *is) |
1614 |
+ { |
1615 |
+ struct device *dev = &is->pdev->dev; |
1616 |
+ |
1617 |
++ if (is->memory.vaddr == NULL) |
1618 |
++ return; |
1619 |
++ |
1620 |
+ dma_free_coherent(dev, is->memory.size, is->memory.vaddr, |
1621 |
+ is->memory.paddr); |
1622 |
+ } |
1623 |
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c |
1624 |
+index e62211a80f0e..6e2d6042ade6 100644 |
1625 |
+--- a/drivers/media/platform/exynos4-is/media-dev.c |
1626 |
++++ b/drivers/media/platform/exynos4-is/media-dev.c |
1627 |
+@@ -1520,7 +1520,7 @@ err: |
1628 |
+ } |
1629 |
+ #else |
1630 |
+ #define fimc_md_register_clk_provider(fmd) (0) |
1631 |
+-#define fimc_md_unregister_clk_provider(fmd) (0) |
1632 |
++#define fimc_md_unregister_clk_provider(fmd) |
1633 |
+ #endif |
1634 |
+ |
1635 |
+ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier, |
1636 |
+diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h |
1637 |
+index ee1e2519f728..58c49456b13f 100644 |
1638 |
+--- a/drivers/media/platform/exynos4-is/media-dev.h |
1639 |
++++ b/drivers/media/platform/exynos4-is/media-dev.h |
1640 |
+@@ -94,7 +94,9 @@ struct fimc_sensor_info { |
1641 |
+ }; |
1642 |
+ |
1643 |
+ struct cam_clk { |
1644 |
++#ifdef CONFIG_COMMON_CLK |
1645 |
+ struct clk_hw hw; |
1646 |
++#endif |
1647 |
+ struct fimc_md *fmd; |
1648 |
+ }; |
1649 |
+ #define to_cam_clk(_hw) container_of(_hw, struct cam_clk, hw) |
1650 |
+@@ -142,7 +144,9 @@ struct fimc_md { |
1651 |
+ |
1652 |
+ struct cam_clk_provider { |
1653 |
+ struct clk *clks[FIMC_MAX_CAMCLKS]; |
1654 |
++#ifdef CONFIG_COMMON_CLK |
1655 |
+ struct clk_onecell_data clk_data; |
1656 |
++#endif |
1657 |
+ struct device_node *of_node; |
1658 |
+ struct cam_clk camclk[FIMC_MAX_CAMCLKS]; |
1659 |
+ int num_clocks; |
1660 |
+diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c |
1661 |
+index 34a26e0cfe77..03504dcf3c52 100644 |
1662 |
+--- a/drivers/media/usb/stk1160/stk1160-core.c |
1663 |
++++ b/drivers/media/usb/stk1160/stk1160-core.c |
1664 |
+@@ -67,17 +67,25 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value) |
1665 |
+ { |
1666 |
+ int ret; |
1667 |
+ int pipe = usb_rcvctrlpipe(dev->udev, 0); |
1668 |
++ u8 *buf; |
1669 |
+ |
1670 |
+ *value = 0; |
1671 |
++ |
1672 |
++ buf = kmalloc(sizeof(u8), GFP_KERNEL); |
1673 |
++ if (!buf) |
1674 |
++ return -ENOMEM; |
1675 |
+ ret = usb_control_msg(dev->udev, pipe, 0x00, |
1676 |
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
1677 |
+- 0x00, reg, value, sizeof(u8), HZ); |
1678 |
++ 0x00, reg, buf, sizeof(u8), HZ); |
1679 |
+ if (ret < 0) { |
1680 |
+ stk1160_err("read failed on reg 0x%x (%d)\n", |
1681 |
+ reg, ret); |
1682 |
++ kfree(buf); |
1683 |
+ return ret; |
1684 |
+ } |
1685 |
+ |
1686 |
++ *value = *buf; |
1687 |
++ kfree(buf); |
1688 |
+ return 0; |
1689 |
+ } |
1690 |
+ |
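
The stk1160_read_reg() hunk above replaces a pointer into caller memory with a freshly kmalloc()ed buffer for usb_control_msg(), and the urb_buf[] array embedded in struct stk1160 is dropped below: buffers handed to USB control transfers must be dedicated, DMA-able heap allocations, freed on every exit path, with the result copied out only on success. A minimal userspace sketch of that ownership discipline, using illustrative stand-ins (do_transfer, read_reg) rather than kernel APIs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for the bus transfer (usb_control_msg() in the driver) */
static int do_transfer(uint16_t reg, uint8_t *buf)
{
	buf[0] = (uint8_t)(reg & 0xff);	/* pretend the device answered */
	return 0;
}

static int read_reg(uint16_t reg, uint8_t *value)
{
	uint8_t *buf;
	int ret;

	*value = 0;
	buf = malloc(sizeof(*buf));	/* kmalloc(sizeof(u8), GFP_KERNEL) */
	if (!buf)
		return -1;		/* -ENOMEM */

	ret = do_transfer(reg, buf);
	if (ret < 0) {
		free(buf);		/* the error path must release it too */
		return ret;
	}

	*value = *buf;			/* copy out, then release */
	free(buf);
	return 0;
}

int main(void)
{
	uint8_t v;

	if (read_reg(0x12, &v) == 0)
		printf("reg = 0x%02x\n", v);	/* prints: reg = 0x12 */
	return 0;
}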
1691 |
+diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h |
1692 |
+index 05b05b160e1e..abdea484c998 100644 |
1693 |
+--- a/drivers/media/usb/stk1160/stk1160.h |
1694 |
++++ b/drivers/media/usb/stk1160/stk1160.h |
1695 |
+@@ -143,7 +143,6 @@ struct stk1160 { |
1696 |
+ int num_alt; |
1697 |
+ |
1698 |
+ struct stk1160_isoc_ctl isoc_ctl; |
1699 |
+- char urb_buf[255]; /* urb control msg buffer */ |
1700 |
+ |
1701 |
+ /* frame properties */ |
1702 |
+ int width; /* current frame width */ |
1703 |
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c |
1704 |
+index 8d52baf5952b..8496811fb7fa 100644 |
1705 |
+--- a/drivers/media/usb/uvc/uvc_video.c |
1706 |
++++ b/drivers/media/usb/uvc/uvc_video.c |
1707 |
+@@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream, |
1708 |
+ * Clocks and timestamps |
1709 |
+ */ |
1710 |
+ |
1711 |
++static inline void uvc_video_get_ts(struct timespec *ts) |
1712 |
++{ |
1713 |
++ if (uvc_clock_param == CLOCK_MONOTONIC) |
1714 |
++ ktime_get_ts(ts); |
1715 |
++ else |
1716 |
++ ktime_get_real_ts(ts); |
1717 |
++} |
1718 |
++ |
1719 |
+ static void |
1720 |
+ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, |
1721 |
+ const __u8 *data, int len) |
1722 |
+@@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, |
1723 |
+ stream->clock.last_sof = dev_sof; |
1724 |
+ |
1725 |
+ host_sof = usb_get_current_frame_number(stream->dev->udev); |
1726 |
+- ktime_get_ts(&ts); |
1727 |
++ uvc_video_get_ts(&ts); |
1728 |
+ |
1729 |
+ /* The UVC specification allows device implementations that can't obtain |
1730 |
+ * the USB frame number to keep their own frame counters as long as they |
1731 |
+@@ -1011,10 +1019,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream, |
1732 |
+ return -ENODATA; |
1733 |
+ } |
1734 |
+ |
1735 |
+- if (uvc_clock_param == CLOCK_MONOTONIC) |
1736 |
+- ktime_get_ts(&ts); |
1737 |
+- else |
1738 |
+- ktime_get_real_ts(&ts); |
1739 |
++ uvc_video_get_ts(&ts); |
1740 |
+ |
1741 |
+ buf->buf.v4l2_buf.sequence = stream->sequence; |
1742 |
+ buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec; |
1743 |
+diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h |
1744 |
+index 2b859249303b..b0e61bf261a7 100644 |
1745 |
+--- a/drivers/pci/hotplug/acpiphp.h |
1746 |
++++ b/drivers/pci/hotplug/acpiphp.h |
1747 |
+@@ -142,6 +142,16 @@ static inline acpi_handle func_to_handle(struct acpiphp_func *func) |
1748 |
+ return func_to_acpi_device(func)->handle; |
1749 |
+ } |
1750 |
+ |
1751 |
++struct acpiphp_root_context { |
1752 |
++ struct acpi_hotplug_context hp; |
1753 |
++ struct acpiphp_bridge *root_bridge; |
1754 |
++}; |
1755 |
++ |
1756 |
++static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_hotplug_context *hp) |
1757 |
++{ |
1758 |
++ return container_of(hp, struct acpiphp_root_context, hp); |
1759 |
++} |
1760 |
++ |
1761 |
+ /* |
1762 |
+ * struct acpiphp_attention_info - device specific attention registration |
1763 |
+ * |
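
The to_acpiphp_root_context() helper added above is the kernel's container_of() idiom: a generic struct (acpi_hotplug_context) is embedded in a wrapper, only the embedded member's address gets stored elsewhere (adev->hp), and the wrapper is recovered by subtracting the member's offset. A self-contained sketch with illustrative struct names; the macro is the textbook definition:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hotplug_context { int event; };		/* illustrative base type */

struct root_context {
	struct hotplug_context hp;		/* embedded member */
	const char *root_bridge;		/* extra per-root state */
};

int main(void)
{
	struct root_context rc = { .root_bridge = "bridge0" };
	struct hotplug_context *hp = &rc.hp;	/* only the base is passed around */

	/* recover the wrapper from the member, as to_acpiphp_root_context() does */
	struct root_context *back = container_of(hp, struct root_context, hp);

	printf("%s\n", back->root_bridge);	/* prints: bridge0 */
	return 0;
}

Because the offset arithmetic is resolved at compile time, the recovery costs nothing at runtime, which is why this pattern recurs throughout kernel list and device code.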
1764 |
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c |
1765 |
+index bccc27ee1030..af53580cf4f5 100644 |
1766 |
+--- a/drivers/pci/hotplug/acpiphp_glue.c |
1767 |
++++ b/drivers/pci/hotplug/acpiphp_glue.c |
1768 |
+@@ -374,17 +374,13 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data, |
1769 |
+ |
1770 |
+ static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev) |
1771 |
+ { |
1772 |
+- struct acpiphp_context *context; |
1773 |
+ struct acpiphp_bridge *bridge = NULL; |
1774 |
+ |
1775 |
+ acpi_lock_hp_context(); |
1776 |
+- context = acpiphp_get_context(adev); |
1777 |
+- if (context) { |
1778 |
+- bridge = context->bridge; |
1779 |
++ if (adev->hp) { |
1780 |
++ bridge = to_acpiphp_root_context(adev->hp)->root_bridge; |
1781 |
+ if (bridge) |
1782 |
+ get_bridge(bridge); |
1783 |
+- |
1784 |
+- acpiphp_put_context(context); |
1785 |
+ } |
1786 |
+ acpi_unlock_hp_context(); |
1787 |
+ return bridge; |
1788 |
+@@ -883,7 +879,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus) |
1789 |
+ */ |
1790 |
+ get_device(&bus->dev); |
1791 |
+ |
1792 |
+- if (!pci_is_root_bus(bridge->pci_bus)) { |
1793 |
++ acpi_lock_hp_context(); |
1794 |
++ if (pci_is_root_bus(bridge->pci_bus)) { |
1795 |
++ struct acpiphp_root_context *root_context; |
1796 |
++ |
1797 |
++ root_context = kzalloc(sizeof(*root_context), GFP_KERNEL); |
1798 |
++ if (!root_context) |
1799 |
++ goto err; |
1800 |
++ |
1801 |
++ root_context->root_bridge = bridge; |
1802 |
++ acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL); |
1803 |
++ } else { |
1804 |
+ struct acpiphp_context *context; |
1805 |
+ |
1806 |
+ /* |
1807 |
+@@ -892,21 +898,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus) |
1808 |
+ * parent is going to be handled by pciehp, in which case this |
1809 |
+ * bridge is not interesting to us either. |
1810 |
+ */ |
1811 |
+- acpi_lock_hp_context(); |
1812 |
+ context = acpiphp_get_context(adev); |
1813 |
+- if (!context) { |
1814 |
+- acpi_unlock_hp_context(); |
1815 |
+- put_device(&bus->dev); |
1816 |
+- pci_dev_put(bridge->pci_dev); |
1817 |
+- kfree(bridge); |
1818 |
+- return; |
1819 |
+- } |
1820 |
++ if (!context) |
1821 |
++ goto err; |
1822 |
++ |
1823 |
+ bridge->context = context; |
1824 |
+ context->bridge = bridge; |
1825 |
+ /* Get a reference to the parent bridge. */ |
1826 |
+ get_bridge(context->func.parent); |
1827 |
+- acpi_unlock_hp_context(); |
1828 |
+ } |
1829 |
++ acpi_unlock_hp_context(); |
1830 |
+ |
1831 |
+ /* Must be added to the list prior to calling acpiphp_add_context(). */ |
1832 |
+ mutex_lock(&bridge_mutex); |
1833 |
+@@ -921,6 +922,30 @@ void acpiphp_enumerate_slots(struct pci_bus *bus) |
1834 |
+ cleanup_bridge(bridge); |
1835 |
+ put_bridge(bridge); |
1836 |
+ } |
1837 |
++ return; |
1838 |
++ |
1839 |
++ err: |
1840 |
++ acpi_unlock_hp_context(); |
1841 |
++ put_device(&bus->dev); |
1842 |
++ pci_dev_put(bridge->pci_dev); |
1843 |
++ kfree(bridge); |
1844 |
++} |
1845 |
++ |
1846 |
++void acpiphp_drop_bridge(struct acpiphp_bridge *bridge) |
1847 |
++{ |
1848 |
++ if (pci_is_root_bus(bridge->pci_bus)) { |
1849 |
++ struct acpiphp_root_context *root_context; |
1850 |
++ struct acpi_device *adev; |
1851 |
++ |
1852 |
++ acpi_lock_hp_context(); |
1853 |
++ adev = ACPI_COMPANION(bridge->pci_bus->bridge); |
1854 |
++ root_context = to_acpiphp_root_context(adev->hp); |
1855 |
++ adev->hp = NULL; |
1856 |
++ acpi_unlock_hp_context(); |
1857 |
++ kfree(root_context); |
1858 |
++ } |
1859 |
++ cleanup_bridge(bridge); |
1860 |
++ put_bridge(bridge); |
1861 |
+ } |
1862 |
+ |
1863 |
+ /** |
1864 |
+@@ -938,8 +963,7 @@ void acpiphp_remove_slots(struct pci_bus *bus) |
1865 |
+ list_for_each_entry(bridge, &bridge_list, list) |
1866 |
+ if (bridge->pci_bus == bus) { |
1867 |
+ mutex_unlock(&bridge_mutex); |
1868 |
+- cleanup_bridge(bridge); |
1869 |
+- put_bridge(bridge); |
1870 |
++ acpiphp_drop_bridge(bridge); |
1871 |
+ return; |
1872 |
+ } |
1873 |
+ |
1874 |
+diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c |
1875 |
+index 7f139326a642..ff026689358c 100644 |
1876 |
+--- a/drivers/phy/phy-exynos-mipi-video.c |
1877 |
++++ b/drivers/phy/phy-exynos-mipi-video.c |
1878 |
+@@ -101,7 +101,7 @@ static struct phy *exynos_mipi_video_phy_xlate(struct device *dev, |
1879 |
+ { |
1880 |
+ struct exynos_mipi_video_phy *state = dev_get_drvdata(dev); |
1881 |
+ |
1882 |
+- if (WARN_ON(args->args[0] > EXYNOS_MIPI_PHYS_NUM)) |
1883 |
++ if (WARN_ON(args->args[0] >= EXYNOS_MIPI_PHYS_NUM)) |
1884 |
+ return ERR_PTR(-ENODEV); |
1885 |
+ |
1886 |
+ return state->phys[args->args[0]].phy; |
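
This one-character change (and the matching one in radio-bcm2048.c further down) fixes a classic off-by-one: for an array of N entries the valid indices are 0..N-1, so an index equal to N must be rejected, which takes >= rather than >. A small demonstration of the difference:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int phys[4] = { 10, 11, 12, 13 };

static int lookup(unsigned int idx)
{
	/* '>' would accept idx == 4 and read one element past the end;
	 * '>=' rejects it, as the phy and bcm2048 fixes do */
	if (idx >= ARRAY_SIZE(phys))
		return -1;
	return phys[idx];
}

int main(void)
{
	printf("%d %d\n", lookup(3), lookup(4));	/* prints: 13 -1 */
	return 0;
}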
1887 |
+diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c |
1888 |
+index f19a30f0fb42..fdd68dd69049 100644 |
1889 |
+--- a/drivers/regulator/s2mpa01.c |
1890 |
++++ b/drivers/regulator/s2mpa01.c |
1891 |
+@@ -116,7 +116,6 @@ static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) |
1892 |
+ ramp_delay = s2mpa01->ramp_delay16; |
1893 |
+ |
1894 |
+ ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT; |
1895 |
+- ramp_reg = S2MPA01_REG_RAMP1; |
1896 |
+ break; |
1897 |
+ case S2MPA01_BUCK2: |
1898 |
+ enable_shift = S2MPA01_BUCK2_RAMP_EN_SHIFT; |
1899 |
+@@ -192,11 +191,15 @@ static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) |
1900 |
+ if (!ramp_enable) |
1901 |
+ goto ramp_disable; |
1902 |
+ |
1903 |
+- ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1, |
1904 |
+- 1 << enable_shift, 1 << enable_shift); |
1905 |
+- if (ret) { |
1906 |
+- dev_err(&rdev->dev, "failed to enable ramp rate\n"); |
1907 |
+- return ret; |
1908 |
++ /* Ramp delay can be enabled/disabled only for buck[1234] */ |
1909 |
++ if (rdev_get_id(rdev) >= S2MPA01_BUCK1 && |
1910 |
++ rdev_get_id(rdev) <= S2MPA01_BUCK4) { |
1911 |
++ ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1, |
1912 |
++ 1 << enable_shift, 1 << enable_shift); |
1913 |
++ if (ret) { |
1914 |
++ dev_err(&rdev->dev, "failed to enable ramp rate\n"); |
1915 |
++ return ret; |
1916 |
++ } |
1917 |
+ } |
1918 |
+ |
1919 |
+ ramp_val = get_ramp_delay(ramp_delay); |
1920 |
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c |
1921 |
+index e713c162fbd4..aaca37e1424f 100644 |
1922 |
+--- a/drivers/regulator/s2mps11.c |
1923 |
++++ b/drivers/regulator/s2mps11.c |
1924 |
+@@ -202,11 +202,16 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) |
1925 |
+ if (!ramp_enable) |
1926 |
+ goto ramp_disable; |
1927 |
+ |
1928 |
+- ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP, |
1929 |
+- 1 << enable_shift, 1 << enable_shift); |
1930 |
+- if (ret) { |
1931 |
+- dev_err(&rdev->dev, "failed to enable ramp rate\n"); |
1932 |
+- return ret; |
1933 |
++ /* Ramp delay can be enabled/disabled only for buck[2346] */ |
1934 |
++ if ((rdev_get_id(rdev) >= S2MPS11_BUCK2 && |
1935 |
++ rdev_get_id(rdev) <= S2MPS11_BUCK4) || |
1936 |
++ rdev_get_id(rdev) == S2MPS11_BUCK6) { |
1937 |
++ ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP, |
1938 |
++ 1 << enable_shift, 1 << enable_shift); |
1939 |
++ if (ret) { |
1940 |
++ dev_err(&rdev->dev, "failed to enable ramp rate\n"); |
1941 |
++ return ret; |
1942 |
++ } |
1943 |
+ } |
1944 |
+ |
1945 |
+ ramp_val = get_ramp_delay(ramp_delay); |
1946 |
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
1947 |
+index 26dc005bb0f0..3f462349b16c 100644 |
1948 |
+--- a/drivers/scsi/libiscsi.c |
1949 |
++++ b/drivers/scsi/libiscsi.c |
1950 |
+@@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) |
1951 |
+ struct iscsi_session *session = conn->session; |
1952 |
+ struct scsi_cmnd *sc = task->sc; |
1953 |
+ struct iscsi_scsi_req *hdr; |
1954 |
+- unsigned hdrlength, cmd_len; |
1955 |
++ unsigned hdrlength, cmd_len, transfer_length; |
1956 |
+ itt_t itt; |
1957 |
+ int rc; |
1958 |
+ |
1959 |
+@@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) |
1960 |
+ if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) |
1961 |
+ task->protected = true; |
1962 |
+ |
1963 |
++ transfer_length = scsi_transfer_length(sc); |
1964 |
++ hdr->data_length = cpu_to_be32(transfer_length); |
1965 |
+ if (sc->sc_data_direction == DMA_TO_DEVICE) { |
1966 |
+- unsigned out_len = scsi_out(sc)->length; |
1967 |
+ struct iscsi_r2t_info *r2t = &task->unsol_r2t; |
1968 |
+ |
1969 |
+- hdr->data_length = cpu_to_be32(out_len); |
1970 |
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE; |
1971 |
+ /* |
1972 |
+ * Write counters: |
1973 |
+@@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) |
1974 |
+ memset(r2t, 0, sizeof(*r2t)); |
1975 |
+ |
1976 |
+ if (session->imm_data_en) { |
1977 |
+- if (out_len >= session->first_burst) |
1978 |
++ if (transfer_length >= session->first_burst) |
1979 |
+ task->imm_count = min(session->first_burst, |
1980 |
+ conn->max_xmit_dlength); |
1981 |
+ else |
1982 |
+- task->imm_count = min(out_len, |
1983 |
+- conn->max_xmit_dlength); |
1984 |
++ task->imm_count = min(transfer_length, |
1985 |
++ conn->max_xmit_dlength); |
1986 |
+ hton24(hdr->dlength, task->imm_count); |
1987 |
+ } else |
1988 |
+ zero_data(hdr->dlength); |
1989 |
+ |
1990 |
+ if (!session->initial_r2t_en) { |
1991 |
+- r2t->data_length = min(session->first_burst, out_len) - |
1992 |
++ r2t->data_length = min(session->first_burst, |
1993 |
++ transfer_length) - |
1994 |
+ task->imm_count; |
1995 |
+ r2t->data_offset = task->imm_count; |
1996 |
+ r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); |
1997 |
+@@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) |
1998 |
+ } else { |
1999 |
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL; |
2000 |
+ zero_data(hdr->dlength); |
2001 |
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length); |
2002 |
+ |
2003 |
+ if (sc->sc_data_direction == DMA_FROM_DEVICE) |
2004 |
+ hdr->flags |= ISCSI_FLAG_CMD_READ; |
2005 |
+@@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) |
2006 |
+ scsi_bidi_cmnd(sc) ? "bidirectional" : |
2007 |
+ sc->sc_data_direction == DMA_TO_DEVICE ? |
2008 |
+ "write" : "read", conn->id, sc, sc->cmnd[0], |
2009 |
+- task->itt, scsi_bufflen(sc), |
2010 |
++ task->itt, transfer_length, |
2011 |
+ scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, |
2012 |
+ session->cmdsn, |
2013 |
+ session->max_cmdsn - session->exp_cmdsn + 1); |
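
The libiscsi changes above set the header's data_length from scsi_transfer_length() instead of the raw buffer length, so that T10 protection information riding along with the data is counted. As a rough model of what that helper returns (an assumption for illustration, not the kernel's exact implementation), the wire length is the data length plus one 8-byte PI tuple per logical block when protection travels over the wire:

#include <stdint.h>
#include <stdio.h>

/* assumed model: data bytes plus an 8-byte T10 PI tuple per block */
static uint32_t transfer_length(uint32_t data_len, uint32_t block_size,
				int pi_on_wire)
{
	uint32_t sectors = data_len / block_size;

	return pi_on_wire ? data_len + sectors * 8 : data_len;
}

int main(void)
{
	/* a 64 KiB I/O of 512-byte blocks: 128 sectors -> 1024 PI bytes */
	printf("%u\n", transfer_length(65536, 512, 1));	/* prints: 66560 */
	printf("%u\n", transfer_length(65536, 512, 0));	/* prints: 65536 */
	return 0;
}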
2014 |
+diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c |
2015 |
+index d47dedd2cdb4..6f5efcc89880 100644 |
2016 |
+--- a/drivers/staging/imx-drm/imx-hdmi.c |
2017 |
++++ b/drivers/staging/imx-drm/imx-hdmi.c |
2018 |
+@@ -120,8 +120,6 @@ struct imx_hdmi { |
2019 |
+ struct clk *isfr_clk; |
2020 |
+ struct clk *iahb_clk; |
2021 |
+ |
2022 |
+- enum drm_connector_status connector_status; |
2023 |
+- |
2024 |
+ struct hdmi_data_info hdmi_data; |
2025 |
+ int vic; |
2026 |
+ |
2027 |
+@@ -1382,7 +1380,9 @@ static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector |
2028 |
+ { |
2029 |
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi, |
2030 |
+ connector); |
2031 |
+- return hdmi->connector_status; |
2032 |
++ |
2033 |
++ return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ? |
2034 |
++ connector_status_connected : connector_status_disconnected; |
2035 |
+ } |
2036 |
+ |
2037 |
+ static int imx_hdmi_connector_get_modes(struct drm_connector *connector) |
2038 |
+@@ -1524,7 +1524,6 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id) |
2039 |
+ |
2040 |
+ hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0); |
2041 |
+ |
2042 |
+- hdmi->connector_status = connector_status_connected; |
2043 |
+ imx_hdmi_poweron(hdmi); |
2044 |
+ } else { |
2045 |
+ dev_dbg(hdmi->dev, "EVENT=plugout\n"); |
2046 |
+@@ -1532,7 +1531,6 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id) |
2047 |
+ hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD, |
2048 |
+ HDMI_PHY_POL0); |
2049 |
+ |
2050 |
+- hdmi->connector_status = connector_status_disconnected; |
2051 |
+ imx_hdmi_poweroff(hdmi); |
2052 |
+ } |
2053 |
+ drm_helper_hpd_irq_event(hdmi->connector.dev); |
2054 |
+@@ -1606,7 +1604,6 @@ static int imx_hdmi_bind(struct device *dev, struct device *master, void *data) |
2055 |
+ return -ENOMEM; |
2056 |
+ |
2057 |
+ hdmi->dev = dev; |
2058 |
+- hdmi->connector_status = connector_status_disconnected; |
2059 |
+ hdmi->sample_rate = 48000; |
2060 |
+ hdmi->ratio = 100; |
2061 |
+ |
2062 |
+diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c |
2063 |
+index b2cd3a85166d..bbf236e842a9 100644 |
2064 |
+--- a/drivers/staging/media/bcm2048/radio-bcm2048.c |
2065 |
++++ b/drivers/staging/media/bcm2048/radio-bcm2048.c |
2066 |
+@@ -737,7 +737,7 @@ static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region) |
2067 |
+ int err; |
2068 |
+ u32 new_frequency = 0; |
2069 |
+ |
2070 |
+- if (region > ARRAY_SIZE(region_configs)) |
2071 |
++ if (region >= ARRAY_SIZE(region_configs)) |
2072 |
+ return -EINVAL; |
2073 |
+ |
2074 |
+ mutex_lock(&bdev->mutex); |
2075 |
+diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c |
2076 |
+index 51dbc13e757f..5a40925680ac 100644 |
2077 |
+--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c |
2078 |
++++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c |
2079 |
+@@ -924,6 +924,7 @@ static int spinand_remove(struct spi_device *spi) |
2080 |
+ |
2081 |
+ static const struct of_device_id spinand_dt[] = { |
2082 |
+ { .compatible = "spinand,mt29f", }, |
2083 |
++ {} |
2084 |
+ }; |
2085 |
+ |
2086 |
+ /* |
2087 |
+diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c |
2088 |
+index 3dd90599fd4b..6c9e9a16b2e9 100644 |
2089 |
+--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c |
2090 |
++++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c |
2091 |
+@@ -1599,13 +1599,18 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l |
2092 |
+ pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len); |
2093 |
+ if (pIE == NULL) |
2094 |
+ return _FAIL; |
2095 |
++ if (ie_len > NDIS_802_11_LENGTH_RATES_EX) |
2096 |
++ return _FAIL; |
2097 |
+ |
2098 |
+ memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); |
2099 |
+ supportRateNum = ie_len; |
2100 |
+ |
2101 |
+ pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); |
2102 |
+- if (pIE) |
2103 |
++ if (pIE) { |
2104 |
++ if (supportRateNum + ie_len > NDIS_802_11_LENGTH_RATES_EX) |
2105 |
++ return _FAIL; |
2106 |
+ memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); |
2107 |
++ } |
2108 |
+ |
2109 |
+ return _SUCCESS; |
2110 |
+ } |
2111 |
+diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c |
2112 |
+index 2f084e181d39..a1aca4416ca7 100644 |
2113 |
+--- a/drivers/staging/tidspbridge/core/dsp-clock.c |
2114 |
++++ b/drivers/staging/tidspbridge/core/dsp-clock.c |
2115 |
+@@ -226,7 +226,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id) |
2116 |
+ case GPT_CLK: |
2117 |
+ status = omap_dm_timer_start(timer[clk_id - 1]); |
2118 |
+ break; |
2119 |
+-#ifdef CONFIG_OMAP_MCBSP |
2120 |
++#ifdef CONFIG_SND_OMAP_SOC_MCBSP |
2121 |
+ case MCBSP_CLK: |
2122 |
+ omap_mcbsp_request(MCBSP_ID(clk_id)); |
2123 |
+ omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); |
2124 |
+@@ -302,7 +302,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id) |
2125 |
+ case GPT_CLK: |
2126 |
+ status = omap_dm_timer_stop(timer[clk_id - 1]); |
2127 |
+ break; |
2128 |
+-#ifdef CONFIG_OMAP_MCBSP |
2129 |
++#ifdef CONFIG_SND_OMAP_SOC_MCBSP |
2130 |
+ case MCBSP_CLK: |
2131 |
+ omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC); |
2132 |
+ omap_mcbsp_free(MCBSP_ID(clk_id)); |
2133 |
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c |
2134 |
+index 9189bc0a87ae..ca2bc348ef5b 100644 |
2135 |
+--- a/drivers/target/iscsi/iscsi_target.c |
2136 |
++++ b/drivers/target/iscsi/iscsi_target.c |
2137 |
+@@ -3390,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) |
2138 |
+ |
2139 |
+ #define SENDTARGETS_BUF_LIMIT 32768U |
2140 |
+ |
2141 |
+-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) |
2142 |
++static int |
2143 |
++iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, |
2144 |
++ enum iscsit_transport_type network_transport) |
2145 |
+ { |
2146 |
+ char *payload = NULL; |
2147 |
+ struct iscsi_conn *conn = cmd->conn; |
2148 |
+@@ -3467,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) |
2149 |
+ struct iscsi_np *np = tpg_np->tpg_np; |
2150 |
+ bool inaddr_any = iscsit_check_inaddr_any(np); |
2151 |
+ |
2152 |
++ if (np->np_network_transport != network_transport) |
2153 |
++ continue; |
2154 |
++ |
2155 |
+ if (!target_name_printed) { |
2156 |
+ len = sprintf(buf, "TargetName=%s", |
2157 |
+ tiqn->tiqn); |
2158 |
+@@ -3520,11 +3525,12 @@ eob: |
2159 |
+ |
2160 |
+ int |
2161 |
+ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, |
2162 |
+- struct iscsi_text_rsp *hdr) |
2163 |
++ struct iscsi_text_rsp *hdr, |
2164 |
++ enum iscsit_transport_type network_transport) |
2165 |
+ { |
2166 |
+ int text_length, padding; |
2167 |
+ |
2168 |
+- text_length = iscsit_build_sendtargets_response(cmd); |
2169 |
++ text_length = iscsit_build_sendtargets_response(cmd, network_transport); |
2170 |
+ if (text_length < 0) |
2171 |
+ return text_length; |
2172 |
+ |
2173 |
+@@ -3562,7 +3568,7 @@ static int iscsit_send_text_rsp( |
2174 |
+ u32 tx_size = 0; |
2175 |
+ int text_length, iov_count = 0, rc; |
2176 |
+ |
2177 |
+- rc = iscsit_build_text_rsp(cmd, conn, hdr); |
2178 |
++ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); |
2179 |
+ if (rc < 0) |
2180 |
+ return rc; |
2181 |
+ |
2182 |
+@@ -4234,8 +4240,6 @@ int iscsit_close_connection( |
2183 |
+ if (conn->conn_transport->iscsit_wait_conn) |
2184 |
+ conn->conn_transport->iscsit_wait_conn(conn); |
2185 |
+ |
2186 |
+- iscsit_free_queue_reqs_for_conn(conn); |
2187 |
+- |
2188 |
+ /* |
2189 |
+ * During Connection recovery drop unacknowledged out of order |
2190 |
+ * commands for this connection, and prepare the other commands |
2191 |
+@@ -4252,6 +4256,7 @@ int iscsit_close_connection( |
2192 |
+ iscsit_clear_ooo_cmdsns_for_conn(conn); |
2193 |
+ iscsit_release_commands_from_conn(conn); |
2194 |
+ } |
2195 |
++ iscsit_free_queue_reqs_for_conn(conn); |
2196 |
+ |
2197 |
+ /* |
2198 |
+ * Handle decrementing session or connection usage count if |
2199 |
+diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c |
2200 |
+index c886ad1c39fb..1f4c015e9078 100644 |
2201 |
+--- a/drivers/target/loopback/tcm_loop.c |
2202 |
++++ b/drivers/target/loopback/tcm_loop.c |
2203 |
+@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work) |
2204 |
+ struct tcm_loop_hba *tl_hba; |
2205 |
+ struct tcm_loop_tpg *tl_tpg; |
2206 |
+ struct scatterlist *sgl_bidi = NULL; |
2207 |
+- u32 sgl_bidi_count = 0; |
2208 |
++ u32 sgl_bidi_count = 0, transfer_length; |
2209 |
+ int rc; |
2210 |
+ |
2211 |
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); |
2212 |
+@@ -213,12 +213,21 @@ static void tcm_loop_submission_work(struct work_struct *work) |
2213 |
+ |
2214 |
+ } |
2215 |
+ |
2216 |
+- if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) |
2217 |
++ transfer_length = scsi_transfer_length(sc); |
2218 |
++ if (!scsi_prot_sg_count(sc) && |
2219 |
++ scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) { |
2220 |
+ se_cmd->prot_pto = true; |
2221 |
++ /* |
2222 |
++ * loopback transport doesn't support |
2223 |
++ * WRITE_GENERATE, READ_STRIP protection |
2224 |
++ * information operations, go ahead unprotected. |
2225 |
++ */ |
2226 |
++ transfer_length = scsi_bufflen(sc); |
2227 |
++ } |
2228 |
+ |
2229 |
+ rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, |
2230 |
+ &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, |
2231 |
+- scsi_bufflen(sc), tcm_loop_sam_attr(sc), |
2232 |
++ transfer_length, tcm_loop_sam_attr(sc), |
2233 |
+ sc->sc_data_direction, 0, |
2234 |
+ scsi_sglist(sc), scsi_sg_count(sc), |
2235 |
+ sgl_bidi, sgl_bidi_count, |
2236 |
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
2237 |
+index e0229592ec55..bcbc6810666d 100644 |
2238 |
+--- a/drivers/target/target_core_sbc.c |
2239 |
++++ b/drivers/target/target_core_sbc.c |
2240 |
+@@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd) |
2241 |
+ transport_kunmap_data_sg(cmd); |
2242 |
+ } |
2243 |
+ |
2244 |
+- target_complete_cmd(cmd, GOOD); |
2245 |
++ target_complete_cmd_with_length(cmd, GOOD, 8); |
2246 |
+ return 0; |
2247 |
+ } |
2248 |
+ |
2249 |
+@@ -137,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) |
2250 |
+ transport_kunmap_data_sg(cmd); |
2251 |
+ } |
2252 |
+ |
2253 |
+- target_complete_cmd(cmd, GOOD); |
2254 |
++ target_complete_cmd_with_length(cmd, GOOD, 32); |
2255 |
+ return 0; |
2256 |
+ } |
2257 |
+ |
2258 |
+@@ -665,8 +665,19 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, |
2259 |
+ |
2260 |
+ cmd->prot_type = dev->dev_attrib.pi_prot_type; |
2261 |
+ cmd->prot_length = dev->prot_length * sectors; |
2262 |
+- pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n", |
2263 |
+- __func__, cmd->prot_type, cmd->prot_length, |
2264 |
++ |
2265 |
++ /** |
2266 |
++ * In case protection information exists over the wire |
2267 |
++ * we modify command data length to describe pure data. |
2268 |
++ * The actual transfer length is data length + protection |
2269 |
++ * length |
2270 |
++ **/ |
2271 |
++ if (protect) |
2272 |
++ cmd->data_length = sectors * dev->dev_attrib.block_size; |
2273 |
++ |
2274 |
++ pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " |
2275 |
++ "prot_op=%d prot_checks=%d\n", |
2276 |
++ __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, |
2277 |
+ cmd->prot_op, cmd->prot_checks); |
2278 |
+ |
2279 |
+ return true; |
2280 |
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c |
2281 |
+index 8653666612a8..d24df1a6afc1 100644 |
2282 |
+--- a/drivers/target/target_core_spc.c |
2283 |
++++ b/drivers/target/target_core_spc.c |
2284 |
+@@ -721,6 +721,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) |
2285 |
+ unsigned char *buf; |
2286 |
+ sense_reason_t ret; |
2287 |
+ int p; |
2288 |
++ int len = 0; |
2289 |
+ |
2290 |
+ buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); |
2291 |
+ if (!buf) { |
2292 |
+@@ -742,6 +743,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) |
2293 |
+ } |
2294 |
+ |
2295 |
+ ret = spc_emulate_inquiry_std(cmd, buf); |
2296 |
++ len = buf[4] + 5; |
2297 |
+ goto out; |
2298 |
+ } |
2299 |
+ |
2300 |
+@@ -749,6 +751,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) |
2301 |
+ if (cdb[2] == evpd_handlers[p].page) { |
2302 |
+ buf[1] = cdb[2]; |
2303 |
+ ret = evpd_handlers[p].emulate(cmd, buf); |
2304 |
++ len = get_unaligned_be16(&buf[2]) + 4; |
2305 |
+ goto out; |
2306 |
+ } |
2307 |
+ } |
2308 |
+@@ -765,7 +768,7 @@ out: |
2309 |
+ kfree(buf); |
2310 |
+ |
2311 |
+ if (!ret) |
2312 |
+- target_complete_cmd(cmd, GOOD); |
2313 |
++ target_complete_cmd_with_length(cmd, GOOD, len); |
2314 |
+ return ret; |
2315 |
+ } |
2316 |
+ |
2317 |
+@@ -1103,7 +1106,7 @@ set_length: |
2318 |
+ transport_kunmap_data_sg(cmd); |
2319 |
+ } |
2320 |
+ |
2321 |
+- target_complete_cmd(cmd, GOOD); |
2322 |
++ target_complete_cmd_with_length(cmd, GOOD, length); |
2323 |
+ return 0; |
2324 |
+ } |
2325 |
+ |
2326 |
+@@ -1279,7 +1282,7 @@ done: |
2327 |
+ buf[3] = (lun_count & 0xff); |
2328 |
+ transport_kunmap_data_sg(cmd); |
2329 |
+ |
2330 |
+- target_complete_cmd(cmd, GOOD); |
2331 |
++ target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); |
2332 |
+ return 0; |
2333 |
+ } |
2334 |
+ EXPORT_SYMBOL(spc_emulate_report_luns); |
2335 |
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
2336 |
+index a51dd4efc23b..14772e98d3d2 100644 |
2337 |
+--- a/drivers/target/target_core_transport.c |
2338 |
++++ b/drivers/target/target_core_transport.c |
2339 |
+@@ -562,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, |
2340 |
+ |
2341 |
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2342 |
+ |
2343 |
+- complete(&cmd->t_transport_stop_comp); |
2344 |
++ complete_all(&cmd->t_transport_stop_comp); |
2345 |
+ return 1; |
2346 |
+ } |
2347 |
+ |
2348 |
+@@ -687,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) |
2349 |
+ if (cmd->transport_state & CMD_T_ABORTED && |
2350 |
+ cmd->transport_state & CMD_T_STOP) { |
2351 |
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2352 |
+- complete(&cmd->t_transport_stop_comp); |
2353 |
++ complete_all(&cmd->t_transport_stop_comp); |
2354 |
+ return; |
2355 |
+ } else if (!success) { |
2356 |
+ INIT_WORK(&cmd->work, target_complete_failure_work); |
2357 |
+@@ -703,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) |
2358 |
+ } |
2359 |
+ EXPORT_SYMBOL(target_complete_cmd); |
2360 |
+ |
2361 |
++void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) |
2362 |
++{ |
2363 |
++ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { |
2364 |
++ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { |
2365 |
++ cmd->residual_count += cmd->data_length - length; |
2366 |
++ } else { |
2367 |
++ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; |
2368 |
++ cmd->residual_count = cmd->data_length - length; |
2369 |
++ } |
2370 |
++ |
2371 |
++ cmd->data_length = length; |
2372 |
++ } |
2373 |
++ |
2374 |
++ target_complete_cmd(cmd, scsi_status); |
2375 |
++} |
2376 |
++EXPORT_SYMBOL(target_complete_cmd_with_length); |
2377 |
++ |
2378 |
+ static void target_add_to_state_list(struct se_cmd *cmd) |
2379 |
+ { |
2380 |
+ struct se_device *dev = cmd->se_dev; |
2381 |
+@@ -1761,7 +1778,7 @@ void target_execute_cmd(struct se_cmd *cmd) |
2382 |
+ cmd->se_tfo->get_task_tag(cmd)); |
2383 |
+ |
2384 |
+ spin_unlock_irq(&cmd->t_state_lock); |
2385 |
+- complete(&cmd->t_transport_stop_comp); |
2386 |
++ complete_all(&cmd->t_transport_stop_comp); |
2387 |
+ return; |
2388 |
+ } |
2389 |
+ |
2390 |
+@@ -2938,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work) |
2391 |
+ int transport_generic_handle_tmr( |
2392 |
+ struct se_cmd *cmd) |
2393 |
+ { |
2394 |
++ unsigned long flags; |
2395 |
++ |
2396 |
++ spin_lock_irqsave(&cmd->t_state_lock, flags); |
2397 |
++ cmd->transport_state |= CMD_T_ACTIVE; |
2398 |
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2399 |
++ |
2400 |
+ INIT_WORK(&cmd->work, target_tmr_work); |
2401 |
+ queue_work(cmd->se_dev->tmr_wq, &cmd->work); |
2402 |
+ return 0; |
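
The target_complete_cmd_with_length() helper introduced above lets the emulations earlier in this patch (READ CAPACITY, INQUIRY, REPORT LUNS and friends) report an underflow when the response is shorter than the initiator's allocation length: the residual grows by the shortfall and data_length is trimmed to what was actually returned. A standalone transcription of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

struct cmd {
	uint32_t data_length;	/* allocation length from the CDB */
	uint32_t residual_count;
	int underflow;		/* stands in for SCF_UNDERFLOW_BIT */
};

static void complete_with_length(struct cmd *c, uint32_t length)
{
	if (length < c->data_length) {
		if (c->underflow) {
			c->residual_count += c->data_length - length;
		} else {
			c->underflow = 1;
			c->residual_count = c->data_length - length;
		}
		c->data_length = length;
	}
}

int main(void)
{
	/* e.g. INQUIRY with a 255-byte allocation but a 36-byte response */
	struct cmd c = { .data_length = 255 };

	complete_with_length(&c, 36);
	printf("residual=%u data=%u\n", c.residual_count, c.data_length);
	/* prints: residual=219 data=36 */
	return 0;
}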
2403 |
+diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c |
2404 |
+index 99246606a256..27981e2b9430 100644 |
2405 |
+--- a/drivers/tty/serial/of_serial.c |
2406 |
++++ b/drivers/tty/serial/of_serial.c |
2407 |
+@@ -173,6 +173,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev) |
2408 |
+ { |
2409 |
+ struct uart_8250_port port8250; |
2410 |
+ memset(&port8250, 0, sizeof(port8250)); |
2411 |
++ port.type = port_type; |
2412 |
+ port8250.port = port; |
2413 |
+ |
2414 |
+ if (port.fifosize) |
2415 |
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
2416 |
+index 70715eeededd..85f398d3184d 100644 |
2417 |
+--- a/drivers/usb/dwc3/gadget.c |
2418 |
++++ b/drivers/usb/dwc3/gadget.c |
2419 |
+@@ -604,6 +604,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) |
2420 |
+ |
2421 |
+ dwc3_remove_requests(dwc, dep); |
2422 |
+ |
2423 |
++ /* make sure HW endpoint isn't stalled */ |
2424 |
++ if (dep->flags & DWC3_EP_STALL) |
2425 |
++ __dwc3_gadget_ep_set_halt(dep, 0); |
2426 |
++ |
2427 |
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); |
2428 |
+ reg &= ~DWC3_DALEPENA_EP(dep->number); |
2429 |
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); |
2430 |
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c |
2431 |
+index a925d0cbcd41..a0863a2d2142 100644 |
2432 |
+--- a/drivers/usb/gadget/inode.c |
2433 |
++++ b/drivers/usb/gadget/inode.c |
2434 |
+@@ -1501,7 +1501,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) |
2435 |
+ } |
2436 |
+ break; |
2437 |
+ |
2438 |
+-#ifndef CONFIG_USB_GADGET_PXA25X |
2439 |
++#ifndef CONFIG_USB_PXA25X |
2440 |
+ /* PXA automagically handles this request too */ |
2441 |
+ case USB_REQ_GET_CONFIGURATION: |
2442 |
+ if (ctrl->bRequestType != 0x80) |
2443 |
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c |
2444 |
+index 4a6d3dd68572..2f3acebb577a 100644 |
2445 |
+--- a/drivers/usb/host/pci-quirks.c |
2446 |
++++ b/drivers/usb/host/pci-quirks.c |
2447 |
+@@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = { |
2448 |
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), |
2449 |
+ }, |
2450 |
+ }, |
2451 |
++ { |
2452 |
++ /* HASEE E200 */ |
2453 |
++ .matches = { |
2454 |
++ DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"), |
2455 |
++ DMI_MATCH(DMI_BOARD_NAME, "E210"), |
2456 |
++ DMI_MATCH(DMI_BIOS_VERSION, "6.00"), |
2457 |
++ }, |
2458 |
++ }, |
2459 |
+ { } |
2460 |
+ }; |
2461 |
+ |
2462 |
+@@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev, |
2463 |
+ { |
2464 |
+ int try_handoff = 1, tried_handoff = 0; |
2465 |
+ |
2466 |
+- /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying |
2467 |
+- * the handoff on its unused controller. Skip it. */ |
2468 |
+- if (pdev->vendor == 0x8086 && pdev->device == 0x283a) { |
2469 |
++ /* |
2470 |
++ * The Pegatron Lucid tablet sporadically waits for 98 seconds trying |
2471 |
++ * the handoff on its unused controller. Skip it. |
2472 |
++ * |
2473 |
++ * The HASEE E200 hangs when the semaphore is set (bugzilla #77021). |
2474 |
++ */ |
2475 |
++ if (pdev->vendor == 0x8086 && (pdev->device == 0x283a || |
2476 |
++ pdev->device == 0x27cc)) { |
2477 |
+ if (dmi_check_system(ehci_dmi_nohandoff_table)) |
2478 |
+ try_handoff = 0; |
2479 |
+ } |
2480 |
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c |
2481 |
+index f6568b5e9b06..71dcacbab398 100644 |
2482 |
+--- a/drivers/usb/misc/usbtest.c |
2483 |
++++ b/drivers/usb/misc/usbtest.c |
2484 |
+@@ -7,7 +7,7 @@ |
2485 |
+ #include <linux/moduleparam.h> |
2486 |
+ #include <linux/scatterlist.h> |
2487 |
+ #include <linux/mutex.h> |
2488 |
+- |
2489 |
++#include <linux/timer.h> |
2490 |
+ #include <linux/usb.h> |
2491 |
+ |
2492 |
+ #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */ |
2493 |
+@@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary) |
2494 |
+ return sg; |
2495 |
+ } |
2496 |
+ |
2497 |
++static void sg_timeout(unsigned long _req) |
2498 |
++{ |
2499 |
++ struct usb_sg_request *req = (struct usb_sg_request *) _req; |
2500 |
++ |
2501 |
++ req->status = -ETIMEDOUT; |
2502 |
++ usb_sg_cancel(req); |
2503 |
++} |
2504 |
++ |
2505 |
+ static int perform_sglist( |
2506 |
+ struct usbtest_dev *tdev, |
2507 |
+ unsigned iterations, |
2508 |
+@@ -495,6 +503,9 @@ static int perform_sglist( |
2509 |
+ { |
2510 |
+ struct usb_device *udev = testdev_to_usbdev(tdev); |
2511 |
+ int retval = 0; |
2512 |
++ struct timer_list sg_timer; |
2513 |
++ |
2514 |
++ setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req); |
2515 |
+ |
2516 |
+ while (retval == 0 && iterations-- > 0) { |
2517 |
+ retval = usb_sg_init(req, udev, pipe, |
2518 |
+@@ -505,7 +516,10 @@ static int perform_sglist( |
2519 |
+ |
2520 |
+ if (retval) |
2521 |
+ break; |
2522 |
++ mod_timer(&sg_timer, jiffies + |
2523 |
++ msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); |
2524 |
+ usb_sg_wait(req); |
2525 |
++ del_timer_sync(&sg_timer); |
2526 |
+ retval = req->status; |
2527 |
+ |
2528 |
+ /* FIXME check resulting data pattern */ |
2529 |
+@@ -1320,6 +1334,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async) |
2530 |
+ urb->context = &completion; |
2531 |
+ urb->complete = unlink1_callback; |
2532 |
+ |
2533 |
++ if (usb_pipeout(urb->pipe)) { |
2534 |
++ simple_fill_buf(urb); |
2535 |
++ urb->transfer_flags |= URB_ZERO_PACKET; |
2536 |
++ } |
2537 |
++ |
2538 |
+ /* keep the endpoint busy. there are lots of hc/hcd-internal |
2539 |
+ * states, and testing should get to all of them over time. |
2540 |
+ * |
2541 |
+@@ -1450,6 +1469,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, |
2542 |
+ unlink_queued_callback, &ctx); |
2543 |
+ ctx.urbs[i]->transfer_dma = buf_dma; |
2544 |
+ ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; |
2545 |
++ |
2546 |
++ if (usb_pipeout(ctx.urbs[i]->pipe)) { |
2547 |
++ simple_fill_buf(ctx.urbs[i]); |
2548 |
++ ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET; |
2549 |
++ } |
2550 |
+ } |
2551 |
+ |
2552 |
+ /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ |
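
The perform_sglist() change above bounds the blocking usb_sg_wait() with a timer: arm it, wait, disarm it with del_timer_sync(), and let the callback cancel the request via usb_sg_cancel() if it ever fires. The same arm/wait/disarm shape, sketched loosely with a POSIX alarm standing in for the kernel timer (the handler here only sets a flag, where the kernel callback also sets req->status to -ETIMEDOUT and cancels the transfer):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t timed_out;

static void on_timeout(int sig)
{
	(void)sig;
	timed_out = 1;	/* sg_timeout() additionally calls usb_sg_cancel() */
}

int main(void)
{
	struct sigaction sa;
	char buf[16];
	ssize_t n;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_timeout;	/* no SA_RESTART: let the wait abort */
	sigaction(SIGALRM, &sa, NULL);

	alarm(10);			/* arm the guard, like mod_timer() */
	n = read(STDIN_FILENO, buf, sizeof(buf));	/* like usb_sg_wait() */
	alarm(0);			/* disarm, like del_timer_sync() */

	if (n < 0 && timed_out)
		fprintf(stderr, "timed out\n");
	else
		printf("read %zd bytes\n", n);
	return 0;
}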
2553 |
+diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c |
2554 |
+index 6e146d723b37..69e49be8866b 100644 |
2555 |
+--- a/drivers/usb/phy/phy-isp1301-omap.c |
2556 |
++++ b/drivers/usb/phy/phy-isp1301-omap.c |
2557 |
+@@ -1295,7 +1295,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host) |
2558 |
+ return isp1301_otg_enable(isp); |
2559 |
+ return 0; |
2560 |
+ |
2561 |
+-#elif !defined(CONFIG_USB_GADGET_OMAP) |
2562 |
++#elif !IS_ENABLED(CONFIG_USB_OMAP) |
2563 |
+ // FIXME update its refcount |
2564 |
+ otg->host = host; |
2565 |
+ |
2566 |
+diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c |
2567 |
+index 35a2373cde67..9374bd2aba20 100644 |
2568 |
+--- a/drivers/usb/serial/bus.c |
2569 |
++++ b/drivers/usb/serial/bus.c |
2570 |
+@@ -97,13 +97,19 @@ static int usb_serial_device_remove(struct device *dev) |
2571 |
+ struct usb_serial_port *port; |
2572 |
+ int retval = 0; |
2573 |
+ int minor; |
2574 |
++ int autopm_err; |
2575 |
+ |
2576 |
+ port = to_usb_serial_port(dev); |
2577 |
+ if (!port) |
2578 |
+ return -ENODEV; |
2579 |
+ |
2580 |
+- /* make sure suspend/resume doesn't race against port_remove */ |
2581 |
+- usb_autopm_get_interface(port->serial->interface); |
2582 |
++ /* |
2583 |
++ * Make sure suspend/resume doesn't race against port_remove. |
2584 |
++ * |
2585 |
++ * Note that no further runtime PM callbacks will be made if |
2586 |
++ * autopm_get fails. |
2587 |
++ */ |
2588 |
++ autopm_err = usb_autopm_get_interface(port->serial->interface); |
2589 |
+ |
2590 |
+ minor = port->minor; |
2591 |
+ tty_unregister_device(usb_serial_tty_driver, minor); |
2592 |
+@@ -117,7 +123,9 @@ static int usb_serial_device_remove(struct device *dev) |
2593 |
+ dev_info(dev, "%s converter now disconnected from ttyUSB%d\n", |
2594 |
+ driver->description, minor); |
2595 |
+ |
2596 |
+- usb_autopm_put_interface(port->serial->interface); |
2597 |
++ if (!autopm_err) |
2598 |
++ usb_autopm_put_interface(port->serial->interface); |
2599 |
++ |
2600 |
+ return retval; |
2601 |
+ } |
2602 |
+ |
2603 |
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2604 |
+index 948a19f0cdf7..70ede84f4f6b 100644 |
2605 |
+--- a/drivers/usb/serial/option.c |
2606 |
++++ b/drivers/usb/serial/option.c |
2607 |
+@@ -1925,6 +1925,7 @@ static int option_send_setup(struct usb_serial_port *port) |
2608 |
+ struct option_private *priv = intfdata->private; |
2609 |
+ struct usb_wwan_port_private *portdata; |
2610 |
+ int val = 0; |
2611 |
++ int res; |
2612 |
+ |
2613 |
+ portdata = usb_get_serial_port_data(port); |
2614 |
+ |
2615 |
+@@ -1933,9 +1934,17 @@ static int option_send_setup(struct usb_serial_port *port) |
2616 |
+ if (portdata->rts_state) |
2617 |
+ val |= 0x02; |
2618 |
+ |
2619 |
+- return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), |
2620 |
++ res = usb_autopm_get_interface(serial->interface); |
2621 |
++ if (res) |
2622 |
++ return res; |
2623 |
++ |
2624 |
++ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), |
2625 |
+ 0x22, 0x21, val, priv->bInterfaceNumber, NULL, |
2626 |
+ 0, USB_CTRL_SET_TIMEOUT); |
2627 |
++ |
2628 |
++ usb_autopm_put_interface(serial->interface); |
2629 |
++ |
2630 |
++ return res; |
2631 |
+ } |
2632 |
+ |
2633 |
+ MODULE_AUTHOR(DRIVER_AUTHOR); |
2634 |
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c |
2635 |
+index 6c0a542e8ec1..43d93dbf7d71 100644 |
2636 |
+--- a/drivers/usb/serial/qcserial.c |
2637 |
++++ b/drivers/usb/serial/qcserial.c |
2638 |
+@@ -145,12 +145,33 @@ static const struct usb_device_id id_table[] = { |
2639 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */ |
2640 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */ |
2641 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */ |
2642 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)}, /* Sierra Wireless Modem Device Management */ |
2643 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)}, /* Sierra Wireless Modem NMEA */ |
2644 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)}, /* Sierra Wireless Modem Modem */ |
2645 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */ |
2646 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */ |
2647 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */ |
2648 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ |
2649 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ |
2650 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ |
2651 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)}, /* Sierra Wireless Modem Device Management */ |
2652 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)}, /* Sierra Wireless Modem NMEA */ |
2653 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)}, /* Sierra Wireless Modem Modem */ |
2654 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)}, /* Sierra Wireless Modem Device Management */ |
2655 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)}, /* Sierra Wireless Modem NMEA */ |
2656 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)}, /* Sierra Wireless Modem Modem */ |
2657 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)}, /* Netgear AirCard 341U Device Management */ |
2658 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)}, /* Netgear AirCard 341U NMEA */ |
2659 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)}, /* Netgear AirCard 341U Modem */ |
2660 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)}, /* Sierra Wireless Modem Device Management */ |
2661 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)}, /* Sierra Wireless Modem NMEA */ |
2662 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)}, /* Sierra Wireless Modem Modem */ |
2663 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)}, /* Sierra Wireless Modem Device Management */ |
2664 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)}, /* Sierra Wireless Modem NMEA */ |
2665 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)}, /* Sierra Wireless Modem Modem */ |
2666 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)}, /* Sierra Wireless Modem Device Management */ |
2667 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)}, /* Sierra Wireless Modem NMEA */ |
2668 |
++ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)}, /* Sierra Wireless Modem Modem */ |
2669 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ |
2670 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ |
2671 |
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ |
2672 |
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c |
2673 |
+index 6b192e602ce0..37480348e39b 100644 |
2674 |
+--- a/drivers/usb/serial/sierra.c |
2675 |
++++ b/drivers/usb/serial/sierra.c |
2676 |
+@@ -58,6 +58,7 @@ struct sierra_intf_private {
+ spinlock_t susp_lock;
+ unsigned int suspended:1;
+ int in_flight;
++ unsigned int open_ports;
+ };
+
+ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
+@@ -759,6 +760,7 @@ static void sierra_close(struct usb_serial_port *port)
+ struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+ struct sierra_intf_private *intfdata = port->serial->private;
++ struct urb *urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+@@ -767,7 +769,6 @@ static void sierra_close(struct usb_serial_port *port)
+
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected) {
+- serial->interface->needs_remote_wakeup = 0;
+ /* odd error handling due to pm counters */
+ if (!usb_autopm_get_interface(serial->interface))
+ sierra_send_setup(port);
+@@ -778,8 +779,22 @@ static void sierra_close(struct usb_serial_port *port)
+ mutex_unlock(&serial->disc_mutex);
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 0;
++ if (--intfdata->open_ports == 0)
++ serial->interface->needs_remote_wakeup = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
++ for (;;) {
++ urb = usb_get_from_anchor(&portdata->delayed);
++ if (!urb)
++ break;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ usb_autopm_put_interface_async(serial->interface);
++ spin_lock(&portdata->lock);
++ portdata->outstanding_urbs--;
++ spin_unlock(&portdata->lock);
++ }
++
+ sierra_stop_rx_urbs(port);
+ for (i = 0; i < portdata->num_in_urbs; i++) {
+ sierra_release_urb(portdata->in_urbs[i]);
+@@ -816,23 +831,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+ usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
+
+ err = sierra_submit_rx_urbs(port, GFP_KERNEL);
+- if (err) {
+- /* get rid of everything as in close */
+- sierra_close(port);
+- /* restore balance for autopm */
+- if (!serial->disconnected)
+- usb_autopm_put_interface(serial->interface);
+- return err;
+- }
++ if (err)
++ goto err_submit;
++
+ sierra_send_setup(port);
+
+- serial->interface->needs_remote_wakeup = 1;
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 1;
++ if (++intfdata->open_ports == 1)
++ serial->interface->needs_remote_wakeup = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ usb_autopm_put_interface(serial->interface);
+
+ return 0;
++
++err_submit:
++ sierra_stop_rx_urbs(port);
++
++ for (i = 0; i < portdata->num_in_urbs; i++) {
++ sierra_release_urb(portdata->in_urbs[i]);
++ portdata->in_urbs[i] = NULL;
++ }
++
++ return err;
+ }
+
+
+@@ -928,6 +949,7 @@ static int sierra_port_remove(struct usb_serial_port *port)
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
++ usb_set_serial_port_data(port, NULL);
+ kfree(portdata);
+
+ return 0;
+@@ -944,6 +966,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
++ if (!portdata)
++ continue;
+ sierra_stop_rx_urbs(port);
+ usb_kill_anchored_urbs(&portdata->active);
+ }
+@@ -986,6 +1010,9 @@ static int sierra_resume(struct usb_serial *serial)
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
++ if (!portdata)
++ continue;
++
+ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ usb_anchor_urb(urb, &portdata->active);
+ intfdata->in_flight++;
+@@ -993,8 +1020,12 @@ static int sierra_resume(struct usb_serial *serial)
+ if (err < 0) {
+ intfdata->in_flight--;
+ usb_unanchor_urb(urb);
+- usb_scuttle_anchored_urbs(&portdata->delayed);
+- break;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ spin_lock(&portdata->lock);
++ portdata->outstanding_urbs--;
++ spin_unlock(&portdata->lock);
++ continue;
+ }
+ }
+
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index b078440e822f..d91a9883e869 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -228,8 +228,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ usb_pipeendpoint(this_urb->pipe), i);
+
+ err = usb_autopm_get_interface_async(port->serial->interface);
+- if (err < 0)
++ if (err < 0) {
++ clear_bit(i, &portdata->out_busy);
+ break;
++ }
+
+ /* send the data */
+ memcpy(this_urb->transfer_buffer, buf, todo);
+@@ -386,6 +388,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+ portdata = usb_get_serial_port_data(port);
+ intfdata = serial->private;
+
++ if (port->interrupt_in_urb) {
++ err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
++ if (err) {
++ dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
++ __func__, err);
++ }
++ }
++
+ /* Start reading from the IN endpoint */
+ for (i = 0; i < N_IN_URB; i++) {
+ urb = portdata->in_urbs[i];
+@@ -412,12 +422,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+ }
+ EXPORT_SYMBOL(usb_wwan_open);
+
++static void unbusy_queued_urb(struct urb *urb,
++ struct usb_wwan_port_private *portdata)
++{
++ int i;
++
++ for (i = 0; i < N_OUT_URB; i++) {
++ if (urb == portdata->out_urbs[i]) {
++ clear_bit(i, &portdata->out_busy);
++ break;
++ }
++ }
++}
++
+ void usb_wwan_close(struct usb_serial_port *port)
+ {
+ int i;
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata = port->serial->private;
++ struct urb *urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+@@ -426,10 +450,19 @@ void usb_wwan_close(struct usb_serial_port *port)
+ portdata->opened = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
++ for (;;) {
++ urb = usb_get_from_anchor(&portdata->delayed);
++ if (!urb)
++ break;
++ unbusy_queued_urb(urb, portdata);
++ usb_autopm_put_interface_async(serial->interface);
++ }
++
+ for (i = 0; i < N_IN_URB; i++)
+ usb_kill_urb(portdata->in_urbs[i]);
+ for (i = 0; i < N_OUT_URB; i++)
+ usb_kill_urb(portdata->out_urbs[i]);
++ usb_kill_urb(port->interrupt_in_urb);
+
+ /* balancing - important as an error cannot be handled*/
+ usb_autopm_get_interface_no_resume(serial->interface);
+@@ -463,7 +496,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+ u8 *buffer;
+- int err;
+ int i;
+
+ if (!port->bulk_in_size || !port->bulk_out_size)
+@@ -503,13 +535,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+
+ usb_set_serial_port_data(port, portdata);
+
+- if (port->interrupt_in_urb) {
+- err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+- if (err)
+- dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
+- __func__, err);
+- }
+-
+ return 0;
+
+ bail_out_error2:
+@@ -577,44 +602,29 @@ static void stop_read_write_urbs(struct usb_serial *serial)
+ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
+ {
+ struct usb_wwan_intf_private *intfdata = serial->private;
+- int b;
+
++ spin_lock_irq(&intfdata->susp_lock);
+ if (PMSG_IS_AUTO(message)) {
+- spin_lock_irq(&intfdata->susp_lock);
+- b = intfdata->in_flight;
+- spin_unlock_irq(&intfdata->susp_lock);
+-
+- if (b)
++ if (intfdata->in_flight) {
++ spin_unlock_irq(&intfdata->susp_lock);
+ return -EBUSY;
++ }
+ }
+-
+- spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
++
+ stop_read_write_urbs(serial);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_suspend);
+
+-static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
+-{
+- int i;
+-
+- for (i = 0; i < N_OUT_URB; i++) {
+- if (urb == portdata->out_urbs[i]) {
+- clear_bit(i, &portdata->out_busy);
+- break;
+- }
+- }
+-}
+-
+-static void play_delayed(struct usb_serial_port *port)
++static int play_delayed(struct usb_serial_port *port)
+ {
+ struct usb_wwan_intf_private *data;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+- int err;
++ int err = 0;
+
+ portdata = usb_get_serial_port_data(port);
+ data = port->serial->private;
+@@ -631,6 +641,8 @@ static void play_delayed(struct usb_serial_port *port)
+ break;
+ }
+ }
++
++ return err;
+ }
+
+ int usb_wwan_resume(struct usb_serial *serial)
+@@ -640,54 +652,51 @@ int usb_wwan_resume(struct usb_serial *serial)
+ struct usb_wwan_intf_private *intfdata = serial->private;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+- int err = 0;
+-
+- /* get the interrupt URBs resubmitted unconditionally */
+- for (i = 0; i < serial->num_ports; i++) {
+- port = serial->port[i];
+- if (!port->interrupt_in_urb) {
+- dev_dbg(&port->dev, "%s: No interrupt URB for port\n", __func__);
+- continue;
+- }
+- err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+- dev_dbg(&port->dev, "Submitted interrupt URB for port (result %d)\n", err);
+- if (err < 0) {
+- dev_err(&port->dev, "%s: Error %d for interrupt URB\n",
+- __func__, err);
+- goto err_out;
+- }
+- }
++ int err;
++ int err_count = 0;
+
++ spin_lock_irq(&intfdata->susp_lock);
+ for (i = 0; i < serial->num_ports; i++) {
+ /* walk all ports */
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ /* skip closed ports */
+- spin_lock_irq(&intfdata->susp_lock);
+- if (!portdata || !portdata->opened) {
+- spin_unlock_irq(&intfdata->susp_lock);
++ if (!portdata || !portdata->opened)
+ continue;
++
++ if (port->interrupt_in_urb) {
++ err = usb_submit_urb(port->interrupt_in_urb,
++ GFP_ATOMIC);
++ if (err) {
++ dev_err(&port->dev,
++ "%s: submit int urb failed: %d\n",
++ __func__, err);
++ err_count++;
++ }
+ }
+
++ err = play_delayed(port);
++ if (err)
++ err_count++;
++
+ for (j = 0; j < N_IN_URB; j++) {
+ urb = portdata->in_urbs[j];
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ dev_err(&port->dev, "%s: Error %d for bulk URB %d\n",
+ __func__, err, i);
+- spin_unlock_irq(&intfdata->susp_lock);
+- goto err_out;
++ err_count++;
+ }
+ }
+- play_delayed(port);
+- spin_unlock_irq(&intfdata->susp_lock);
+ }
+- spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+-err_out:
+- return err;
++
++ if (err_count)
++ return -EIO;
++
++ return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_resume);
+ #endif
+diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
+index 556d96ce40bf..89a8a89a5eb2 100644
+--- a/drivers/video/fbdev/matrox/matroxfb_base.h
++++ b/drivers/video/fbdev/matrox/matroxfb_base.h
+@@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
+
+ #define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
+
+-#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
++#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
+
+ /* code speedup */
+ #ifdef CONFIG_FB_MATROX_MILLENIUM
+diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
+index 7d44d669d5b6..43a0a52fc527 100644
+--- a/drivers/video/fbdev/offb.c
++++ b/drivers/video/fbdev/offb.c
+@@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
+ #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
+ #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
+
+-#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
+-
+-static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
+-{
+- u32 bpp = info->var.bits_per_pixel;
+-
+- return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
+-}
+-
+ /*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+@@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
+- pal[regno] = offb_cmap_byteswap(info, value);
++ pal[regno] = value;
+ return 0;
+ }
+
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index ff52618cafbe..5d7341520544 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -1078,6 +1078,8 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
+ * w1_process_callbacks() - execute each dev->async_list callback entry
+ * @dev: w1_master device
+ *
++ * The w1 master list_mutex must be held.
++ *
+ * Return: 1 if there were commands to executed 0 otherwise
+ */
+ int w1_process_callbacks(struct w1_master *dev)
+diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
+index 9b084db739c7..728039d2efe1 100644
+--- a/drivers/w1/w1_int.c
++++ b/drivers/w1/w1_int.c
+@@ -219,9 +219,13 @@ void __w1_remove_master_device(struct w1_master *dev)
+
+ if (msleep_interruptible(1000))
+ flush_signals(current);
++ mutex_lock(&dev->list_mutex);
+ w1_process_callbacks(dev);
++ mutex_unlock(&dev->list_mutex);
+ }
++ mutex_lock(&dev->list_mutex);
+ w1_process_callbacks(dev);
++ mutex_unlock(&dev->list_mutex);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.id.mst.id = dev->id;
+diff --git a/fs/aio.c b/fs/aio.c
+index a0ed6c7d2cd2..e609e15f36b9 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1021,6 +1021,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+
+ /* everything turned out well, dispose of the aiocb. */
+ kiocb_free(iocb);
++ put_reqs_available(ctx, 1);
+
+ /*
+ * We have to order our ring_info tail store above and test
+@@ -1062,6 +1063,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ if (head == tail)
+ goto out;
+
++ head %= ctx->nr_events;
++ tail %= ctx->nr_events;
++
+ while (ret < nr) {
+ long avail;
+ struct io_event *ev;
+@@ -1100,8 +1104,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ flush_dcache_page(ctx->ring_pages[0]);
+
+ pr_debug("%li h%u t%u\n", ret, head, tail);
+-
+- put_reqs_available(ctx, ret);
+ out:
+ mutex_unlock(&ctx->ring_lock);
+
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 10db21fa0926..b2e9b2063572 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -984,11 +984,12 @@ again:
+ goto out;
+ }
+ if (ref->count && ref->parent) {
+- if (extent_item_pos && !ref->inode_list) {
++ if (extent_item_pos && !ref->inode_list &&
++ ref->level == 0) {
+ u32 bsz;
+ struct extent_buffer *eb;
+ bsz = btrfs_level_size(fs_info->extent_root,
+- info_level);
++ ref->level);
+ eb = read_tree_block(fs_info->extent_root,
+ ref->parent, bsz, 0);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+@@ -1404,9 +1405,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+ * returns <0 on error
+ */
+ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+- struct btrfs_extent_item *ei, u32 item_size,
+- struct btrfs_extent_inline_ref **out_eiref,
+- int *out_type)
++ struct btrfs_key *key,
++ struct btrfs_extent_item *ei, u32 item_size,
++ struct btrfs_extent_inline_ref **out_eiref,
++ int *out_type)
+ {
+ unsigned long end;
+ u64 flags;
+@@ -1416,19 +1418,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+ /* first call */
+ flags = btrfs_extent_flags(eb, ei);
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+- info = (struct btrfs_tree_block_info *)(ei + 1);
+- *out_eiref =
+- (struct btrfs_extent_inline_ref *)(info + 1);
++ if (key->type == BTRFS_METADATA_ITEM_KEY) {
++ /* a skinny metadata extent */
++ *out_eiref =
++ (struct btrfs_extent_inline_ref *)(ei + 1);
++ } else {
++ WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
++ info = (struct btrfs_tree_block_info *)(ei + 1);
++ *out_eiref =
++ (struct btrfs_extent_inline_ref *)(info + 1);
++ }
+ } else {
+ *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ }
+ *ptr = (unsigned long)*out_eiref;
+- if ((void *)*ptr >= (void *)ei + item_size)
++ if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
+ return -ENOENT;
+ }
+
+ end = (unsigned long)ei + item_size;
+- *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
++ *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
+ *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
+
+ *ptr += btrfs_extent_inline_ref_size(*out_type);
+@@ -1447,8 +1456,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+ * <0 on error.
+ */
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+- struct btrfs_extent_item *ei, u32 item_size,
+- u64 *out_root, u8 *out_level)
++ struct btrfs_key *key, struct btrfs_extent_item *ei,
++ u32 item_size, u64 *out_root, u8 *out_level)
+ {
+ int ret;
+ int type;
+@@ -1459,8 +1468,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+ return 1;
+
+ while (1) {
+- ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
+- &eiref, &type);
++ ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
++ &eiref, &type);
+ if (ret < 0)
+ return ret;
+
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index a910b27a8ad9..519b49e51f57 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+ u64 *flags);
+
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+- struct btrfs_extent_item *ei, u32 item_size,
+- u64 *out_root, u8 *out_level);
++ struct btrfs_key *key, struct btrfs_extent_item *ei,
++ u32 item_size, u64 *out_root, u8 *out_level);
+
+ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+ u64 extent_item_objectid,
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index ba6b88528dc7..9e80f527776a 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1113,6 +1113,12 @@ struct btrfs_qgroup_limit_item {
+ __le64 rsv_excl;
+ } __attribute__ ((__packed__));
+
++/* For raid type sysfs entries */
++struct raid_kobject {
++ int raid_type;
++ struct kobject kobj;
++};
++
+ struct btrfs_space_info {
+ spinlock_t lock;
+
+@@ -1163,7 +1169,7 @@ struct btrfs_space_info {
+ wait_queue_head_t wait;
+
+ struct kobject kobj;
+- struct kobject block_group_kobjs[BTRFS_NR_RAID_TYPES];
++ struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
+ };
+
+ #define BTRFS_BLOCK_RSV_GLOBAL 1
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 983314932af3..a62a5bdc0502 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3633,6 +3633,11 @@ int close_ctree(struct btrfs_root *root)
+
+ btrfs_free_block_groups(fs_info);
+
++ /*
++ * we must make sure there is not any read request to
++ * submit after we stopping all workers.
++ */
++ invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+ btrfs_stop_all_workers(fs_info);
+
+ free_root_pointers(fs_info, 1);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 5590af92094b..5c343a9909cd 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3401,10 +3401,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
+ return ret;
+ }
+
+- for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
++ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+ INIT_LIST_HEAD(&found->block_groups[i]);
+- kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
+- }
+ init_rwsem(&found->groups_sem);
+ spin_lock_init(&found->lock);
+ found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+@@ -8327,8 +8325,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
+ list_del(&space_info->list);
+ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+ struct kobject *kobj;
+- kobj = &space_info->block_group_kobjs[i];
+- if (kobj->parent) {
++ kobj = space_info->block_group_kobjs[i];
++ space_info->block_group_kobjs[i] = NULL;
++ if (kobj) {
+ kobject_del(kobj);
+ kobject_put(kobj);
+ }
+@@ -8352,17 +8351,26 @@ static void __link_block_group(struct btrfs_space_info *space_info,
+ up_write(&space_info->groups_sem);
+
+ if (first) {
+- struct kobject *kobj = &space_info->block_group_kobjs[index];
++ struct raid_kobject *rkobj;
+ int ret;
+
+- kobject_get(&space_info->kobj); /* put in release */
+- ret = kobject_add(kobj, &space_info->kobj, "%s",
+- get_raid_name(index));
++ rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
++ if (!rkobj)
++ goto out_err;
++ rkobj->raid_type = index;
++ kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
++ ret = kobject_add(&rkobj->kobj, &space_info->kobj,
++ "%s", get_raid_name(index));
+ if (ret) {
+- pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+- kobject_put(&space_info->kobj);
++ kobject_put(&rkobj->kobj);
++ goto out_err;
+ }
++ space_info->block_group_kobjs[index] = &rkobj->kobj;
+ }
++
++ return;
++out_err:
++ pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+ }
+
+ static struct btrfs_block_group_cache *
+@@ -8697,6 +8705,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ struct btrfs_root *tree_root = root->fs_info->tree_root;
+ struct btrfs_key key;
+ struct inode *inode;
++ struct kobject *kobj = NULL;
+ int ret;
+ int index;
+ int factor;
+@@ -8796,11 +8805,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ */
+ list_del_init(&block_group->list);
+ if (list_empty(&block_group->space_info->block_groups[index])) {
+- kobject_del(&block_group->space_info->block_group_kobjs[index]);
+- kobject_put(&block_group->space_info->block_group_kobjs[index]);
++ kobj = block_group->space_info->block_group_kobjs[index];
++ block_group->space_info->block_group_kobjs[index] = NULL;
+ clear_avail_alloc_bits(root->fs_info, block_group->flags);
+ }
+ up_write(&block_group->space_info->groups_sem);
++ if (kobj) {
++ kobject_del(kobj);
++ kobject_put(kobj);
++ }
+
+ if (block_group->cached == BTRFS_CACHE_STARTED)
+ wait_block_group_cache_done(block_group);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 3955e475ceec..a2badb027ae6 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1693,6 +1693,7 @@ again:
+ * shortening the size of the delalloc range we're searching
+ */
+ free_extent_state(cached_state);
++ cached_state = NULL;
+ if (!loops) {
+ max_bytes = PAGE_CACHE_SIZE;
+ loops = 1;
+@@ -2353,7 +2354,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+ {
+ int uptodate = (err == 0);
+ struct extent_io_tree *tree;
+- int ret;
++ int ret = 0;
+
+ tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+@@ -2367,6 +2368,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+ if (!uptodate) {
+ ClearPageUptodate(page);
+ SetPageError(page);
++ ret = ret < 0 ? ret : -EIO;
++ mapping_set_error(page->mapping, ret);
+ }
+ return 0;
+ }
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index ae6af072b635..3029925e96d7 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -780,6 +780,18 @@ next_slot:
+ extent_end = search_start;
+ }
+
++ /*
++ * Don't skip extent items representing 0 byte lengths. They
++ * used to be created (bug) if while punching holes we hit
++ * -ENOSPC condition. So if we find one here, just ensure we
++ * delete it, otherwise we would insert a new file extent item
++ * with the same key (offset) as that 0 bytes length file
++ * extent item in the call to setup_items_for_insert() later
++ * in this function.
++ */
++ if (extent_end == key.offset && extent_end >= search_start)
++ goto delete_extent_item;
++
+ if (extent_end <= search_start) {
+ path->slots[0]++;
+ goto next_slot;
+@@ -893,6 +905,7 @@ next_slot:
+ * | ------ extent ------ |
+ */
+ if (start <= key.offset && end >= extent_end) {
++delete_extent_item:
+ if (del_nr == 0) {
+ del_slot = path->slots[0];
+ del_nr = 1;
+@@ -2187,13 +2200,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
+ ((offset + len - 1) >> PAGE_CACHE_SHIFT));
+ bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
+- u64 ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
++ u64 ino_size;
+
+ ret = btrfs_wait_ordered_range(inode, offset, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inode->i_mutex);
++ ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
+ /*
+ * We needn't truncate any page which is beyond the end of the file
+ * because we are sure there is no data there.
+@@ -2347,7 +2361,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ }
+
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
+- if (cur_offset < ino_size) {
++ /*
++ * Don't insert file hole extent item if it's for a range beyond eof
++ * (because it's useless) or if it represents a 0 bytes range (when
++ * cur_offset == drop_end).
++ */
++ if (cur_offset < ino_size && cur_offset < drop_end) {
+ ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+ if (ret) {
+ err = ret;
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 73f3de7a083c..a6bd654dcd47 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -831,7 +831,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
+
+ if (!matched) {
+ __btrfs_remove_free_space_cache(ctl);
+- btrfs_err(fs_info, "block group %llu has wrong amount of free space",
++ btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
+ block_group->key.objectid);
+ ret = -1;
+ }
+@@ -843,7 +843,7 @@ out:
+ spin_unlock(&block_group->lock);
+ ret = 0;
+
+- btrfs_err(fs_info, "failed to load free space cache for block group %llu",
++ btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+ block_group->key.objectid);
+ }
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 0be77993378e..12afb0dd3734 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -588,8 +588,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
+
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ do {
+- ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+- &ref_root, &ref_level);
++ ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
++ item_size, &ref_root,
++ &ref_level);
+ printk_in_rcu(KERN_WARNING
+ "BTRFS: %s at logical %llu on dev %s, "
+ "sector %llu: metadata %s (level %d) in tree "
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 484aacac2c89..6c9c084aa06a 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -975,7 +975,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_dir_item *di;
+ struct btrfs_key di_key;
+ char *buf = NULL;
+- const int buf_len = PATH_MAX;
++ int buf_len;
+ u32 name_len;
+ u32 data_len;
+ u32 cur;
+@@ -985,6 +985,11 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
+ int num;
+ u8 type;
+
++ if (found_key->type == BTRFS_XATTR_ITEM_KEY)
++ buf_len = BTRFS_MAX_XATTR_SIZE(root);
++ else
++ buf_len = PATH_MAX;
++
+ buf = kmalloc(buf_len, GFP_NOFS);
+ if (!buf) {
+ ret = -ENOMEM;
+@@ -1006,12 +1011,23 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
+ type = btrfs_dir_type(eb, di);
+ btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+
+- /*
+- * Path too long
+- */
+- if (name_len + data_len > buf_len) {
+- ret = -ENAMETOOLONG;
+- goto out;
++ if (type == BTRFS_FT_XATTR) {
++ if (name_len > XATTR_NAME_MAX) {
++ ret = -ENAMETOOLONG;
++ goto out;
++ }
++ if (name_len + data_len > buf_len) {
++ ret = -E2BIG;
++ goto out;
++ }
++ } else {
++ /*
++ * Path too long
++ */
++ if (name_len + data_len > buf_len) {
++ ret = -ENAMETOOLONG;
++ goto out;
++ }
+ }
+
+ read_extent_buffer(eb, buf, (unsigned long)(di + 1),
+@@ -1628,6 +1644,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
+ goto out;
+ }
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
++ if (key.type == BTRFS_ROOT_ITEM_KEY) {
++ ret = -ENOENT;
++ goto out;
++ }
+ *found_inode = key.objectid;
+ *found_type = btrfs_dir_type(path->nodes[0], di);
+
+@@ -3054,33 +3074,18 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
+ if (ret < 0)
+ goto out;
+
+- if (parent_ino == sctx->cur_ino) {
+- /* child only renamed, not moved */
+- ASSERT(parent_gen == sctx->cur_inode_gen);
+- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+- from_path);
+- if (ret < 0)
+- goto out;
+- ret = fs_path_add_path(from_path, name);
+- if (ret < 0)
+- goto out;
+- } else {
+- /* child moved and maybe renamed too */
+- sctx->send_progress = pm->ino;
+- ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
+- if (ret < 0)
+- goto out;
+- }
++ ret = get_cur_path(sctx, parent_ino, parent_gen,
++ from_path);
++ if (ret < 0)
++ goto out;
++ ret = fs_path_add_path(from_path, name);
++ if (ret < 0)
++ goto out;
+
+- fs_path_free(name);
++ fs_path_reset(name);
++ to_path = name;
+ name = NULL;
+
+- to_path = fs_path_alloc();
+- if (!to_path) {
+- ret = -ENOMEM;
+- goto out;
+- }
+-
+ sctx->send_progress = sctx->cur_ino + 1;
+ ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
+ if (ret < 0)
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index c5eb2143dc66..4825cd2b10c2 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -254,6 +254,7 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
+ BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
+
+ #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
++#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
+
+ static ssize_t raid_bytes_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+@@ -266,7 +267,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
+ {
+ struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
+ struct btrfs_block_group_cache *block_group;
+- int index = kobj - sinfo->block_group_kobjs;
++ int index = to_raid_kobj(kobj)->raid_type;
+ u64 val = 0;
+
+ down_read(&sinfo->groups_sem);
+@@ -288,7 +289,7 @@ static struct attribute *raid_attributes[] = {
+
+ static void release_raid_kobj(struct kobject *kobj)
+ {
+- kobject_put(kobj->parent);
++ kfree(to_raid_kobj(kobj));
+ }
+
+ struct kobj_type btrfs_raid_ktype = {
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 49d7fab73360..57b699410fb8 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1452,6 +1452,22 @@ out:
+ return ret;
+ }
+
++/*
++ * Function to update ctime/mtime for a given device path.
++ * Mainly used for ctime/mtime based probe like libblkid.
++ */
++static void update_dev_time(char *path_name)
++{
++ struct file *filp;
++
++ filp = filp_open(path_name, O_RDWR, 0);
++ if (!filp)
++ return;
++ file_update_time(filp);
++ filp_close(filp, NULL);
++ return;
++}
++
+ static int btrfs_rm_dev_item(struct btrfs_root *root,
+ struct btrfs_device *device)
+ {
+@@ -1674,11 +1690,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
+ struct btrfs_fs_devices *fs_devices;
+ fs_devices = root->fs_info->fs_devices;
+ while (fs_devices) {
+- if (fs_devices->seed == cur_devices)
++ if (fs_devices->seed == cur_devices) {
++ fs_devices->seed = cur_devices->seed;
+ break;
++ }
+ fs_devices = fs_devices->seed;
+ }
+- fs_devices->seed = cur_devices->seed;
+ cur_devices->seed = NULL;
+ lock_chunks(root);
+ __btrfs_close_devices(cur_devices);
+@@ -1704,10 +1721,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
+
+ ret = 0;
+
+- /* Notify udev that device has changed */
+- if (bdev)
++ if (bdev) {
++ /* Notify udev that device has changed */
+ btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
+
++ /* Update ctime/mtime for device path for libblkid */
++ update_dev_time(device_path);
++ }
++
+ error_brelse:
+ brelse(bh);
+ if (bdev)
+@@ -1883,7 +1904,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
+ fs_devices->seeding = 0;
+ fs_devices->num_devices = 0;
+ fs_devices->open_devices = 0;
+- fs_devices->total_devices = 0;
+ fs_devices->seed = seed_devices;
+
+ generate_random_uuid(fs_devices->fsid);
+@@ -2146,6 +2166,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+ ret = btrfs_commit_transaction(trans, root);
+ }
+
++ /* Update ctime/mtime for libblkid */
++ update_dev_time(device_path);
+ return ret;
+
+ error_trans:
+@@ -6058,10 +6080,14 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *device;
+
+- mutex_lock(&fs_devices->device_list_mutex);
+- list_for_each_entry(device, &fs_devices->devices, dev_list)
+- device->dev_root = fs_info->dev_root;
+- mutex_unlock(&fs_devices->device_list_mutex);
++ while (fs_devices) {
++ mutex_lock(&fs_devices->device_list_mutex);
++ list_for_each_entry(device, &fs_devices->devices, dev_list)
++ device->dev_root = fs_info->dev_root;
++ mutex_unlock(&fs_devices->device_list_mutex);
++
++ fs_devices = fs_devices->seed;
++ }
+ }
+
+ static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 3802f8c94acc..1fb6ad2ac92d 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1089,6 +1089,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ int rc = 0;
+ unsigned int num_iovecs = 2;
+ __u32 file_attributes = 0;
++ char *dhc_buf = NULL, *lc_buf = NULL;
+
+ cifs_dbg(FYI, "create/open\n");
+
+@@ -1155,6 +1156,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ kfree(copy_path);
+ return rc;
+ }
++ lc_buf = iov[num_iovecs-1].iov_base;
+ }
+
+ if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
+@@ -1169,9 +1171,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ if (rc) {
+ cifs_small_buf_release(req);
+ kfree(copy_path);
+- kfree(iov[num_iovecs-1].iov_base);
++ kfree(lc_buf);
+ return rc;
+ }
++ dhc_buf = iov[num_iovecs-1].iov_base;
+ }
+
+ rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+@@ -1203,6 +1206,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ *oplock = rsp->OplockLevel;
+ creat_exit:
+ kfree(copy_path);
++ kfree(lc_buf);
++ kfree(dhc_buf);
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+ }
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index af903128891c..ead00467282d 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
+ void eventpoll_release_file(struct file *file)
+ {
+ struct eventpoll *ep;
+- struct epitem *epi;
++ struct epitem *epi, *next;
+
+ /*
+ * We don't want to get "file->f_lock" because it is not
+@@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
+ * Besides, ep_remove() acquires the lock, so we can't hold it here.
+ */
+ mutex_lock(&epmutex);
+- list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
++ list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
+ ep = epi->ep;
+ mutex_lock_nested(&ep->mtx, 0);
+ ep_remove(ep, epi);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 66946aa62127..f542e486a4a4 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2771,7 +2771,8 @@ extern void ext4_io_submit(struct ext4_io_submit *io);
+ extern int ext4_bio_write_page(struct ext4_io_submit *io,
+ struct page *page,
+ int len,
+- struct writeback_control *wbc);
++ struct writeback_control *wbc,
++ bool keep_towrite);
+
+ /* mmp.c */
+ extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 01b0c208f625..f312c47b7d18 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4744,6 +4744,13 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
++ /* Call ext4_force_commit to flush all data in case of data=journal. */
++ if (ext4_should_journal_data(inode)) {
++ ret = ext4_force_commit(inode->i_sb);
++ if (ret)
++ return ret;
++ }
++
+ /*
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index d7b7462a0e13..5bc199445dc2 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1846,6 +1846,7 @@ static int ext4_writepage(struct page *page,
+ struct buffer_head *page_bufs = NULL;
+ struct inode *inode = page->mapping->host;
+ struct ext4_io_submit io_submit;
++ bool keep_towrite = false;
+
+ trace_ext4_writepage(page);
+ size = i_size_read(inode);
+@@ -1876,6 +1877,7 @@ static int ext4_writepage(struct page *page,
+ unlock_page(page);
+ return 0;
+ }
++ keep_towrite = true;
+ }
+
+ if (PageChecked(page) && ext4_should_journal_data(inode))
+@@ -1892,7 +1894,7 @@ static int ext4_writepage(struct page *page,
+ unlock_page(page);
+ return -ENOMEM;
+ }
+- ret = ext4_bio_write_page(&io_submit, page, len, wbc);
++ ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
+ ext4_io_submit(&io_submit);
+ /* Drop io_end reference we got from init */
+ ext4_put_io_end_defer(io_submit.io_end);
+@@ -1911,7 +1913,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+ else
+ len = PAGE_CACHE_SIZE;
+ clear_page_dirty_for_io(page);
+- err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
++ err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
+ if (!err)
+ mpd->wbc->nr_to_write--;
+ mpd->first_page++;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index c8238a26818c..fe4e668d3023 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3145,7 +3145,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ }
+ BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
+ start > ac->ac_o_ex.fe_logical);
+- BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
++ BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+
+ /* now prepare goal request */
+
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index c18d95b50540..b6a3804a9855 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -401,7 +401,8 @@ submit_and_retry:
+ int ext4_bio_write_page(struct ext4_io_submit *io,
+ struct page *page,
+ int len,
+- struct writeback_control *wbc)
++ struct writeback_control *wbc,
++ bool keep_towrite)
+ {
+ struct inode *inode = page->mapping->host;
+ unsigned block_start, blocksize;
+@@ -414,10 +415,24 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ BUG_ON(!PageLocked(page));
+ BUG_ON(PageWriteback(page));
+
+- set_page_writeback(page);
++ if (keep_towrite)
++ set_page_writeback_keepwrite(page);
++ else
++ set_page_writeback(page);
+ ClearPageError(page);
+
+ /*
++ * Comments copied from block_write_full_page_endio:
++ *
++ * The page straddles i_size. It must be zeroed out on each and every
++ * writepage invocation because it may be mmapped. "A file is mapped
++ * in multiples of the page size. For a file that is not a multiple of
++ * the page size, the remaining memory is zeroed when mapped, and
++ * writes to that region are not written out to the file."
++ */
++ if (len < PAGE_CACHE_SIZE)
++ zero_user_segment(page, len, PAGE_CACHE_SIZE);
++ /*
+ * In the first loop we prepare and mark buffers to submit. We have to
+ * mark all buffers in the page before submitting so that
+ * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
+@@ -428,19 +443,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ do {
+ block_start = bh_offset(bh);
+ if (block_start >= len) {
+- /*
+- * Comments copied from block_write_full_page_endio:
+- *
+- * The page straddles i_size. It must be zeroed out on
+- * each and every writepage invocation because it may
+- * be mmapped. "A file is mapped in multiples of the
+- * page size. For a file that is not a multiple of
+- * the page size, the remaining memory is zeroed when
+- * mapped, and writes to that region are not written
+- * out to the file."
+- */
+- zero_user_segment(page, block_start,
+- block_start + blocksize);
+ clear_buffer_dirty(bh);
+ set_buffer_uptodate(bh);
+ continue;
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 45abd60e2bff..bc077f3c8868 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -835,6 +835,8 @@ out:
+ unlock_page(page);
+ if (need_balance_fs)
+ f2fs_balance_fs(sbi);
++ if (wbc->for_reclaim)
++ f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ return 0;
+
+ redirty_out:
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 7a8f2cd66c8b..0e2569031a6f 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -37,6 +37,7 @@
+
+ #include <linux/list.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/dynamic_debug.h>
+
+ #include <acpi/acpi.h>
+ #include <acpi/acpi_bus.h>
+@@ -589,6 +590,14 @@ static inline __printf(3, 4) void
+ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+ #endif /* !CONFIG_ACPI */
+
++#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
++__printf(3, 4)
++void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
++#else
++#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
++ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__);
++#endif
++
+ /*
+ * acpi_handle_<level>: Print message with ACPI prefix and object path
+ *
+@@ -610,11 +619,19 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+ #define acpi_handle_info(handle, fmt, ...) \
+ acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__)
+
+-/* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */
+-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
++#if defined(DEBUG)
+ #define acpi_handle_debug(handle, fmt, ...) \
+ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
+ #else
++#if defined(CONFIG_DYNAMIC_DEBUG)
++#define acpi_handle_debug(handle, fmt, ...) \
++do { \
++ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
++ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
++ __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \
++ ##__VA_ARGS__); \
++} while (0)
++#else
+ #define acpi_handle_debug(handle, fmt, ...) \
+ ({ \
+ if (0) \
+@@ -622,5 +639,6 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+ 0; \
+ })
+ #endif
++#endif
+
+ #endif /*_LINUX_ACPI_H*/
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h |
4018 |
+index b65166de1d9d..d0bad1a8b0bd 100644 |
4019 |
+--- a/include/linux/hugetlb.h |
4020 |
++++ b/include/linux/hugetlb.h |
4021 |
+@@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page) |
4022 |
+ |
4023 |
+ extern void dissolve_free_huge_pages(unsigned long start_pfn, |
4024 |
+ unsigned long end_pfn); |
4025 |
+-int pmd_huge_support(void); |
4026 |
+-/* |
4027 |
+- * Currently hugepage migration is enabled only for pmd-based hugepage. |
4028 |
+- * This function will be updated when hugepage migration is more widely |
4029 |
+- * supported. |
4030 |
+- */ |
4031 |
+ static inline int hugepage_migration_support(struct hstate *h) |
4032 |
+ { |
4033 |
+- return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT); |
4034 |
++#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
4035 |
++ return huge_page_shift(h) == PMD_SHIFT; |
4036 |
++#else |
4037 |
++ return 0; |
4038 |
++#endif |
4039 |
+ } |
4040 |
+ |
4041 |
+ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
4042 |
+@@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page) |
4043 |
+ return page->index; |
4044 |
+ } |
4045 |
+ #define dissolve_free_huge_pages(s, e) do {} while (0) |
4046 |
+-#define pmd_huge_support() 0 |
4047 |
+ #define hugepage_migration_support(h) 0 |
4048 |
+ |
4049 |
+ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
4050 |
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h |
4051 |
+index 26e2661d3935..472c021a2d4f 100644 |
4052 |
+--- a/include/linux/irqdesc.h |
4053 |
++++ b/include/linux/irqdesc.h |
4054 |
+@@ -27,6 +27,8 @@ struct irq_desc; |
4055 |
+ * @irq_count: stats field to detect stalled irqs |
4056 |
+ * @last_unhandled: aging timer for unhandled count |
4057 |
+ * @irqs_unhandled: stats field for spurious unhandled interrupts |
4058 |
++ * @threads_handled: stats field for deferred spurious detection of threaded handlers |
4059 |
++ * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers |
4060 |
+ * @lock: locking for SMP |
4061 |
+ * @affinity_hint: hint to user space for preferred irq affinity |
4062 |
+ * @affinity_notify: context for notification of affinity changes |
4063 |
+@@ -52,6 +54,8 @@ struct irq_desc { |
4064 |
+ unsigned int irq_count; /* For detecting broken IRQs */ |
4065 |
+ unsigned long last_unhandled; /* Aging timer for unhandled count */ |
4066 |
+ unsigned int irqs_unhandled; |
4067 |
++ atomic_t threads_handled; |
4068 |
++ int threads_handled_last; |
4069 |
+ raw_spinlock_t lock; |
4070 |
+ struct cpumask *percpu_enabled; |
4071 |
+ #ifdef CONFIG_SMP |
4072 |
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h |
4073 |
+index 3c1b968da0ca..f230a978e6ba 100644 |
4074 |
+--- a/include/linux/mempolicy.h |
4075 |
++++ b/include/linux/mempolicy.h |
4076 |
+@@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma) |
4077 |
+ { |
4078 |
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) |
4079 |
+ return 0; |
4080 |
++ |
4081 |
++#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
4082 |
++ if (vma->vm_flags & VM_HUGETLB) |
4083 |
++ return 0; |
4084 |
++#endif |
4085 |
++ |
4086 |
+ /* |
4087 |
+ * Migration allocates pages in the highest zone. If we cannot |
4088 |
+ * do so then migration (at least from node to node) is not |
4089 |
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h |
4090 |
+index fac5509c18f0..835aa3d36719 100644 |
4091 |
+--- a/include/linux/mmzone.h |
4092 |
++++ b/include/linux/mmzone.h |
4093 |
+@@ -75,9 +75,13 @@ enum { |
4094 |
+ |
4095 |
+ extern int page_group_by_mobility_disabled; |
4096 |
+ |
4097 |
++#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) |
4098 |
++#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) |
4099 |
++ |
4100 |
+ static inline int get_pageblock_migratetype(struct page *page) |
4101 |
+ { |
4102 |
+- return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); |
4103 |
++ BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); |
4104 |
++ return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK); |
4105 |
+ } |
4106 |
+ |
4107 |
+ struct free_area { |
4108 |
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h |
4109 |
+index d1fe1a761047..ca71a1d347a0 100644 |
4110 |
+--- a/include/linux/page-flags.h |
4111 |
++++ b/include/linux/page-flags.h |
4112 |
+@@ -317,13 +317,23 @@ CLEARPAGEFLAG(Uptodate, uptodate) |
4113 |
+ extern void cancel_dirty_page(struct page *page, unsigned int account_size); |
4114 |
+ |
4115 |
+ int test_clear_page_writeback(struct page *page); |
4116 |
+-int test_set_page_writeback(struct page *page); |
4117 |
++int __test_set_page_writeback(struct page *page, bool keep_write); |
4118 |
++ |
4119 |
++#define test_set_page_writeback(page) \ |
4120 |
++ __test_set_page_writeback(page, false) |
4121 |
++#define test_set_page_writeback_keepwrite(page) \ |
4122 |
++ __test_set_page_writeback(page, true) |
4123 |
+ |
4124 |
+ static inline void set_page_writeback(struct page *page) |
4125 |
+ { |
4126 |
+ test_set_page_writeback(page); |
4127 |
+ } |
4128 |
+ |
4129 |
++static inline void set_page_writeback_keepwrite(struct page *page) |
4130 |
++{ |
4131 |
++ test_set_page_writeback_keepwrite(page); |
4132 |
++} |
4133 |
++ |
4134 |
+ #ifdef CONFIG_PAGEFLAGS_EXTENDED |
4135 |
+ /* |
4136 |
+ * System with lots of page flags available. This allows separate |
4137 |
+diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h |
4138 |
+index 2ee8cd2466b5..c08730c10c7a 100644 |
4139 |
+--- a/include/linux/pageblock-flags.h |
4140 |
++++ b/include/linux/pageblock-flags.h |
4141 |
+@@ -30,9 +30,12 @@ enum pageblock_bits { |
4142 |
+ PB_migrate, |
4143 |
+ PB_migrate_end = PB_migrate + 3 - 1, |
4144 |
+ /* 3 bits required for migrate types */ |
4145 |
+-#ifdef CONFIG_COMPACTION |
4146 |
+ PB_migrate_skip,/* If set the block is skipped by compaction */ |
4147 |
+-#endif /* CONFIG_COMPACTION */ |
4148 |
++ |
4149 |
++ /* |
4150 |
++ * Assume the bits will always align on a word. If this assumption |
4151 |
++ * changes then get/set pageblock needs updating. |
4152 |
++ */ |
4153 |
+ NR_PAGEBLOCK_BITS |
4154 |
+ }; |
4155 |
+ |
4156 |
+@@ -62,11 +65,33 @@ extern int pageblock_order; |
4157 |
+ /* Forward declaration */ |
4158 |
+ struct page; |
4159 |
+ |
4160 |
++unsigned long get_pageblock_flags_mask(struct page *page, |
4161 |
++ unsigned long end_bitidx, |
4162 |
++ unsigned long mask); |
4163 |
++void set_pageblock_flags_mask(struct page *page, |
4164 |
++ unsigned long flags, |
4165 |
++ unsigned long end_bitidx, |
4166 |
++ unsigned long mask); |
4167 |
++ |
4168 |
+ /* Declarations for getting and setting flags. See mm/page_alloc.c */ |
4169 |
+-unsigned long get_pageblock_flags_group(struct page *page, |
4170 |
+- int start_bitidx, int end_bitidx); |
4171 |
+-void set_pageblock_flags_group(struct page *page, unsigned long flags, |
4172 |
+- int start_bitidx, int end_bitidx); |
4173 |
++static inline unsigned long get_pageblock_flags_group(struct page *page, |
4174 |
++ int start_bitidx, int end_bitidx) |
4175 |
++{ |
4176 |
++ unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; |
4177 |
++ unsigned long mask = (1 << nr_flag_bits) - 1; |
4178 |
++ |
4179 |
++ return get_pageblock_flags_mask(page, end_bitidx, mask); |
4180 |
++} |
4181 |
++ |
4182 |
++static inline void set_pageblock_flags_group(struct page *page, |
4183 |
++ unsigned long flags, |
4184 |
++ int start_bitidx, int end_bitidx) |
4185 |
++{ |
4186 |
++ unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; |
4187 |
++ unsigned long mask = (1 << nr_flag_bits) - 1; |
4188 |
++ |
4189 |
++ set_pageblock_flags_mask(page, flags, end_bitidx, mask); |
4190 |
++} |
4191 |
+ |
4192 |
+ #ifdef CONFIG_COMPACTION |
4193 |
+ #define get_pageblock_skip(page) \ |
4194 |
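
The wrappers above keep the old (start_bitidx, end_bitidx) interface but reduce it to a single mask, so the real work in get/set_pageblock_flags_mask() becomes one shift and one AND. A stand-alone sketch of the arithmetic, under the same MSB-first bit placement the new getter uses (bit positions and values here are illustrative):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long get_flags_mask(unsigned long word,
				    unsigned long end_bitidx,
				    unsigned long mask)
{
	/* mirrors get_pageblock_flags_mask(): one shift, one AND */
	return (word >> (BITS_PER_LONG - end_bitidx - 1)) & mask;
}

int main(void)
{
	int start_bitidx = 0, end_bitidx = 2;	/* 3 migratetype bits */
	unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
	unsigned long mask = (1UL << nr_flag_bits) - 1;
	unsigned long word = 5UL << (BITS_PER_LONG - end_bitidx - 1);

	/* prints: mask = 0x7, flags = 5 */
	printf("mask = %#lx, flags = %lu\n", mask,
	       get_flags_mask(word, end_bitidx, mask));
	return 0;
}
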
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h |
4195 |
+index 07d0df6bf768..077904c8b70d 100644 |
4196 |
+--- a/include/linux/ptrace.h |
4197 |
++++ b/include/linux/ptrace.h |
4198 |
+@@ -5,6 +5,7 @@ |
4199 |
+ #include <linux/sched.h> /* For struct task_struct. */ |
4200 |
+ #include <linux/err.h> /* for IS_ERR_VALUE */ |
4201 |
+ #include <linux/bug.h> /* For BUG_ON. */ |
4202 |
++#include <linux/pid_namespace.h> /* For task_active_pid_ns. */ |
4203 |
+ #include <uapi/linux/ptrace.h> |
4204 |
+ |
4205 |
+ /* |
4206 |
+@@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message) |
4207 |
+ } |
4208 |
+ |
4209 |
+ /** |
4210 |
++ * ptrace_event_pid - possibly stop for a ptrace event notification |
4211 |
++ * @event: %PTRACE_EVENT_* value to report |
4212 |
++ * @pid: process identifier for %PTRACE_GETEVENTMSG to return |
4213 |
++ * |
4214 |
++ * Check whether @event is enabled and, if so, report @event and @pid |
4215 |
++ * to the ptrace parent. @pid is reported as the pid_t seen from the |
4216 |
++ * ptrace parent's pid namespace. |
4217 |
++ * |
4218 |
++ * Called without locks. |
4219 |
++ */ |
4220 |
++static inline void ptrace_event_pid(int event, struct pid *pid) |
4221 |
++{ |
4222 |
++ /* |
4223 |
++ * FIXME: There's a potential race if a ptracer in a different pid |
4224 |
++ * namespace than the parent attaches between computing the message below and |
4225 |
++ * when we acquire tasklist_lock in ptrace_stop(). If this happens, |
4226 |
++ * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG. |
4227 |
++ */ |
4228 |
++ unsigned long message = 0; |
4229 |
++ struct pid_namespace *ns; |
4230 |
++ |
4231 |
++ rcu_read_lock(); |
4232 |
++ ns = task_active_pid_ns(rcu_dereference(current->parent)); |
4233 |
++ if (ns) |
4234 |
++ message = pid_nr_ns(pid, ns); |
4235 |
++ rcu_read_unlock(); |
4236 |
++ |
4237 |
++ ptrace_event(event, message); |
4238 |
++} |
4239 |
++ |
4240 |
++/** |
4241 |
+ * ptrace_init_task - initialize ptrace state for a new child |
4242 |
+ * @child: new child task |
4243 |
+ * @ptrace: true if child should be ptrace'd by parent's tracer |
4244 |
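
ptrace_event_pid() is what makes PTRACE_GETEVENTMSG return a pid that is meaningful in the tracer's namespace. For context, a minimal user-space tracer that consumes that message via the long-standing ptrace options API; error handling is trimmed and the flow is a sketch, not a robust tracer:

#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		fork();			/* triggers PTRACE_EVENT_FORK */
		_exit(0);
	}

	waitpid(child, NULL, 0);	/* child stopped itself */
	ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACEFORK);
	ptrace(PTRACE_CONT, child, NULL, NULL);

	int status;
	waitpid(child, &status, 0);
	if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
		unsigned long msg;

		ptrace(PTRACE_GETEVENTMSG, child, NULL, &msg);
		/* with this fix, msg is the pid as seen by the tracer */
		printf("forked child pid (tracer's ns): %lu\n", msg);
	}
	ptrace(PTRACE_CONT, child, NULL, NULL);
	return 0;
}
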
+diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h |
4245 |
+index d4b571c2f9fd..b0b381b0cb07 100644 |
4246 |
+--- a/include/net/bluetooth/mgmt.h |
4247 |
++++ b/include/net/bluetooth/mgmt.h |
4248 |
+@@ -181,6 +181,9 @@ struct mgmt_cp_load_link_keys { |
4249 |
+ } __packed; |
4250 |
+ #define MGMT_LOAD_LINK_KEYS_SIZE 3 |
4251 |
+ |
4252 |
++#define MGMT_LTK_UNAUTHENTICATED 0x00 |
4253 |
++#define MGMT_LTK_AUTHENTICATED 0x01 |
4254 |
++ |
4255 |
+ struct mgmt_ltk_info { |
4256 |
+ struct mgmt_addr_info addr; |
4257 |
+ __u8 type; |
4258 |
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h |
4259 |
+index dd7c998221b3..a100c6e266c7 100644 |
4260 |
+--- a/include/scsi/scsi_cmnd.h |
4261 |
++++ b/include/scsi/scsi_cmnd.h |
4262 |
+@@ -7,6 +7,7 @@ |
4263 |
+ #include <linux/types.h> |
4264 |
+ #include <linux/timer.h> |
4265 |
+ #include <linux/scatterlist.h> |
4266 |
++#include <scsi/scsi_device.h> |
4267 |
+ |
4268 |
+ struct Scsi_Host; |
4269 |
+ struct scsi_device; |
4270 |
+@@ -306,4 +307,20 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status) |
4271 |
+ cmd->result = (cmd->result & 0x00ffffff) | (status << 24); |
4272 |
+ } |
4273 |
+ |
4274 |
++static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) |
4275 |
++{ |
4276 |
++ unsigned int xfer_len = blk_rq_bytes(scmd->request); |
4277 |
++ unsigned int prot_op = scsi_get_prot_op(scmd); |
4278 |
++ unsigned int sector_size = scmd->device->sector_size; |
4279 |
++ |
4280 |
++ switch (prot_op) { |
4281 |
++ case SCSI_PROT_NORMAL: |
4282 |
++ case SCSI_PROT_WRITE_STRIP: |
4283 |
++ case SCSI_PROT_READ_INSERT: |
4284 |
++ return xfer_len; |
4285 |
++ } |
4286 |
++ |
4287 |
++ return xfer_len + (xfer_len >> ilog2(sector_size)) * 8; |
4288 |
++} |
4289 |
++ |
4290 |
+ #endif /* _SCSI_SCSI_CMND_H */ |
4291 |
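
The new scsi_transfer_length() helper sizes a transfer for DIF/DIX: when protection information travels with the data, each logical block carries an extra 8-byte tuple, so the wire length grows by 8 bytes per sector. The same arithmetic stand-alone (values made up; a local ilog2 stands in for the kernel's):

#include <stdio.h>

static unsigned int ilog2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int xfer_len = 64 * 1024;	/* bytes from the request */
	unsigned int sector_size = 512;

	/* 128 sectors * 8 bytes of PI each = 1024 extra bytes: 66560 */
	printf("wire length = %u\n",
	       xfer_len + (xfer_len >> ilog2(sector_size)) * 8);
	return 0;
}
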
+diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h |
4292 |
+index 33b487b5da92..daef9daa500c 100644 |
4293 |
+--- a/include/target/iscsi/iscsi_transport.h |
4294 |
++++ b/include/target/iscsi/iscsi_transport.h |
4295 |
+@@ -70,7 +70,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *, |
4296 |
+ extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *, |
4297 |
+ struct iscsi_tm_rsp *); |
4298 |
+ extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *, |
4299 |
+- struct iscsi_text_rsp *); |
4300 |
++ struct iscsi_text_rsp *, |
4301 |
++ enum iscsit_transport_type); |
4302 |
+ extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *, |
4303 |
+ struct iscsi_reject *); |
4304 |
+ extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, |
4305 |
+diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h |
4306 |
+index 3a1c1eea1fff..9adc1bca1178 100644 |
4307 |
+--- a/include/target/target_core_backend.h |
4308 |
++++ b/include/target/target_core_backend.h |
4309 |
+@@ -59,6 +59,7 @@ int transport_subsystem_register(struct se_subsystem_api *); |
4310 |
+ void transport_subsystem_release(struct se_subsystem_api *); |
4311 |
+ |
4312 |
+ void target_complete_cmd(struct se_cmd *, u8); |
4313 |
++void target_complete_cmd_with_length(struct se_cmd *, u8, int); |
4314 |
+ |
4315 |
+ sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); |
4316 |
+ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); |
4317 |
+diff --git a/kernel/fork.c b/kernel/fork.c |
4318 |
+index 54a8d26f612f..142904349fb5 100644 |
4319 |
+--- a/kernel/fork.c |
4320 |
++++ b/kernel/fork.c |
4321 |
+@@ -1606,10 +1606,12 @@ long do_fork(unsigned long clone_flags, |
4322 |
+ */ |
4323 |
+ if (!IS_ERR(p)) { |
4324 |
+ struct completion vfork; |
4325 |
++ struct pid *pid; |
4326 |
+ |
4327 |
+ trace_sched_process_fork(current, p); |
4328 |
+ |
4329 |
+- nr = task_pid_vnr(p); |
4330 |
++ pid = get_task_pid(p, PIDTYPE_PID); |
4331 |
++ nr = pid_vnr(pid); |
4332 |
+ |
4333 |
+ if (clone_flags & CLONE_PARENT_SETTID) |
4334 |
+ put_user(nr, parent_tidptr); |
4335 |
+@@ -1624,12 +1626,14 @@ long do_fork(unsigned long clone_flags, |
4336 |
+ |
4337 |
+ /* forking complete and child started to run, tell ptracer */ |
4338 |
+ if (unlikely(trace)) |
4339 |
+- ptrace_event(trace, nr); |
4340 |
++ ptrace_event_pid(trace, pid); |
4341 |
+ |
4342 |
+ if (clone_flags & CLONE_VFORK) { |
4343 |
+ if (!wait_for_vfork_done(p, &vfork)) |
4344 |
+- ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); |
4345 |
++ ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); |
4346 |
+ } |
4347 |
++ |
4348 |
++ put_pid(pid); |
4349 |
+ } else { |
4350 |
+ nr = PTR_ERR(p); |
4351 |
+ } |
4352 |
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c |
4353 |
+index d34131ca372b..3dc6a61bf06a 100644 |
4354 |
+--- a/kernel/irq/manage.c |
4355 |
++++ b/kernel/irq/manage.c |
4356 |
+@@ -886,8 +886,8 @@ static int irq_thread(void *data) |
4357 |
+ irq_thread_check_affinity(desc, action); |
4358 |
+ |
4359 |
+ action_ret = handler_fn(desc, action); |
4360 |
+- if (!noirqdebug) |
4361 |
+- note_interrupt(action->irq, desc, action_ret); |
4362 |
++ if (action_ret == IRQ_HANDLED) |
4363 |
++ atomic_inc(&desc->threads_handled); |
4364 |
+ |
4365 |
+ wake_threads_waitq(desc); |
4366 |
+ } |
4367 |
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c |
4368 |
+index a1d8cc63b56e..e2514b0e439e 100644 |
4369 |
+--- a/kernel/irq/spurious.c |
4370 |
++++ b/kernel/irq/spurious.c |
4371 |
+@@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, |
4372 |
+ return action && (action->flags & IRQF_IRQPOLL); |
4373 |
+ } |
4374 |
+ |
4375 |
++#define SPURIOUS_DEFERRED 0x80000000 |
4376 |
++ |
4377 |
+ void note_interrupt(unsigned int irq, struct irq_desc *desc, |
4378 |
+ irqreturn_t action_ret) |
4379 |
+ { |
4380 |
+@@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, |
4381 |
+ irq_settings_is_polled(desc)) |
4382 |
+ return; |
4383 |
+ |
4384 |
+- /* we get here again via the threaded handler */ |
4385 |
+- if (action_ret == IRQ_WAKE_THREAD) |
4386 |
+- return; |
4387 |
+- |
4388 |
+ if (bad_action_ret(action_ret)) { |
4389 |
+ report_bad_irq(irq, desc, action_ret); |
4390 |
+ return; |
4391 |
+ } |
4392 |
+ |
4393 |
++ /* |
4394 |
++ * We cannot call note_interrupt from the threaded handler |
4395 |
++ * because we need to look at the compound of all handlers |
4396 |
++ * (primary and threaded). Aside from that, in the threaded |
4397 |
++ * shared case we have no serialization against an incoming |
4398 |
++ * hardware interrupt while we are dealing with a threaded |
4399 |
++ * result. |
4400 |
++ * |
4401 |
++ * So in case a thread is woken, we just note the fact and |
4402 |
++ * defer the analysis to the next hardware interrupt. |
4403 |
++ * |
4404 |
++ * The threaded handlers store whether they successfully |
4405 |
++ * handled an interrupt and we check whether that number |
4406 |
++ * changed versus the last invocation. |
4407 |
++ * |
4408 |
++ * We could handle all interrupts with the delayed by one |
4409 |
++ * mechanism, but for the non-forced threaded case we'd just |
4410 |
++ * add pointless overhead to the straight hardirq interrupts |
4411 |
++ * for the sake of a few lines less code. |
4412 |
++ */ |
4413 |
++ if (action_ret & IRQ_WAKE_THREAD) { |
4414 |
++ /* |
4415 |
++ * There is a thread woken. Check whether one of the |
4416 |
++ * shared primary handlers returned IRQ_HANDLED. If |
4417 |
++ * not we defer the spurious detection to the next |
4418 |
++ * interrupt. |
4419 |
++ */ |
4420 |
++ if (action_ret == IRQ_WAKE_THREAD) { |
4421 |
++ int handled; |
4422 |
++ /* |
4423 |
++ * We use bit 31 of thread_handled_last to |
4424 |
++ * denote the deferred spurious detection |
4425 |
++ * active. No locking necessary as |
4426 |
++ * thread_handled_last is only accessed here |
4427 |
++ * and we have the guarantee that hard |
4428 |
++ * interrupts are not reentrant. |
4429 |
++ */ |
4430 |
++ if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { |
4431 |
++ desc->threads_handled_last |= SPURIOUS_DEFERRED; |
4432 |
++ return; |
4433 |
++ } |
4434 |
++ /* |
4435 |
++ * Check whether one of the threaded handlers |
4436 |
++ * returned IRQ_HANDLED since the last |
4437 |
++ * interrupt happened. |
4438 |
++ * |
4439 |
++ * For simplicity we just set bit 31, as it is |
4440 |
++ * set in threads_handled_last as well. So we |
4441 |
++ * avoid extra masking. And we really do not |
4442 |
++ * care about the high bits of the handled |
4443 |
++ * count. We just care about the count being |
4444 |
++ * different than the one we saw before. |
4445 |
++ */ |
4446 |
++ handled = atomic_read(&desc->threads_handled); |
4447 |
++ handled |= SPURIOUS_DEFERRED; |
4448 |
++ if (handled != desc->threads_handled_last) { |
4449 |
++ action_ret = IRQ_HANDLED; |
4450 |
++ /* |
4451 |
++ * Note: We keep the SPURIOUS_DEFERRED |
4452 |
++ * bit set. We are handling the |
4453 |
++ * previous invocation right now. |
4454 |
++ * Keep it for the current one, so the |
4455 |
++ * next hardware interrupt will |
4456 |
++ * account for it. |
4457 |
++ */ |
4458 |
++ desc->threads_handled_last = handled; |
4459 |
++ } else { |
4460 |
++ /* |
4461 |
++ * None of the threaded handlers felt |
4462 |
++ * responsible for the last interrupt |
4463 |
++ * |
4464 |
++ * We keep the SPURIOUS_DEFERRED bit |
4465 |
++ * set in threads_handled_last as we |
4466 |
++ * need to account for the current |
4467 |
++ * interrupt as well. |
4468 |
++ */ |
4469 |
++ action_ret = IRQ_NONE; |
4470 |
++ } |
4471 |
++ } else { |
4472 |
++ /* |
4473 |
++ * One of the primary handlers returned |
4474 |
++ * IRQ_HANDLED. So we don't care about the |
4475 |
++ * threaded handlers on the same line. Clear |
4476 |
++ * the deferred detection bit. |
4477 |
++ * |
4478 |
++ * In theory we could/should check whether the |
4479 |
++ * deferred bit is set and take the result of |
4480 |
++ * the previous run into account here as |
4481 |
++ * well. But it's really not worth the |
4482 |
++ * trouble. If every other interrupt is |
4483 |
++ * handled we never trigger the spurious |
4484 |
++ * detector. And if this is just the one out |
4485 |
++ * of 100k unhandled ones which is handled |
4486 |
++ * then we merely delay the spurious detection |
4487 |
++ * by one hard interrupt. Not a real problem. |
4488 |
++ */ |
4489 |
++ desc->threads_handled_last &= ~SPURIOUS_DEFERRED; |
4490 |
++ } |
4491 |
++ } |
4492 |
++ |
4493 |
+ if (unlikely(action_ret == IRQ_NONE)) { |
4494 |
+ /* |
4495 |
+ * If we are seeing only the odd spurious IRQ caused by |
4496 |
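
The deferred check above means a hard interrupt only learns on its next invocation whether the thread handled the previous one, by watching the handled count move. A compact user-space model of just that bookkeeping (single-threaded on purpose; only the state machine is of interest):

#include <stdio.h>

#define SPURIOUS_DEFERRED 0x80000000u

static unsigned int threads_handled, threads_handled_last;

/* what note_interrupt() would conclude about the *previous* irq */
static const char *note_wake_thread(void)
{
	unsigned int handled;

	if (!(threads_handled_last & SPURIOUS_DEFERRED)) {
		threads_handled_last |= SPURIOUS_DEFERRED;
		return "deferred (first wake)";
	}
	handled = threads_handled | SPURIOUS_DEFERRED;
	if (handled != threads_handled_last) {
		threads_handled_last = handled;
		return "IRQ_HANDLED";
	}
	return "IRQ_NONE";
}

int main(void)
{
	puts(note_wake_thread());	/* deferred (first wake) */
	threads_handled++;		/* thread handled the irq */
	puts(note_wake_thread());	/* IRQ_HANDLED */
	puts(note_wake_thread());	/* IRQ_NONE: count unchanged */
	return 0;
}
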
+diff --git a/kernel/kthread.c b/kernel/kthread.c |
4497 |
+index 9a130ec06f7a..c2390f41307b 100644 |
4498 |
+--- a/kernel/kthread.c |
4499 |
++++ b/kernel/kthread.c |
4500 |
+@@ -262,7 +262,7 @@ static void create_kthread(struct kthread_create_info *create) |
4501 |
+ * kthread_stop() has been called). The return value should be zero |
4502 |
+ * or a negative error number; it will be passed to kthread_stop(). |
4503 |
+ * |
4504 |
+- * Returns a task_struct or ERR_PTR(-ENOMEM). |
4505 |
++ * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR). |
4506 |
+ */ |
4507 |
+ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), |
4508 |
+ void *data, int node, |
4509 |
+@@ -298,7 +298,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), |
4510 |
+ * that thread. |
4511 |
+ */ |
4512 |
+ if (xchg(&create->done, NULL)) |
4513 |
+- return ERR_PTR(-ENOMEM); |
4514 |
++ return ERR_PTR(-EINTR); |
4515 |
+ /* |
4516 |
+ * kthreadd (or new kernel thread) will call complete() |
4517 |
+ * shortly. |
4518 |
+diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h |
4519 |
+index 14193d596d78..ab29b6a22669 100644 |
4520 |
+--- a/kernel/locking/rtmutex-debug.h |
4521 |
++++ b/kernel/locking/rtmutex-debug.h |
4522 |
+@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, |
4523 |
+ { |
4524 |
+ return (waiter != NULL); |
4525 |
+ } |
4526 |
++ |
4527 |
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) |
4528 |
++{ |
4529 |
++ debug_rt_mutex_print_deadlock(w); |
4530 |
++} |
4531 |
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c |
4532 |
+index a620d4d08ca6..fc605941b9b8 100644 |
4533 |
+--- a/kernel/locking/rtmutex.c |
4534 |
++++ b/kernel/locking/rtmutex.c |
4535 |
+@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
4536 |
+ owner = *p; |
4537 |
+ } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); |
4538 |
+ } |
4539 |
++ |
4540 |
++/* |
4541 |
++ * Safe fastpath aware unlock: |
4542 |
++ * 1) Clear the waiters bit |
4543 |
++ * 2) Drop lock->wait_lock |
4544 |
++ * 3) Try to unlock the lock with cmpxchg |
4545 |
++ */ |
4546 |
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) |
4547 |
++ __releases(lock->wait_lock) |
4548 |
++{ |
4549 |
++ struct task_struct *owner = rt_mutex_owner(lock); |
4550 |
++ |
4551 |
++ clear_rt_mutex_waiters(lock); |
4552 |
++ raw_spin_unlock(&lock->wait_lock); |
4553 |
++ /* |
4554 |
++ * If a new waiter comes in between the unlock and the cmpxchg |
4555 |
++ * we have two situations: |
4556 |
++ * |
4557 |
++ * unlock(wait_lock); |
4558 |
++ * lock(wait_lock); |
4559 |
++ * cmpxchg(p, owner, 0) == owner |
4560 |
++ * mark_rt_mutex_waiters(lock); |
4561 |
++ * acquire(lock); |
4562 |
++ * or: |
4563 |
++ * |
4564 |
++ * unlock(wait_lock); |
4565 |
++ * lock(wait_lock); |
4566 |
++ * mark_rt_mutex_waiters(lock); |
4567 |
++ * |
4568 |
++ * cmpxchg(p, owner, 0) != owner |
4569 |
++ * enqueue_waiter(); |
4570 |
++ * unlock(wait_lock); |
4571 |
++ * lock(wait_lock); |
4572 |
++ * wake waiter(); |
4573 |
++ * unlock(wait_lock); |
4574 |
++ * lock(wait_lock); |
4575 |
++ * acquire(lock); |
4576 |
++ */ |
4577 |
++ return rt_mutex_cmpxchg(lock, owner, NULL); |
4578 |
++} |
4579 |
++ |
4580 |
+ #else |
4581 |
+ # define rt_mutex_cmpxchg(l,c,n) (0) |
4582 |
+ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
4583 |
+@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
4584 |
+ lock->owner = (struct task_struct *) |
4585 |
+ ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); |
4586 |
+ } |
4587 |
++ |
4588 |
++/* |
4589 |
++ * Simple slow path only version: lock->owner is protected by lock->wait_lock. |
4590 |
++ */ |
4591 |
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) |
4592 |
++ __releases(lock->wait_lock) |
4593 |
++{ |
4594 |
++ lock->owner = NULL; |
4595 |
++ raw_spin_unlock(&lock->wait_lock); |
4596 |
++ return true; |
4597 |
++} |
4598 |
+ #endif |
4599 |
+ |
4600 |
+ static inline int |
4601 |
+@@ -260,27 +312,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task) |
4602 |
+ */ |
4603 |
+ int max_lock_depth = 1024; |
4604 |
+ |
4605 |
++static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) |
4606 |
++{ |
4607 |
++ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; |
4608 |
++} |
4609 |
++ |
4610 |
+ /* |
4611 |
+ * Adjust the priority chain. Also used for deadlock detection. |
4612 |
+ * Decreases task's usage by one - may thus free the task. |
4613 |
+ * |
4614 |
+- * @task: the task owning the mutex (owner) for which a chain walk is probably |
4615 |
+- * needed |
4616 |
++ * @task: the task owning the mutex (owner) for which a chain walk is |
4617 |
++ * probably needed |
4618 |
+ * @deadlock_detect: do we have to carry out deadlock detection? |
4619 |
+- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck |
4620 |
+- * things for a task that has just got its priority adjusted, and |
4621 |
+- * is waiting on a mutex) |
4622 |
++ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck |
4623 |
++ * things for a task that has just got its priority adjusted, and |
4624 |
++ * is waiting on a mutex) |
4625 |
++ * @next_lock: the mutex on which the owner of @orig_lock was blocked before |
4626 |
++ * we dropped its pi_lock. Is never dereferenced, only used for |
4627 |
++ * comparison to detect lock chain changes. |
4628 |
+ * @orig_waiter: rt_mutex_waiter struct for the task that has just donated |
4629 |
+- * its priority to the mutex owner (can be NULL in the case |
4630 |
+- * depicted above or if the top waiter is gone away and we are |
4631 |
+- * actually deboosting the owner) |
4632 |
+- * @top_task: the current top waiter |
4633 |
++ * its priority to the mutex owner (can be NULL in the case |
4634 |
++ * depicted above or if the top waiter is gone away and we are |
4635 |
++ * actually deboosting the owner) |
4636 |
++ * @top_task: the current top waiter |
4637 |
+ * |
4638 |
+ * Returns 0 or -EDEADLK. |
4639 |
+ */ |
4640 |
+ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
4641 |
+ int deadlock_detect, |
4642 |
+ struct rt_mutex *orig_lock, |
4643 |
++ struct rt_mutex *next_lock, |
4644 |
+ struct rt_mutex_waiter *orig_waiter, |
4645 |
+ struct task_struct *top_task) |
4646 |
+ { |
4647 |
+@@ -314,7 +375,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
4648 |
+ } |
4649 |
+ put_task_struct(task); |
4650 |
+ |
4651 |
+- return deadlock_detect ? -EDEADLK : 0; |
4652 |
++ return -EDEADLK; |
4653 |
+ } |
4654 |
+ retry: |
4655 |
+ /* |
4656 |
+@@ -339,6 +400,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
4657 |
+ goto out_unlock_pi; |
4658 |
+ |
4659 |
+ /* |
4660 |
++ * We dropped all locks after taking a refcount on @task, so |
4661 |
++ * the task might have moved on in the lock chain or even left |
4662 |
++ * the chain completely and blocks now on an unrelated lock or |
4663 |
++ * on @orig_lock. |
4664 |
++ * |
4665 |
++ * We stored the lock on which @task was blocked in @next_lock, |
4666 |
++ * so we can detect the chain change. |
4667 |
++ */ |
4668 |
++ if (next_lock != waiter->lock) |
4669 |
++ goto out_unlock_pi; |
4670 |
++ |
4671 |
++ /* |
4672 |
+ * Drop out, when the task has no waiters. Note, |
4673 |
+ * top_waiter can be NULL, when we are in the deboosting |
4674 |
+ * mode! |
4675 |
+@@ -377,7 +450,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
4676 |
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { |
4677 |
+ debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); |
4678 |
+ raw_spin_unlock(&lock->wait_lock); |
4679 |
+- ret = deadlock_detect ? -EDEADLK : 0; |
4680 |
++ ret = -EDEADLK; |
4681 |
+ goto out_unlock_pi; |
4682 |
+ } |
4683 |
+ |
4684 |
+@@ -422,11 +495,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
4685 |
+ __rt_mutex_adjust_prio(task); |
4686 |
+ } |
4687 |
+ |
4688 |
++ /* |
4689 |
++ * Check whether the task which owns the current lock is pi |
4690 |
++ * blocked itself. If yes we store a pointer to the lock for |
4691 |
++ * the lock chain change detection above. After we dropped |
4692 |
++ * task->pi_lock next_lock cannot be dereferenced anymore. |
4693 |
++ */ |
4694 |
++ next_lock = task_blocked_on_lock(task); |
4695 |
++ |
4696 |
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
4697 |
+ |
4698 |
+ top_waiter = rt_mutex_top_waiter(lock); |
4699 |
+ raw_spin_unlock(&lock->wait_lock); |
4700 |
+ |
4701 |
++ /* |
4702 |
++ * We reached the end of the lock chain. Stop right here. No |
4703 |
++ * point to go back just to figure that out. |
4704 |
++ */ |
4705 |
++ if (!next_lock) |
4706 |
++ goto out_put_task; |
4707 |
++ |
4708 |
+ if (!detect_deadlock && waiter != top_waiter) |
4709 |
+ goto out_put_task; |
4710 |
+ |
4711 |
+@@ -536,8 +624,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
4712 |
+ { |
4713 |
+ struct task_struct *owner = rt_mutex_owner(lock); |
4714 |
+ struct rt_mutex_waiter *top_waiter = waiter; |
4715 |
+- unsigned long flags; |
4716 |
++ struct rt_mutex *next_lock; |
4717 |
+ int chain_walk = 0, res; |
4718 |
++ unsigned long flags; |
4719 |
+ |
4720 |
+ /* |
4721 |
+ * Early deadlock detection. We really don't want the task to |
4722 |
+@@ -548,7 +637,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
4723 |
+ * which is wrong, as the other waiter is not in a deadlock |
4724 |
+ * situation. |
4725 |
+ */ |
4726 |
+- if (detect_deadlock && owner == task) |
4727 |
++ if (owner == task) |
4728 |
+ return -EDEADLK; |
4729 |
+ |
4730 |
+ raw_spin_lock_irqsave(&task->pi_lock, flags); |
4731 |
+@@ -569,20 +658,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
4732 |
+ if (!owner) |
4733 |
+ return 0; |
4734 |
+ |
4735 |
++ raw_spin_lock_irqsave(&owner->pi_lock, flags); |
4736 |
+ if (waiter == rt_mutex_top_waiter(lock)) { |
4737 |
+- raw_spin_lock_irqsave(&owner->pi_lock, flags); |
4738 |
+ rt_mutex_dequeue_pi(owner, top_waiter); |
4739 |
+ rt_mutex_enqueue_pi(owner, waiter); |
4740 |
+ |
4741 |
+ __rt_mutex_adjust_prio(owner); |
4742 |
+ if (owner->pi_blocked_on) |
4743 |
+ chain_walk = 1; |
4744 |
+- raw_spin_unlock_irqrestore(&owner->pi_lock, flags); |
4745 |
+- } |
4746 |
+- else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) |
4747 |
++ } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) { |
4748 |
+ chain_walk = 1; |
4749 |
++ } |
4750 |
+ |
4751 |
+- if (!chain_walk) |
4752 |
++ /* Store the lock on which owner is blocked or NULL */ |
4753 |
++ next_lock = task_blocked_on_lock(owner); |
4754 |
++ |
4755 |
++ raw_spin_unlock_irqrestore(&owner->pi_lock, flags); |
4756 |
++ /* |
4757 |
++ * Even if full deadlock detection is on, if the owner is not |
4758 |
++ * blocked itself, we can avoid finding this out in the chain |
4759 |
++ * walk. |
4760 |
++ */ |
4761 |
++ if (!chain_walk || !next_lock) |
4762 |
+ return 0; |
4763 |
+ |
4764 |
+ /* |
4765 |
+@@ -594,8 +691,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
4766 |
+ |
4767 |
+ raw_spin_unlock(&lock->wait_lock); |
4768 |
+ |
4769 |
+- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, |
4770 |
+- task); |
4771 |
++ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, |
4772 |
++ next_lock, waiter, task); |
4773 |
+ |
4774 |
+ raw_spin_lock(&lock->wait_lock); |
4775 |
+ |
4776 |
+@@ -605,7 +702,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, |
4777 |
+ /* |
4778 |
+ * Wake up the next waiter on the lock. |
4779 |
+ * |
4780 |
+- * Remove the top waiter from the current tasks waiter list and wake it up. |
4781 |
++ * Remove the top waiter from the current task's pi waiter list and |
4782 |
++ * wake it up. |
4783 |
+ * |
4784 |
+ * Called with lock->wait_lock held. |
4785 |
+ */ |
4786 |
+@@ -626,10 +724,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock) |
4787 |
+ */ |
4788 |
+ rt_mutex_dequeue_pi(current, waiter); |
4789 |
+ |
4790 |
+- rt_mutex_set_owner(lock, NULL); |
4791 |
++ /* |
4792 |
++ * As we are waking up the top waiter, and the waiter stays |
4793 |
++ * queued on the lock until it gets the lock, this lock |
4794 |
++ * obviously has waiters. Just set the bit here and this has |
4795 |
++ * the added benefit of forcing all new tasks into the |
4796 |
++ * slow path making sure no task of lower priority than |
4797 |
++ * the top waiter can steal this lock. |
4798 |
++ */ |
4799 |
++ lock->owner = (void *) RT_MUTEX_HAS_WAITERS; |
4800 |
+ |
4801 |
+ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); |
4802 |
+ |
4803 |
++ /* |
4804 |
++ * It's safe to dereference waiter as it cannot go away as |
4805 |
++ * long as we hold lock->wait_lock. The waiter task needs to |
4806 |
++ * acquire it in order to dequeue the waiter. |
4807 |
++ */ |
4808 |
+ wake_up_process(waiter->task); |
4809 |
+ } |
4810 |
+ |
4811 |
+@@ -644,8 +755,8 @@ static void remove_waiter(struct rt_mutex *lock, |
4812 |
+ { |
4813 |
+ int first = (waiter == rt_mutex_top_waiter(lock)); |
4814 |
+ struct task_struct *owner = rt_mutex_owner(lock); |
4815 |
++ struct rt_mutex *next_lock = NULL; |
4816 |
+ unsigned long flags; |
4817 |
+- int chain_walk = 0; |
4818 |
+ |
4819 |
+ raw_spin_lock_irqsave(¤t->pi_lock, flags); |
4820 |
+ rt_mutex_dequeue(lock, waiter); |
4821 |
+@@ -669,13 +780,13 @@ static void remove_waiter(struct rt_mutex *lock, |
4822 |
+ } |
4823 |
+ __rt_mutex_adjust_prio(owner); |
4824 |
+ |
4825 |
+- if (owner->pi_blocked_on) |
4826 |
+- chain_walk = 1; |
4827 |
++ /* Store the lock on which owner is blocked or NULL */ |
4828 |
++ next_lock = task_blocked_on_lock(owner); |
4829 |
+ |
4830 |
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags); |
4831 |
+ } |
4832 |
+ |
4833 |
+- if (!chain_walk) |
4834 |
++ if (!next_lock) |
4835 |
+ return; |
4836 |
+ |
4837 |
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */ |
4838 |
+@@ -683,7 +794,7 @@ static void remove_waiter(struct rt_mutex *lock, |
4839 |
+ |
4840 |
+ raw_spin_unlock(&lock->wait_lock); |
4841 |
+ |
4842 |
+- rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); |
4843 |
++ rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current); |
4844 |
+ |
4845 |
+ raw_spin_lock(&lock->wait_lock); |
4846 |
+ } |
4847 |
+@@ -696,6 +807,7 @@ static void remove_waiter(struct rt_mutex *lock, |
4848 |
+ void rt_mutex_adjust_pi(struct task_struct *task) |
4849 |
+ { |
4850 |
+ struct rt_mutex_waiter *waiter; |
4851 |
++ struct rt_mutex *next_lock; |
4852 |
+ unsigned long flags; |
4853 |
+ |
4854 |
+ raw_spin_lock_irqsave(&task->pi_lock, flags); |
4855 |
+@@ -706,12 +818,13 @@ void rt_mutex_adjust_pi(struct task_struct *task) |
4856 |
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
4857 |
+ return; |
4858 |
+ } |
4859 |
+- |
4860 |
++ next_lock = waiter->lock; |
4861 |
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
4862 |
+ |
4863 |
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */ |
4864 |
+ get_task_struct(task); |
4865 |
+- rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); |
4866 |
++ |
4867 |
++ rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task); |
4868 |
+ } |
4869 |
+ |
4870 |
+ /** |
4871 |
+@@ -763,6 +876,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, |
4872 |
+ return ret; |
4873 |
+ } |
4874 |
+ |
4875 |
++static void rt_mutex_handle_deadlock(int res, int detect_deadlock, |
4876 |
++ struct rt_mutex_waiter *w) |
4877 |
++{ |
4878 |
++ /* |
4879 |
++ * If the result is not -EDEADLOCK or the caller requested |
4880 |
++ * deadlock detection, nothing to do here. |
4881 |
++ */ |
4882 |
++ if (res != -EDEADLOCK || detect_deadlock) |
4883 |
++ return; |
4884 |
++ |
4885 |
++ /* |
4886 |
++ * Yell loudly and stop the task right here. |
4887 |
++ */ |
4888 |
++ rt_mutex_print_deadlock(w); |
4889 |
++ while (1) { |
4890 |
++ set_current_state(TASK_INTERRUPTIBLE); |
4891 |
++ schedule(); |
4892 |
++ } |
4893 |
++} |
4894 |
++ |
4895 |
+ /* |
4896 |
+ * Slow path lock function: |
4897 |
+ */ |
4898 |
+@@ -802,8 +935,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, |
4899 |
+ |
4900 |
+ set_current_state(TASK_RUNNING); |
4901 |
+ |
4902 |
+- if (unlikely(ret)) |
4903 |
++ if (unlikely(ret)) { |
4904 |
+ remove_waiter(lock, &waiter); |
4905 |
++ rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter); |
4906 |
++ } |
4907 |
+ |
4908 |
+ /* |
4909 |
+ * try_to_take_rt_mutex() sets the waiter bit |
4910 |
+@@ -859,12 +994,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock) |
4911 |
+ |
4912 |
+ rt_mutex_deadlock_account_unlock(current); |
4913 |
+ |
4914 |
+- if (!rt_mutex_has_waiters(lock)) { |
4915 |
+- lock->owner = NULL; |
4916 |
+- raw_spin_unlock(&lock->wait_lock); |
4917 |
+- return; |
4918 |
++ /* |
4919 |
++ * We must be careful here if the fast path is enabled. If we |
4920 |
++ * have no waiters queued we cannot set owner to NULL here |
4921 |
++ * because of: |
4922 |
++ * |
4923 |
++ * foo->lock->owner = NULL; |
4924 |
++ * rtmutex_lock(foo->lock); <- fast path |
4925 |
++ * free = atomic_dec_and_test(foo->refcnt); |
4926 |
++ * rtmutex_unlock(foo->lock); <- fast path |
4927 |
++ * if (free) |
4928 |
++ * kfree(foo); |
4929 |
++ * raw_spin_unlock(foo->lock->wait_lock); |
4930 |
++ * |
4931 |
++ * So for the fastpath enabled kernel: |
4932 |
++ * |
4933 |
++ * Nothing can set the waiters bit as long as we hold |
4934 |
++ * lock->wait_lock. So we do the following sequence: |
4935 |
++ * |
4936 |
++ * owner = rt_mutex_owner(lock); |
4937 |
++ * clear_rt_mutex_waiters(lock); |
4938 |
++ * raw_spin_unlock(&lock->wait_lock); |
4939 |
++ * if (cmpxchg(&lock->owner, owner, 0) == owner) |
4940 |
++ * return; |
4941 |
++ * goto retry; |
4942 |
++ * |
4943 |
++ * The fastpath disabled variant is simple as all access to |
4944 |
++ * lock->owner is serialized by lock->wait_lock: |
4945 |
++ * |
4946 |
++ * lock->owner = NULL; |
4947 |
++ * raw_spin_unlock(&lock->wait_lock); |
4948 |
++ */ |
4949 |
++ while (!rt_mutex_has_waiters(lock)) { |
4950 |
++ /* Drops lock->wait_lock ! */ |
4951 |
++ if (unlock_rt_mutex_safe(lock) == true) |
4952 |
++ return; |
4953 |
++ /* Relock the rtmutex and try again */ |
4954 |
++ raw_spin_lock(&lock->wait_lock); |
4955 |
+ } |
4956 |
+ |
4957 |
++ /* |
4958 |
++ * The wakeup next waiter path does not suffer from the above |
4959 |
++ * race. See the comments there. |
4960 |
++ */ |
4961 |
+ wakeup_next_waiter(lock); |
4962 |
+ |
4963 |
+ raw_spin_unlock(&lock->wait_lock); |
4964 |
+@@ -1112,7 +1284,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
4965 |
+ return 1; |
4966 |
+ } |
4967 |
+ |
4968 |
+- ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); |
4969 |
++ /* We enforce deadlock detection for futexes */ |
4970 |
++ ret = task_blocks_on_rt_mutex(lock, waiter, task, 1); |
4971 |
+ |
4972 |
+ if (ret && !rt_mutex_owner(lock)) { |
4973 |
+ /* |
4974 |
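
The unlock race fixed in this rtmutex diff boils down to: clear the waiters bit, drop the internal wait_lock, then hand the lock back with a cmpxchg, retrying if a waiter slipped in between. A minimal C11 rendering of that shape, with an integer standing in for the owner field (not the kernel's types; owner == 0 means unlocked, and the wait_lock itself is elided):

#include <stdatomic.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

static _Atomic unsigned long owner;	/* "task pointer" | HAS_WAITERS */

static int unlock_safe(unsigned long me)
{
	/* the kernel drops lock->wait_lock right before this point */
	unsigned long expected = me;	/* waiters bit already cleared */

	return atomic_compare_exchange_strong(&owner, &expected, 0);
}

int main(void)
{
	unsigned long me = 0x1000;	/* pretend task pointer */

	atomic_store(&owner, me);
	printf("fast unlock: %d\n", unlock_safe(me));	/* 1: done */

	atomic_store(&owner, me | HAS_WAITERS);		/* waiter raced in */
	printf("fast unlock: %d\n", unlock_safe(me));	/* 0: retry slow path */
	return 0;
}

On failure the caller relocks wait_lock and retries, which is exactly the while loop added to rt_mutex_slowunlock() above.
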
+diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h |
4975 |
+index a1a1dd06421d..f6a1f3c133b1 100644 |
4976 |
+--- a/kernel/locking/rtmutex.h |
4977 |
++++ b/kernel/locking/rtmutex.h |
4978 |
+@@ -24,3 +24,8 @@ |
4979 |
+ #define debug_rt_mutex_print_deadlock(w) do { } while (0) |
4980 |
+ #define debug_rt_mutex_detect_deadlock(w,d) (d) |
4981 |
+ #define debug_rt_mutex_reset_waiter(w) do { } while (0) |
4982 |
++ |
4983 |
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) |
4984 |
++{ |
4985 |
++ WARN(1, "rtmutex deadlock detected\n"); |
4986 |
++} |
4987 |
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c |
4988 |
+index 7228258b85ec..221229cf0190 100644 |
4989 |
+--- a/kernel/printk/printk.c |
4990 |
++++ b/kernel/printk/printk.c |
4991 |
+@@ -2413,6 +2413,7 @@ int unregister_console(struct console *console) |
4992 |
+ if (console_drivers != NULL && console->flags & CON_CONSDEV) |
4993 |
+ console_drivers->flags |= CON_CONSDEV; |
4994 |
+ |
4995 |
++ console->flags &= ~CON_ENABLED; |
4996 |
+ console_unlock(); |
4997 |
+ console_sysfs_notify(); |
4998 |
+ return res; |
4999 |
+diff --git a/lib/idr.c b/lib/idr.c |
5000 |
+index 2642fa8e424d..4df67928816e 100644 |
5001 |
+--- a/lib/idr.c |
5002 |
++++ b/lib/idr.c |
5003 |
+@@ -249,7 +249,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, |
5004 |
+ id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
5005 |
+ |
5006 |
+ /* if already at the top layer, we need to grow */ |
5007 |
+- if (id >= 1 << (idp->layers * IDR_BITS)) { |
5008 |
++ if (id > idr_max(idp->layers)) { |
5009 |
+ *starting_id = id; |
5010 |
+ return -EAGAIN; |
5011 |
+ } |
5012 |
+@@ -811,12 +811,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id) |
5013 |
+ if (!p) |
5014 |
+ return ERR_PTR(-EINVAL); |
5015 |
+ |
5016 |
+- n = (p->layer+1) * IDR_BITS; |
5017 |
+- |
5018 |
+- if (id >= (1 << n)) |
5019 |
++ if (id > idr_max(p->layer + 1)) |
5020 |
+ return ERR_PTR(-EINVAL); |
5021 |
+ |
5022 |
+- n -= IDR_BITS; |
5023 |
++ n = p->layer * IDR_BITS; |
5024 |
+ while ((n > 0) && p) { |
5025 |
+ p = p->ary[(id >> n) & IDR_MASK]; |
5026 |
+ n -= IDR_BITS; |
5027 |
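
Both idr hunks replace the open-coded bound "1 << (layers * IDR_BITS)" with a helper: once layers * IDR_BITS reaches the width of int, the shift overflows and the bound goes wrong. A clamped helper in the style of idr_max() stays sane; IDR_BITS is 8 upstream in the common configuration, but treat the constants here as illustrative:

#include <limits.h>
#include <stdio.h>

#define IDR_BITS	8
#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)

static int idr_max(int layers)
{
	int bits = layers * IDR_BITS;

	if (bits > (int)MAX_IDR_SHIFT)
		bits = MAX_IDR_SHIFT;
	/* unsigned shift avoids the signed-overflow trap at bits == 31 */
	return (int)((1U << bits) - 1);
}

int main(void)
{
	/* 4 layers * 8 bits = 32: the naive "1 << 32" would overflow */
	printf("idr_max(4) = %d (INT_MAX = %d)\n", idr_max(4), INT_MAX);
	return 0;
}
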
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c |
5028 |
+index 99a03acb7d47..b74da447e81e 100644 |
5029 |
+--- a/lib/lz4/lz4_decompress.c |
5030 |
++++ b/lib/lz4/lz4_decompress.c |
5031 |
+@@ -108,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize) |
5032 |
+ if (length == ML_MASK) { |
5033 |
+ for (; *ip == 255; length += 255) |
5034 |
+ ip++; |
5035 |
++ if (unlikely(length > (size_t)(length + *ip))) |
5036 |
++ goto _output_error; |
5037 |
+ length += *ip++; |
5038 |
+ } |
5039 |
+ |
5040 |
+@@ -157,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize) |
5041 |
+ |
5042 |
+ /* write overflow error detected */ |
5043 |
+ _output_error: |
5044 |
+- return (int) (-(((char *)ip) - source)); |
5045 |
++ return -1; |
5046 |
+ } |
5047 |
+ |
5048 |
+ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, |
5049 |
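
The added lz4 guard exploits unsigned wraparound: for unsigned arithmetic, length + *ip compares less than length exactly when the addition overflowed, so a corrupt stream cannot inflate the match length past SIZE_MAX. The same check stand-alone:

#include <stdio.h>
#include <stddef.h>

static int add_len_checked(size_t *length, unsigned char byte)
{
	if (*length > (size_t)(*length + byte))
		return -1;	/* would wrap: corrupt stream */
	*length += byte;
	return 0;
}

int main(void)
{
	size_t len = (size_t)-1 - 100;	/* near SIZE_MAX */

	printf("add 50:  %d\n", add_len_checked(&len, 50));	/* 0: fits */
	printf("add 255: %d\n", add_len_checked(&len, 255));	/* -1: wraps */
	return 0;
}
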
+diff --git a/mm/Kconfig b/mm/Kconfig |
5050 |
+index 1b5a95f0fa01..2f42b9c2f345 100644 |
5051 |
+--- a/mm/Kconfig |
5052 |
++++ b/mm/Kconfig |
5053 |
+@@ -264,6 +264,9 @@ config MIGRATION |
5054 |
+ pages as migration can relocate pages to satisfy a huge page |
5055 |
+ allocation instead of reclaiming. |
5056 |
+ |
5057 |
++config ARCH_ENABLE_HUGEPAGE_MIGRATION |
5058 |
++ boolean |
5059 |
++ |
5060 |
+ config PHYS_ADDR_T_64BIT |
5061 |
+ def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT |
5062 |
+ |
5063 |
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c |
5064 |
+index 5177c6d4a2dd..67c927a10add 100644 |
5065 |
+--- a/mm/memcontrol.c |
5066 |
++++ b/mm/memcontrol.c |
5067 |
+@@ -2684,7 +2684,8 @@ static int mem_cgroup_try_charge(struct mem_cgroup *memcg, |
5068 |
+ * free their memory. |
5069 |
+ */ |
5070 |
+ if (unlikely(test_thread_flag(TIF_MEMDIE) || |
5071 |
+- fatal_signal_pending(current))) |
5072 |
++ fatal_signal_pending(current) || |
5073 |
++ current->flags & PF_EXITING)) |
5074 |
+ goto bypass; |
5075 |
+ |
5076 |
+ if (unlikely(task_in_memcg_oom(current))) |
5077 |
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
5078 |
+index 9ccef39a9de2..eb8fb727bd67 100644 |
5079 |
+--- a/mm/memory-failure.c |
5080 |
++++ b/mm/memory-failure.c |
5081 |
+@@ -204,9 +204,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, |
5082 |
+ #endif |
5083 |
+ si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; |
5084 |
+ |
5085 |
+- if ((flags & MF_ACTION_REQUIRED) && t == current) { |
5086 |
++ if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) { |
5087 |
+ si.si_code = BUS_MCEERR_AR; |
5088 |
+- ret = force_sig_info(SIGBUS, &si, t); |
5089 |
++ ret = force_sig_info(SIGBUS, &si, current); |
5090 |
+ } else { |
5091 |
+ /* |
5092 |
+ * Don't use force here, it's convenient if the signal |
5093 |
+@@ -380,20 +380,51 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, |
5094 |
+ } |
5095 |
+ } |
5096 |
+ |
5097 |
+-static int task_early_kill(struct task_struct *tsk) |
5098 |
++/* |
5099 |
++ * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO) |
5100 |
++ * on behalf of the thread group. Return task_struct of the (first found) |
5101 |
++ * dedicated thread if found, and return NULL otherwise. |
5102 |
++ * |
5103 |
++ * We already hold read_lock(&tasklist_lock) in the caller, so we don't |
5104 |
++ * have to call rcu_read_lock/unlock() in this function. |
5105 |
++ */ |
5106 |
++static struct task_struct *find_early_kill_thread(struct task_struct *tsk) |
5107 |
+ { |
5108 |
++ struct task_struct *t; |
5109 |
++ |
5110 |
++ for_each_thread(tsk, t) |
5111 |
++ if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY)) |
5112 |
++ return t; |
5113 |
++ return NULL; |
5114 |
++} |
5115 |
++ |
5116 |
++/* |
5117 |
++ * Determine whether a given process is "early kill" process which expects |
5118 |
++ * to be signaled when some page under the process is hwpoisoned. |
5119 |
++ * Return task_struct of the dedicated thread (main thread unless explicitly |
5120 |
++ * specified) if the process is "early kill," and otherwise returns NULL. |
5121 |
++ */ |
5122 |
++static struct task_struct *task_early_kill(struct task_struct *tsk, |
5123 |
++ int force_early) |
5124 |
++{ |
5125 |
++ struct task_struct *t; |
5126 |
+ if (!tsk->mm) |
5127 |
+- return 0; |
5128 |
+- if (tsk->flags & PF_MCE_PROCESS) |
5129 |
+- return !!(tsk->flags & PF_MCE_EARLY); |
5130 |
+- return sysctl_memory_failure_early_kill; |
5131 |
++ return NULL; |
5132 |
++ if (force_early) |
5133 |
++ return tsk; |
5134 |
++ t = find_early_kill_thread(tsk); |
5135 |
++ if (t) |
5136 |
++ return t; |
5137 |
++ if (sysctl_memory_failure_early_kill) |
5138 |
++ return tsk; |
5139 |
++ return NULL; |
5140 |
+ } |
5141 |
+ |
5142 |
+ /* |
5143 |
+ * Collect processes when the error hit an anonymous page. |
5144 |
+ */ |
5145 |
+ static void collect_procs_anon(struct page *page, struct list_head *to_kill, |
5146 |
+- struct to_kill **tkc) |
5147 |
++ struct to_kill **tkc, int force_early) |
5148 |
+ { |
5149 |
+ struct vm_area_struct *vma; |
5150 |
+ struct task_struct *tsk; |
5151 |
+@@ -408,16 +439,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, |
5152 |
+ read_lock(&tasklist_lock); |
5153 |
+ for_each_process (tsk) { |
5154 |
+ struct anon_vma_chain *vmac; |
5155 |
++ struct task_struct *t = task_early_kill(tsk, force_early); |
5156 |
+ |
5157 |
+- if (!task_early_kill(tsk)) |
5158 |
++ if (!t) |
5159 |
+ continue; |
5160 |
+ anon_vma_interval_tree_foreach(vmac, &av->rb_root, |
5161 |
+ pgoff, pgoff) { |
5162 |
+ vma = vmac->vma; |
5163 |
+ if (!page_mapped_in_vma(page, vma)) |
5164 |
+ continue; |
5165 |
+- if (vma->vm_mm == tsk->mm) |
5166 |
+- add_to_kill(tsk, page, vma, to_kill, tkc); |
5167 |
++ if (vma->vm_mm == t->mm) |
5168 |
++ add_to_kill(t, page, vma, to_kill, tkc); |
5169 |
+ } |
5170 |
+ } |
5171 |
+ read_unlock(&tasklist_lock); |
5172 |
+@@ -428,7 +460,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, |
5173 |
+ * Collect processes when the error hit a file mapped page. |
5174 |
+ */ |
5175 |
+ static void collect_procs_file(struct page *page, struct list_head *to_kill, |
5176 |
+- struct to_kill **tkc) |
5177 |
++ struct to_kill **tkc, int force_early) |
5178 |
+ { |
5179 |
+ struct vm_area_struct *vma; |
5180 |
+ struct task_struct *tsk; |
5181 |
+@@ -438,10 +470,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, |
5182 |
+ read_lock(&tasklist_lock); |
5183 |
+ for_each_process(tsk) { |
5184 |
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
5185 |
++ struct task_struct *t = task_early_kill(tsk, force_early); |
5186 |
+ |
5187 |
+- if (!task_early_kill(tsk)) |
5188 |
++ if (!t) |
5189 |
+ continue; |
5190 |
+- |
5191 |
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, |
5192 |
+ pgoff) { |
5193 |
+ /* |
5194 |
+@@ -451,8 +483,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, |
5195 |
+ * Assume applications who requested early kill want |
5196 |
+ * to be informed of all such data corruptions. |
5197 |
+ */ |
5198 |
+- if (vma->vm_mm == tsk->mm) |
5199 |
+- add_to_kill(tsk, page, vma, to_kill, tkc); |
5200 |
++ if (vma->vm_mm == t->mm) |
5201 |
++ add_to_kill(t, page, vma, to_kill, tkc); |
5202 |
+ } |
5203 |
+ } |
5204 |
+ read_unlock(&tasklist_lock); |
5205 |
+@@ -465,7 +497,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, |
5206 |
+ * First preallocate one tokill structure outside the spin locks, |
5207 |
+ * so that we can kill at least one process reasonably reliable. |
5208 |
+ */ |
5209 |
+-static void collect_procs(struct page *page, struct list_head *tokill) |
5210 |
++static void collect_procs(struct page *page, struct list_head *tokill, |
5211 |
++ int force_early) |
5212 |
+ { |
5213 |
+ struct to_kill *tk; |
5214 |
+ |
5215 |
+@@ -476,9 +509,9 @@ static void collect_procs(struct page *page, struct list_head *tokill) |
5216 |
+ if (!tk) |
5217 |
+ return; |
5218 |
+ if (PageAnon(page)) |
5219 |
+- collect_procs_anon(page, tokill, &tk); |
5220 |
++ collect_procs_anon(page, tokill, &tk, force_early); |
5221 |
+ else |
5222 |
+- collect_procs_file(page, tokill, &tk); |
5223 |
++ collect_procs_file(page, tokill, &tk, force_early); |
5224 |
+ kfree(tk); |
5225 |
+ } |
5226 |
+ |
5227 |
+@@ -963,7 +996,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, |
5228 |
+ * there's nothing that can be done. |
5229 |
+ */ |
5230 |
+ if (kill) |
5231 |
+- collect_procs(ppage, &tokill); |
5232 |
++ collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED); |
5233 |
+ |
5234 |
+ ret = try_to_unmap(ppage, ttu); |
5235 |
+ if (ret != SWAP_SUCCESS) |
5236 |
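
The memory-failure changes above honor a per-thread early-kill mark, so a process can dedicate one thread to hwpoison SIGBUS delivery. Roughly, that thread marks itself with prctl() like this (a sketch: the constants come from linux/prctl.h, with fallback defines in case older headers lack them, and real code would go on to sigwaitinfo() for SIGBUS and check si_code for BUS_MCEERR_AO):

#include <stdio.h>
#include <pthread.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL		33
#define PR_MCE_KILL_SET		1
#define PR_MCE_KILL_EARLY	1
#endif

static void *mce_thread(void *arg)
{
	/* mark only this thread as the early-kill SIGBUS recipient */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
		perror("prctl");
	/* ... wait for SIGBUS and handle BUS_MCEERR_AO here ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, mce_thread, NULL);
	pthread_join(t, NULL);
	return 0;
}
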
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c |
5237 |
+index a4317da60532..154af210178b 100644 |
5238 |
+--- a/mm/page-writeback.c |
5239 |
++++ b/mm/page-writeback.c |
5240 |
+@@ -2398,7 +2398,7 @@ int test_clear_page_writeback(struct page *page) |
5241 |
+ return ret; |
5242 |
+ } |
5243 |
+ |
5244 |
+-int test_set_page_writeback(struct page *page) |
5245 |
++int __test_set_page_writeback(struct page *page, bool keep_write) |
5246 |
+ { |
5247 |
+ struct address_space *mapping = page_mapping(page); |
5248 |
+ int ret; |
5249 |
+@@ -2423,9 +2423,10 @@ int test_set_page_writeback(struct page *page) |
5250 |
+ radix_tree_tag_clear(&mapping->page_tree, |
5251 |
+ page_index(page), |
5252 |
+ PAGECACHE_TAG_DIRTY); |
5253 |
+- radix_tree_tag_clear(&mapping->page_tree, |
5254 |
+- page_index(page), |
5255 |
+- PAGECACHE_TAG_TOWRITE); |
5256 |
++ if (!keep_write) |
5257 |
++ radix_tree_tag_clear(&mapping->page_tree, |
5258 |
++ page_index(page), |
5259 |
++ PAGECACHE_TAG_TOWRITE); |
5260 |
+ spin_unlock_irqrestore(&mapping->tree_lock, flags); |
5261 |
+ } else { |
5262 |
+ ret = TestSetPageWriteback(page); |
5263 |
+@@ -2436,7 +2437,7 @@ int test_set_page_writeback(struct page *page) |
5264 |
+ return ret; |
5265 |
+ |
5266 |
+ } |
5267 |
+-EXPORT_SYMBOL(test_set_page_writeback); |
5268 |
++EXPORT_SYMBOL(__test_set_page_writeback); |
5269 |
+ |
5270 |
+ /* |
5271 |
+ * Return true if any of the pages in the mapping are marked with the |
5272 |
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
5273 |
+index 5dba2933c9c0..56eb0eb382b1 100644 |
5274 |
+--- a/mm/page_alloc.c |
5275 |
++++ b/mm/page_alloc.c |
5276 |
+@@ -6009,53 +6009,65 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) |
5277 |
+ * @end_bitidx: The last bit of interest |
5278 |
+ * returns pageblock_bits flags |
5279 |
+ */ |
5280 |
+-unsigned long get_pageblock_flags_group(struct page *page, |
5281 |
+- int start_bitidx, int end_bitidx) |
5282 |
++unsigned long get_pageblock_flags_mask(struct page *page, |
5283 |
++ unsigned long end_bitidx, |
5284 |
++ unsigned long mask) |
5285 |
+ { |
5286 |
+ struct zone *zone; |
5287 |
+ unsigned long *bitmap; |
5288 |
+- unsigned long pfn, bitidx; |
5289 |
+- unsigned long flags = 0; |
5290 |
+- unsigned long value = 1; |
5291 |
++ unsigned long pfn, bitidx, word_bitidx; |
5292 |
++ unsigned long word; |
5293 |
+ |
5294 |
+ zone = page_zone(page); |
5295 |
+ pfn = page_to_pfn(page); |
5296 |
+ bitmap = get_pageblock_bitmap(zone, pfn); |
5297 |
+ bitidx = pfn_to_bitidx(zone, pfn); |
5298 |
++ word_bitidx = bitidx / BITS_PER_LONG; |
5299 |
++ bitidx &= (BITS_PER_LONG-1); |
5300 |
+ |
5301 |
+- for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) |
5302 |
+- if (test_bit(bitidx + start_bitidx, bitmap)) |
5303 |
+- flags |= value; |
5304 |
+- |
5305 |
+- return flags; |
5306 |
++ word = bitmap[word_bitidx]; |
5307 |
++ bitidx += end_bitidx; |
5308 |
++ return (word >> (BITS_PER_LONG - bitidx - 1)) & mask; |
5309 |
+ } |
5310 |
+ |
5311 |
+ /** |
5312 |
+- * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages |
5313 |
++ * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages |
5314 |
+ * @page: The page within the block of interest |
5315 |
+ * @start_bitidx: The first bit of interest |
5316 |
+ * @end_bitidx: The last bit of interest |
5317 |
+ * @flags: The flags to set |
5318 |
+ */ |
5319 |
+-void set_pageblock_flags_group(struct page *page, unsigned long flags, |
5320 |
+- int start_bitidx, int end_bitidx) |
5321 |
++void set_pageblock_flags_mask(struct page *page, unsigned long flags, |
5322 |
++ unsigned long end_bitidx, |
5323 |
++ unsigned long mask) |
5324 |
+ { |
5325 |
+ struct zone *zone; |
5326 |
+ unsigned long *bitmap; |
5327 |
+- unsigned long pfn, bitidx; |
5328 |
+- unsigned long value = 1; |
5329 |
++ unsigned long pfn, bitidx, word_bitidx; |
5330 |
++ unsigned long old_word, word; |
5331 |
++ |
5332 |
++ BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); |
5333 |
+ |
5334 |
+ zone = page_zone(page); |
5335 |
+ pfn = page_to_pfn(page); |
5336 |
+ bitmap = get_pageblock_bitmap(zone, pfn); |
5337 |
+ bitidx = pfn_to_bitidx(zone, pfn); |
5338 |
++ word_bitidx = bitidx / BITS_PER_LONG; |
5339 |
++ bitidx &= (BITS_PER_LONG-1); |
5340 |
++ |
5341 |
+ VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); |
5342 |
+ |
5343 |
+- for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) |
5344 |
+- if (flags & value) |
5345 |
+- __set_bit(bitidx + start_bitidx, bitmap); |
5346 |
+- else |
5347 |
+- __clear_bit(bitidx + start_bitidx, bitmap); |
5348 |
++ bitidx += end_bitidx; |
5349 |
++ mask <<= (BITS_PER_LONG - bitidx - 1); |
5350 |
++ flags <<= (BITS_PER_LONG - bitidx - 1); |
5351 |
++ |
5352 |
++ word = ACCESS_ONCE(bitmap[word_bitidx]); |
5353 |
++ for (;;) { |
5354 |
++ old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); |
5355 |
++ if (word == old_word) |
5356 |
++ break; |
5357 |
++ word = old_word; |
5358 |
++ } |
5359 |
+ } |
5360 |
+ |
5361 |
+ /* |
5362 |
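
The rewritten set_pageblock_flags_mask() places the whole flag group in its word with shift and mask, then publishes it with a compare-and-swap, so racing readers never observe a torn per-bit update. A stand-alone model of that loop, with C11 atomics standing in for the kernel's cmpxchg() (word layout illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static _Atomic unsigned long bitmap_word;

static void set_flags_mask(unsigned long flags, unsigned long bitidx,
			   unsigned long end_bitidx, unsigned long mask)
{
	unsigned long shift = BITS_PER_LONG - (bitidx + end_bitidx) - 1;
	unsigned long word = atomic_load(&bitmap_word);

	mask <<= shift;
	flags <<= shift;
	while (!atomic_compare_exchange_weak(&bitmap_word, &word,
					     (word & ~mask) | flags))
		;	/* word was reloaded with the current value */
}

int main(void)
{
	set_flags_mask(0x5, 0, 2, 0x7);	/* 3-bit group at the word's top */
	printf("word = %#lx\n", atomic_load(&bitmap_word));
	return 0;
}
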
+diff --git a/mm/rmap.c b/mm/rmap.c |
5363 |
+index 83bfafabb47b..14d1e28774e5 100644 |
5364 |
+--- a/mm/rmap.c |
5365 |
++++ b/mm/rmap.c |
5366 |
+@@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma) |
5367 |
+ * LOCK should suffice since the actual taking of the lock must |
5368 |
+ * happen _before_ what follows. |
5369 |
+ */ |
5370 |
++ might_sleep(); |
5371 |
+ if (rwsem_is_locked(&anon_vma->root->rwsem)) { |
5372 |
+ anon_vma_lock_write(anon_vma); |
5373 |
+ anon_vma_unlock_write(anon_vma); |
5374 |
+@@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page) |
5375 |
+ * above cannot corrupt). |
5376 |
+ */ |
5377 |
+ if (!page_mapped(page)) { |
5378 |
++ rcu_read_unlock(); |
5379 |
+ put_anon_vma(anon_vma); |
5380 |
+- anon_vma = NULL; |
5381 |
++ return NULL; |
5382 |
+ } |
5383 |
+ out: |
5384 |
+ rcu_read_unlock(); |
5385 |
+@@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page) |
5386 |
+ } |
5387 |
+ |
5388 |
+ if (!page_mapped(page)) { |
5389 |
++ rcu_read_unlock(); |
5390 |
+ put_anon_vma(anon_vma); |
5391 |
+- anon_vma = NULL; |
5392 |
+- goto out; |
5393 |
++ return NULL; |
5394 |
+ } |
5395 |
+ |
5396 |
+ /* we pinned the anon_vma, its safe to sleep */ |
5397 |
+diff --git a/mm/vmscan.c b/mm/vmscan.c |
5398 |
+index 32c661d66a45..a50bde6edbbc 100644 |
5399 |
+--- a/mm/vmscan.c |
5400 |
++++ b/mm/vmscan.c |
5401 |
+@@ -2525,10 +2525,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) |
5402 |
+ |
5403 |
+ for (i = 0; i <= ZONE_NORMAL; i++) { |
5404 |
+ zone = &pgdat->node_zones[i]; |
5405 |
++ if (!populated_zone(zone)) |
5406 |
++ continue; |
5407 |
++ |
5408 |
+ pfmemalloc_reserve += min_wmark_pages(zone); |
5409 |
+ free_pages += zone_page_state(zone, NR_FREE_PAGES); |
5410 |
+ } |
5411 |
+ |
5412 |
++ /* If there are no reserves (unexpected config) then do not throttle */ |
5413 |
++ if (!pfmemalloc_reserve) |
5414 |
++ return true; |
5415 |
++ |
5416 |
+ wmark_ok = free_pages > pfmemalloc_reserve / 2; |
5417 |
+ |
5418 |
+ /* kswapd must be awake if processes are being throttled */ |
5419 |
+@@ -2553,9 +2560,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) |
5420 |
+ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, |
5421 |
+ nodemask_t *nodemask) |
5422 |
+ { |
5423 |
++ struct zoneref *z; |
5424 |
+ struct zone *zone; |
5425 |
+- int high_zoneidx = gfp_zone(gfp_mask); |
5426 |
+- pg_data_t *pgdat; |
5427 |
++ pg_data_t *pgdat = NULL; |
5428 |
+ |
5429 |
+ /* |
5430 |
+ * Kernel threads should not be throttled as they may be indirectly |
5431 |
+@@ -2574,10 +2581,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, |
5432 |
+ if (fatal_signal_pending(current)) |
5433 |
+ goto out; |
5434 |
+ |
5435 |
+- /* Check if the pfmemalloc reserves are ok */ |
5436 |
+- first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone); |
5437 |
+- pgdat = zone->zone_pgdat; |
5438 |
+- if (pfmemalloc_watermark_ok(pgdat)) |
5439 |
++ /* |
5440 |
++ * Check if the pfmemalloc reserves are ok by finding the first node |
5441 |
++ * with a usable ZONE_NORMAL or lower zone. The expectation is that |
5442 |
++ * GFP_KERNEL will be required for allocating network buffers when |
5443 |
++ * swapping over the network so ZONE_HIGHMEM is unusable. |
5444 |
++ * |
5445 |
++ * Throttling is based on the first usable node and throttled processes |
5446 |
++ * wait on a queue until kswapd makes progress and wakes them. There |
5447 |
++ * is an affinity then between processes waking up and where reclaim |
5448 |
++ * progress has been made assuming the process wakes on the same node. |
5449 |
++ * More importantly, processes running on remote nodes will not compete |
5450 |
++ * for remote pfmemalloc reserves and processes on different nodes |
5451 |
++ * should make reasonable progress. |
5452 |
++ */ |
5453 |
++ for_each_zone_zonelist_nodemask(zone, z, zonelist, |
5454 |
++ gfp_mask, nodemask) { |
5455 |
++ if (zone_idx(zone) > ZONE_NORMAL) |
5456 |
++ continue; |
5457 |
++ |
5458 |
++ /* Throttle based on the first usable node */ |
5459 |
++ pgdat = zone->zone_pgdat; |
5460 |
++ if (pfmemalloc_watermark_ok(pgdat)) |
5461 |
++ goto out; |
5462 |
++ break; |
5463 |
++ } |
5464 |
++ |
5465 |
++ /* If no zone was usable by the allocation flags then do not throttle */ |
5466 |
++ if (!pgdat) |
5467 |
+ goto out; |
5468 |
+ |
5469 |
+ /* Account for the throttling */ |
5470 |
+@@ -3302,7 +3333,10 @@ static int kswapd(void *p) |
5471 |
+ } |
5472 |
+ } |
5473 |
+ |
5474 |
++ tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); |
5475 |
+ current->reclaim_state = NULL; |
5476 |
++ lockdep_clear_current_reclaim_state(); |
5477 |
++ |
5478 |
+ return 0; |
5479 |
+ } |
5480 |
+ |
5481 |
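
The vmscan fix adds two bail-outs to the throttling test: skip unpopulated zones, and never throttle when no reserves exist at all. The predicate itself is just a comparison against half the accumulated reserve, sketched here with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

static bool pfmemalloc_watermark_ok(unsigned long reserve,
				    unsigned long free_pages)
{
	if (!reserve)		/* no usable zones: do not throttle */
		return true;
	return free_pages > reserve / 2;
}

int main(void)
{
	printf("%d %d %d\n",
	       pfmemalloc_watermark_ok(0, 0),	    /* 1: nothing to guard */
	       pfmemalloc_watermark_ok(1024, 600),  /* 1: above half */
	       pfmemalloc_watermark_ok(1024, 300)); /* 0: throttle */
	return 0;
}
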
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c |
5482 |
+index 73492b91105a..8796ffa08b43 100644 |
5483 |
+--- a/net/bluetooth/6lowpan.c |
5484 |
++++ b/net/bluetooth/6lowpan.c |
5485 |
+@@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *conn, |
5486 |
+ return 0; |
5487 |
+ } |
5488 |
+ |
5489 |
+-static void get_dest_bdaddr(struct in6_addr *ip6_daddr, |
5490 |
+- bdaddr_t *addr, u8 *addr_type) |
5491 |
++static u8 get_addr_type_from_eui64(u8 byte) |
5492 |
+ { |
5493 |
+- u8 *eui64; |
5494 |
++ /* Is universal(0) or local(1) bit, */ |
5495 |
++ if (byte & 0x02) |
5496 |
++ return ADDR_LE_DEV_RANDOM; |
5497 |
+ |
5498 |
+- eui64 = ip6_daddr->s6_addr + 8; |
5499 |
++ return ADDR_LE_DEV_PUBLIC; |
5500 |
++} |
5501 |
++ |
5502 |
++static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr) |
5503 |
++{ |
5504 |
++ u8 *eui64 = ip6_daddr->s6_addr + 8; |
5505 |
+ |
5506 |
+ addr->b[0] = eui64[7]; |
5507 |
+ addr->b[1] = eui64[6]; |
5508 |
+@@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_addr *ip6_daddr, |
5509 |
+ addr->b[3] = eui64[2]; |
5510 |
+ addr->b[4] = eui64[1]; |
5511 |
+ addr->b[5] = eui64[0]; |
5512 |
++} |
5513 |
+ |
5514 |
+- addr->b[5] ^= 2; |
5515 |
++static void convert_dest_bdaddr(struct in6_addr *ip6_daddr, |
5516 |
++ bdaddr_t *addr, u8 *addr_type) |
5517 |
++{ |
5518 |
++ copy_to_bdaddr(ip6_daddr, addr); |
5519 |
+ |
5520 |
+- /* Set universal/local bit to 0 */ |
5521 |
+- if (addr->b[5] & 1) { |
5522 |
+- addr->b[5] &= ~1; |
5523 |
+- *addr_type = ADDR_LE_DEV_PUBLIC; |
5524 |
+- } else { |
5525 |
+- *addr_type = ADDR_LE_DEV_RANDOM; |
5526 |
+- } |
5527 |
++ /* We need to toggle the U/L bit that we got from IPv6 address |
5528 |
++ * so that we get the proper address and type of the BD address. |
5529 |
++ */ |
5530 |
++ addr->b[5] ^= 0x02; |
5531 |
++ |
5532 |
++ *addr_type = get_addr_type_from_eui64(addr->b[5]); |
5533 |
+ } |
5534 |
+ |
5535 |
+ static int header_create(struct sk_buff *skb, struct net_device *netdev, |
5536 |
+@@ -473,9 +482,11 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev, |
5537 |
+ /* Get destination BT device from skb. |
5538 |
+ * If there is no such peer then discard the packet. |
5539 |
+ */ |
5540 |
+- get_dest_bdaddr(&hdr->daddr, &addr, &addr_type); |
5541 |
++ convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type); |
5542 |
+ |
5543 |
+- BT_DBG("dest addr %pMR type %d", &addr, addr_type); |
5544 |
++ BT_DBG("dest addr %pMR type %s IP %pI6c", &addr, |
5545 |
++ addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM", |
5546 |
++ &hdr->daddr); |
5547 |
+ |
5548 |
+ read_lock_irqsave(&devices_lock, flags); |
5549 |
+ peer = peer_lookup_ba(dev, &addr, addr_type); |
5550 |
+@@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) |
5551 |
+ } else { |
5552 |
+ unsigned long flags; |
5553 |
+ |
5554 |
+- get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type); |
5555 |
++ convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type); |
5556 |
+ eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8; |
5557 |
+ dev = lowpan_dev(netdev); |
5558 |
+ |
5559 |
+@@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) |
5560 |
+ peer = peer_lookup_ba(dev, &addr, addr_type); |
5561 |
+ read_unlock_irqrestore(&devices_lock, flags); |
5562 |
+ |
5563 |
+- BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name, |
5564 |
+- &addr, &lowpan_cb(skb)->addr, peer); |
5565 |
++ BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p", |
5566 |
++ netdev->name, &addr, |
5567 |
++ addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM", |
5568 |
++ &lowpan_cb(skb)->addr, peer); |
5569 |
+ |
5570 |
+ if (peer && peer->conn) |
5571 |
+ err = send_pkt(peer->conn, netdev->dev_addr, |
5572 |
+@@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type) |
5573 |
+ eui[6] = addr[1]; |
5574 |
+ eui[7] = addr[0]; |
5575 |
+ |
5576 |
+- eui[0] ^= 2; |
5577 |
+- |
5578 |
+- /* Universal/local bit set, RFC 4291 */ |
5579 |
++ /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */ |
5580 |
+ if (addr_type == ADDR_LE_DEV_PUBLIC) |
5581 |
+- eui[0] |= 1; |
5582 |
++ eui[0] &= ~0x02; |
5583 |
+ else |
5584 |
+- eui[0] &= ~1; |
5585 |
++ eui[0] |= 0x02; |
5586 |
++ |
5587 |
++ BT_DBG("type %d addr %*phC", addr_type, 8, eui); |
5588 |
+ } |
5589 |
+ |
5590 |
+ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr, |
5591 |
+@@ -634,7 +647,6 @@ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr, |
5592 |
+ { |
5593 |
+ netdev->addr_assign_type = NET_ADDR_PERM; |
5594 |
+ set_addr(netdev->dev_addr, addr->b, addr_type); |
5595 |
+- netdev->dev_addr[0] ^= 2; |
5596 |
+ } |
5597 |
+ |
5598 |
+ static void ifup(struct net_device *netdev) |
5599 |
+@@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev) |
5600 |
+ |
5601 |
+ memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8, |
5602 |
+ EUI64_ADDR_LEN); |
5603 |
+- peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local) |
5604 |
+- * is done according RFC2464 |
5605 |
+- */ |
5606 |
+- |
5607 |
+- raw_dump_inline(__func__, "peer IPv6 address", |
5608 |
+- (unsigned char *)&peer->peer_addr, 16); |
5609 |
+- raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8); |
5610 |
+ |
5611 |
+ write_lock_irqsave(&devices_lock, flags); |
5612 |
+ INIT_LIST_HEAD(&peer->list); |
5613 |
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 15010a230b6d..0381d55f995d 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1342,6 +1342,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
+ * is requested.
+ */
+ if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
++ conn->pending_sec_level != BT_SECURITY_FIPS &&
+ conn->pending_sec_level != BT_SECURITY_HIGH &&
+ conn->pending_sec_level != BT_SECURITY_MEDIUM)
+ return 0;
+@@ -2957,7 +2958,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ }
+
+ if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
+- conn->pending_sec_level == BT_SECURITY_HIGH) {
++ (conn->pending_sec_level == BT_SECURITY_HIGH ||
++ conn->pending_sec_level == BT_SECURITY_FIPS)) {
+ BT_DBG("%s ignoring key unauthenticated for high security",
+ hdev->name);
+ goto not_found;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index ef5e5b04f34f..ade3fb4c23bc 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1180,13 +1180,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
++ release_sock(parent);
+ return NULL;
+ }
+
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+ GFP_ATOMIC);
+- if (!sk)
++ if (!sk) {
++ release_sock(parent);
+ return NULL;
++ }
+
+ bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d2d4e0d5aed0..c88b2b671849 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -4530,7 +4530,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+- u8 type, addr_type;
++ u8 type, addr_type, authenticated;
+
+ if (key->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+@@ -4542,8 +4542,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ else
+ type = HCI_SMP_LTK_SLAVE;
+
++ switch (key->type) {
++ case MGMT_LTK_UNAUTHENTICATED:
++ authenticated = 0x00;
++ break;
++ case MGMT_LTK_AUTHENTICATED:
++ authenticated = 0x01;
++ break;
++ default:
++ continue;
++ }
++
+ hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
+- key->type, key->val, key->enc_size, key->ediv,
++ authenticated, key->val, key->enc_size, key->ediv,
+ key->rand);
+ }
+
+@@ -5005,6 +5016,14 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+ }
+
++static u8 mgmt_ltk_type(struct smp_ltk *ltk)
++{
++ if (ltk->authenticated)
++ return MGMT_LTK_AUTHENTICATED;
++
++ return MGMT_LTK_UNAUTHENTICATED;
++}
++
+ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+ {
+ struct mgmt_ev_new_long_term_key ev;
+@@ -5030,7 +5049,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
+- ev.key.type = key->authenticated;
++ ev.key.type = mgmt_ltk_type(key);
+ ev.key.enc_size = key->enc_size;
+ ev.key.ediv = key->ediv;
+ ev.key.rand = key->rand;
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index dfb4e1161c10..956d127528cb 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -908,10 +908,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+
+ authreq = seclevel_to_authreq(sec_level);
+
+- /* hcon->auth_type is set by pair_device in mgmt.c. If the MITM
+- * flag is set we should also set it for the SMP request.
++ /* Require MITM if IO Capability allows or the security level
++ * requires it.
+ */
+- if ((hcon->auth_type & 0x01))
++ if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
++ sec_level > BT_SECURITY_MEDIUM)
+ authreq |= SMP_AUTH_MITM;
+
+ if (hcon->link_mode & HCI_LM_MASTER) {
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index f46e4dd0558d..152d4d25ab7c 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -155,11 +155,11 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
+ for module in $(find lib/modules/ -name *.ko); do
+ mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
+ # only keep debug symbols in the debug file
+- objcopy --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
++ $OBJCOPY --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
+ # strip original module from debug symbols
+- objcopy --strip-debug $module
++ $OBJCOPY --strip-debug $module
+ # then add a link to those
+- objcopy --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
++ $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
+ done
+ )
+ fi
+diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
+index 05654f5e48d5..c4d6d2e20e0d 100644
+--- a/tools/vm/page-types.c
++++ b/tools/vm/page-types.c
+@@ -32,6 +32,8 @@
+ #include <assert.h>
+ #include <ftw.h>
+ #include <time.h>
++#include <setjmp.h>
++#include <signal.h>
+ #include <sys/types.h>
+ #include <sys/errno.h>
+ #include <sys/fcntl.h>
+@@ -824,21 +826,38 @@ static void show_file(const char *name, const struct stat *st)
+ atime, now - st->st_atime);
+ }
+
++static sigjmp_buf sigbus_jmp;
++
++static void * volatile sigbus_addr;
++
++static void sigbus_handler(int sig, siginfo_t *info, void *ucontex)
++{
++ (void)sig;
++ (void)ucontex;
++ sigbus_addr = info ? info->si_addr : NULL;
++ siglongjmp(sigbus_jmp, 1);
++}
++
++static struct sigaction sigbus_action = {
++ .sa_sigaction = sigbus_handler,
++ .sa_flags = SA_SIGINFO,
++};
++
+ static void walk_file(const char *name, const struct stat *st)
+ {
+ uint8_t vec[PAGEMAP_BATCH];
+ uint64_t buf[PAGEMAP_BATCH], flags;
+ unsigned long nr_pages, pfn, i;
++ off_t off, end = st->st_size;
+ int fd;
+- off_t off;
+ ssize_t len;
+ void *ptr;
+ int first = 1;
+
+ fd = checked_open(name, O_RDONLY|O_NOATIME|O_NOFOLLOW);
+
+- for (off = 0; off < st->st_size; off += len) {
+- nr_pages = (st->st_size - off + page_size - 1) / page_size;
++ for (off = 0; off < end; off += len) {
++ nr_pages = (end - off + page_size - 1) / page_size;
+ if (nr_pages > PAGEMAP_BATCH)
+ nr_pages = PAGEMAP_BATCH;
+ len = nr_pages * page_size;
+@@ -855,11 +874,19 @@ static void walk_file(const char *name, const struct stat *st)
+ if (madvise(ptr, len, MADV_RANDOM))
+ fatal("madvice failed: %s", name);
+
++ if (sigsetjmp(sigbus_jmp, 1)) {
++ end = off + sigbus_addr ? sigbus_addr - ptr : 0;
++ fprintf(stderr, "got sigbus at offset %lld: %s\n",
++ (long long)end, name);
++ goto got_sigbus;
++ }
++
+ /* populate ptes */
+ for (i = 0; i < nr_pages ; i++) {
+ if (vec[i] & 1)
+ (void)*(volatile int *)(ptr + i * page_size);
+ }
++got_sigbus:
+
+ /* turn off harvesting reference bits */
+ if (madvise(ptr, len, MADV_SEQUENTIAL))
+@@ -910,6 +937,7 @@ static void walk_page_cache(void)
+
+ kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
+ pagemap_fd = checked_open("/proc/self/pagemap", O_RDONLY);
++ sigaction(SIGBUS, &sigbus_action, NULL);
+
+ if (stat(opt_file, &st))
+ fatal("stat failed: %s\n", opt_file);
+@@ -925,6 +953,7 @@ static void walk_page_cache(void)
+
+ close(kpageflags_fd);
+ close(pagemap_fd);
++ signal(SIGBUS, SIG_DFL);
+ }
+
+ static void parse_file(const char *name)