From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.4 commit in: /
Date: Tue, 01 Jul 2014 16:08:44
Message-Id: 1404230813.4cfc1e2967b4fbddf2d3fc251cdb8ee10b62ccd5.mpagano@gentoo
commit: 4cfc1e2967b4fbddf2d3fc251cdb8ee10b62ccd5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jul 1 16:06:53 2014 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jul 1 16:06:53 2014 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=4cfc1e29

Linux patch 3.4.96

---
0000_README | 4 +
1095_linux-3.4.96.patch | 1326 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1330 insertions(+)

diff --git a/0000_README b/0000_README
index 1db991a..a4ce565 100644
--- a/0000_README
+++ b/0000_README
@@ -419,6 +419,10 @@ Patch: 1094_linux-3.4.95.patch
From: http://www.kernel.org
Desc: Linux 3.4.95

+Patch: 1095_linux-3.4.96.patch
+From: http://www.kernel.org
+Desc: Linux 3.4.96
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1095_linux-3.4.96.patch b/1095_linux-3.4.96.patch
new file mode 100644
index 0000000..8c229d5
--- /dev/null
+++ b/1095_linux-3.4.96.patch
@@ -0,0 +1,1326 @@
+diff --git a/Makefile b/Makefile
+index fda1dab589be..e4ecdedbfe27 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index af4e8c8a5422..6582c4adc182 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
+ return trace->nr_entries >= trace->max_entries;
+ }
+
+-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++/* This must be noinline to so that our skip calculation works correctly */
++static noinline void __save_stack_trace(struct task_struct *tsk,
++ struct stack_trace *trace, unsigned int nosched)
+ {
+ struct stack_trace_data data;
+ struct stackframe frame;
+
+ data.trace = trace;
+ data.skip = trace->skip;
++ data.no_sched_functions = nosched;
+
+ if (tsk != current) {
+ #ifdef CONFIG_SMP
+@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ return;
+ #else
+- data.no_sched_functions = 1;
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ frame.lr = 0; /* recovered from the stack */
+@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ } else {
+ register unsigned long current_sp asm ("sp");
+
+- data.no_sched_functions = 0;
++ /* We don't want this function nor the caller */
++ data.skip += 2;
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+- frame.pc = (unsigned long)save_stack_trace_tsk;
++ frame.pc = (unsigned long)__save_stack_trace;
+ }
+
+ walk_stackframe(&frame, save_trace, &data);
+@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ }
+
++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++{
++ __save_stack_trace(tsk, trace, 1);
++}
++
+ void save_stack_trace(struct stack_trace *trace)
+ {
+- save_stack_trace_tsk(current, trace);
++ __save_stack_trace(current, trace, 0);
+ }
+ EXPORT_SYMBOL_GPL(save_stack_trace);
+ #endif
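
An aside on the stacktrace.c hunk above: the helper is noinline so that it always owns a real stack frame, which is what keeps the fixed "skip 2" landing on the same two frames. A rough userspace analogue of that skip logic, built on glibc's backtrace() rather than the kernel unwinder (save_trace() and caller() are invented names for the demo):

/* Compile with: gcc -std=c99 -O0 -rdynamic skip.c */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

/* noinline for the same reason as in the hunk: if this helper were
 * inlined, its frame would disappear and a fixed skip count would
 * swallow a caller frame instead. */
static __attribute__((noinline)) void save_trace(int skip)
{
        void *frames[16];
        int n = backtrace(frames, 16);
        char **names = backtrace_symbols(frames, n);

        if (!names)
                return;
        /* drop this helper and its caller, like "data.skip += 2" */
        for (int i = skip; i < n; i++)
                printf("%s\n", names[i]);
        free(names);
}

static __attribute__((noinline)) void caller(void)
{
        save_trace(2);
}

int main(void)
{
        caller();
        return 0;
}
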
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index 47853debb3b9..025415e7346a 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -142,9 +142,9 @@ struct _lowcore {
+ __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
+
+ /* Interrupt response block */
+- __u8 irb[64]; /* 0x0300 */
++ __u8 irb[96]; /* 0x0300 */
+
+- __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
++ __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+@@ -288,12 +288,13 @@ struct _lowcore {
+ __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
+
+ /* Interrupt response block. */
+- __u8 irb[64]; /* 0x0400 */
++ __u8 irb[96]; /* 0x0400 */
++ __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */
+
+ /* Per cpu primary space access list */
+- __u32 paste[16]; /* 0x0440 */
++ __u32 paste[16]; /* 0x0480 */
+
+- __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
++ __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 2af4ccd88d16..e1e7f9c831da 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -426,9 +426,10 @@ sysenter_past_esp:
+ jnz sysenter_audit
+ sysenter_do_call:
+ cmpl $(NR_syscalls), %eax
+- jae syscall_badsys
++ jae sysenter_badsys
+ call *sys_call_table(,%eax,4)
+ movl %eax,PT_EAX(%esp)
++sysenter_after_call:
+ LOCKDEP_SYS_EXIT
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ TRACE_IRQS_OFF
+@@ -550,11 +551,6 @@ ENTRY(iret_exc)
+
+ CFI_RESTORE_STATE
+ ldt_ss:
+- larl PT_OLDSS(%esp), %eax
+- jnz restore_nocheck
+- testl $0x00400000, %eax # returning to 32bit stack?
+- jnz restore_nocheck # allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+ /*
+ * The kernel can't run on a non-flat stack if paravirt mode
+@@ -683,7 +679,12 @@ END(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+- jmp resume_userspace
++ jmp syscall_exit
++END(syscall_badsys)
++
++sysenter_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp sysenter_after_call
+ END(syscall_badsys)
+ CFI_ENDPROC
+ /*
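
An aside on the entry_32.S hunk above: stripped of the assembly details, the fix is a bounds check on the syscall number whose failure path stores -ENOSYS and then joins the matching exit path (sysenter_after_call) instead of a mismatched one. A minimal C sketch of that dispatch shape (table, dispatch() and sys_hello() are made-up names, not kernel API):

#include <errno.h>
#include <stdio.h>

typedef long (*syscall_fn)(long arg);

static long sys_hello(long arg)
{
        return arg + 1;
}

static const syscall_fn table[] = { sys_hello };
#define NR_SYSCALLS (sizeof(table) / sizeof(table[0]))

/* bad numbers take the same exit path as good ones, just with -ENOSYS
 * as the stored return value - the analogue of sysenter_badsys */
static long dispatch(unsigned long nr, long arg)
{
        if (nr >= NR_SYSCALLS)
                return -ENOSYS;
        return table[nr](arg);
}

int main(void)
{
        printf("%ld %ld\n", dispatch(0, 41), dispatch(7, 0));
        return 0;
}
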
+diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
+index fd1f10348130..b6c54909d361 100644
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203 common sched_setaffinity sys_sched_setaffinity
+ 204 common sched_getaffinity sys_sched_getaffinity
+ 205 64 set_thread_area
+-206 common io_setup sys_io_setup
++206 64 io_setup sys_io_setup
+ 207 common io_destroy sys_io_destroy
+ 208 common io_getevents sys_io_getevents
+-209 common io_submit sys_io_submit
++209 64 io_submit sys_io_submit
+ 210 common io_cancel sys_io_cancel
+ 211 64 get_thread_area
+ 212 common lookup_dcookie sys_lookup_dcookie
+@@ -353,3 +353,5 @@
+ 540 x32 process_vm_writev compat_sys_process_vm_writev
+ 541 x32 setsockopt compat_sys_setsockopt
+ 542 x32 getsockopt compat_sys_getsockopt
++543 x32 io_setup compat_sys_io_setup
++544 x32 io_submit compat_sys_io_submit
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 0bf5ec2d5818..06d2d22281b2 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -57,6 +57,12 @@ EXPORT_SYMBOL(acpi_root_dir);
+
+
+ #ifdef CONFIG_X86
++#ifdef CONFIG_ACPI_CUSTOM_DSDT
++static inline int set_copy_dsdt(const struct dmi_system_id *id)
++{
++ return 0;
++}
++#else
+ static int set_copy_dsdt(const struct dmi_system_id *id)
+ {
+ printk(KERN_NOTICE "%s detected - "
+@@ -64,6 +70,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
+ acpi_gbl_copy_dsdt_locally = 1;
+ return 0;
+ }
++#endif
+
+ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ /*
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index 25373df1dcf8..5d069c79bd8b 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -345,7 +345,6 @@ out:
+ free_irq(apbs[i].irq, &dummy);
+ iounmap(apbs[i].RamIO);
+ }
+- pci_disable_device(dev);
+ return ret;
+ }
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 230f435c7ad8..75fa2e7b87b5 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -861,7 +861,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ * ->numbered being checked, which may not always be the case when
+ * drivers go to access report values.
+ */
+- report = hid->report_enum[type].report_id_hash[id];
++ if (id == 0) {
++ /*
++ * Validating on id 0 means we should examine the first
++ * report in the list.
++ */
++ report = list_entry(
++ hid->report_enum[type].report_list.next,
++ struct hid_report, list);
++ } else {
++ report = hid->report_enum[type].report_id_hash[id];
++ }
+ if (!report) {
+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
+ return NULL;
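
An aside on the hid-core.c hunk above: list_entry() is the kernel's container_of() in disguise, recovering the enclosing struct from a pointer to its embedded list node, which is how "id 0" can mean "first report on the list". A self-contained sketch of that pointer arithmetic (struct report and the one-node list are invented for the demo):

#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *next;
};

/* same trick as the kernel macro: step back from the member to the
 * start of the containing structure */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct report {
        int id;
        struct list_head list;
};

int main(void)
{
        struct report a = { .id = 9 };
        struct list_head head = { .next = &a.list };

        /* "first report in the list", as the hunk does for id == 0 */
        struct report *first = container_of(head.next, struct report, list);

        printf("first report id: %d\n", first->id);
        return 0;
}
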
+diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
+index 5f92b865f64b..a28c5d312447 100644
+--- a/drivers/net/can/sja1000/peak_pci.c
++++ b/drivers/net/can/sja1000/peak_pci.c
+@@ -547,7 +547,7 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
+ {
+ struct sja1000_priv *priv;
+ struct peak_pci_chan *chan;
+- struct net_device *dev;
++ struct net_device *dev, *prev_dev;
+ void __iomem *cfg_base, *reg_base;
+ u16 sub_sys_id, icr;
+ int i, err, channels;
+@@ -681,11 +681,13 @@ failure_remove_channels:
+ writew(0x0, cfg_base + PITA_ICR + 2);
+
+ chan = NULL;
+- for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
+- unregister_sja1000dev(dev);
+- free_sja1000dev(dev);
++ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
+ priv = netdev_priv(dev);
+ chan = priv->priv;
++ prev_dev = chan->prev_dev;
++
++ unregister_sja1000dev(dev);
++ free_sja1000dev(dev);
+ }
+
+ /* free any PCIeC resources too */
+@@ -719,10 +721,12 @@ static void __devexit peak_pci_remove(struct pci_dev *pdev)
+
+ /* Loop over all registered devices */
+ while (1) {
++ struct net_device *prev_dev = chan->prev_dev;
++
+ dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+- dev = chan->prev_dev;
++ dev = prev_dev;
+
+ if (!dev) {
+ /* do that only for first channel */
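
An aside on the peak_pci.c hunk above: both loops are the classic use-after-free fix for list teardown - read the next link out of the node before freeing the node, never after. A freestanding sketch of the pattern (struct node and teardown() are illustrative names):

#include <stdlib.h>

struct node {
        struct node *prev_dev;
};

static void teardown(struct node *dev)
{
        while (dev) {
                /* grab the link first; after free() the memory behind
                 * dev->prev_dev may be reused at any moment */
                struct node *prev = dev->prev_dev;

                free(dev);
                dev = prev;
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->prev_dev = head;
                head = n;
        }
        teardown(head);
        return 0;
}
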
+diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
+index c7df34e6b60b..f82581428bb0 100644
+--- a/drivers/staging/tidspbridge/core/dsp-clock.c
++++ b/drivers/staging/tidspbridge/core/dsp-clock.c
+@@ -213,7 +213,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
+ case GPT_CLK:
+ status = omap_dm_timer_start(timer[clk_id - 1]);
+ break;
+-#ifdef CONFIG_OMAP_MCBSP
++#ifdef CONFIG_SND_OMAP_SOC_MCBSP
+ case MCBSP_CLK:
+ omap_mcbsp_request(MCBSP_ID(clk_id));
+ omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
+@@ -289,7 +289,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
+ case GPT_CLK:
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
+ break;
+-#ifdef CONFIG_OMAP_MCBSP
++#ifdef CONFIG_SND_OMAP_SOC_MCBSP
+ case MCBSP_CLK:
+ omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
+ omap_mcbsp_free(MCBSP_ID(clk_id));
+diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
+index 8b68f7b82631..fa4e21b882a0 100644
+--- a/drivers/target/target_core_rd.c
++++ b/drivers/target/target_core_rd.c
+@@ -177,7 +177,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
+ - 1;
+
+ for (j = 0; j < sg_per_table; j++) {
+- pg = alloc_pages(GFP_KERNEL, 0);
++ pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!pg) {
+ pr_err("Unable to allocate scatterlist"
+ " pages for struct rd_dev_sg_table\n");
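
An aside on the target_core_rd.c hunk above: adding __GFP_ZERO means the ramdisk pages start out zeroed, so stale kernel memory can never be read back by the initiator. calloc() gives the same guarantee in userspace:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* calloc() plays the role of GFP_KERNEL | __GFP_ZERO: the new
         * buffer is guaranteed all-zero, so nothing left behind by a
         * previous owner can leak to whoever reads it */
        unsigned char *buf = calloc(1, 4096);

        if (!buf)
                return 1;
        for (size_t i = 0; i < 4096; i++) {
                if (buf[i]) {
                        puts("stale byte found (cannot happen)");
                        return 1;
                }
        }
        puts("freshly allocated buffer is all zeroes");
        free(buf);
        return 0;
}
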
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8ed83b938a77..895497d42270 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -531,6 +531,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+
+ dwc3_remove_requests(dwc, dep);
+
++ /* make sure HW endpoint isn't stalled */
++ if (dep->flags & DWC3_EP_STALL)
++ __dwc3_gadget_ep_set_halt(dep, 0);
++
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
+index e58b16442971..d9eaaa3b3e44 100644
+--- a/drivers/usb/gadget/inode.c
++++ b/drivers/usb/gadget/inode.c
+@@ -1499,7 +1499,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ }
+ break;
+
+-#ifndef CONFIG_USB_GADGET_PXA25X
++#ifndef CONFIG_USB_PXA25X
+ /* PXA automagically handles this request too */
+ case USB_REQ_GET_CONFIGURATION:
+ if (ctrl->bRequestType != 0x80)
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 78933512c18b..90dcf54cd7e8 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -555,6 +555,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ },
+ },
++ {
++ /* HASEE E200 */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
++ DMI_MATCH(DMI_BOARD_NAME, "E210"),
++ DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
++ },
++ },
+ { }
+ };
+
+@@ -564,9 +572,14 @@ static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
+ {
+ int try_handoff = 1, tried_handoff = 0;
+
+- /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+- * the handoff on its unused controller. Skip it. */
+- if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
++ /*
++ * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
++ * the handoff on its unused controller. Skip it.
++ *
++ * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
++ */
++ if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
++ pdev->device == 0x27cc)) {
+ if (dmi_check_system(ehci_dmi_nohandoff_table))
+ try_handoff = 0;
+ }
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 055b84adedac..174be05ba2c7 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -7,9 +7,10 @@
+ #include <linux/moduleparam.h>
+ #include <linux/scatterlist.h>
+ #include <linux/mutex.h>
+-
++#include <linux/timer.h>
+ #include <linux/usb.h>
+
++#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -355,6 +356,7 @@ static int simple_io(
+ int max = urb->transfer_buffer_length;
+ struct completion completion;
+ int retval = 0;
++ unsigned long expire;
+
+ urb->context = &completion;
+ while (retval == 0 && iterations-- > 0) {
+@@ -367,9 +369,15 @@ static int simple_io(
+ if (retval != 0)
+ break;
+
+- /* NOTE: no timeouts; can't be broken out of by interrupt */
+- wait_for_completion(&completion);
+- retval = urb->status;
++ expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
++ if (!wait_for_completion_timeout(&completion, expire)) {
++ usb_kill_urb(urb);
++ retval = (urb->status == -ENOENT ?
++ -ETIMEDOUT : urb->status);
++ } else {
++ retval = urb->status;
++ }
++
+ urb->dev = udev;
+ if (retval == 0 && usb_pipein(urb->pipe))
+ retval = simple_check_buf(tdev, urb);
+@@ -462,6 +470,14 @@ alloc_sglist(int nents, int max, int vary)
+ return sg;
+ }
+
++static void sg_timeout(unsigned long _req)
++{
++ struct usb_sg_request *req = (struct usb_sg_request *) _req;
++
++ req->status = -ETIMEDOUT;
++ usb_sg_cancel(req);
++}
++
+ static int perform_sglist(
+ struct usbtest_dev *tdev,
+ unsigned iterations,
+@@ -473,6 +489,9 @@ static int perform_sglist(
+ {
+ struct usb_device *udev = testdev_to_usbdev(tdev);
+ int retval = 0;
++ struct timer_list sg_timer;
++
++ setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
+
+ while (retval == 0 && iterations-- > 0) {
+ retval = usb_sg_init(req, udev, pipe,
+@@ -483,7 +502,10 @@ static int perform_sglist(
+
+ if (retval)
+ break;
++ mod_timer(&sg_timer, jiffies +
++ msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
+ usb_sg_wait(req);
++ del_timer_sync(&sg_timer);
+ retval = req->status;
+
+ /* FIXME check resulting data pattern */
+@@ -1135,6 +1157,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
+ urb->context = &completion;
+ urb->complete = unlink1_callback;
+
++ if (usb_pipeout(urb->pipe)) {
++ simple_fill_buf(urb);
++ urb->transfer_flags |= URB_ZERO_PACKET;
++ }
++
+ /* keep the endpoint busy. there are lots of hc/hcd-internal
+ * states, and testing should get to all of them over time.
+ *
+@@ -1265,6 +1292,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
+ unlink_queued_callback, &ctx);
+ ctx.urbs[i]->transfer_dma = buf_dma;
+ ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
++
++ if (usb_pipeout(ctx.urbs[i]->pipe)) {
++ simple_fill_buf(ctx.urbs[i]);
++ ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
++ }
+ }
+
+ /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
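
An aside on the usbtest.c hunks above: both changes replace an unbounded wait with a timed wait plus explicit cancellation of the stuck request. A userspace sketch of the same shape using POSIX semaphores (compile with -pthread; the 1 s deadline stands in for the driver's 10 s SIMPLE_IO_TIMEOUT):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sem_t done;
        struct timespec deadline;

        /* nobody ever posts the semaphore: this stands in for an URB
         * whose completion never fires */
        sem_init(&done, 0, 0);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;

        if (sem_timedwait(&done, &deadline) == -1 && errno == ETIMEDOUT)
                puts("timed out: would usb_kill_urb() and return -ETIMEDOUT");

        sem_destroy(&done);
        return 0;
}
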
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index def9ed02bf19..f7ccfbcdbf04 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1930,6 +1930,7 @@ static int option_send_setup(struct usb_serial_port *port)
+ struct usb_wwan_port_private *portdata;
+ int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ int val = 0;
++ int res;
+ dbg("%s", __func__);
+
+ if (is_blacklisted(ifNum, OPTION_BLACKLIST_SENDSETUP,
+@@ -1945,9 +1946,17 @@ static int option_send_setup(struct usb_serial_port *port)
+ if (portdata->rts_state)
+ val |= 0x02;
+
+- return usb_control_msg(serial->dev,
+- usb_rcvctrlpipe(serial->dev, 0),
+- 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
++ res = usb_autopm_get_interface(serial->interface);
++ if (res)
++ return res;
++
++ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
++ 0x22, 0x21, val, ifNum, NULL,
++ 0, USB_CTRL_SET_TIMEOUT);
++
++ usb_autopm_put_interface(serial->interface);
++
++ return res;
+ }
+
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index 3f5e4a73ddd5..bd79d68b51c1 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -59,6 +59,7 @@ struct sierra_intf_private {
+ spinlock_t susp_lock;
+ unsigned int suspended:1;
+ int in_flight;
++ unsigned int open_ports;
+ };
+
+ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
+@@ -801,6 +802,7 @@ static void sierra_close(struct usb_serial_port *port)
+ struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+ struct sierra_intf_private *intfdata = port->serial->private;
++ struct urb *urb;
+
+
+ dev_dbg(&port->dev, "%s\n", __func__);
+@@ -812,7 +814,6 @@ static void sierra_close(struct usb_serial_port *port)
+ if (serial->dev) {
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected) {
+- serial->interface->needs_remote_wakeup = 0;
+ /* odd error handling due to pm counters */
+ if (!usb_autopm_get_interface(serial->interface))
+ sierra_send_setup(port);
+@@ -823,8 +824,21 @@ static void sierra_close(struct usb_serial_port *port)
+ mutex_unlock(&serial->disc_mutex);
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 0;
++ if (--intfdata->open_ports == 0)
++ serial->interface->needs_remote_wakeup = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
++ for (;;) {
++ urb = usb_get_from_anchor(&portdata->delayed);
++ if (!urb)
++ break;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ usb_autopm_put_interface_async(serial->interface);
++ spin_lock(&portdata->lock);
++ portdata->outstanding_urbs--;
++ spin_unlock(&portdata->lock);
++ }
+
+ /* Stop reading urbs */
+ sierra_stop_rx_urbs(port);
+@@ -867,23 +881,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+ usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
+
+ err = sierra_submit_rx_urbs(port, GFP_KERNEL);
+- if (err) {
+- /* get rid of everything as in close */
+- sierra_close(port);
+- /* restore balance for autopm */
+- if (!serial->disconnected)
+- usb_autopm_put_interface(serial->interface);
+- return err;
+- }
++ if (err)
++ goto err_submit;
++
+ sierra_send_setup(port);
+
+- serial->interface->needs_remote_wakeup = 1;
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 1;
++ if (++intfdata->open_ports == 1)
++ serial->interface->needs_remote_wakeup = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ usb_autopm_put_interface(serial->interface);
+
+ return 0;
++
++err_submit:
++ sierra_stop_rx_urbs(port);
++
++ for (i = 0; i < portdata->num_in_urbs; i++) {
++ sierra_release_urb(portdata->in_urbs[i]);
++ portdata->in_urbs[i] = NULL;
++ }
++
++ return err;
+ }
+
+
+@@ -994,6 +1014,7 @@ static void sierra_release(struct usb_serial *serial)
+ portdata = usb_get_serial_port_data(port);
+ if (!portdata)
+ continue;
++ usb_set_serial_port_data(port, NULL);
+ kfree(portdata);
+ }
+ kfree(serial->private);
+@@ -1010,6 +1031,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
++ if (!portdata)
++ continue;
+ sierra_stop_rx_urbs(port);
+ usb_kill_anchored_urbs(&portdata->active);
+ }
+@@ -1052,6 +1075,9 @@ static int sierra_resume(struct usb_serial *serial)
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
++ if (!portdata)
++ continue;
++
+ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ usb_anchor_urb(urb, &portdata->active);
+ intfdata->in_flight++;
+@@ -1059,8 +1085,12 @@ static int sierra_resume(struct usb_serial *serial)
+ if (err < 0) {
+ intfdata->in_flight--;
+ usb_unanchor_urb(urb);
+- usb_scuttle_anchored_urbs(&portdata->delayed);
+- break;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ spin_lock(&portdata->lock);
++ portdata->outstanding_urbs--;
++ spin_unlock(&portdata->lock);
++ continue;
+ }
+ }
+
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index 820436ec60e9..7e92f50965f6 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -236,8 +236,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ usb_pipeendpoint(this_urb->pipe), i);
+
+ err = usb_autopm_get_interface_async(port->serial->interface);
+- if (err < 0)
++ if (err < 0) {
++ clear_bit(i, &portdata->out_busy);
+ break;
++ }
+
+ /* send the data */
+ memcpy(this_urb->transfer_buffer, buf, todo);
+@@ -406,6 +408,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ dbg("%s", __func__);
+
++ if (port->interrupt_in_urb) {
++ err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
++ if (err) {
++ dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
++ __func__, err);
++ }
++ }
++
+ /* Start reading from the IN endpoint */
+ for (i = 0; i < N_IN_URB; i++) {
+ urb = portdata->in_urbs[i];
+@@ -432,12 +442,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+ }
+ EXPORT_SYMBOL(usb_wwan_open);
+
++static void unbusy_queued_urb(struct urb *urb,
++ struct usb_wwan_port_private *portdata)
++{
++ int i;
++
++ for (i = 0; i < N_OUT_URB; i++) {
++ if (urb == portdata->out_urbs[i]) {
++ clear_bit(i, &portdata->out_busy);
++ break;
++ }
++ }
++}
++
+ void usb_wwan_close(struct usb_serial_port *port)
+ {
+ int i;
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata = port->serial->private;
++ struct urb *urb;
+
+ dbg("%s", __func__);
+ portdata = usb_get_serial_port_data(port);
+@@ -448,10 +472,19 @@ void usb_wwan_close(struct usb_serial_port *port)
+ portdata->opened = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
++ for (;;) {
++ urb = usb_get_from_anchor(&portdata->delayed);
++ if (!urb)
++ break;
++ unbusy_queued_urb(urb, portdata);
++ usb_autopm_put_interface_async(serial->interface);
++ }
++
+ for (i = 0; i < N_IN_URB; i++)
+ usb_kill_urb(portdata->in_urbs[i]);
+ for (i = 0; i < N_OUT_URB; i++)
+ usb_kill_urb(portdata->out_urbs[i]);
++ usb_kill_urb(port->interrupt_in_urb);
+ /* balancing - important as an error cannot be handled*/
+ usb_autopm_get_interface_no_resume(serial->interface);
+ serial->interface->needs_remote_wakeup = 0;
+@@ -527,7 +560,7 @@ static void usb_wwan_setup_urbs(struct usb_serial *serial)
+
+ int usb_wwan_startup(struct usb_serial *serial)
+ {
+- int i, j, err;
++ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+ u8 *buffer;
+@@ -560,12 +593,6 @@ int usb_wwan_startup(struct usb_serial *serial)
+ }
+
+ usb_set_serial_port_data(port, portdata);
+-
+- if (!port->interrupt_in_urb)
+- continue;
+- err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+- if (err)
+- dbg("%s: submit irq_in urb failed %d", __func__, err);
+ }
+ usb_wwan_setup_urbs(serial);
+ return 0;
+@@ -645,46 +672,32 @@ EXPORT_SYMBOL(usb_wwan_release);
+ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
+ {
+ struct usb_wwan_intf_private *intfdata = serial->private;
+- int b;
+
+ dbg("%s entered", __func__);
+
++ spin_lock_irq(&intfdata->susp_lock);
+ if (PMSG_IS_AUTO(message)) {
+- spin_lock_irq(&intfdata->susp_lock);
+- b = intfdata->in_flight;
+- spin_unlock_irq(&intfdata->susp_lock);
+-
+- if (b)
++ if (intfdata->in_flight) {
++ spin_unlock_irq(&intfdata->susp_lock);
+ return -EBUSY;
++ }
+ }
+
+- spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
++
+ stop_read_write_urbs(serial);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_suspend);
+
+-static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
+-{
+- int i;
+-
+- for (i = 0; i < N_OUT_URB; i++) {
+- if (urb == portdata->out_urbs[i]) {
+- clear_bit(i, &portdata->out_busy);
+- break;
+- }
+- }
+-}
+-
+-static void play_delayed(struct usb_serial_port *port)
++static int play_delayed(struct usb_serial_port *port)
+ {
+ struct usb_wwan_intf_private *data;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+- int err;
++ int err = 0;
+
+ portdata = usb_get_serial_port_data(port);
+ data = port->serial->private;
+@@ -701,6 +714,8 @@ static void play_delayed(struct usb_serial_port *port)
+ break;
+ }
+ }
++
++ return err;
+ }
+
+ int usb_wwan_resume(struct usb_serial *serial)
+@@ -710,55 +725,53 @@ int usb_wwan_resume(struct usb_serial *serial)
+ struct usb_wwan_intf_private *intfdata = serial->private;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+- int err = 0;
++ int err;
++ int err_count = 0;
+
+ dbg("%s entered", __func__);
+- /* get the interrupt URBs resubmitted unconditionally */
+- for (i = 0; i < serial->num_ports; i++) {
+- port = serial->port[i];
+- if (!port->interrupt_in_urb) {
+- dbg("%s: No interrupt URB for port %d", __func__, i);
+- continue;
+- }
+- err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+- dbg("Submitted interrupt URB for port %d (result %d)", i, err);
+- if (err < 0) {
+- err("%s: Error %d for interrupt URB of port%d",
+- __func__, err, i);
+- goto err_out;
+- }
+- }
+
++ spin_lock_irq(&intfdata->susp_lock);
+ for (i = 0; i < serial->num_ports; i++) {
+ /* walk all ports */
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ /* skip closed ports */
+- spin_lock_irq(&intfdata->susp_lock);
+- if (!portdata->opened) {
+- spin_unlock_irq(&intfdata->susp_lock);
++ if (!portdata || !portdata->opened)
+ continue;
++
++ if (port->interrupt_in_urb) {
++ err = usb_submit_urb(port->interrupt_in_urb,
++ GFP_ATOMIC);
++ if (err) {
++ dev_err(&port->dev,
++ "%s: submit int urb failed: %d\n",
++ __func__, err);
++ err_count++;
++ }
+ }
+
++ err = play_delayed(port);
++ if (err)
++ err_count++;
++
+ for (j = 0; j < N_IN_URB; j++) {
+ urb = portdata->in_urbs[j];
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ err("%s: Error %d for bulk URB %d",
+ __func__, err, i);
+- spin_unlock_irq(&intfdata->susp_lock);
+- goto err_out;
++ err_count++;
+ }
+ }
+- play_delayed(port);
+- spin_unlock_irq(&intfdata->susp_lock);
+ }
+- spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+-err_out:
+- return err;
++
++ if (err_count)
++ return -EIO;
++
++ return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_resume);
+ #endif
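
An aside on the usb_wwan_resume() rework above: rather than aborting on the first failed submission (stranding every later port), the function now resumes everything it can and folds the failures into one aggregate error. A sketch of that error-counting pattern (resume_one() and resume_all() are invented names):

#include <errno.h>
#include <stdio.h>

/* stand-in for one per-port resume step; odd ports "fail" */
static int resume_one(int port)
{
        return (port & 1) ? -EIO : 0;
}

static int resume_all(int nports)
{
        int err_count = 0;

        for (int i = 0; i < nports; i++)
                if (resume_one(i) < 0)
                        err_count++;    /* keep going; don't strand the rest */

        return err_count ? -EIO : 0;
}

int main(void)
{
        printf("resume_all: %d\n", resume_all(4));
        return 0;
}
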
+diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
+index 556d96ce40bf..89a8a89a5eb2 100644
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
+
+ #define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
+
+-#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
++#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
+
+ /* code speedup */
+ #ifdef CONFIG_FB_MATROX_MILLENIUM
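
An aside on the matroxfb hunk above: the rewritten macro discards one status read before the real poll and wraps everything in do { ... } while (0) so it still expands safely as a single statement. A sketch of why the flush matters, under the assumption (mine, not stated in the patch) that the first read can return a stale idle value:

#include <stdio.h>

static unsigned int nreads;

/* stand-in for mga_inl(M_STATUS): the first read returns a stale
 * "idle" answer, then the engine reports busy for a while */
static unsigned int read_status(void)
{
        unsigned int n = nreads++;

        if (n == 0)
                return 0;               /* stale idle */
        return (n < 4) ? 0x10000 : 0;   /* busy, then really idle */
}

/* one throwaway read flushes the stale value; without it the poll
 * below would exit immediately on the bogus first read */
#define WaitTillIdle() \
        do { read_status(); while (read_status() & 0x10000) ; } while (0)

int main(void)
{
        WaitTillIdle();
        printf("polled %u times before idle\n", nreads);
        return 0;
}
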
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index bcec06750232..9a6b24a73aae 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1033,7 +1033,7 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+ *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ }
+ *ptr = (unsigned long)*out_eiref;
+- if ((void *)*ptr >= (void *)ei + item_size)
++ if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
+ return -ENOENT;
+ }
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index d64fda541483..24b58c7f01ef 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1551,6 +1551,7 @@ again:
+ * shortening the size of the delalloc range we're searching
+ */
+ free_extent_state(cached_state);
++ cached_state = NULL;
+ if (!loops) {
+ unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
+ max_bytes = PAGE_CACHE_SIZE - offset;
+@@ -2244,7 +2245,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+ {
+ int uptodate = (err == 0);
+ struct extent_io_tree *tree;
+- int ret;
++ int ret = 0;
+
+ tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 06744f1e91f4..d5dc63c6bb75 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1446,11 +1446,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
+ struct btrfs_fs_devices *fs_devices;
+ fs_devices = root->fs_info->fs_devices;
+ while (fs_devices) {
+- if (fs_devices->seed == cur_devices)
++ if (fs_devices->seed == cur_devices) {
++ fs_devices->seed = cur_devices->seed;
+ break;
++ }
+ fs_devices = fs_devices->seed;
+ }
+- fs_devices->seed = cur_devices->seed;
+ cur_devices->seed = NULL;
+ lock_chunks(root);
+ __btrfs_close_devices(cur_devices);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 2941ee6ef24f..cdfc763b313f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3015,7 +3015,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ }
+ BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
+ start > ac->ac_o_ex.fe_logical);
+- BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
++ BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+
+ /* now prepare goal request */
+
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index 9a323d12de1c..1e1e41787cd9 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -27,6 +27,8 @@ struct module;
+ * @irq_count: stats field to detect stalled irqs
+ * @last_unhandled: aging timer for unhandled count
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
++ * @threads_handled: stats field for deferred spurious detection of threaded handlers
++ * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @lock: locking for SMP
+ * @affinity_hint: hint to user space for preferred irq affinity
+ * @affinity_notify: context for notification of affinity changes
+@@ -52,6 +54,8 @@ struct irq_desc {
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
++ atomic_t threads_handled;
++ int threads_handled_last;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ #ifdef CONFIG_SMP
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 0884db3d315e..e22df7a4f1ab 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1674,6 +1674,22 @@ static inline void skb_orphan(struct sk_buff *skb)
+ }
+
+ /**
++ * skb_orphan_frags - orphan the frags contained in a buffer
++ * @skb: buffer to orphan frags from
++ * @gfp_mask: allocation mask for replacement pages
++ *
++ * For each frag in the SKB which needs a destructor (i.e. has an
++ * owner) create a copy of that frag and release the original
++ * page by calling the destructor.
++ */
++static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
++{
++ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
++ return 0;
++ return skb_copy_ubufs(skb, gfp_mask);
++}
++
++/**
+ * __skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 3d1bbbcc2923..9baa6315acf5 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -809,8 +809,8 @@ static int irq_thread(void *data)
+ irq_thread_check_affinity(desc, action);
+
+ action_ret = handler_fn(desc, action);
+- if (!noirqdebug)
+- note_interrupt(action->irq, desc, action_ret);
++ if (action_ret == IRQ_HANDLED)
++ atomic_inc(&desc->threads_handled);
+
+ wake_threads_waitq(desc);
+ }
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 7b5f012bde9d..febcee3c2aa9 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
+ return action && (action->flags & IRQF_IRQPOLL);
+ }
+
++#define SPURIOUS_DEFERRED 0x80000000
++
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
+ {
+ if (desc->istate & IRQS_POLL_INPROGRESS)
+ return;
+
+- /* we get here again via the threaded handler */
+- if (action_ret == IRQ_WAKE_THREAD)
+- return;
+-
+ if (bad_action_ret(action_ret)) {
+ report_bad_irq(irq, desc, action_ret);
+ return;
+ }
+
++ /*
++ * We cannot call note_interrupt from the threaded handler
++ * because we need to look at the compound of all handlers
++ * (primary and threaded). Aside of that in the threaded
++ * shared case we have no serialization against an incoming
++ * hardware interrupt while we are dealing with a threaded
++ * result.
++ *
++ * So in case a thread is woken, we just note the fact and
++ * defer the analysis to the next hardware interrupt.
++ *
++ * The threaded handlers store whether they sucessfully
++ * handled an interrupt and we check whether that number
++ * changed versus the last invocation.
++ *
++ * We could handle all interrupts with the delayed by one
++ * mechanism, but for the non forced threaded case we'd just
++ * add pointless overhead to the straight hardirq interrupts
++ * for the sake of a few lines less code.
++ */
++ if (action_ret & IRQ_WAKE_THREAD) {
++ /*
++ * There is a thread woken. Check whether one of the
++ * shared primary handlers returned IRQ_HANDLED. If
++ * not we defer the spurious detection to the next
++ * interrupt.
++ */
++ if (action_ret == IRQ_WAKE_THREAD) {
++ int handled;
++ /*
++ * We use bit 31 of thread_handled_last to
++ * denote the deferred spurious detection
++ * active. No locking necessary as
++ * thread_handled_last is only accessed here
++ * and we have the guarantee that hard
++ * interrupts are not reentrant.
++ */
++ if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
++ desc->threads_handled_last |= SPURIOUS_DEFERRED;
++ return;
++ }
++ /*
++ * Check whether one of the threaded handlers
++ * returned IRQ_HANDLED since the last
++ * interrupt happened.
++ *
++ * For simplicity we just set bit 31, as it is
++ * set in threads_handled_last as well. So we
++ * avoid extra masking. And we really do not
++ * care about the high bits of the handled
++ * count. We just care about the count being
++ * different than the one we saw before.
++ */
++ handled = atomic_read(&desc->threads_handled);
++ handled |= SPURIOUS_DEFERRED;
++ if (handled != desc->threads_handled_last) {
++ action_ret = IRQ_HANDLED;
++ /*
++ * Note: We keep the SPURIOUS_DEFERRED
++ * bit set. We are handling the
++ * previous invocation right now.
++ * Keep it for the current one, so the
++ * next hardware interrupt will
++ * account for it.
++ */
++ desc->threads_handled_last = handled;
++ } else {
++ /*
++ * None of the threaded handlers felt
++ * responsible for the last interrupt
++ *
++ * We keep the SPURIOUS_DEFERRED bit
++ * set in threads_handled_last as we
++ * need to account for the current
++ * interrupt as well.
++ */
++ action_ret = IRQ_NONE;
++ }
++ } else {
++ /*
++ * One of the primary handlers returned
++ * IRQ_HANDLED. So we don't care about the
++ * threaded handlers on the same line. Clear
++ * the deferred detection bit.
++ *
++ * In theory we could/should check whether the
++ * deferred bit is set and take the result of
++ * the previous run into account here as
++ * well. But it's really not worth the
++ * trouble. If every other interrupt is
++ * handled we never trigger the spurious
++ * detector. And if this is just the one out
++ * of 100k unhandled ones which is handled
++ * then we merily delay the spurious detection
++ * by one hard interrupt. Not a real problem.
++ */
++ desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
++ }
++ }
++
+ if (unlikely(action_ret == IRQ_NONE)) {
+ /*
+ * If we are seeing only the odd spurious IRQ caused by
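
An aside on the spurious.c hunk above: the hard interrupt only ever compares a monotonically growing "handled" counter against the value it saw on the previous interrupt, with bit 31 doubling as the "deferral armed" flag. The core of that bookkeeping, lifted into a runnable C11 sketch (check_progress() is an invented name; the kernel does this against desc->threads_handled):

#include <stdatomic.h>
#include <stdio.h>

#define SPURIOUS_DEFERRED 0x80000000u

static atomic_uint threads_handled;         /* bumped by the irq thread */
static unsigned int threads_handled_last;   /* hardirq context only */

/* returns 1 if a thread made progress since the previous interrupt */
static int check_progress(void)
{
        unsigned int handled;

        if (!(threads_handled_last & SPURIOUS_DEFERRED)) {
                /* first deferral: arm the flag, judge next time */
                threads_handled_last |= SPURIOUS_DEFERRED;
                return 1;
        }
        handled = atomic_load(&threads_handled) | SPURIOUS_DEFERRED;
        if (handled != threads_handled_last) {
                threads_handled_last = handled;
                return 1;               /* progress was made */
        }
        return 0;                       /* looks spurious */
}

int main(void)
{
        check_progress();                       /* arms the deferral */
        atomic_fetch_add(&threads_handled, 1);  /* thread handled one */
        printf("progress: %d\n", check_progress());    /* 1 */
        printf("progress: %d\n", check_progress());    /* 0 */
        return 0;
}
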
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 2fe9bf640c71..97eec2174769 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+ #endif
+ si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
+
+- if ((flags & MF_ACTION_REQUIRED) && t == current) {
++ if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
+ si.si_code = BUS_MCEERR_AR;
+- ret = force_sig_info(SIGBUS, &si, t);
++ ret = force_sig_info(SIGBUS, &si, current);
+ } else {
+ /*
+ * Don't use force here, it's convenient if the signal
+@@ -382,10 +382,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+ }
+ }
+
+-static int task_early_kill(struct task_struct *tsk)
++static int task_early_kill(struct task_struct *tsk, int force_early)
+ {
+ if (!tsk->mm)
+ return 0;
++ if (force_early)
++ return 1;
+ if (tsk->flags & PF_MCE_PROCESS)
+ return !!(tsk->flags & PF_MCE_EARLY);
+ return sysctl_memory_failure_early_kill;
+@@ -395,7 +397,7 @@ static int task_early_kill(struct task_struct *tsk)
+ * Collect processes when the error hit an anonymous page.
+ */
+ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+- struct to_kill **tkc)
++ struct to_kill **tkc, int force_early)
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+@@ -409,7 +411,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+ for_each_process (tsk) {
+ struct anon_vma_chain *vmac;
+
+- if (!task_early_kill(tsk))
++ if (!task_early_kill(tsk, force_early))
+ continue;
+ list_for_each_entry(vmac, &av->head, same_anon_vma) {
+ vma = vmac->vma;
+@@ -427,7 +429,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+ * Collect processes when the error hit a file mapped page.
+ */
+ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+- struct to_kill **tkc)
++ struct to_kill **tkc, int force_early)
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+@@ -439,7 +441,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+ for_each_process(tsk) {
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+- if (!task_early_kill(tsk))
++ if (!task_early_kill(tsk, force_early))
+ continue;
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
+@@ -465,7 +467,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+ * First preallocate one tokill structure outside the spin locks,
+ * so that we can kill at least one process reasonably reliable.
+ */
+-static void collect_procs(struct page *page, struct list_head *tokill)
++static void collect_procs(struct page *page, struct list_head *tokill,
++ int force_early)
+ {
+ struct to_kill *tk;
+
+@@ -476,9 +479,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
+ if (!tk)
+ return;
+ if (PageAnon(page))
+- collect_procs_anon(page, tokill, &tk);
++ collect_procs_anon(page, tokill, &tk, force_early);
+ else
+- collect_procs_file(page, tokill, &tk);
++ collect_procs_file(page, tokill, &tk, force_early);
+ kfree(tk);
+ }
+
+@@ -948,7 +951,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * there's nothing that can be done.
+ */
+ if (kill)
+- collect_procs(ppage, &tokill);
++ collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
+
+ if (hpage != ppage)
+ lock_page(ppage);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 3ff473feafd4..6dc46f345dba 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
+ * LOCK should suffice since the actual taking of the lock must
+ * happen _before_ what follows.
+ */
++ might_sleep();
+ if (mutex_is_locked(&anon_vma->root->mutex)) {
+ anon_vma_lock(anon_vma);
+ anon_vma_unlock(anon_vma);
+@@ -476,8 +477,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
+ * above cannot corrupt).
+ */
+ if (!page_mapped(page)) {
++ rcu_read_unlock();
+ put_anon_vma(anon_vma);
+- anon_vma = NULL;
++ return NULL;
+ }
+ out:
+ rcu_read_unlock();
+@@ -527,9 +529,9 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
+ }
+
+ if (!page_mapped(page)) {
++ rcu_read_unlock();
+ put_anon_vma(anon_vma);
+- anon_vma = NULL;
+- goto out;
++ return NULL;
+ }
+
+ /* we pinned the anon_vma, its safe to sleep */
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 907c8b7c995e..357ada63772c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3138,7 +3138,10 @@ static int kswapd(void *p)
+ }
+ }
+
++ tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
+ current->reclaim_state = NULL;
++ lockdep_clear_current_reclaim_state();
++
+ return 0;
+ }
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index fe42834df408..8de819475378 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -733,7 +733,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+ return 0;
+ }
+-
++EXPORT_SYMBOL_GPL(skb_copy_ubufs);
+
+ /**
+ * skb_clone - duplicate an sk_buff
+@@ -2777,6 +2777,9 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ skb_put(nskb, hsize), hsize);
+
+ while (pos < offset + len && i < nfrags) {
++ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
++ goto err;
++
+ *frag = skb_shinfo(skb)->frags[i];
+ __skb_frag_ref(frag);
+ size = skb_frag_size(frag);