Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1693 - genpatches-2.6/trunk/2.6.33
Date: Fri, 02 Apr 2010 12:05:47
Message-Id: E1Nxfcw-0005bc-Mc@stork.gentoo.org
1 Author: mpagano
2 Date: 2010-04-02 12:05:30 +0000 (Fri, 02 Apr 2010)
3 New Revision: 1693
4
5 Added:
6 genpatches-2.6/trunk/2.6.33/1001_linux-2.6.33.2.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.33/0000_README
9 Log:
10 Linux patch version 2.6.33.2
11
12 Modified: genpatches-2.6/trunk/2.6.33/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.33/0000_README 2010-04-02 12:00:14 UTC (rev 1692)
15 +++ genpatches-2.6/trunk/2.6.33/0000_README 2010-04-02 12:05:30 UTC (rev 1693)
16 @@ -43,6 +43,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.33.1
19
20 +Patch: 1001_linux-2.6.33.2.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.33.2
23 +
24 Patch: 4100_dm-bbr.patch
25 From: EVMS 2.5.2
26 Desc: Bad block relocation support for LiveCD users
27
28 Added: genpatches-2.6/trunk/2.6.33/1001_linux-2.6.33.2.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.33/1001_linux-2.6.33.2.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.33/1001_linux-2.6.33.2.patch 2010-04-02 12:05:30 UTC (rev 1693)
32 @@ -0,0 +1,6335 @@
33 +diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
34 +index 3015da0..fe09a2c 100644
35 +--- a/Documentation/filesystems/tmpfs.txt
36 ++++ b/Documentation/filesystems/tmpfs.txt
37 +@@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
38 + all files in that instance (if CONFIG_NUMA is enabled) - which can be
39 + adjusted on the fly via 'mount -o remount ...'
40 +
41 +-mpol=default prefers to allocate memory from the local node
42 ++mpol=default use the process allocation policy
43 ++ (see set_mempolicy(2))
44 + mpol=prefer:Node prefers to allocate memory from the given Node
45 + mpol=bind:NodeList allocates memory only from nodes in NodeList
46 + mpol=interleave prefers to allocate from each node in turn
47 + mpol=interleave:NodeList allocates from each node of NodeList in turn
48 ++mpol=local prefers to allocate memory from the local node
49 +
50 + NodeList format is a comma-separated list of decimal numbers and ranges,
51 + a range being two hyphen-separated decimal numbers, the smallest and
52 +@@ -134,3 +136,5 @@ Author:
53 + Christoph Rohland <cr@×××.com>, 1.12.01
54 + Updated:
55 + Hugh Dickins, 4 June 2007
56 ++Updated:
57 ++ KOSAKI Motohiro, 16 Mar 2010
58 +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
59 +index 4fddc50..6b84a04 100644
60 +--- a/arch/arm/boot/compressed/head.S
61 ++++ b/arch/arm/boot/compressed/head.S
62 +@@ -170,8 +170,8 @@ not_angel:
63 +
64 + .text
65 + adr r0, LC0
66 +- ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp} )
67 +- THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, ip} )
68 ++ ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
69 ++ THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
70 + THUMB( ldr sp, [r0, #28] )
71 + subs r0, r0, r1 @ calculate the delta offset
72 +
73 +@@ -182,12 +182,13 @@ not_angel:
74 + /*
75 + * We're running at a different address. We need to fix
76 + * up various pointers:
77 +- * r5 - zImage base address
78 +- * r6 - GOT start
79 ++ * r5 - zImage base address (_start)
80 ++ * r6 - size of decompressed image
81 ++ * r11 - GOT start
82 + * ip - GOT end
83 + */
84 + add r5, r5, r0
85 +- add r6, r6, r0
86 ++ add r11, r11, r0
87 + add ip, ip, r0
88 +
89 + #ifndef CONFIG_ZBOOT_ROM
90 +@@ -205,10 +206,10 @@ not_angel:
91 + /*
92 + * Relocate all entries in the GOT table.
93 + */
94 +-1: ldr r1, [r6, #0] @ relocate entries in the GOT
95 ++1: ldr r1, [r11, #0] @ relocate entries in the GOT
96 + add r1, r1, r0 @ table. This fixes up the
97 +- str r1, [r6], #4 @ C references.
98 +- cmp r6, ip
99 ++ str r1, [r11], #4 @ C references.
100 ++ cmp r11, ip
101 + blo 1b
102 + #else
103 +
104 +@@ -216,12 +217,12 @@ not_angel:
105 + * Relocate entries in the GOT table. We only relocate
106 + * the entries that are outside the (relocated) BSS region.
107 + */
108 +-1: ldr r1, [r6, #0] @ relocate entries in the GOT
109 ++1: ldr r1, [r11, #0] @ relocate entries in the GOT
110 + cmp r1, r2 @ entry < bss_start ||
111 + cmphs r3, r1 @ _end < entry
112 + addlo r1, r1, r0 @ table. This fixes up the
113 +- str r1, [r6], #4 @ C references.
114 +- cmp r6, ip
115 ++ str r1, [r11], #4 @ C references.
116 ++ cmp r11, ip
117 + blo 1b
118 + #endif
119 +
120 +@@ -247,6 +248,7 @@ not_relocated: mov r0, #0
121 + * Check to see if we will overwrite ourselves.
122 + * r4 = final kernel address
123 + * r5 = start of this image
124 ++ * r6 = size of decompressed image
125 + * r2 = end of malloc space (and therefore this image)
126 + * We basically want:
127 + * r4 >= r2 -> OK
128 +@@ -254,8 +256,7 @@ not_relocated: mov r0, #0
129 + */
130 + cmp r4, r2
131 + bhs wont_overwrite
132 +- sub r3, sp, r5 @ > compressed kernel size
133 +- add r0, r4, r3, lsl #2 @ allow for 4x expansion
134 ++ add r0, r4, r6
135 + cmp r0, r5
136 + bls wont_overwrite
137 +
138 +@@ -271,7 +272,6 @@ not_relocated: mov r0, #0
139 + * r1-r3 = unused
140 + * r4 = kernel execution address
141 + * r5 = decompressed kernel start
142 +- * r6 = processor ID
143 + * r7 = architecture ID
144 + * r8 = atags pointer
145 + * r9-r12,r14 = corrupted
146 +@@ -312,7 +312,8 @@ LC0: .word LC0 @ r1
147 + .word _end @ r3
148 + .word zreladdr @ r4
149 + .word _start @ r5
150 +- .word _got_start @ r6
151 ++ .word _image_size @ r6
152 ++ .word _got_start @ r11
153 + .word _got_end @ ip
154 + .word user_stack+4096 @ sp
155 + LC1: .word reloc_end - reloc_start
156 +@@ -336,7 +337,6 @@ params: ldr r0, =params_phys
157 + *
158 + * On entry,
159 + * r4 = kernel execution address
160 +- * r6 = processor ID
161 + * r7 = architecture number
162 + * r8 = atags pointer
163 + * r9 = run-time address of "start" (???)
164 +@@ -542,7 +542,6 @@ __common_mmu_cache_on:
165 + * r1-r3 = unused
166 + * r4 = kernel execution address
167 + * r5 = decompressed kernel start
168 +- * r6 = processor ID
169 + * r7 = architecture ID
170 + * r8 = atags pointer
171 + * r9-r12,r14 = corrupted
172 +@@ -581,19 +580,19 @@ call_kernel: bl cache_clean_flush
173 + * r1 = corrupted
174 + * r2 = corrupted
175 + * r3 = block offset
176 +- * r6 = corrupted
177 ++ * r9 = corrupted
178 + * r12 = corrupted
179 + */
180 +
181 + call_cache_fn: adr r12, proc_types
182 + #ifdef CONFIG_CPU_CP15
183 +- mrc p15, 0, r6, c0, c0 @ get processor ID
184 ++ mrc p15, 0, r9, c0, c0 @ get processor ID
185 + #else
186 +- ldr r6, =CONFIG_PROCESSOR_ID
187 ++ ldr r9, =CONFIG_PROCESSOR_ID
188 + #endif
189 + 1: ldr r1, [r12, #0] @ get value
190 + ldr r2, [r12, #4] @ get mask
191 +- eor r1, r1, r6 @ (real ^ match)
192 ++ eor r1, r1, r9 @ (real ^ match)
193 + tst r1, r2 @ & mask
194 + ARM( addeq pc, r12, r3 ) @ call cache function
195 + THUMB( addeq r12, r3 )
196 +@@ -778,8 +777,7 @@ proc_types:
197 + * Turn off the Cache and MMU. ARMv3 does not support
198 + * reading the control register, but ARMv4 does.
199 + *
200 +- * On entry, r6 = processor ID
201 +- * On exit, r0, r1, r2, r3, r12 corrupted
202 ++ * On exit, r0, r1, r2, r3, r9, r12 corrupted
203 + * This routine must preserve: r4, r6, r7
204 + */
205 + .align 5
206 +@@ -852,10 +850,8 @@ __armv3_mmu_cache_off:
207 + /*
208 + * Clean and flush the cache to maintain consistency.
209 + *
210 +- * On entry,
211 +- * r6 = processor ID
212 + * On exit,
213 +- * r1, r2, r3, r11, r12 corrupted
214 ++ * r1, r2, r3, r9, r11, r12 corrupted
215 + * This routine must preserve:
216 + * r0, r4, r5, r6, r7
217 + */
218 +@@ -967,7 +963,7 @@ __armv4_mmu_cache_flush:
219 + mov r2, #64*1024 @ default: 32K dcache size (*2)
220 + mov r11, #32 @ default: 32 byte line size
221 + mrc p15, 0, r3, c0, c0, 1 @ read cache type
222 +- teq r3, r6 @ cache ID register present?
223 ++ teq r3, r9 @ cache ID register present?
224 + beq no_cache_id
225 + mov r1, r3, lsr #18
226 + and r1, r1, #7
227 +diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
228 +index a5924b9..cbed030 100644
229 +--- a/arch/arm/boot/compressed/vmlinux.lds.in
230 ++++ b/arch/arm/boot/compressed/vmlinux.lds.in
231 +@@ -36,6 +36,9 @@ SECTIONS
232 +
233 + _etext = .;
234 +
235 ++ /* Assume size of decompressed image is 4x the compressed image */
236 ++ _image_size = (_etext - _text) * 4;
237 ++
238 + _got_start = .;
239 + .got : { *(.got) }
240 + _got_end = .;
241 +diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
242 +index 1eb85fb..a3c0a32 100644
243 +--- a/arch/powerpc/kernel/perf_event.c
244 ++++ b/arch/powerpc/kernel/perf_event.c
245 +@@ -1164,10 +1164,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
246 + * Finally record data if requested.
247 + */
248 + if (record) {
249 +- struct perf_sample_data data = {
250 +- .addr = ~0ULL,
251 +- .period = event->hw.last_period,
252 +- };
253 ++ struct perf_sample_data data;
254 ++
255 ++ perf_sample_data_init(&data, ~0ULL);
256 ++ data.period = event->hw.last_period;
257 +
258 + if (event->attr.sample_type & PERF_SAMPLE_ADDR)
259 + perf_get_data_addr(regs, &data.addr);
260 +diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
261 +index b51b1fc..d3cc94f 100644
262 +--- a/arch/sh/boot/compressed/misc.c
263 ++++ b/arch/sh/boot/compressed/misc.c
264 +@@ -132,7 +132,7 @@ void decompress_kernel(void)
265 + output_addr = (CONFIG_MEMORY_START + 0x2000);
266 + #else
267 + output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
268 +-#ifdef CONFIG_29BIT
269 ++#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY)
270 + output_addr |= P2SEG;
271 + #endif
272 + #endif
273 +diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
274 +index e856456..8c70d3e 100644
275 +--- a/arch/sparc/kernel/perf_event.c
276 ++++ b/arch/sparc/kernel/perf_event.c
277 +@@ -1189,7 +1189,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
278 +
279 + regs = args->regs;
280 +
281 +- data.addr = 0;
282 ++ perf_sample_data_init(&data, 0);
283 +
284 + cpuc = &__get_cpu_var(cpu_hw_events);
285 +
286 +@@ -1337,7 +1337,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
287 + callchain_store(entry, PERF_CONTEXT_USER);
288 + callchain_store(entry, regs->tpc);
289 +
290 +- ufp = regs->u_regs[UREG_I6];
291 ++ ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
292 + do {
293 + struct sparc_stackf32 *usf, sf;
294 + unsigned long pc;
295 +diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
296 +index 4b7c937..2d8b70d 100644
297 +--- a/arch/sparc/prom/p1275.c
298 ++++ b/arch/sparc/prom/p1275.c
299 +@@ -32,10 +32,9 @@ extern void prom_cif_interface(void);
300 + extern void prom_cif_callback(void);
301 +
302 + /*
303 +- * This provides SMP safety on the p1275buf. prom_callback() drops this lock
304 +- * to allow recursuve acquisition.
305 ++ * This provides SMP safety on the p1275buf.
306 + */
307 +-DEFINE_SPINLOCK(prom_entry_lock);
308 ++DEFINE_RAW_SPINLOCK(prom_entry_lock);
309 +
310 + long p1275_cmd(const char *service, long fmt, ...)
311 + {
312 +@@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long fmt, ...)
313 +
314 + p = p1275buf.prom_buffer;
315 +
316 +- spin_lock_irqsave(&prom_entry_lock, flags);
317 ++ raw_local_save_flags(flags);
318 ++ raw_local_irq_restore(PIL_NMI);
319 ++ raw_spin_lock(&prom_entry_lock);
320 +
321 + p1275buf.prom_args[0] = (unsigned long)p; /* service */
322 + strcpy (p, service);
323 +@@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long fmt, ...)
324 + va_end(list);
325 + x = p1275buf.prom_args [nargs + 3];
326 +
327 +- spin_unlock_irqrestore(&prom_entry_lock, flags);
328 ++ raw_spin_unlock(&prom_entry_lock);
329 ++ raw_local_irq_restore(flags);
330 +
331 + return x;
332 + }
333 +diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
334 +index 14f9890..c22a164 100644
335 +--- a/arch/x86/include/asm/fixmap.h
336 ++++ b/arch/x86/include/asm/fixmap.h
337 +@@ -82,6 +82,9 @@ enum fixed_addresses {
338 + #endif
339 + FIX_DBGP_BASE,
340 + FIX_EARLYCON_MEM_BASE,
341 ++#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
342 ++ FIX_OHCI1394_BASE,
343 ++#endif
344 + #ifdef CONFIG_X86_LOCAL_APIC
345 + FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
346 + #endif
347 +@@ -126,9 +129,6 @@ enum fixed_addresses {
348 + FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
349 + (__end_of_permanent_fixed_addresses & 255),
350 + FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
351 +-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
352 +- FIX_OHCI1394_BASE,
353 +-#endif
354 + #ifdef CONFIG_X86_32
355 + FIX_WP_TEST,
356 + #endif
357 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
358 +index 1cd58cd..4604e6a 100644
359 +--- a/arch/x86/include/asm/msr-index.h
360 ++++ b/arch/x86/include/asm/msr-index.h
361 +@@ -105,6 +105,8 @@
362 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
363 + #define MSR_AMD64_NB_CFG 0xc001001f
364 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
365 ++#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
366 ++#define MSR_AMD64_OSVW_STATUS 0xc0010141
367 + #define MSR_AMD64_IBSFETCHCTL 0xc0011030
368 + #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
369 + #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
370 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
371 +index 879666f..7e1cca1 100644
372 +--- a/arch/x86/kernel/cpu/intel.c
373 ++++ b/arch/x86/kernel/cpu/intel.c
374 +@@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
375 + if (c->x86_power & (1 << 8)) {
376 + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
377 + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
378 +- sched_clock_stable = 1;
379 ++ if (!check_tsc_unstable())
380 ++ sched_clock_stable = 1;
381 + }
382 +
383 + /*
384 +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
385 +index 8c1c070..98819b3 100644
386 +--- a/arch/x86/kernel/cpu/perf_event.c
387 ++++ b/arch/x86/kernel/cpu/perf_event.c
388 +@@ -1636,10 +1636,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
389 +
390 + ds->bts_index = ds->bts_buffer_base;
391 +
392 ++ perf_sample_data_init(&data, 0);
393 +
394 + data.period = event->hw.last_period;
395 +- data.addr = 0;
396 +- data.raw = NULL;
397 + regs.ip = 0;
398 +
399 + /*
400 +@@ -1756,8 +1755,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
401 + int idx, handled = 0;
402 + u64 val;
403 +
404 +- data.addr = 0;
405 +- data.raw = NULL;
406 ++ perf_sample_data_init(&data, 0);
407 +
408 + cpuc = &__get_cpu_var(cpu_hw_events);
409 +
410 +@@ -1802,8 +1800,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
411 + int bit, loops;
412 + u64 ack, status;
413 +
414 +- data.addr = 0;
415 +- data.raw = NULL;
416 ++ perf_sample_data_init(&data, 0);
417 +
418 + cpuc = &__get_cpu_var(cpu_hw_events);
419 +
420 +diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
421 +index 0ad9597..a6c906c 100644
422 +--- a/arch/x86/kernel/dumpstack_64.c
423 ++++ b/arch/x86/kernel/dumpstack_64.c
424 +@@ -125,9 +125,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
425 + {
426 + #ifdef CONFIG_FRAME_POINTER
427 + struct stack_frame *frame = (struct stack_frame *)bp;
428 ++ unsigned long next;
429 +
430 +- if (!in_irq_stack(stack, irq_stack, irq_stack_end))
431 +- return (unsigned long)frame->next_frame;
432 ++ if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
433 ++ if (!probe_kernel_address(&frame->next_frame, next))
434 ++ return next;
435 ++ else
436 ++ WARN_ONCE(1, "Perf: bad frame pointer = %p in "
437 ++ "callchain\n", &frame->next_frame);
438 ++ }
439 + #endif
440 + return bp;
441 + }
442 +diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
443 +index bb6006e..1e8cead 100644
444 +--- a/arch/x86/kernel/hw_breakpoint.c
445 ++++ b/arch/x86/kernel/hw_breakpoint.c
446 +@@ -531,8 +531,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
447 + {
448 + /* TODO */
449 + }
450 +-
451 +-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
452 +-{
453 +- /* TODO */
454 +-}
455 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
456 +index c9b3522..999c8a6 100644
457 +--- a/arch/x86/kernel/process.c
458 ++++ b/arch/x86/kernel/process.c
459 +@@ -519,21 +519,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
460 + }
461 +
462 + /*
463 +- * Check for AMD CPUs, which have potentially C1E support
464 ++ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
465 ++ * For more information see
466 ++ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
467 ++ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
468 + */
469 + static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
470 + {
471 ++ u64 val;
472 + if (c->x86_vendor != X86_VENDOR_AMD)
473 +- return 0;
474 +-
475 +- if (c->x86 < 0x0F)
476 +- return 0;
477 ++ goto no_c1e_idle;
478 +
479 + /* Family 0x0f models < rev F do not have C1E */
480 +- if (c->x86 == 0x0f && c->x86_model < 0x40)
481 +- return 0;
482 ++ if (c->x86 == 0x0F && c->x86_model >= 0x40)
483 ++ return 1;
484 +
485 +- return 1;
486 ++ if (c->x86 == 0x10) {
487 ++ /*
488 ++ * check OSVW bit for CPUs that are not affected
489 ++ * by erratum #400
490 ++ */
491 ++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
492 ++ if (val >= 2) {
493 ++ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
494 ++ if (!(val & BIT(1)))
495 ++ goto no_c1e_idle;
496 ++ }
497 ++ return 1;
498 ++ }
499 ++
500 ++no_c1e_idle:
501 ++ return 0;
502 + }
503 +
504 + static cpumask_var_t c1e_mask;
505 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
506 +index a1e1bc9..e900908 100644
507 +--- a/arch/x86/kvm/x86.c
508 ++++ b/arch/x86/kvm/x86.c
509 +@@ -1351,6 +1351,7 @@ int kvm_dev_ioctl_check_extension(long ext)
510 + case KVM_CAP_XEN_HVM:
511 + case KVM_CAP_ADJUST_CLOCK:
512 + case KVM_CAP_VCPU_EVENTS:
513 ++ case KVM_CAP_X86_ROBUST_SINGLESTEP:
514 + r = 1;
515 + break;
516 + case KVM_CAP_COALESCED_MMIO:
517 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
518 +index 1d4eb93..cf07c26 100644
519 +--- a/arch/x86/mm/pageattr.c
520 ++++ b/arch/x86/mm/pageattr.c
521 +@@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
522 + */
523 + if (kernel_set_to_readonly &&
524 + within(address, (unsigned long)_text,
525 +- (unsigned long)__end_rodata_hpage_align))
526 +- pgprot_val(forbidden) |= _PAGE_RW;
527 ++ (unsigned long)__end_rodata_hpage_align)) {
528 ++ unsigned int level;
529 ++
530 ++ /*
531 ++ * Don't enforce the !RW mapping for the kernel text mapping,
532 ++ * if the current mapping is already using small page mapping.
533 ++ * No need to work hard to preserve large page mappings in this
534 ++ * case.
535 ++ *
536 ++ * This also fixes the Linux Xen paravirt guest boot failure
537 ++ * (because of unexpected read-only mappings for kernel identity
538 ++ * mappings). In this paravirt guest case, the kernel text
539 ++ * mapping and the kernel identity mapping share the same
540 ++ * page-table pages. Thus we can't really use different
541 ++ * protections for the kernel text and identity mappings. Also,
542 ++ * these shared mappings are made of small page mappings.
543 ++ * Thus this don't enforce !RW mapping for small page kernel
544 ++ * text mapping logic will help Linux Xen parvirt guest boot
545 ++ * aswell.
546 ++ */
547 ++ if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
548 ++ pgprot_val(forbidden) |= _PAGE_RW;
549 ++ }
550 + #endif
551 +
552 + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
553 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
554 +index a6a736a..9e2feb6 100644
555 +--- a/drivers/ata/ahci.c
556 ++++ b/drivers/ata/ahci.c
557 +@@ -2831,6 +2831,14 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
558 + * On HP dv[4-6] and HDX18 with earlier BIOSen, link
559 + * to the harddisk doesn't become online after
560 + * resuming from STR. Warn and fail suspend.
561 ++ *
562 ++ * http://bugzilla.kernel.org/show_bug.cgi?id=12276
563 ++ *
564 ++ * Use dates instead of versions to match as HP is
565 ++ * apparently recycling both product and version
566 ++ * strings.
567 ++ *
568 ++ * http://bugzilla.kernel.org/show_bug.cgi?id=15462
569 + */
570 + {
571 + .ident = "dv4",
572 +@@ -2839,7 +2847,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
573 + DMI_MATCH(DMI_PRODUCT_NAME,
574 + "HP Pavilion dv4 Notebook PC"),
575 + },
576 +- .driver_data = "F.30", /* cutoff BIOS version */
577 ++ .driver_data = "20090105", /* F.30 */
578 + },
579 + {
580 + .ident = "dv5",
581 +@@ -2848,7 +2856,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
582 + DMI_MATCH(DMI_PRODUCT_NAME,
583 + "HP Pavilion dv5 Notebook PC"),
584 + },
585 +- .driver_data = "F.16", /* cutoff BIOS version */
586 ++ .driver_data = "20090506", /* F.16 */
587 + },
588 + {
589 + .ident = "dv6",
590 +@@ -2857,7 +2865,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
591 + DMI_MATCH(DMI_PRODUCT_NAME,
592 + "HP Pavilion dv6 Notebook PC"),
593 + },
594 +- .driver_data = "F.21", /* cutoff BIOS version */
595 ++ .driver_data = "20090423", /* F.21 */
596 + },
597 + {
598 + .ident = "HDX18",
599 +@@ -2866,7 +2874,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
600 + DMI_MATCH(DMI_PRODUCT_NAME,
601 + "HP HDX18 Notebook PC"),
602 + },
603 +- .driver_data = "F.23", /* cutoff BIOS version */
604 ++ .driver_data = "20090430", /* F.23 */
605 + },
606 + /*
607 + * Acer eMachines G725 has the same problem. BIOS
608 +@@ -2874,6 +2882,8 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
609 + * work. Inbetween, there are V1.06, V2.06 and V3.03
610 + * that we don't have much idea about. For now,
611 + * blacklist anything older than V3.04.
612 ++ *
613 ++ * http://bugzilla.kernel.org/show_bug.cgi?id=15104
614 + */
615 + {
616 + .ident = "G725",
617 +@@ -2881,19 +2891,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
618 + DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
619 + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
620 + },
621 +- .driver_data = "V3.04", /* cutoff BIOS version */
622 ++ .driver_data = "20091216", /* V3.04 */
623 + },
624 + { } /* terminate list */
625 + };
626 + const struct dmi_system_id *dmi = dmi_first_match(sysids);
627 +- const char *ver;
628 ++ int year, month, date;
629 ++ char buf[9];
630 +
631 + if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
632 + return false;
633 +
634 +- ver = dmi_get_system_info(DMI_BIOS_VERSION);
635 ++ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
636 ++ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
637 +
638 +- return !ver || strcmp(ver, dmi->driver_data) < 0;
639 ++ return strcmp(buf, dmi->driver_data) < 0;
640 + }
641 +
642 + static bool ahci_broken_online(struct pci_dev *pdev)
643 +diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
644 +index 0d97890..be7c395 100644
645 +--- a/drivers/ata/pata_via.c
646 ++++ b/drivers/ata/pata_via.c
647 +@@ -588,6 +588,10 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
648 + u8 rev = isa->revision;
649 + pci_dev_put(isa);
650 +
651 ++ if ((id->device == 0x0415 || id->device == 0x3164) &&
652 ++ (config->id != id->device))
653 ++ continue;
654 ++
655 + if (rev >= config->rev_min && rev <= config->rev_max)
656 + break;
657 + }
658 +diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
659 +index 66fa4e1..f27c4d6 100644
660 +--- a/drivers/char/tty_buffer.c
661 ++++ b/drivers/char/tty_buffer.c
662 +@@ -247,7 +247,8 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
663 + {
664 + int copied = 0;
665 + do {
666 +- int space = tty_buffer_request_room(tty, size - copied);
667 ++ int goal = min(size - copied, TTY_BUFFER_PAGE);
668 ++ int space = tty_buffer_request_room(tty, goal);
669 + struct tty_buffer *tb = tty->buf.tail;
670 + /* If there is no space then tb may be NULL */
671 + if (unlikely(space == 0))
672 +@@ -283,7 +284,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
673 + {
674 + int copied = 0;
675 + do {
676 +- int space = tty_buffer_request_room(tty, size - copied);
677 ++ int goal = min(size - copied, TTY_BUFFER_PAGE);
678 ++ int space = tty_buffer_request_room(tty, goal);
679 + struct tty_buffer *tb = tty->buf.tail;
680 + /* If there is no space then tb may be NULL */
681 + if (unlikely(space == 0))
682 +diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
683 +index 8fc91a0..f5b6d9f 100644
684 +--- a/drivers/edac/edac_mce_amd.c
685 ++++ b/drivers/edac/edac_mce_amd.c
686 +@@ -316,7 +316,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
687 + if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
688 + pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
689 + } else {
690 +- pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
691 ++ u8 assoc_cpus = regs->nbsh & 0xf;
692 ++
693 ++ if (assoc_cpus > 0)
694 ++ pr_cont(", core: %d", fls(assoc_cpus) - 1);
695 ++
696 ++ pr_cont("\n");
697 + }
698 +
699 + pr_emerg("%s.\n", EXT_ERR_MSG(xec));
700 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
701 +index ec8a0d7..fd099a1 100644
702 +--- a/drivers/gpu/drm/i915/i915_gem.c
703 ++++ b/drivers/gpu/drm/i915/i915_gem.c
704 +@@ -1470,9 +1470,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
705 + obj_priv->dirty = 0;
706 +
707 + for (i = 0; i < page_count; i++) {
708 +- if (obj_priv->pages[i] == NULL)
709 +- break;
710 +-
711 + if (obj_priv->dirty)
712 + set_page_dirty(obj_priv->pages[i]);
713 +
714 +@@ -2228,7 +2225,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
715 + struct address_space *mapping;
716 + struct inode *inode;
717 + struct page *page;
718 +- int ret;
719 +
720 + if (obj_priv->pages_refcount++ != 0)
721 + return 0;
722 +@@ -2251,11 +2247,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
723 + mapping_gfp_mask (mapping) |
724 + __GFP_COLD |
725 + gfpmask);
726 +- if (IS_ERR(page)) {
727 +- ret = PTR_ERR(page);
728 +- i915_gem_object_put_pages(obj);
729 +- return ret;
730 +- }
731 ++ if (IS_ERR(page))
732 ++ goto err_pages;
733 ++
734 + obj_priv->pages[i] = page;
735 + }
736 +
737 +@@ -2263,6 +2257,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
738 + i915_gem_object_do_bit_17_swizzle(obj);
739 +
740 + return 0;
741 ++
742 ++err_pages:
743 ++ while (i--)
744 ++ page_cache_release(obj_priv->pages[i]);
745 ++
746 ++ drm_free_large(obj_priv->pages);
747 ++ obj_priv->pages = NULL;
748 ++ obj_priv->pages_refcount--;
749 ++ return PTR_ERR(page);
750 + }
751 +
752 + static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
753 +diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
754 +index 2639591..63f569b 100644
755 +--- a/drivers/gpu/drm/i915/intel_overlay.c
756 ++++ b/drivers/gpu/drm/i915/intel_overlay.c
757 +@@ -1083,14 +1083,18 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
758 +
759 + drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
760 + DRM_MODE_OBJECT_CRTC);
761 +- if (!drmmode_obj)
762 +- return -ENOENT;
763 ++ if (!drmmode_obj) {
764 ++ ret = -ENOENT;
765 ++ goto out_free;
766 ++ }
767 + crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
768 +
769 + new_bo = drm_gem_object_lookup(dev, file_priv,
770 + put_image_rec->bo_handle);
771 +- if (!new_bo)
772 +- return -ENOENT;
773 ++ if (!new_bo) {
774 ++ ret = -ENOENT;
775 ++ goto out_free;
776 ++ }
777 +
778 + mutex_lock(&dev->mode_config.mutex);
779 + mutex_lock(&dev->struct_mutex);
780 +@@ -1180,6 +1184,7 @@ out_unlock:
781 + mutex_unlock(&dev->struct_mutex);
782 + mutex_unlock(&dev->mode_config.mutex);
783 + drm_gem_object_unreference(new_bo);
784 ++out_free:
785 + kfree(params);
786 +
787 + return ret;
788 +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
789 +index d2f6335..a378bc3 100644
790 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
791 ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
792 +@@ -239,12 +239,14 @@ nouveau_connector_detect(struct drm_connector *connector)
793 + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
794 + nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
795 + if (nv_encoder && nv_connector->native_mode) {
796 ++ unsigned status = connector_status_connected;
797 ++
798 + #ifdef CONFIG_ACPI
799 + if (!nouveau_ignorelid && !acpi_lid_open())
800 +- return connector_status_disconnected;
801 ++ status = connector_status_unknown;
802 + #endif
803 + nouveau_connector_set_encoder(connector, nv_encoder);
804 +- return connector_status_connected;
805 ++ return status;
806 + }
807 +
808 + /* Cleanup the previous EDID block. */
809 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
810 +index 2d7bcee..cb4290a 100644
811 +--- a/drivers/hwmon/coretemp.c
812 ++++ b/drivers/hwmon/coretemp.c
813 +@@ -228,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
814 + if (err) {
815 + dev_warn(dev,
816 + "Unable to access MSR 0xEE, for Tjmax, left"
817 +- " at default");
818 ++ " at default\n");
819 + } else if (eax & 0x40000000) {
820 + tjmax = tjmax_ee;
821 + }
822 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
823 +index df6ab55..5574be2 100644
824 +--- a/drivers/i2c/busses/i2c-i801.c
825 ++++ b/drivers/i2c/busses/i2c-i801.c
826 +@@ -415,9 +415,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
827 + data->block[0] = 32; /* max for SMBus block reads */
828 + }
829 +
830 ++ /* Experience has shown that the block buffer can only be used for
831 ++ SMBus (not I2C) block transactions, even though the datasheet
832 ++ doesn't mention this limitation. */
833 + if ((i801_features & FEATURE_BLOCK_BUFFER)
834 +- && !(command == I2C_SMBUS_I2C_BLOCK_DATA
835 +- && read_write == I2C_SMBUS_READ)
836 ++ && command != I2C_SMBUS_I2C_BLOCK_DATA
837 + && i801_set_block_buffer_mode() == 0)
838 + result = i801_block_transaction_by_block(data, read_write,
839 + hwpec);
840 +diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
841 +index 1c440a7..b289ec9 100644
842 +--- a/drivers/i2c/busses/i2c-powermac.c
843 ++++ b/drivers/i2c/busses/i2c-powermac.c
844 +@@ -122,9 +122,14 @@ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap,
845 +
846 + rc = pmac_i2c_xfer(bus, addrdir, subsize, subaddr, buf, len);
847 + if (rc) {
848 +- dev_err(&adap->dev,
849 +- "I2C transfer at 0x%02x failed, size %d, err %d\n",
850 +- addrdir >> 1, size, rc);
851 ++ if (rc == -ENXIO)
852 ++ dev_dbg(&adap->dev,
853 ++ "I2C transfer at 0x%02x failed, size %d, "
854 ++ "err %d\n", addrdir >> 1, size, rc);
855 ++ else
856 ++ dev_err(&adap->dev,
857 ++ "I2C transfer at 0x%02x failed, size %d, "
858 ++ "err %d\n", addrdir >> 1, size, rc);
859 + goto bail;
860 + }
861 +
862 +@@ -175,10 +180,16 @@ static int i2c_powermac_master_xfer( struct i2c_adapter *adap,
863 + goto bail;
864 + }
865 + rc = pmac_i2c_xfer(bus, addrdir, 0, 0, msgs->buf, msgs->len);
866 +- if (rc < 0)
867 +- dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n",
868 +- addrdir & 1 ? "read from" : "write to", addrdir >> 1,
869 +- rc);
870 ++ if (rc < 0) {
871 ++ if (rc == -ENXIO)
872 ++ dev_dbg(&adap->dev, "I2C %s 0x%02x failed, err %d\n",
873 ++ addrdir & 1 ? "read from" : "write to",
874 ++ addrdir >> 1, rc);
875 ++ else
876 ++ dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n",
877 ++ addrdir & 1 ? "read from" : "write to",
878 ++ addrdir >> 1, rc);
879 ++ }
880 + bail:
881 + pmac_i2c_close(bus);
882 + return rc < 0 ? rc : 1;
883 +diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
884 +index 0f67f1a..d7e6f09 100644
885 +--- a/drivers/ide/icside.c
886 ++++ b/drivers/ide/icside.c
887 +@@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = {
888 + };
889 +
890 + struct icside_state {
891 ++ unsigned int channel;
892 ++ unsigned int enabled;
893 + void __iomem *irq_port;
894 + void __iomem *ioc_base;
895 + unsigned int sel;
896 +@@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
897 + struct icside_state *state = ec->irq_data;
898 + void __iomem *base = state->irq_port;
899 +
900 +- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
901 +- readb(base + ICS_ARCIN_V6_INTROFFSET_2);
902 ++ state->enabled = 1;
903 +
904 +- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
905 +- readb(base + ICS_ARCIN_V6_INTROFFSET_1);
906 ++ switch (state->channel) {
907 ++ case 0:
908 ++ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
909 ++ readb(base + ICS_ARCIN_V6_INTROFFSET_2);
910 ++ break;
911 ++ case 1:
912 ++ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
913 ++ readb(base + ICS_ARCIN_V6_INTROFFSET_1);
914 ++ break;
915 ++ }
916 + }
917 +
918 + /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
919 +@@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
920 + {
921 + struct icside_state *state = ec->irq_data;
922 +
923 ++ state->enabled = 0;
924 ++
925 + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
926 + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
927 + }
928 +@@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
929 + .irqpending = icside_irqpending_arcin_v6,
930 + };
931 +
932 ++/*
933 ++ * Handle routing of interrupts. This is called before
934 ++ * we write the command to the drive.
935 ++ */
936 ++static void icside_maskproc(ide_drive_t *drive, int mask)
937 ++{
938 ++ ide_hwif_t *hwif = drive->hwif;
939 ++ struct expansion_card *ec = ECARD_DEV(hwif->dev);
940 ++ struct icside_state *state = ecard_get_drvdata(ec);
941 ++ unsigned long flags;
942 ++
943 ++ local_irq_save(flags);
944 ++
945 ++ state->channel = hwif->channel;
946 ++
947 ++ if (state->enabled && !mask) {
948 ++ switch (hwif->channel) {
949 ++ case 0:
950 ++ writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
951 ++ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
952 ++ break;
953 ++ case 1:
954 ++ writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
955 ++ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
956 ++ break;
957 ++ }
958 ++ } else {
959 ++ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
960 ++ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
961 ++ }
962 ++
963 ++ local_irq_restore(flags);
964 ++}
965 ++
966 ++static const struct ide_port_ops icside_v6_no_dma_port_ops = {
967 ++ .maskproc = icside_maskproc,
968 ++};
969 ++
970 + #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
971 + /*
972 + * SG-DMA support.
973 +@@ -228,6 +277,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
974 +
975 + static const struct ide_port_ops icside_v6_port_ops = {
976 + .set_dma_mode = icside_set_dma_mode,
977 ++ .maskproc = icside_maskproc,
978 + };
979 +
980 + static void icside_dma_host_set(ide_drive_t *drive, int on)
981 +@@ -272,6 +322,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
982 + BUG_ON(dma_channel_active(ec->dma));
983 +
984 + /*
985 ++ * Ensure that we have the right interrupt routed.
986 ++ */
987 ++ icside_maskproc(drive, 0);
988 ++
989 ++ /*
990 + * Route the DMA signals to the correct interface.
991 + */
992 + writeb(state->sel | hwif->channel, state->ioc_base);
993 +@@ -399,6 +454,7 @@ err_free:
994 +
995 + static const struct ide_port_info icside_v6_port_info __initdata = {
996 + .init_dma = icside_dma_off_init,
997 ++ .port_ops = &icside_v6_no_dma_port_ops,
998 + .dma_ops = &icside_v6_dma_ops,
999 + .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
1000 + .mwdma_mask = ATA_MWDMA2,
1001 +diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
1002 +index 4d76ba4..0c11237 100644
1003 +--- a/drivers/ide/ide-probe.c
1004 ++++ b/drivers/ide/ide-probe.c
1005 +@@ -695,14 +695,8 @@ static int ide_probe_port(ide_hwif_t *hwif)
1006 + if (irqd)
1007 + disable_irq(hwif->irq);
1008 +
1009 +- rc = ide_port_wait_ready(hwif);
1010 +- if (rc == -ENODEV) {
1011 +- printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
1012 +- goto out;
1013 +- } else if (rc == -EBUSY)
1014 +- printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
1015 +- else
1016 +- rc = -ENODEV;
1017 ++ if (ide_port_wait_ready(hwif) == -EBUSY)
1018 ++ printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
1019 +
1020 + /*
1021 + * Second drive should only exist if first drive was found,
1022 +@@ -713,7 +707,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
1023 + if (drive->dev_flags & IDE_DFLAG_PRESENT)
1024 + rc = 0;
1025 + }
1026 +-out:
1027 ++
1028 + /*
1029 + * Use cached IRQ number. It might be (and is...) changed by probe
1030 + * code above
1031 +diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
1032 +index 35161dd..e3bca38 100644
1033 +--- a/drivers/ide/pdc202xx_old.c
1034 ++++ b/drivers/ide/pdc202xx_old.c
1035 +@@ -100,13 +100,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
1036 + * bit 7: error, bit 6: interrupting,
1037 + * bit 5: FIFO full, bit 4: FIFO empty
1038 + */
1039 +- return ((sc1d & 0x50) == 0x40) ? 1 : 0;
1040 ++ return ((sc1d & 0x50) == 0x50) ? 1 : 0;
1041 + } else {
1042 + /*
1043 + * bit 3: error, bit 2: interrupting,
1044 + * bit 1: FIFO full, bit 0: FIFO empty
1045 + */
1046 +- return ((sc1d & 0x05) == 0x04) ? 1 : 0;
1047 ++ return ((sc1d & 0x05) == 0x05) ? 1 : 0;
1048 + }
1049 + }
1050 +
1051 +diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1052 +index f93c2c0..f6dad83 100644
1053 +--- a/drivers/input/mouse/alps.c
1054 ++++ b/drivers/input/mouse/alps.c
1055 +@@ -63,6 +63,8 @@ static const struct alps_model_info alps_model_data[] = {
1056 + { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
1057 + ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
1058 + { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
1059 ++ { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
1060 ++ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
1061 + };
1062 +
1063 + /*
1064 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1065 +index 2a5982e..525b9b9 100644
1066 +--- a/drivers/input/serio/i8042-x86ia64io.h
1067 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1068 +@@ -442,6 +442,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
1069 + },
1070 + },
1071 + {
1072 ++ /* Medion Akoya E1222 */
1073 ++ .matches = {
1074 ++ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
1075 ++ DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
1076 ++ },
1077 ++ },
1078 ++ {
1079 + /* Mivvy M310 */
1080 + .matches = {
1081 + DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
1082 +diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
1083 +index 3f5cd06..6b6c25d 100644
1084 +--- a/drivers/isdn/gigaset/capi.c
1085 ++++ b/drivers/isdn/gigaset/capi.c
1086 +@@ -1313,7 +1313,7 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
1087 + }
1088 +
1089 + /* check parameter: CIP Value */
1090 +- if (cmsg->CIPValue > ARRAY_SIZE(cip2bchlc) ||
1091 ++ if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
1092 + (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
1093 + dev_notice(cs->dev, "%s: unknown CIP value %d\n",
1094 + "CONNECT_REQ", cmsg->CIPValue);
1095 +@@ -2215,36 +2215,24 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
1096 + }
1097 +
1098 +
1099 +-static struct capi_driver capi_driver_gigaset = {
1100 +- .name = "gigaset",
1101 +- .revision = "1.0",
1102 +-};
1103 +-
1104 + /**
1105 +- * gigaset_isdn_register() - register to LL
1106 ++ * gigaset_isdn_regdev() - register device to LL
1107 + * @cs: device descriptor structure.
1108 + * @isdnid: device name.
1109 + *
1110 +- * Called by main module to register the device with the LL.
1111 +- *
1112 + * Return value: 1 for success, 0 for failure
1113 + */
1114 +-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1115 ++int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
1116 + {
1117 + struct gigaset_capi_ctr *iif;
1118 + int rc;
1119 +
1120 +- pr_info("Kernel CAPI interface\n");
1121 +-
1122 + iif = kmalloc(sizeof(*iif), GFP_KERNEL);
1123 + if (!iif) {
1124 + pr_err("%s: out of memory\n", __func__);
1125 + return 0;
1126 + }
1127 +
1128 +- /* register driver with CAPI (ToDo: what for?) */
1129 +- register_capi_driver(&capi_driver_gigaset);
1130 +-
1131 + /* prepare controller structure */
1132 + iif->ctr.owner = THIS_MODULE;
1133 + iif->ctr.driverdata = cs;
1134 +@@ -2265,7 +2253,6 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1135 + rc = attach_capi_ctr(&iif->ctr);
1136 + if (rc) {
1137 + pr_err("attach_capi_ctr failed (%d)\n", rc);
1138 +- unregister_capi_driver(&capi_driver_gigaset);
1139 + kfree(iif);
1140 + return 0;
1141 + }
1142 +@@ -2276,17 +2263,36 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1143 + }
1144 +
1145 + /**
1146 +- * gigaset_isdn_unregister() - unregister from LL
1147 ++ * gigaset_isdn_unregdev() - unregister device from LL
1148 + * @cs: device descriptor structure.
1149 +- *
1150 +- * Called by main module to unregister the device from the LL.
1151 + */
1152 +-void gigaset_isdn_unregister(struct cardstate *cs)
1153 ++void gigaset_isdn_unregdev(struct cardstate *cs)
1154 + {
1155 + struct gigaset_capi_ctr *iif = cs->iif;
1156 +
1157 + detach_capi_ctr(&iif->ctr);
1158 + kfree(iif);
1159 + cs->iif = NULL;
1160 ++}
1161 ++
1162 ++static struct capi_driver capi_driver_gigaset = {
1163 ++ .name = "gigaset",
1164 ++ .revision = "1.0",
1165 ++};
1166 ++
1167 ++/**
1168 ++ * gigaset_isdn_regdrv() - register driver to LL
1169 ++ */
1170 ++void gigaset_isdn_regdrv(void)
1171 ++{
1172 ++ pr_info("Kernel CAPI interface\n");
1173 ++ register_capi_driver(&capi_driver_gigaset);
1174 ++}
1175 ++
1176 ++/**
1177 ++ * gigaset_isdn_unregdrv() - unregister driver from LL
1178 ++ */
1179 ++void gigaset_isdn_unregdrv(void)
1180 ++{
1181 + unregister_capi_driver(&capi_driver_gigaset);
1182 + }
1183 +diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
1184 +index 664b0c5..0427fac 100644
1185 +--- a/drivers/isdn/gigaset/common.c
1186 ++++ b/drivers/isdn/gigaset/common.c
1187 +@@ -505,7 +505,7 @@ void gigaset_freecs(struct cardstate *cs)
1188 + case 2: /* error in initcshw */
1189 + /* Deregister from LL */
1190 + make_invalid(cs, VALID_ID);
1191 +- gigaset_isdn_unregister(cs);
1192 ++ gigaset_isdn_unregdev(cs);
1193 +
1194 + /* fall through */
1195 + case 1: /* error when registering to LL */
1196 +@@ -767,7 +767,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
1197 + cs->cmdbytes = 0;
1198 +
1199 + gig_dbg(DEBUG_INIT, "setting up iif");
1200 +- if (!gigaset_isdn_register(cs, modulename)) {
1201 ++ if (!gigaset_isdn_regdev(cs, modulename)) {
1202 + pr_err("error registering ISDN device\n");
1203 + goto error;
1204 + }
1205 +@@ -1214,11 +1214,13 @@ static int __init gigaset_init_module(void)
1206 + gigaset_debuglevel = DEBUG_DEFAULT;
1207 +
1208 + pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
1209 ++ gigaset_isdn_regdrv();
1210 + return 0;
1211 + }
1212 +
1213 + static void __exit gigaset_exit_module(void)
1214 + {
1215 ++ gigaset_isdn_unregdrv();
1216 + }
1217 +
1218 + module_init(gigaset_init_module);
1219 +diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
1220 +index 5b27c99..bd0b1ea 100644
1221 +--- a/drivers/isdn/gigaset/dummyll.c
1222 ++++ b/drivers/isdn/gigaset/dummyll.c
1223 +@@ -57,12 +57,20 @@ void gigaset_isdn_stop(struct cardstate *cs)
1224 + {
1225 + }
1226 +
1227 +-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1228 ++int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
1229 + {
1230 +- pr_info("no ISDN subsystem interface\n");
1231 + return 1;
1232 + }
1233 +
1234 +-void gigaset_isdn_unregister(struct cardstate *cs)
1235 ++void gigaset_isdn_unregdev(struct cardstate *cs)
1236 ++{
1237 ++}
1238 ++
1239 ++void gigaset_isdn_regdrv(void)
1240 ++{
1241 ++ pr_info("no ISDN subsystem interface\n");
1242 ++}
1243 ++
1244 ++void gigaset_isdn_unregdrv(void)
1245 + {
1246 + }
1247 +diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
1248 +index ddeb045..0304d02 100644
1249 +--- a/drivers/isdn/gigaset/ev-layer.c
1250 ++++ b/drivers/isdn/gigaset/ev-layer.c
1251 +@@ -1259,14 +1259,10 @@ static void do_action(int action, struct cardstate *cs,
1252 + * note that bcs may be NULL if no B channel is free
1253 + */
1254 + at_state2->ConState = 700;
1255 +- kfree(at_state2->str_var[STR_NMBR]);
1256 +- at_state2->str_var[STR_NMBR] = NULL;
1257 +- kfree(at_state2->str_var[STR_ZCPN]);
1258 +- at_state2->str_var[STR_ZCPN] = NULL;
1259 +- kfree(at_state2->str_var[STR_ZBC]);
1260 +- at_state2->str_var[STR_ZBC] = NULL;
1261 +- kfree(at_state2->str_var[STR_ZHLC]);
1262 +- at_state2->str_var[STR_ZHLC] = NULL;
1263 ++ for (i = 0; i < STR_NUM; ++i) {
1264 ++ kfree(at_state2->str_var[i]);
1265 ++ at_state2->str_var[i] = NULL;
1266 ++ }
1267 + at_state2->int_var[VAR_ZCTP] = -1;
1268 +
1269 + spin_lock_irqsave(&cs->lock, flags);
1270 +diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
1271 +index e963a6c..62909b2 100644
1272 +--- a/drivers/isdn/gigaset/gigaset.h
1273 ++++ b/drivers/isdn/gigaset/gigaset.h
1274 +@@ -674,8 +674,10 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
1275 + */
1276 +
1277 + /* Called from common.c for setting up/shutting down with the ISDN subsystem */
1278 +-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid);
1279 +-void gigaset_isdn_unregister(struct cardstate *cs);
1280 ++void gigaset_isdn_regdrv(void);
1281 ++void gigaset_isdn_unregdrv(void);
1282 ++int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
1283 ++void gigaset_isdn_unregdev(struct cardstate *cs);
1284 +
1285 + /* Called from hardware module to indicate completion of an skb */
1286 + void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
1287 +diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
1288 +index c129ee4..6429a6b 100644
1289 +--- a/drivers/isdn/gigaset/i4l.c
1290 ++++ b/drivers/isdn/gigaset/i4l.c
1291 +@@ -632,15 +632,13 @@ void gigaset_isdn_stop(struct cardstate *cs)
1292 + }
1293 +
1294 + /**
1295 +- * gigaset_isdn_register() - register to LL
1296 ++ * gigaset_isdn_regdev() - register to LL
1297 + * @cs: device descriptor structure.
1298 + * @isdnid: device name.
1299 + *
1300 +- * Called by main module to register the device with the LL.
1301 +- *
1302 + * Return value: 1 for success, 0 for failure
1303 + */
1304 +-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1305 ++int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
1306 + {
1307 + isdn_if *iif;
1308 +
1309 +@@ -690,15 +688,29 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1310 + }
1311 +
1312 + /**
1313 +- * gigaset_isdn_unregister() - unregister from LL
1314 ++ * gigaset_isdn_unregdev() - unregister device from LL
1315 + * @cs: device descriptor structure.
1316 +- *
1317 +- * Called by main module to unregister the device from the LL.
1318 + */
1319 +-void gigaset_isdn_unregister(struct cardstate *cs)
1320 ++void gigaset_isdn_unregdev(struct cardstate *cs)
1321 + {
1322 + gig_dbg(DEBUG_CMD, "sending UNLOAD");
1323 + gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
1324 + kfree(cs->iif);
1325 + cs->iif = NULL;
1326 + }
1327 ++
1328 ++/**
1329 ++ * gigaset_isdn_regdrv() - register driver to LL
1330 ++ */
1331 ++void gigaset_isdn_regdrv(void)
1332 ++{
1333 ++ /* nothing to do */
1334 ++}
1335 ++
1336 ++/**
1337 ++ * gigaset_isdn_unregdrv() - unregister driver from LL
1338 ++ */
1339 ++void gigaset_isdn_unregdrv(void)
1340 ++{
1341 ++ /* nothing to do */
1342 ++}
1343 +diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
1344 +index d2260b0..07bb299 100644
1345 +--- a/drivers/isdn/gigaset/interface.c
1346 ++++ b/drivers/isdn/gigaset/interface.c
1347 +@@ -632,7 +632,6 @@ void gigaset_if_receive(struct cardstate *cs,
1348 + if (tty == NULL)
1349 + gig_dbg(DEBUG_ANY, "receive on closed device");
1350 + else {
1351 +- tty_buffer_request_room(tty, len);
1352 + tty_insert_flip_string(tty, buffer, len);
1353 + tty_flip_buffer_push(tty);
1354 + }
1355 +diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1356 +index e5225d2..0823e26 100644
1357 +--- a/drivers/leds/leds-gpio.c
1358 ++++ b/drivers/leds/leds-gpio.c
1359 +@@ -211,7 +211,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1360 + const struct of_device_id *match)
1361 + {
1362 + struct device_node *np = ofdev->node, *child;
1363 +- struct gpio_led led;
1364 + struct gpio_led_of_platform_data *pdata;
1365 + int count = 0, ret;
1366 +
1367 +@@ -226,8 +225,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1368 + if (!pdata)
1369 + return -ENOMEM;
1370 +
1371 +- memset(&led, 0, sizeof(led));
1372 + for_each_child_of_node(np, child) {
1373 ++ struct gpio_led led = {};
1374 + enum of_gpio_flags flags;
1375 + const char *state;
1376 +
1377 +diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
1378 +index cc0505e..6b0a495 100644
1379 +--- a/drivers/media/video/em28xx/em28xx-dvb.c
1380 ++++ b/drivers/media/video/em28xx/em28xx-dvb.c
1381 +@@ -606,6 +606,7 @@ static int dvb_fini(struct em28xx *dev)
1382 +
1383 + if (dev->dvb) {
1384 + unregister_dvb(dev->dvb);
1385 ++ kfree(dev->dvb);
1386 + dev->dvb = NULL;
1387 + }
1388 +
1389 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1390 +index efa0e41..1f800ae 100644
1391 +--- a/drivers/net/bonding/bond_main.c
1392 ++++ b/drivers/net/bonding/bond_main.c
1393 +@@ -4935,6 +4935,8 @@ int bond_create(struct net *net, const char *name)
1394 + }
1395 +
1396 + res = register_netdevice(bond_dev);
1397 ++ if (res < 0)
1398 ++ goto out_netdev;
1399 +
1400 + out:
1401 + rtnl_unlock();
1402 +diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
1403 +index 0ec1524..fe5e320 100644
1404 +--- a/drivers/net/can/bfin_can.c
1405 ++++ b/drivers/net/can/bfin_can.c
1406 +@@ -26,6 +26,7 @@
1407 +
1408 + #define DRV_NAME "bfin_can"
1409 + #define BFIN_CAN_TIMEOUT 100
1410 ++#define TX_ECHO_SKB_MAX 1
1411 +
1412 + /*
1413 + * transmit and receive channels
1414 +@@ -590,7 +591,7 @@ struct net_device *alloc_bfin_candev(void)
1415 + struct net_device *dev;
1416 + struct bfin_can_priv *priv;
1417 +
1418 +- dev = alloc_candev(sizeof(*priv));
1419 ++ dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
1420 + if (!dev)
1421 + return NULL;
1422 +
1423 +diff --git a/drivers/net/e100.c b/drivers/net/e100.c
1424 +index 839fb2b..a565ea1 100644
1425 +--- a/drivers/net/e100.c
1426 ++++ b/drivers/net/e100.c
1427 +@@ -2854,7 +2854,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
1428 + }
1429 + nic->cbs_pool = pci_pool_create(netdev->name,
1430 + nic->pdev,
1431 +- nic->params.cbs.count * sizeof(struct cb),
1432 ++ nic->params.cbs.max * sizeof(struct cb),
1433 + sizeof(u32),
1434 + 0);
1435 + DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
1436 +diff --git a/drivers/net/jme.c b/drivers/net/jme.c
1437 +index 792b88f..981c9fb 100644
1438 +--- a/drivers/net/jme.c
1439 ++++ b/drivers/net/jme.c
1440 +@@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1441 + jme->jme_vlan_rx(skb, jme->vlgrp,
1442 + le16_to_cpu(rxdesc->descwb.vlan));
1443 + NET_STAT(jme).rx_bytes += 4;
1444 ++ } else {
1445 ++ dev_kfree_skb(skb);
1446 + }
1447 + } else {
1448 + jme->jme_rx(skb);
1449 +@@ -2085,12 +2087,45 @@ jme_tx_timeout(struct net_device *netdev)
1450 + jme_reset_link(jme);
1451 + }
1452 +
1453 ++static inline void jme_pause_rx(struct jme_adapter *jme)
1454 ++{
1455 ++ atomic_dec(&jme->link_changing);
1456 ++
1457 ++ jme_set_rx_pcc(jme, PCC_OFF);
1458 ++ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1459 ++ JME_NAPI_DISABLE(jme);
1460 ++ } else {
1461 ++ tasklet_disable(&jme->rxclean_task);
1462 ++ tasklet_disable(&jme->rxempty_task);
1463 ++ }
1464 ++}
1465 ++
1466 ++static inline void jme_resume_rx(struct jme_adapter *jme)
1467 ++{
1468 ++ struct dynpcc_info *dpi = &(jme->dpi);
1469 ++
1470 ++ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1471 ++ JME_NAPI_ENABLE(jme);
1472 ++ } else {
1473 ++ tasklet_hi_enable(&jme->rxclean_task);
1474 ++ tasklet_hi_enable(&jme->rxempty_task);
1475 ++ }
1476 ++ dpi->cur = PCC_P1;
1477 ++ dpi->attempt = PCC_P1;
1478 ++ dpi->cnt = 0;
1479 ++ jme_set_rx_pcc(jme, PCC_P1);
1480 ++
1481 ++ atomic_inc(&jme->link_changing);
1482 ++}
1483 ++
1484 + static void
1485 + jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1486 + {
1487 + struct jme_adapter *jme = netdev_priv(netdev);
1488 +
1489 ++ jme_pause_rx(jme);
1490 + jme->vlgrp = grp;
1491 ++ jme_resume_rx(jme);
1492 + }
1493 +
1494 + static void
1495 +diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
1496 +index 9fbb2eb..449a982 100644
1497 +--- a/drivers/net/pppol2tp.c
1498 ++++ b/drivers/net/pppol2tp.c
1499 +@@ -756,6 +756,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
1500 +
1501 + /* Try to dequeue as many skbs from reorder_q as we can. */
1502 + pppol2tp_recv_dequeue(session);
1503 ++ sock_put(sock);
1504 +
1505 + return 0;
1506 +
1507 +@@ -772,6 +773,7 @@ discard_bad_csum:
1508 + UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
1509 + tunnel->stats.rx_errors++;
1510 + kfree_skb(skb);
1511 ++ sock_put(sock);
1512 +
1513 + return 0;
1514 +
1515 +@@ -1180,7 +1182,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1516 + /* Calculate UDP checksum if configured to do so */
1517 + if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
1518 + skb->ip_summed = CHECKSUM_NONE;
1519 +- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
1520 ++ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1521 ++ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1522 + skb->ip_summed = CHECKSUM_COMPLETE;
1523 + csum = skb_checksum(skb, 0, udp_len, 0);
1524 + uh->check = csum_tcpudp_magic(inet->inet_saddr,
1525 +@@ -1661,6 +1664,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1526 + if (tunnel_sock == NULL)
1527 + goto end;
1528 +
1529 ++ sock_hold(tunnel_sock);
1530 + tunnel = tunnel_sock->sk_user_data;
1531 + } else {
1532 + tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
1533 +diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1534 +index 60f96c4..67d414b 100644
1535 +--- a/drivers/net/r8169.c
1536 ++++ b/drivers/net/r8169.c
1537 +@@ -186,7 +186,12 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
1538 +
1539 + MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
1540 +
1541 +-static int rx_copybreak = 200;
1542 ++/*
1543 ++ * we set our copybreak very high so that we don't have
1544 ++ * to allocate 16k frames all the time (see note in
1545 ++ * rtl8169_open()
1546 ++ */
1547 ++static int rx_copybreak = 16383;
1548 + static int use_dac;
1549 + static struct {
1550 + u32 msg_enable;
1551 +@@ -3245,9 +3250,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
1552 + }
1553 +
1554 + static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1555 +- struct net_device *dev)
1556 ++ unsigned int mtu)
1557 + {
1558 +- unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
1559 ++ unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
1560 ++
1561 ++ if (max_frame != 16383)
1562 ++ printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
1563 ++ "May lead to frame reception errors!\n");
1564 +
1565 + tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
1566 + }
1567 +@@ -3259,7 +3268,17 @@ static int rtl8169_open(struct net_device *dev)
1568 + int retval = -ENOMEM;
1569 +
1570 +
1571 +- rtl8169_set_rxbufsize(tp, dev);
1572 ++ /*
1573 ++ * Note that we use a magic value here, its wierd I know
1574 ++ * its done because, some subset of rtl8169 hardware suffers from
1575 ++ * a problem in which frames received that are longer than
1576 ++ * the size set in RxMaxSize register return garbage sizes
1577 ++ * when received. To avoid this we need to turn off filtering,
1578 ++ * which is done by setting a value of 16383 in the RxMaxSize register
1579 ++ * and allocating 16k frames to handle the largest possible rx value
1580 ++ * thats what the magic math below does.
1581 ++ */
1582 ++ rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
1583 +
1584 + /*
1585 + * Rx and Tx desscriptors needs 256 bytes alignment.
1586 +@@ -3912,7 +3931,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
1587 +
1588 + rtl8169_down(dev);
1589 +
1590 +- rtl8169_set_rxbufsize(tp, dev);
1591 ++ rtl8169_set_rxbufsize(tp, dev->mtu);
1592 +
1593 + ret = rtl8169_init_ring(dev);
1594 + if (ret < 0)
1595 +diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1596 +index 7f82b02..17d1493 100644
1597 +--- a/drivers/net/tg3.c
1598 ++++ b/drivers/net/tg3.c
1599 +@@ -5223,7 +5223,7 @@ static void tg3_poll_controller(struct net_device *dev)
1600 + struct tg3 *tp = netdev_priv(dev);
1601 +
1602 + for (i = 0; i < tp->irq_cnt; i++)
1603 +- tg3_interrupt(tp->napi[i].irq_vec, dev);
1604 ++ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1605 + }
1606 + #endif
1607 +
1608 +diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
1609 +index bbd2f31..8b43089 100644
1610 +--- a/drivers/net/wireless/ath/ath5k/ath5k.h
1611 ++++ b/drivers/net/wireless/ath/ath5k/ath5k.h
1612 +@@ -535,7 +535,7 @@ struct ath5k_txq_info {
1613 + u32 tqi_cbr_period; /* Constant bit rate period */
1614 + u32 tqi_cbr_overflow_limit;
1615 + u32 tqi_burst_time;
1616 +- u32 tqi_ready_time; /* Not used */
1617 ++ u32 tqi_ready_time; /* Time queue waits after an event */
1618 + };
1619 +
1620 + /*
1621 +diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1622 +index d6ee8ac..ced648b 100644
1623 +--- a/drivers/net/wireless/ath/ath5k/base.c
1624 ++++ b/drivers/net/wireless/ath/ath5k/base.c
1625 +@@ -1537,7 +1537,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1626 +
1627 + ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1628 + if (ret)
1629 +- return ret;
1630 ++ goto err;
1631 ++
1632 + if (sc->opmode == NL80211_IFTYPE_AP ||
1633 + sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1634 + /*
1635 +@@ -1564,10 +1565,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1636 + if (ret) {
1637 + ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1638 + "hardware queue!\n", __func__);
1639 +- return ret;
1640 ++ goto err;
1641 + }
1642 ++ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
1643 ++ if (ret)
1644 ++ goto err;
1645 +
1646 +- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
1647 ++ /* reconfigure cabq with ready time to 80% of beacon_interval */
1648 ++ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1649 ++ if (ret)
1650 ++ goto err;
1651 ++
1652 ++ qi.tqi_ready_time = (sc->bintval * 80) / 100;
1653 ++ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1654 ++ if (ret)
1655 ++ goto err;
1656 ++
1657 ++ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1658 ++err:
1659 ++ return ret;
1660 + }
1661 +
1662 + static void
1663 +diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
1664 +index 72474c0..97df0d9 100644
1665 +--- a/drivers/net/wireless/ath/ath5k/phy.c
1666 ++++ b/drivers/net/wireless/ath/ath5k/phy.c
1667 +@@ -1386,38 +1386,39 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1668 + goto done;
1669 +
1670 + /* Calibration has finished, get the results and re-run */
1671 ++
1672 ++ /* work around empty results which can apparently happen on 5212 */
1673 + for (i = 0; i <= 10; i++) {
1674 + iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
1675 + i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
1676 + q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
1677 ++ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1678 ++ "iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
1679 ++ if (i_pwr && q_pwr)
1680 ++ break;
1681 + }
1682 +
1683 + i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
1684 + q_coffd = q_pwr >> 7;
1685 +
1686 +- /* No correction */
1687 +- if (i_coffd == 0 || q_coffd == 0)
1688 ++ /* protect against divide by 0 and loss of sign bits */
1689 ++ if (i_coffd == 0 || q_coffd < 2)
1690 + goto done;
1691 +
1692 +- i_coff = ((-iq_corr) / i_coffd);
1693 ++ i_coff = (-iq_corr) / i_coffd;
1694 ++ i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
1695 +
1696 +- /* Boundary check */
1697 +- if (i_coff > 31)
1698 +- i_coff = 31;
1699 +- if (i_coff < -32)
1700 +- i_coff = -32;
1701 ++ q_coff = (i_pwr / q_coffd) - 128;
1702 ++ q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
1703 +
1704 +- q_coff = (((s32)i_pwr / q_coffd) - 128);
1705 ++ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1706 ++ "new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
1707 ++ i_coff, q_coff, i_coffd, q_coffd);
1708 +
1709 +- /* Boundary check */
1710 +- if (q_coff > 15)
1711 +- q_coff = 15;
1712 +- if (q_coff < -16)
1713 +- q_coff = -16;
1714 +-
1715 +- /* Commit new I/Q value */
1716 +- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE |
1717 +- ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
1718 ++ /* Commit new I/Q values (set enable bit last to match HAL sources) */
1719 ++ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF, i_coff);
1720 ++ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF, q_coff);
1721 ++ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
1722 +
1723 + /* Re-enable calibration -if we don't we'll commit
1724 + * the same values again and again */
1725 +diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
1726 +index eeebb9a..b7c5725 100644
1727 +--- a/drivers/net/wireless/ath/ath5k/qcu.c
1728 ++++ b/drivers/net/wireless/ath/ath5k/qcu.c
1729 +@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
1730 + break;
1731 +
1732 + case AR5K_TX_QUEUE_CAB:
1733 ++ /* XXX: use BCN_SENT_GT, if we can figure out how */
1734 + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
1735 +- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
1736 ++ AR5K_QCU_MISC_FRSHED_DBA_GT |
1737 + AR5K_QCU_MISC_CBREXP_DIS |
1738 + AR5K_QCU_MISC_CBREXP_BCN_DIS);
1739 +
1740 +- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
1741 ++ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
1742 + (AR5K_TUNE_SW_BEACON_RESP -
1743 + AR5K_TUNE_DMA_BEACON_RESP) -
1744 + AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
1745 +diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
1746 +index 4cb9c5d..1464f89 100644
1747 +--- a/drivers/net/wireless/ath/ath5k/reg.h
1748 ++++ b/drivers/net/wireless/ath/ath5k/reg.h
1749 +@@ -2187,6 +2187,7 @@
1750 + */
1751 + #define AR5K_PHY_IQ 0x9920 /* Register Address */
1752 + #define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
1753 ++#define AR5K_PHY_IQ_CORR_Q_Q_COFF_S 0
1754 + #define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
1755 + #define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
1756 + #define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
1757 +diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
1758 +index 62954fc..dbc52ee 100644
1759 +--- a/drivers/net/wireless/ath/ath5k/reset.c
1760 ++++ b/drivers/net/wireless/ath/ath5k/reset.c
1761 +@@ -1371,8 +1371,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1762 + * Set clocks to 32KHz operation and use an
1763 + * external 32KHz crystal when sleeping if one
1764 + * exists */
1765 +- if (ah->ah_version == AR5K_AR5212)
1766 +- ath5k_hw_set_sleep_clock(ah, true);
1767 ++ if (ah->ah_version == AR5K_AR5212 &&
1768 ++ ah->ah_op_mode != NL80211_IFTYPE_AP)
1769 ++ ath5k_hw_set_sleep_clock(ah, true);
1770 +
1771 + /*
1772 + * Disable beacons and reset the register
1773 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
1774 +index 1597a42..2bad712 100644
1775 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
1776 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
1777 +@@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1778 + u16 tid, u16 *ssn);
1779 + void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1780 + void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1781 ++void ath9k_enable_ps(struct ath_softc *sc);
1782 +
1783 + /********/
1784 + /* VIFs */
1785 +diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
1786 +index 06eaaa9..20b1fd3 100644
1787 +--- a/drivers/net/wireless/ath/ath9k/beacon.c
1788 ++++ b/drivers/net/wireless/ath/ath9k/beacon.c
1789 +@@ -573,6 +573,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
1790 + u64 tsf;
1791 + int num_beacons, offset, dtim_dec_count, cfp_dec_count;
1792 +
1793 ++ /* No need to configure beacon if we are not associated */
1794 ++ if (!common->curaid) {
1795 ++ ath_print(common, ATH_DBG_BEACON,
1796 ++ "STA is not yet associated..skipping beacon config\n");
1797 ++ return;
1798 ++ }
1799 ++
1800 + memset(&bs, 0, sizeof(bs));
1801 + intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
1802 +
1803 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1804 +index 7c64aa5..6661178 100644
1805 +--- a/drivers/net/wireless/ath/ath9k/hw.c
1806 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
1807 +@@ -380,7 +380,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
1808 + ah->config.pcie_clock_req = 0;
1809 + ah->config.pcie_waen = 0;
1810 + ah->config.analog_shiftreg = 1;
1811 +- ah->config.ht_enable = 1;
1812 + ah->config.ofdm_trig_low = 200;
1813 + ah->config.ofdm_trig_high = 500;
1814 + ah->config.cck_trig_high = 200;
1815 +@@ -392,6 +391,11 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
1816 + ah->config.spurchans[i][1] = AR_NO_SPUR;
1817 + }
1818 +
1819 ++ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
1820 ++ ah->config.ht_enable = 1;
1821 ++ else
1822 ++ ah->config.ht_enable = 0;
1823 ++
1824 + ah->config.intr_mitigation = true;
1825 +
1826 + /*
1827 +@@ -590,6 +594,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
1828 + case AR5416_DEVID_AR9287_PCI:
1829 + case AR5416_DEVID_AR9287_PCIE:
1830 + case AR9271_USB:
1831 ++ case AR2427_DEVID_PCIE:
1832 + return true;
1833 + default:
1834 + break;
1835 +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
1836 +index e2b0c73..33a28ec 100644
1837 +--- a/drivers/net/wireless/ath/ath9k/hw.h
1838 ++++ b/drivers/net/wireless/ath/ath9k/hw.h
1839 +@@ -40,6 +40,7 @@
1840 + #define AR9280_DEVID_PCI 0x0029
1841 + #define AR9280_DEVID_PCIE 0x002a
1842 + #define AR9285_DEVID_PCIE 0x002b
1843 ++#define AR2427_DEVID_PCIE 0x002c
1844 +
1845 + #define AR5416_AR9100_DEVID 0x000b
1846 +
1847 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1848 +index 4faafbd..33a1071 100644
1849 +--- a/drivers/net/wireless/ath/ath9k/main.c
1850 ++++ b/drivers/net/wireless/ath/ath9k/main.c
1851 +@@ -1854,11 +1854,14 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1852 + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1853 + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1854 + IEEE80211_HW_SIGNAL_DBM |
1855 +- IEEE80211_HW_AMPDU_AGGREGATION |
1856 + IEEE80211_HW_SUPPORTS_PS |
1857 + IEEE80211_HW_PS_NULLFUNC_STACK |
1858 ++ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1859 + IEEE80211_HW_SPECTRUM_MGMT;
1860 +
1861 ++ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1862 ++ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1863 ++
1864 + if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1865 + hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1866 +
1867 +@@ -2679,6 +2682,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1868 + mutex_unlock(&sc->mutex);
1869 + }
1870 +
1871 ++void ath9k_enable_ps(struct ath_softc *sc)
1872 ++{
1873 ++ sc->ps_enabled = true;
1874 ++ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1875 ++ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1876 ++ sc->imask |= ATH9K_INT_TIM_TIMER;
1877 ++ ath9k_hw_set_interrupts(sc->sc_ah,
1878 ++ sc->imask);
1879 ++ }
1880 ++ }
1881 ++ ath9k_hw_setrxabort(sc->sc_ah, 1);
1882 ++}
1883 ++
1884 + static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1885 + {
1886 + struct ath_wiphy *aphy = hw->priv;
1887 +@@ -2732,22 +2748,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1888 + if (changed & IEEE80211_CONF_CHANGE_PS) {
1889 + if (conf->flags & IEEE80211_CONF_PS) {
1890 + sc->sc_flags |= SC_OP_PS_ENABLED;
1891 +- if (!(ah->caps.hw_caps &
1892 +- ATH9K_HW_CAP_AUTOSLEEP)) {
1893 +- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1894 +- sc->imask |= ATH9K_INT_TIM_TIMER;
1895 +- ath9k_hw_set_interrupts(sc->sc_ah,
1896 +- sc->imask);
1897 +- }
1898 +- }
1899 + /*
1900 + * At this point we know hardware has received an ACK
1901 + * of a previously sent null data frame.
1902 + */
1903 + if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
1904 + sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
1905 +- sc->ps_enabled = true;
1906 +- ath9k_hw_setrxabort(sc->sc_ah, 1);
1907 ++ ath9k_enable_ps(sc);
1908 + }
1909 + } else {
1910 + sc->ps_enabled = false;
1911 +diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
1912 +index f7af5ea..199c54a 100644
1913 +--- a/drivers/net/wireless/ath/ath9k/pci.c
1914 ++++ b/drivers/net/wireless/ath/ath9k/pci.c
1915 +@@ -25,6 +25,7 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
1916 + { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
1917 + { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
1918 + { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
1919 ++ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
1920 + { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
1921 + { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
1922 + { 0 }
1923 +diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
1924 +index 1d6cf7d..171ce2b 100644
1925 +--- a/drivers/net/wireless/ath/ath9k/rc.c
1926 ++++ b/drivers/net/wireless/ath/ath9k/rc.c
1927 +@@ -1323,7 +1323,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1928 +
1929 + static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1930 + struct ieee80211_sta *sta, void *priv_sta,
1931 +- u32 changed)
1932 ++ u32 changed, enum nl80211_channel_type oper_chan_type)
1933 + {
1934 + struct ath_softc *sc = priv;
1935 + struct ath_rate_priv *ath_rc_priv = priv_sta;
1936 +@@ -1340,8 +1340,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1937 + if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
1938 + return;
1939 +
1940 +- if (sc->hw->conf.channel_type == NL80211_CHAN_HT40MINUS ||
1941 +- sc->hw->conf.channel_type == NL80211_CHAN_HT40PLUS)
1942 ++ if (oper_chan_type == NL80211_CHAN_HT40MINUS ||
1943 ++ oper_chan_type == NL80211_CHAN_HT40PLUS)
1944 + oper_cw40 = true;
1945 +
1946 + oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1947 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
1948 +index 29bf336..c3ce920 100644
1949 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
1950 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
1951 +@@ -1353,25 +1353,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1952 + return htype;
1953 + }
1954 +
1955 +-static bool is_pae(struct sk_buff *skb)
1956 +-{
1957 +- struct ieee80211_hdr *hdr;
1958 +- __le16 fc;
1959 +-
1960 +- hdr = (struct ieee80211_hdr *)skb->data;
1961 +- fc = hdr->frame_control;
1962 +-
1963 +- if (ieee80211_is_data(fc)) {
1964 +- if (ieee80211_is_nullfunc(fc) ||
1965 +- /* Port Access Entity (IEEE 802.1X) */
1966 +- (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
1967 +- return true;
1968 +- }
1969 +- }
1970 +-
1971 +- return false;
1972 +-}
1973 +-
1974 + static int get_hw_crypto_keytype(struct sk_buff *skb)
1975 + {
1976 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1977 +@@ -1701,7 +1682,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1978 + goto tx_done;
1979 + }
1980 +
1981 +- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1982 ++ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1983 + /*
1984 + * Try aggregation if it's a unicast data frame
1985 + * and the destination is HT capable.
1986 +@@ -2053,10 +2034,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1987 + */
1988 + if (bf->bf_isnullfunc &&
1989 + (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
1990 +- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
1991 +- sc->ps_enabled = true;
1992 +- ath9k_hw_setrxabort(sc->sc_ah, 1);
1993 +- } else
1994 ++ if ((sc->sc_flags & SC_OP_PS_ENABLED))
1995 ++ ath9k_enable_ps(sc);
1996 ++ else
1997 + sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
1998 + }
1999 +
2000 +@@ -2264,7 +2244,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2001 + if (ATH_TXQ_SETUP(sc, i)) {
2002 + txq = &sc->tx.txq[i];
2003 +
2004 +- spin_lock(&txq->axq_lock);
2005 ++ spin_lock_bh(&txq->axq_lock);
2006 +
2007 + list_for_each_entry_safe(ac,
2008 + ac_tmp, &txq->axq_acq, list) {
2009 +@@ -2285,7 +2265,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2010 + }
2011 + }
2012 +
2013 +- spin_unlock(&txq->axq_lock);
2014 ++ spin_unlock_bh(&txq->axq_lock);
2015 + }
2016 + }
2017 + }
2018 +diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
2019 +index b59166c..629c166 100644
2020 +--- a/drivers/net/wireless/b43/main.c
2021 ++++ b/drivers/net/wireless/b43/main.c
2022 +@@ -852,19 +852,16 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
2023 + if (B43_WARN_ON(!modparam_hwtkip))
2024 + return;
2025 +
2026 +- mutex_lock(&wl->mutex);
2027 +-
2028 ++ /* This is only called from the RX path through mac80211, where
2029 ++ * our mutex is already locked. */
2030 ++ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
2031 + dev = wl->current_dev;
2032 +- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
2033 +- goto out_unlock;
2034 ++ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
2035 +
2036 + keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
2037 +
2038 + rx_tkip_phase1_write(dev, index, iv32, phase1key);
2039 + keymac_write(dev, index, addr);
2040 +-
2041 +-out_unlock:
2042 +- mutex_unlock(&wl->mutex);
2043 + }
2044 +
2045 + static void do_key_write(struct b43_wldev *dev,
2046 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
2047 +index 234891d..e955515 100644
2048 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
2049 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
2050 +@@ -2474,11 +2474,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2051 + memset((void *)&priv->hw_params, 0,
2052 + sizeof(struct iwl_hw_params));
2053 +
2054 +- priv->shared_virt =
2055 +- pci_alloc_consistent(priv->pci_dev,
2056 +- sizeof(struct iwl3945_shared),
2057 +- &priv->shared_phys);
2058 +-
2059 ++ priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
2060 ++ sizeof(struct iwl3945_shared),
2061 ++ &priv->shared_phys, GFP_KERNEL);
2062 + if (!priv->shared_virt) {
2063 + IWL_ERR(priv, "failed to allocate pci memory\n");
2064 + mutex_unlock(&priv->mutex);
2065 +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
2066 +index f36f804..6e9e156 100644
2067 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c
2068 ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
2069 +@@ -1658,9 +1658,9 @@ EXPORT_SYMBOL(iwl_set_tx_power);
2070 + void iwl_free_isr_ict(struct iwl_priv *priv)
2071 + {
2072 + if (priv->ict_tbl_vir) {
2073 +- pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
2074 +- PAGE_SIZE, priv->ict_tbl_vir,
2075 +- priv->ict_tbl_dma);
2076 ++ dma_free_coherent(&priv->pci_dev->dev,
2077 ++ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
2078 ++ priv->ict_tbl_vir, priv->ict_tbl_dma);
2079 + priv->ict_tbl_vir = NULL;
2080 + }
2081 + }
2082 +@@ -1676,9 +1676,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
2083 + if (priv->cfg->use_isr_legacy)
2084 + return 0;
2085 + /* allocate shrared data table */
2086 +- priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
2087 +- ICT_COUNT) + PAGE_SIZE,
2088 +- &priv->ict_tbl_dma);
2089 ++ priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
2090 ++ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
2091 ++ &priv->ict_tbl_dma, GFP_KERNEL);
2092 + if (!priv->ict_tbl_vir)
2093 + return -ENOMEM;
2094 +
2095 +diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
2096 +index bd0b12e..f8481e8 100644
2097 +--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
2098 ++++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
2099 +@@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
2100 + struct fw_desc *desc)
2101 + {
2102 + if (desc->v_addr)
2103 +- pci_free_consistent(pci_dev, desc->len,
2104 +- desc->v_addr, desc->p_addr);
2105 ++ dma_free_coherent(&pci_dev->dev, desc->len,
2106 ++ desc->v_addr, desc->p_addr);
2107 + desc->v_addr = NULL;
2108 + desc->len = 0;
2109 + }
2110 +@@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
2111 + static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
2112 + struct fw_desc *desc)
2113 + {
2114 +- desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
2115 ++ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
2116 ++ &desc->p_addr, GFP_KERNEL);
2117 + return (desc->v_addr != NULL) ? 0 : -ENOMEM;
2118 + }
2119 +
2120 +diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
2121 +index 2dbce85..4ac16d9 100644
2122 +--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
2123 ++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
2124 +@@ -350,10 +350,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
2125 + }
2126 + }
2127 +
2128 +- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2129 +- rxq->dma_addr);
2130 +- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
2131 +- rxq->rb_stts, rxq->rb_stts_dma);
2132 ++ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2133 ++ rxq->dma_addr);
2134 ++ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
2135 ++ rxq->rb_stts, rxq->rb_stts_dma);
2136 + rxq->bd = NULL;
2137 + rxq->rb_stts = NULL;
2138 + }
2139 +@@ -362,7 +362,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
2140 + int iwl_rx_queue_alloc(struct iwl_priv *priv)
2141 + {
2142 + struct iwl_rx_queue *rxq = &priv->rxq;
2143 +- struct pci_dev *dev = priv->pci_dev;
2144 ++ struct device *dev = &priv->pci_dev->dev;
2145 + int i;
2146 +
2147 + spin_lock_init(&rxq->lock);
2148 +@@ -370,12 +370,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
2149 + INIT_LIST_HEAD(&rxq->rx_used);
2150 +
2151 + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2152 +- rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
2153 ++ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
2154 ++ GFP_KERNEL);
2155 + if (!rxq->bd)
2156 + goto err_bd;
2157 +
2158 +- rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
2159 +- &rxq->rb_stts_dma);
2160 ++ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
2161 ++ &rxq->rb_stts_dma, GFP_KERNEL);
2162 + if (!rxq->rb_stts)
2163 + goto err_rb;
2164 +
2165 +@@ -392,8 +393,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
2166 + return 0;
2167 +
2168 + err_rb:
2169 +- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2170 +- rxq->dma_addr);
2171 ++ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2172 ++ rxq->dma_addr);
2173 + err_bd:
2174 + return -ENOMEM;
2175 + }
2176 +diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
2177 +index 8f40715..88470fb 100644
2178 +--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
2179 ++++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
2180 +@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
2181 + static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
2182 + struct iwl_dma_ptr *ptr, size_t size)
2183 + {
2184 +- ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
2185 ++ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
2186 ++ GFP_KERNEL);
2187 + if (!ptr->addr)
2188 + return -ENOMEM;
2189 + ptr->size = size;
2190 +@@ -73,7 +74,7 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
2191 + if (unlikely(!ptr->addr))
2192 + return;
2193 +
2194 +- pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
2195 ++ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
2196 + memset(ptr, 0, sizeof(*ptr));
2197 + }
2198 +
2199 +@@ -126,7 +127,7 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
2200 + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
2201 + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2202 + else {
2203 +- IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
2204 ++ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
2205 + priv->stations[sta_id].tid[tid].tfds_in_queue,
2206 + freed);
2207 + priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
2208 +@@ -146,7 +147,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
2209 + {
2210 + struct iwl_tx_queue *txq = &priv->txq[txq_id];
2211 + struct iwl_queue *q = &txq->q;
2212 +- struct pci_dev *dev = priv->pci_dev;
2213 ++ struct device *dev = &priv->pci_dev->dev;
2214 + int i;
2215 +
2216 + if (q->n_bd == 0)
2217 +@@ -163,8 +164,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
2218 +
2219 + /* De-alloc circular buffer of TFDs */
2220 + if (txq->q.n_bd)
2221 +- pci_free_consistent(dev, priv->hw_params.tfd_size *
2222 +- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2223 ++ dma_free_coherent(dev, priv->hw_params.tfd_size *
2224 ++ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2225 +
2226 + /* De-alloc array of per-TFD driver data */
2227 + kfree(txq->txb);
2228 +@@ -193,7 +194,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2229 + {
2230 + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
2231 + struct iwl_queue *q = &txq->q;
2232 +- struct pci_dev *dev = priv->pci_dev;
2233 ++ struct device *dev = &priv->pci_dev->dev;
2234 + int i;
2235 +
2236 + if (q->n_bd == 0)
2237 +@@ -205,8 +206,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2238 +
2239 + /* De-alloc circular buffer of TFDs */
2240 + if (txq->q.n_bd)
2241 +- pci_free_consistent(dev, priv->hw_params.tfd_size *
2242 +- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2243 ++ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
2244 ++ txq->tfds, txq->q.dma_addr);
2245 +
2246 + /* deallocate arrays */
2247 + kfree(txq->cmd);
2248 +@@ -297,7 +298,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
2249 + static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2250 + struct iwl_tx_queue *txq, u32 id)
2251 + {
2252 +- struct pci_dev *dev = priv->pci_dev;
2253 ++ struct device *dev = &priv->pci_dev->dev;
2254 + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2255 +
2256 + /* Driver private data, only for Tx (not command) queues,
2257 +@@ -316,8 +317,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2258 +
2259 + /* Circular buffer of transmit frame descriptors (TFDs),
2260 + * shared with device */
2261 +- txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
2262 +-
2263 ++ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
2264 ++ GFP_KERNEL);
2265 + if (!txq->tfds) {
2266 + IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
2267 + goto error;
2268 +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2269 +index f8e4e4b..f297865 100644
2270 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2271 ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2272 +@@ -352,10 +352,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
2273 + static void iwl3945_unset_hw_params(struct iwl_priv *priv)
2274 + {
2275 + if (priv->shared_virt)
2276 +- pci_free_consistent(priv->pci_dev,
2277 +- sizeof(struct iwl3945_shared),
2278 +- priv->shared_virt,
2279 +- priv->shared_phys);
2280 ++ dma_free_coherent(&priv->pci_dev->dev,
2281 ++ sizeof(struct iwl3945_shared),
2282 ++ priv->shared_virt,
2283 ++ priv->shared_phys);
2284 + }
2285 +
2286 + static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2287 +@@ -1253,10 +1253,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
2288 + }
2289 + }
2290 +
2291 +- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2292 +- rxq->dma_addr);
2293 +- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
2294 +- rxq->rb_stts, rxq->rb_stts_dma);
2295 ++ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2296 ++ rxq->dma_addr);
2297 ++ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
2298 ++ rxq->rb_stts, rxq->rb_stts_dma);
2299 + rxq->bd = NULL;
2300 + rxq->rb_stts = NULL;
2301 + }
2302 +diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2303 +index a007230..1685c09 100644
2304 +--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2305 ++++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2306 +@@ -443,7 +443,8 @@ out:
2307 +
2308 + void wl1251_debugfs_reset(struct wl1251 *wl)
2309 + {
2310 +- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2311 ++ if (wl->stats.fw_stats != NULL)
2312 ++ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2313 + wl->stats.retry_count = 0;
2314 + wl->stats.excessive_retries = 0;
2315 + }
2316 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2317 +index 315fea4..3245d33 100644
2318 +--- a/drivers/pci/pci.c
2319 ++++ b/drivers/pci/pci.c
2320 +@@ -2421,18 +2421,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
2321 + */
2322 + int pcix_get_max_mmrbc(struct pci_dev *dev)
2323 + {
2324 +- int err, cap;
2325 ++ int cap;
2326 + u32 stat;
2327 +
2328 + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2329 + if (!cap)
2330 + return -EINVAL;
2331 +
2332 +- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2333 +- if (err)
2334 ++ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2335 + return -EINVAL;
2336 +
2337 +- return (stat & PCI_X_STATUS_MAX_READ) >> 12;
2338 ++ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2339 + }
2340 + EXPORT_SYMBOL(pcix_get_max_mmrbc);
2341 +
2342 +@@ -2445,18 +2444,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
2343 + */
2344 + int pcix_get_mmrbc(struct pci_dev *dev)
2345 + {
2346 +- int ret, cap;
2347 +- u32 cmd;
2348 ++ int cap;
2349 ++ u16 cmd;
2350 +
2351 + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2352 + if (!cap)
2353 + return -EINVAL;
2354 +
2355 +- ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2356 +- if (!ret)
2357 +- ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2358 ++ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2359 ++ return -EINVAL;
2360 +
2361 +- return ret;
2362 ++ return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2363 + }
2364 + EXPORT_SYMBOL(pcix_get_mmrbc);
2365 +
2366 +@@ -2471,28 +2469,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
2367 + */
2368 + int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2369 + {
2370 +- int cap, err = -EINVAL;
2371 +- u32 stat, cmd, v, o;
2372 ++ int cap;
2373 ++ u32 stat, v, o;
2374 ++ u16 cmd;
2375 +
2376 + if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2377 +- goto out;
2378 ++ return -EINVAL;
2379 +
2380 + v = ffs(mmrbc) - 10;
2381 +
2382 + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2383 + if (!cap)
2384 +- goto out;
2385 ++ return -EINVAL;
2386 +
2387 +- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2388 +- if (err)
2389 +- goto out;
2390 ++ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2391 ++ return -EINVAL;
2392 +
2393 + if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2394 + return -E2BIG;
2395 +
2396 +- err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2397 +- if (err)
2398 +- goto out;
2399 ++ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2400 ++ return -EINVAL;
2401 +
2402 + o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2403 + if (o != v) {
2404 +@@ -2502,10 +2499,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2405 +
2406 + cmd &= ~PCI_X_CMD_MAX_READ;
2407 + cmd |= v << 2;
2408 +- err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
2409 ++ if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2410 ++ return -EIO;
2411 + }
2412 +-out:
2413 +- return err;
2414 ++ return 0;
2415 + }
2416 + EXPORT_SYMBOL(pcix_set_mmrbc);
2417 +
2418 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2419 +index d58b940..456c265 100644
2420 +--- a/drivers/pci/quirks.c
2421 ++++ b/drivers/pci/quirks.c
2422 +@@ -2534,6 +2534,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
2423 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
2424 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
2425 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
2426 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
2427 +
2428 + #endif /* CONFIG_PCI_IOV */
2429 +
2430 +diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
2431 +index ed90082..8cb20e4 100644
2432 +--- a/drivers/platform/x86/classmate-laptop.c
2433 ++++ b/drivers/platform/x86/classmate-laptop.c
2434 +@@ -34,6 +34,11 @@ struct cmpc_accel {
2435 + #define CMPC_ACCEL_SENSITIVITY_DEFAULT 5
2436 +
2437 +
2438 ++#define CMPC_ACCEL_HID "ACCE0000"
2439 ++#define CMPC_TABLET_HID "TBLT0000"
2440 ++#define CMPC_BL_HID "IPML200"
2441 ++#define CMPC_KEYS_HID "FnBT0000"
2442 ++
2443 + /*
2444 + * Generic input device code.
2445 + */
2446 +@@ -282,10 +287,9 @@ static int cmpc_accel_remove(struct acpi_device *acpi, int type)
2447 + }
2448 +
2449 + static const struct acpi_device_id cmpc_accel_device_ids[] = {
2450 +- {"ACCE0000", 0},
2451 ++ {CMPC_ACCEL_HID, 0},
2452 + {"", 0}
2453 + };
2454 +-MODULE_DEVICE_TABLE(acpi, cmpc_accel_device_ids);
2455 +
2456 + static struct acpi_driver cmpc_accel_acpi_driver = {
2457 + .owner = THIS_MODULE,
2458 +@@ -366,10 +370,9 @@ static int cmpc_tablet_resume(struct acpi_device *acpi)
2459 + }
2460 +
2461 + static const struct acpi_device_id cmpc_tablet_device_ids[] = {
2462 +- {"TBLT0000", 0},
2463 ++ {CMPC_TABLET_HID, 0},
2464 + {"", 0}
2465 + };
2466 +-MODULE_DEVICE_TABLE(acpi, cmpc_tablet_device_ids);
2467 +
2468 + static struct acpi_driver cmpc_tablet_acpi_driver = {
2469 + .owner = THIS_MODULE,
2470 +@@ -477,17 +480,16 @@ static int cmpc_bl_remove(struct acpi_device *acpi, int type)
2471 + return 0;
2472 + }
2473 +
2474 +-static const struct acpi_device_id cmpc_device_ids[] = {
2475 +- {"IPML200", 0},
2476 ++static const struct acpi_device_id cmpc_bl_device_ids[] = {
2477 ++ {CMPC_BL_HID, 0},
2478 + {"", 0}
2479 + };
2480 +-MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
2481 +
2482 + static struct acpi_driver cmpc_bl_acpi_driver = {
2483 + .owner = THIS_MODULE,
2484 + .name = "cmpc",
2485 + .class = "cmpc",
2486 +- .ids = cmpc_device_ids,
2487 ++ .ids = cmpc_bl_device_ids,
2488 + .ops = {
2489 + .add = cmpc_bl_add,
2490 + .remove = cmpc_bl_remove
2491 +@@ -540,10 +542,9 @@ static int cmpc_keys_remove(struct acpi_device *acpi, int type)
2492 + }
2493 +
2494 + static const struct acpi_device_id cmpc_keys_device_ids[] = {
2495 +- {"FnBT0000", 0},
2496 ++ {CMPC_KEYS_HID, 0},
2497 + {"", 0}
2498 + };
2499 +-MODULE_DEVICE_TABLE(acpi, cmpc_keys_device_ids);
2500 +
2501 + static struct acpi_driver cmpc_keys_acpi_driver = {
2502 + .owner = THIS_MODULE,
2503 +@@ -607,3 +608,13 @@ static void cmpc_exit(void)
2504 +
2505 + module_init(cmpc_init);
2506 + module_exit(cmpc_exit);
2507 ++
2508 ++static const struct acpi_device_id cmpc_device_ids[] = {
2509 ++ {CMPC_ACCEL_HID, 0},
2510 ++ {CMPC_TABLET_HID, 0},
2511 ++ {CMPC_BL_HID, 0},
2512 ++ {CMPC_KEYS_HID, 0},
2513 ++ {"", 0}
2514 ++};
2515 ++
2516 ++MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
2517 +diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
2518 +index fa34b92..1b82170 100644
2519 +--- a/drivers/scsi/qlogicpti.c
2520 ++++ b/drivers/scsi/qlogicpti.c
2521 +@@ -738,7 +738,7 @@ static int __devinit qpti_register_irq(struct qlogicpti *qpti)
2522 + * sanely maintain.
2523 + */
2524 + if (request_irq(qpti->irq, qpti_intr,
2525 +- IRQF_SHARED, "Qlogic/PTI", qpti))
2526 ++ IRQF_SHARED, "QlogicPTI", qpti))
2527 + goto fail;
2528 +
2529 + printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
2530 +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2531 +index 653f22a..bb8fd5b 100644
2532 +--- a/drivers/scsi/scsi_transport_fc.c
2533 ++++ b/drivers/scsi/scsi_transport_fc.c
2534 +@@ -1216,6 +1216,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
2535 + {
2536 + struct fc_vport *vport = transport_class_to_vport(dev);
2537 + struct Scsi_Host *shost = vport_to_shost(vport);
2538 ++ unsigned long flags;
2539 ++
2540 ++ spin_lock_irqsave(shost->host_lock, flags);
2541 ++ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
2542 ++ spin_unlock_irqrestore(shost->host_lock, flags);
2543 ++ return -EBUSY;
2544 ++ }
2545 ++ vport->flags |= FC_VPORT_DELETING;
2546 ++ spin_unlock_irqrestore(shost->host_lock, flags);
2547 +
2548 + fc_queue_work(shost, &vport->vport_delete_work);
2549 + return count;
2550 +@@ -1805,6 +1814,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
2551 + list_for_each_entry(vport, &fc_host->vports, peers) {
2552 + if ((vport->channel == 0) &&
2553 + (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
2554 ++ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
2555 ++ break;
2556 ++ vport->flags |= FC_VPORT_DELETING;
2557 + match = 1;
2558 + break;
2559 + }
2560 +@@ -3354,18 +3366,6 @@ fc_vport_terminate(struct fc_vport *vport)
2561 + unsigned long flags;
2562 + int stat;
2563 +
2564 +- spin_lock_irqsave(shost->host_lock, flags);
2565 +- if (vport->flags & FC_VPORT_CREATING) {
2566 +- spin_unlock_irqrestore(shost->host_lock, flags);
2567 +- return -EBUSY;
2568 +- }
2569 +- if (vport->flags & (FC_VPORT_DEL)) {
2570 +- spin_unlock_irqrestore(shost->host_lock, flags);
2571 +- return -EALREADY;
2572 +- }
2573 +- vport->flags |= FC_VPORT_DELETING;
2574 +- spin_unlock_irqrestore(shost->host_lock, flags);
2575 +-
2576 + if (i->f->vport_delete)
2577 + stat = i->f->vport_delete(vport);
2578 + else
2579 +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
2580 +index 55b034b..3c8a024 100644
2581 +--- a/drivers/scsi/ses.c
2582 ++++ b/drivers/scsi/ses.c
2583 +@@ -591,8 +591,6 @@ static int ses_intf_add(struct device *cdev,
2584 + ses_dev->page10_len = len;
2585 + buf = NULL;
2586 + }
2587 +- kfree(hdr_buf);
2588 +-
2589 + scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
2590 + if (!scomp)
2591 + goto err_free;
2592 +@@ -604,6 +602,8 @@ static int ses_intf_add(struct device *cdev,
2593 + goto err_free;
2594 + }
2595 +
2596 ++ kfree(hdr_buf);
2597 ++
2598 + edev->scratch = ses_dev;
2599 + for (i = 0; i < components; i++)
2600 + edev->component[i].scratch = scomp + i;
2601 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2602 +index a678186..4fd67d6 100644
2603 +--- a/drivers/usb/core/devio.c
2604 ++++ b/drivers/usb/core/devio.c
2605 +@@ -1176,6 +1176,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
2606 + free_async(as);
2607 + return -ENOMEM;
2608 + }
2609 ++ /* Isochronous input data may end up being discontiguous
2610 ++ * if some of the packets are short. Clear the buffer so
2611 ++ * that the gaps don't leak kernel data to userspace.
2612 ++ */
2613 ++ if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
2614 ++ memset(as->urb->transfer_buffer, 0,
2615 ++ uurb->buffer_length);
2616 + }
2617 + as->urb->dev = ps->dev;
2618 + as->urb->pipe = (uurb->type << 30) |
2619 +@@ -1312,10 +1319,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
2620 + void __user *addr = as->userurb;
2621 + unsigned int i;
2622 +
2623 +- if (as->userbuffer && urb->actual_length)
2624 +- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
2625 +- urb->actual_length))
2626 ++ if (as->userbuffer && urb->actual_length) {
2627 ++ if (urb->number_of_packets > 0) /* Isochronous */
2628 ++ i = urb->transfer_buffer_length;
2629 ++ else /* Non-Isoc */
2630 ++ i = urb->actual_length;
2631 ++ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
2632 + goto err_out;
2633 ++ }
2634 + if (put_user(as->status, &userurb->status))
2635 + goto err_out;
2636 + if (put_user(urb->actual_length, &userurb->actual_length))
2637 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2638 +index 1ec3857..9c90b67 100644
2639 +--- a/drivers/usb/host/ehci-hcd.c
2640 ++++ b/drivers/usb/host/ehci-hcd.c
2641 +@@ -995,7 +995,7 @@ rescan:
2642 + /* endpoints can be iso streams. for now, we don't
2643 + * accelerate iso completions ... so spin a while.
2644 + */
2645 +- if (qh->hw->hw_info1 == 0) {
2646 ++ if (qh->hw == NULL) {
2647 + ehci_vdbg (ehci, "iso delay\n");
2648 + goto idle_timeout;
2649 + }
2650 +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2651 +index 1e391e6..df533ce 100644
2652 +--- a/drivers/usb/host/ehci-sched.c
2653 ++++ b/drivers/usb/host/ehci-sched.c
2654 +@@ -1121,8 +1121,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
2655 + urb->interval);
2656 + }
2657 +
2658 +- /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
2659 +- } else if (unlikely (stream->hw_info1 != 0)) {
2660 ++ /* if dev->ep [epnum] is a QH, hw is set */
2661 ++ } else if (unlikely (stream->hw != NULL)) {
2662 + ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
2663 + urb->dev->devpath, epnum,
2664 + usb_pipein(urb->pipe) ? "in" : "out");
2665 +@@ -1563,13 +1563,27 @@ itd_patch(
2666 + static inline void
2667 + itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
2668 + {
2669 +- /* always prepend ITD/SITD ... only QH tree is order-sensitive */
2670 +- itd->itd_next = ehci->pshadow [frame];
2671 +- itd->hw_next = ehci->periodic [frame];
2672 +- ehci->pshadow [frame].itd = itd;
2673 ++ union ehci_shadow *prev = &ehci->pshadow[frame];
2674 ++ __hc32 *hw_p = &ehci->periodic[frame];
2675 ++ union ehci_shadow here = *prev;
2676 ++ __hc32 type = 0;
2677 ++
2678 ++ /* skip any iso nodes which might belong to previous microframes */
2679 ++ while (here.ptr) {
2680 ++ type = Q_NEXT_TYPE(ehci, *hw_p);
2681 ++ if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
2682 ++ break;
2683 ++ prev = periodic_next_shadow(ehci, prev, type);
2684 ++ hw_p = shadow_next_periodic(ehci, &here, type);
2685 ++ here = *prev;
2686 ++ }
2687 ++
2688 ++ itd->itd_next = here;
2689 ++ itd->hw_next = *hw_p;
2690 ++ prev->itd = itd;
2691 + itd->frame = frame;
2692 + wmb ();
2693 +- ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2694 ++ *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2695 + }
2696 +
2697 + /* fit urb's itds into the selected schedule slot; activate as needed */
2698 +diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2699 +index 2d85e21..b1dce96 100644
2700 +--- a/drivers/usb/host/ehci.h
2701 ++++ b/drivers/usb/host/ehci.h
2702 +@@ -394,9 +394,8 @@ struct ehci_iso_sched {
2703 + * acts like a qh would, if EHCI had them for ISO.
2704 + */
2705 + struct ehci_iso_stream {
2706 +- /* first two fields match QH, but info1 == 0 */
2707 +- __hc32 hw_next;
2708 +- __hc32 hw_info1;
2709 ++ /* first field matches ehci_hq, but is NULL */
2710 ++ struct ehci_qh_hw *hw;
2711 +
2712 + u32 refcount;
2713 + u8 bEndpointAddress;
2714 +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
2715 +index bee558a..f71a73a 100644
2716 +--- a/drivers/usb/host/r8a66597-hcd.c
2717 ++++ b/drivers/usb/host/r8a66597-hcd.c
2718 +@@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
2719 +
2720 + /* this function must be called with interrupt disabled */
2721 + static void free_usb_address(struct r8a66597 *r8a66597,
2722 +- struct r8a66597_device *dev)
2723 ++ struct r8a66597_device *dev, int reset)
2724 + {
2725 + int port;
2726 +
2727 +@@ -430,7 +430,13 @@ static void free_usb_address(struct r8a66597 *r8a66597,
2728 + dev->state = USB_STATE_DEFAULT;
2729 + r8a66597->address_map &= ~(1 << dev->address);
2730 + dev->address = 0;
2731 +- dev_set_drvdata(&dev->udev->dev, NULL);
2732 ++ /*
2733 ++ * Only when resetting USB, it is necessary to erase drvdata. When
2734 ++ * a usb device with usb hub is disconnect, "dev->udev" is already
2735 ++ * freed on usb_desconnect(). So we cannot access the data.
2736 ++ */
2737 ++ if (reset)
2738 ++ dev_set_drvdata(&dev->udev->dev, NULL);
2739 + list_del(&dev->device_list);
2740 + kfree(dev);
2741 +
2742 +@@ -1069,7 +1075,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
2743 + struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
2744 +
2745 + disable_r8a66597_pipe_all(r8a66597, dev);
2746 +- free_usb_address(r8a66597, dev);
2747 ++ free_usb_address(r8a66597, dev, 0);
2748 +
2749 + start_root_hub_sampling(r8a66597, port, 0);
2750 + }
2751 +@@ -2085,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
2752 + spin_lock_irqsave(&r8a66597->lock, flags);
2753 + dev = get_r8a66597_device(r8a66597, addr);
2754 + disable_r8a66597_pipe_all(r8a66597, dev);
2755 +- free_usb_address(r8a66597, dev);
2756 ++ free_usb_address(r8a66597, dev, 0);
2757 + put_child_connect_map(r8a66597, addr);
2758 + spin_unlock_irqrestore(&r8a66597->lock, flags);
2759 + }
2760 +@@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2761 + rh->port |= (1 << USB_PORT_FEAT_RESET);
2762 +
2763 + disable_r8a66597_pipe_all(r8a66597, dev);
2764 +- free_usb_address(r8a66597, dev);
2765 ++ free_usb_address(r8a66597, dev, 1);
2766 +
2767 + r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
2768 + get_dvstctr_reg(port));
2769 +diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
2770 +index 5e92c72..fa920c7 100644
2771 +--- a/drivers/usb/host/xhci-hcd.c
2772 ++++ b/drivers/usb/host/xhci-hcd.c
2773 +@@ -1173,6 +1173,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2774 + cmd_completion = &virt_dev->cmd_completion;
2775 + cmd_status = &virt_dev->cmd_status;
2776 + }
2777 ++ init_completion(cmd_completion);
2778 +
2779 + if (!ctx_change)
2780 + ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2781 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2782 +index 34acf6c..ca9e3ba 100644
2783 +--- a/drivers/usb/serial/ftdi_sio.c
2784 ++++ b/drivers/usb/serial/ftdi_sio.c
2785 +@@ -658,6 +658,7 @@ static struct usb_device_id id_table_combined [] = {
2786 + { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
2787 + { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
2788 + { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
2789 ++ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
2790 + { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
2791 + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
2792 + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
2793 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2794 +index d10b5a8..8f9e805 100644
2795 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2796 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2797 +@@ -501,6 +501,13 @@
2798 + #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2799 +
2800 + /*
2801 ++ * Contec products (http://www.contec.com)
2802 ++ * Submitted by Daniel Sangorrin
2803 ++ */
2804 ++#define CONTEC_VID 0x06CE /* Vendor ID */
2805 ++#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2806 ++
2807 ++/*
2808 + * Definitions for B&B Electronics products.
2809 + */
2810 + #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
2811 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2812 +index 6e94a67..d93283d 100644
2813 +--- a/drivers/usb/serial/option.c
2814 ++++ b/drivers/usb/serial/option.c
2815 +@@ -288,7 +288,9 @@ static int option_resume(struct usb_serial *serial);
2816 +
2817 + #define QUALCOMM_VENDOR_ID 0x05C6
2818 +
2819 +-#define MAXON_VENDOR_ID 0x16d8
2820 ++#define CMOTECH_VENDOR_ID 0x16d8
2821 ++#define CMOTECH_PRODUCT_6008 0x6008
2822 ++#define CMOTECH_PRODUCT_6280 0x6280
2823 +
2824 + #define TELIT_VENDOR_ID 0x1bc7
2825 + #define TELIT_PRODUCT_UC864E 0x1003
2826 +@@ -520,7 +522,8 @@ static struct usb_device_id option_ids[] = {
2827 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
2828 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
2829 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2830 +- { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
2831 ++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
2832 ++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
2833 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
2834 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
2835 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2836 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2837 +index 7528b8d..8ab4ab2 100644
2838 +--- a/drivers/usb/serial/qcserial.c
2839 ++++ b/drivers/usb/serial/qcserial.c
2840 +@@ -47,6 +47,35 @@ static struct usb_device_id id_table[] = {
2841 + {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
2842 + {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
2843 + {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
2844 ++ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
2845 ++ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
2846 ++ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
2847 ++ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
2848 ++ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
2849 ++ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
2850 ++ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
2851 ++ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
2852 ++ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
2853 ++ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
2854 ++ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
2855 ++ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
2856 ++ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
2857 ++ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
2858 ++ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
2859 ++ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
2860 ++ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
2861 ++ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2862 ++ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2863 ++ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2864 ++ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2865 ++ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2866 ++ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2867 ++ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2868 ++ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2869 ++ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2870 ++ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2871 ++ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
2872 ++ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
2873 + { } /* Terminating entry */
2874 + };
2875 + MODULE_DEVICE_TABLE(usb, id_table);
2876 +diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
2877 +index 5a5c303..f15fb02 100644
2878 +--- a/drivers/video/Kconfig
2879 ++++ b/drivers/video/Kconfig
2880 +@@ -909,6 +909,18 @@ config FB_XVR2500
2881 + mostly initialized the card already. It is treated as a
2882 + completely dumb framebuffer device.
2883 +
2884 ++config FB_XVR1000
2885 ++ bool "Sun XVR-1000 support"
2886 ++ depends on (FB = y) && SPARC64
2887 ++ select FB_CFB_FILLRECT
2888 ++ select FB_CFB_COPYAREA
2889 ++ select FB_CFB_IMAGEBLIT
2890 ++ help
2891 ++ This is the framebuffer device for the Sun XVR-1000 and similar
2892 ++ graphics cards. The driver only works on sparc64 systems where
2893 ++ the system firmware has mostly initialized the card already. It
2894 ++ is treated as a completely dumb framebuffer device.
2895 ++
2896 + config FB_PVR2
2897 + tristate "NEC PowerVR 2 display support"
2898 + depends on FB && SH_DREAMCAST
2899 +diff --git a/drivers/video/Makefile b/drivers/video/Makefile
2900 +index 4ecb30c..8c9a357 100644
2901 +--- a/drivers/video/Makefile
2902 ++++ b/drivers/video/Makefile
2903 +@@ -79,6 +79,7 @@ obj-$(CONFIG_FB_N411) += n411.o
2904 + obj-$(CONFIG_FB_HGA) += hgafb.o
2905 + obj-$(CONFIG_FB_XVR500) += sunxvr500.o
2906 + obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
2907 ++obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
2908 + obj-$(CONFIG_FB_IGA) += igafb.o
2909 + obj-$(CONFIG_FB_APOLLO) += dnfb.o
2910 + obj-$(CONFIG_FB_Q40) += q40fb.o
2911 +diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
2912 +new file mode 100644
2913 +index 0000000..a8248c0
2914 +--- /dev/null
2915 ++++ b/drivers/video/sunxvr1000.c
2916 +@@ -0,0 +1,228 @@
2917 ++/* sunxvr1000.c: Sun XVR-1000 driver for sparc64 systems
2918 ++ *
2919 ++ * Copyright (C) 2010 David S. Miller (davem@davemloft.net)
2920 ++ */
2921 ++
2922 ++#include <linux/module.h>
2923 ++#include <linux/kernel.h>
2924 ++#include <linux/slab.h>
2925 ++#include <linux/fb.h>
2926 ++#include <linux/init.h>
2927 ++#include <linux/of_device.h>
2928 ++
2929 ++struct gfb_info {
2930 ++ struct fb_info *info;
2931 ++
2932 ++ char __iomem *fb_base;
2933 ++ unsigned long fb_base_phys;
2934 ++
2935 ++ struct device_node *of_node;
2936 ++
2937 ++ unsigned int width;
2938 ++ unsigned int height;
2939 ++ unsigned int depth;
2940 ++ unsigned int fb_size;
2941 ++
2942 ++ u32 pseudo_palette[16];
2943 ++};
2944 ++
2945 ++static int __devinit gfb_get_props(struct gfb_info *gp)
2946 ++{
2947 ++ gp->width = of_getintprop_default(gp->of_node, "width", 0);
2948 ++ gp->height = of_getintprop_default(gp->of_node, "height", 0);
2949 ++ gp->depth = of_getintprop_default(gp->of_node, "depth", 32);
2950 ++
2951 ++ if (!gp->width || !gp->height) {
2952 ++ printk(KERN_ERR "gfb: Critical properties missing for %s\n",
2953 ++ gp->of_node->full_name);
2954 ++ return -EINVAL;
2955 ++ }
2956 ++
2957 ++ return 0;
2958 ++}
2959 ++
2960 ++static int gfb_setcolreg(unsigned regno,
2961 ++ unsigned red, unsigned green, unsigned blue,
2962 ++ unsigned transp, struct fb_info *info)
2963 ++{
2964 ++ u32 value;
2965 ++
2966 ++ if (regno < 16) {
2967 ++ red >>= 8;
2968 ++ green >>= 8;
2969 ++ blue >>= 8;
2970 ++
2971 ++ value = (blue << 16) | (green << 8) | red;
2972 ++ ((u32 *)info->pseudo_palette)[regno] = value;
2973 ++ }
2974 ++
2975 ++ return 0;
2976 ++}
2977 ++
2978 ++static struct fb_ops gfb_ops = {
2979 ++ .owner = THIS_MODULE,
2980 ++ .fb_setcolreg = gfb_setcolreg,
2981 ++ .fb_fillrect = cfb_fillrect,
2982 ++ .fb_copyarea = cfb_copyarea,
2983 ++ .fb_imageblit = cfb_imageblit,
2984 ++};
2985 ++
2986 ++static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
2987 ++{
2988 ++ struct fb_info *info = gp->info;
2989 ++ struct fb_var_screeninfo *var = &info->var;
2990 ++
2991 ++ info->flags = FBINFO_DEFAULT;
2992 ++ info->fbops = &gfb_ops;
2993 ++ info->screen_base = gp->fb_base;
2994 ++ info->screen_size = gp->fb_size;
2995 ++
2996 ++ info->pseudo_palette = gp->pseudo_palette;
2997 ++
2998 ++ /* Fill fix common fields */
2999 ++ strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
3000 ++ info->fix.smem_start = gp->fb_base_phys;
3001 ++ info->fix.smem_len = gp->fb_size;
3002 ++ info->fix.type = FB_TYPE_PACKED_PIXELS;
3003 ++ if (gp->depth == 32 || gp->depth == 24)
3004 ++ info->fix.visual = FB_VISUAL_TRUECOLOR;
3005 ++ else
3006 ++ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
3007 ++
3008 ++ var->xres = gp->width;
3009 ++ var->yres = gp->height;
3010 ++ var->xres_virtual = var->xres;
3011 ++ var->yres_virtual = var->yres;
3012 ++ var->bits_per_pixel = gp->depth;
3013 ++
3014 ++ var->red.offset = 0;
3015 ++ var->red.length = 8;
3016 ++ var->green.offset = 8;
3017 ++ var->green.length = 8;
3018 ++ var->blue.offset = 16;
3019 ++ var->blue.length = 8;
3020 ++ var->transp.offset = 0;
3021 ++ var->transp.length = 0;
3022 ++
3023 ++ if (fb_alloc_cmap(&info->cmap, 256, 0)) {
3024 ++ printk(KERN_ERR "gfb: Cannot allocate color map.\n");
3025 ++ return -ENOMEM;
3026 ++ }
3027 ++
3028 ++ return 0;
3029 ++}
3030 ++
3031 ++static int __devinit gfb_probe(struct of_device *op,
3032 ++ const struct of_device_id *match)
3033 ++{
3034 ++ struct device_node *dp = op->node;
3035 ++ struct fb_info *info;
3036 ++ struct gfb_info *gp;
3037 ++ int err;
3038 ++
3039 ++ info = framebuffer_alloc(sizeof(struct gfb_info), &op->dev);
3040 ++ if (!info) {
3041 ++ printk(KERN_ERR "gfb: Cannot allocate fb_info\n");
3042 ++ err = -ENOMEM;
3043 ++ goto err_out;
3044 ++ }
3045 ++
3046 ++ gp = info->par;
3047 ++ gp->info = info;
3048 ++ gp->of_node = dp;
3049 ++
3050 ++ gp->fb_base_phys = op->resource[6].start;
3051 ++
3052 ++ err = gfb_get_props(gp);
3053 ++ if (err)
3054 ++ goto err_release_fb;
3055 ++
3056 ++ /* Framebuffer length is the same regardless of resolution. */
3057 ++ info->fix.line_length = 16384;
3058 ++ gp->fb_size = info->fix.line_length * gp->height;
3059 ++
3060 ++ gp->fb_base = of_ioremap(&op->resource[6], 0,
3061 ++ gp->fb_size, "gfb fb");
3062 ++ if (!gp->fb_base)
3063 ++ goto err_release_fb;
3064 ++
3065 ++ err = gfb_set_fbinfo(gp);
3066 ++ if (err)
3067 ++ goto err_unmap_fb;
3068 ++
3069 ++ printk("gfb: Found device at %s\n", dp->full_name);
3070 ++
3071 ++ err = register_framebuffer(info);
3072 ++ if (err < 0) {
3073 ++ printk(KERN_ERR "gfb: Could not register framebuffer %s\n",
3074 ++ dp->full_name);
3075 ++ goto err_unmap_fb;
3076 ++ }
3077 ++
3078 ++ dev_set_drvdata(&op->dev, info);
3079 ++
3080 ++ return 0;
3081 ++
3082 ++err_unmap_fb:
3083 ++ of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);
3084 ++
3085 ++err_release_fb:
3086 ++ framebuffer_release(info);
3087 ++
3088 ++err_out:
3089 ++ return err;
3090 ++}
3091 ++
3092 ++static int __devexit gfb_remove(struct of_device *op)
3093 ++{
3094 ++ struct fb_info *info = dev_get_drvdata(&op->dev);
3095 ++ struct gfb_info *gp = info->par;
3096 ++
3097 ++ unregister_framebuffer(info);
3098 ++
3099 ++ iounmap(gp->fb_base);
3100 ++
3101 ++ of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);
3102 ++
3103 ++ framebuffer_release(info);
3104 ++
3105 ++ dev_set_drvdata(&op->dev, NULL);
3106 ++
3107 ++ return 0;
3108 ++}
3109 ++
3110 ++static const struct of_device_id gfb_match[] = {
3111 ++ {
3112 ++ .name = "SUNW,gfb",
3113 ++ },
3114 ++ {},
3115 ++};
3116 ++MODULE_DEVICE_TABLE(of, gfb_match);
3117 ++
3118 ++static struct of_platform_driver gfb_driver = {
3119 ++ .name = "gfb",
3120 ++ .match_table = gfb_match,
3121 ++ .probe = gfb_probe,
3122 ++ .remove = __devexit_p(gfb_remove),
3123 ++};
3124 ++
3125 ++static int __init gfb_init(void)
3126 ++{
3127 ++ if (fb_get_options("gfb", NULL))
3128 ++ return -ENODEV;
3129 ++
3130 ++ return of_register_driver(&gfb_driver, &of_bus_type);
3131 ++}
3132 ++
3133 ++static void __exit gfb_exit(void)
3134 ++{
3135 ++ of_unregister_driver(&gfb_driver);
3136 ++}
3137 ++
3138 ++module_init(gfb_init);
3139 ++module_exit(gfb_exit);
3140 ++
3141 ++MODULE_DESCRIPTION("framebuffer driver for Sun XVR-1000 graphics");
3142 ++MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
3143 ++MODULE_VERSION("1.0");
3144 ++MODULE_LICENSE("GPL");
3145 +diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
3146 +index 28d9cf7..7127bfe 100644
3147 +--- a/drivers/virtio/virtio_pci.c
3148 ++++ b/drivers/virtio/virtio_pci.c
3149 +@@ -473,7 +473,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
3150 +
3151 + list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
3152 + info = vq->priv;
3153 +- if (vp_dev->per_vq_vectors)
3154 ++ if (vp_dev->per_vq_vectors &&
3155 ++ info->msix_vector != VIRTIO_MSI_NO_VECTOR)
3156 + free_irq(vp_dev->msix_entries[info->msix_vector].vector,
3157 + vq);
3158 + vp_del_vq(vq);
3159 +diff --git a/fs/exec.c b/fs/exec.c
3160 +index cce6bbd..9071360 100644
3161 +--- a/fs/exec.c
3162 ++++ b/fs/exec.c
3163 +@@ -1923,8 +1923,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
3164 + /*
3165 + * Dont allow local users get cute and trick others to coredump
3166 + * into their pre-created files:
3167 ++ * Note, this is not relevant for pipes
3168 + */
3169 +- if (inode->i_uid != current_fsuid())
3170 ++ if (!ispipe && (inode->i_uid != current_fsuid()))
3171 + goto close_fail;
3172 + if (!cprm.file->f_op)
3173 + goto close_fail;
3174 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
3175 +index a6abbae..e6dd2ae 100644
3176 +--- a/fs/gfs2/file.c
3177 ++++ b/fs/gfs2/file.c
3178 +@@ -640,7 +640,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
3179 +
3180 + if (!(fl->fl_flags & FL_POSIX))
3181 + return -ENOLCK;
3182 +- if (__mandatory_lock(&ip->i_inode))
3183 ++ if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
3184 + return -ENOLCK;
3185 +
3186 + if (cmd == F_CANCELLK) {
3187 +diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
3188 +index 944b627..69e7b81 100644
3189 +--- a/fs/nfs/delegation.h
3190 ++++ b/fs/nfs/delegation.h
3191 +@@ -71,4 +71,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode)
3192 + }
3193 + #endif
3194 +
3195 ++static inline int nfs_have_delegated_attributes(struct inode *inode)
3196 ++{
3197 ++ return nfs_have_delegation(inode, FMODE_READ) &&
3198 ++ !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
3199 ++}
3200 ++
3201 + #endif
3202 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3203 +index 3c7f03b..8b5382e 100644
3204 +--- a/fs/nfs/dir.c
3205 ++++ b/fs/nfs/dir.c
3206 +@@ -1789,7 +1789,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
3207 + cache = nfs_access_search_rbtree(inode, cred);
3208 + if (cache == NULL)
3209 + goto out;
3210 +- if (!nfs_have_delegation(inode, FMODE_READ) &&
3211 ++ if (!nfs_have_delegated_attributes(inode) &&
3212 + !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
3213 + goto out_stale;
3214 + res->jiffies = cache->jiffies;
3215 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3216 +index 63f2071..bdd2142 100644
3217 +--- a/fs/nfs/file.c
3218 ++++ b/fs/nfs/file.c
3219 +@@ -486,7 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
3220 + {
3221 + dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
3222 +
3223 +- if (gfp & __GFP_WAIT)
3224 ++ /* Only do I/O if gfp is a superset of GFP_KERNEL */
3225 ++ if ((gfp & GFP_KERNEL) == GFP_KERNEL)
3226 + nfs_wb_page(page->mapping->host, page);
3227 + /* If PagePrivate() is set, then the page is not freeable */
3228 + if (PagePrivate(page))
3229 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
3230 +index f141bde..5f59a2d 100644
3231 +--- a/fs/nfs/inode.c
3232 ++++ b/fs/nfs/inode.c
3233 +@@ -759,7 +759,7 @@ int nfs_attribute_timeout(struct inode *inode)
3234 + {
3235 + struct nfs_inode *nfsi = NFS_I(inode);
3236 +
3237 +- if (nfs_have_delegation(inode, FMODE_READ))
3238 ++ if (nfs_have_delegated_attributes(inode))
3239 + return 0;
3240 + return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
3241 + }
3242 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
3243 +index a12c45b..29d9d36 100644
3244 +--- a/fs/nfs/pagelist.c
3245 ++++ b/fs/nfs/pagelist.c
3246 +@@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req)
3247 + */
3248 + int nfs_set_page_tag_locked(struct nfs_page *req)
3249 + {
3250 +- struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
3251 +-
3252 + if (!nfs_lock_request_dontget(req))
3253 + return 0;
3254 + if (req->wb_page != NULL)
3255 +- radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
3256 ++ radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
3257 + return 1;
3258 + }
3259 +
3260 +@@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
3261 + */
3262 + void nfs_clear_page_tag_locked(struct nfs_page *req)
3263 + {
3264 +- struct inode *inode = req->wb_context->path.dentry->d_inode;
3265 +- struct nfs_inode *nfsi = NFS_I(inode);
3266 +-
3267 + if (req->wb_page != NULL) {
3268 ++ struct inode *inode = req->wb_context->path.dentry->d_inode;
3269 ++ struct nfs_inode *nfsi = NFS_I(inode);
3270 ++
3271 + spin_lock(&inode->i_lock);
3272 + radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
3273 + nfs_unlock_request(req);
3274 +@@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
3275 + * nfs_clear_request - Free up all resources allocated to the request
3276 + * @req:
3277 + *
3278 +- * Release page resources associated with a write request after it
3279 +- * has completed.
3280 ++ * Release page and open context resources associated with a read/write
3281 ++ * request after it has completed.
3282 + */
3283 + void nfs_clear_request(struct nfs_page *req)
3284 + {
3285 + struct page *page = req->wb_page;
3286 ++ struct nfs_open_context *ctx = req->wb_context;
3287 ++
3288 + if (page != NULL) {
3289 + page_cache_release(page);
3290 + req->wb_page = NULL;
3291 + }
3292 ++ if (ctx != NULL) {
3293 ++ put_nfs_open_context(ctx);
3294 ++ req->wb_context = NULL;
3295 ++ }
3296 + }
3297 +
3298 +
3299 +@@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref)
3300 + {
3301 + struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
3302 +
3303 +- /* Release struct file or cached credential */
3304 ++ /* Release struct file and open context */
3305 + nfs_clear_request(req);
3306 +- put_nfs_open_context(req->wb_context);
3307 + nfs_page_free(req);
3308 + }
3309 +
3310 +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
3311 +index 105b508..ddce17b 100644
3312 +--- a/fs/nilfs2/segment.c
3313 ++++ b/fs/nilfs2/segment.c
3314 +@@ -1902,8 +1902,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
3315 +
3316 + list_splice_tail_init(&sci->sc_write_logs, &logs);
3317 + ret = nilfs_wait_on_logs(&logs);
3318 +- if (ret)
3319 +- nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
3320 ++ nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
3321 +
3322 + list_splice_tail_init(&sci->sc_segbufs, &logs);
3323 + nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
3324 +diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
3325 +index 0028d2e..90be97f 100644
3326 +--- a/fs/partitions/msdos.c
3327 ++++ b/fs/partitions/msdos.c
3328 +@@ -31,14 +31,17 @@
3329 + */
3330 + #include <asm/unaligned.h>
3331 +
3332 +-#define SYS_IND(p) (get_unaligned(&p->sys_ind))
3333 +-#define NR_SECTS(p) ({ __le32 __a = get_unaligned(&p->nr_sects); \
3334 +- le32_to_cpu(__a); \
3335 +- })
3336 ++#define SYS_IND(p) get_unaligned(&p->sys_ind)
3337 +
3338 +-#define START_SECT(p) ({ __le32 __a = get_unaligned(&p->start_sect); \
3339 +- le32_to_cpu(__a); \
3340 +- })
3341 ++static inline sector_t nr_sects(struct partition *p)
3342 ++{
3343 ++ return (sector_t)get_unaligned_le32(&p->nr_sects);
3344 ++}
3345 ++
3346 ++static inline sector_t start_sect(struct partition *p)
3347 ++{
3348 ++ return (sector_t)get_unaligned_le32(&p->start_sect);
3349 ++}
3350 +
3351 + static inline int is_extended_partition(struct partition *p)
3352 + {
3353 +@@ -104,13 +107,13 @@ static int aix_magic_present(unsigned char *p, struct block_device *bdev)
3354 +
3355 + static void
3356 + parse_extended(struct parsed_partitions *state, struct block_device *bdev,
3357 +- u32 first_sector, u32 first_size)
3358 ++ sector_t first_sector, sector_t first_size)
3359 + {
3360 + struct partition *p;
3361 + Sector sect;
3362 + unsigned char *data;
3363 +- u32 this_sector, this_size;
3364 +- int sector_size = bdev_logical_block_size(bdev) / 512;
3365 ++ sector_t this_sector, this_size;
3366 ++ sector_t sector_size = bdev_logical_block_size(bdev) / 512;
3367 + int loopct = 0; /* number of links followed
3368 + without finding a data partition */
3369 + int i;
3370 +@@ -145,14 +148,14 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
3371 + * First process the data partition(s)
3372 + */
3373 + for (i=0; i<4; i++, p++) {
3374 +- u32 offs, size, next;
3375 +- if (!NR_SECTS(p) || is_extended_partition(p))
3376 ++ sector_t offs, size, next;
3377 ++ if (!nr_sects(p) || is_extended_partition(p))
3378 + continue;
3379 +
3380 + /* Check the 3rd and 4th entries -
3381 + these sometimes contain random garbage */
3382 +- offs = START_SECT(p)*sector_size;
3383 +- size = NR_SECTS(p)*sector_size;
3384 ++ offs = start_sect(p)*sector_size;
3385 ++ size = nr_sects(p)*sector_size;
3386 + next = this_sector + offs;
3387 + if (i >= 2) {
3388 + if (offs + size > this_size)
3389 +@@ -179,13 +182,13 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
3390 + */
3391 + p -= 4;
3392 + for (i=0; i<4; i++, p++)
3393 +- if (NR_SECTS(p) && is_extended_partition(p))
3394 ++ if (nr_sects(p) && is_extended_partition(p))
3395 + break;
3396 + if (i == 4)
3397 + goto done; /* nothing left to do */
3398 +
3399 +- this_sector = first_sector + START_SECT(p) * sector_size;
3400 +- this_size = NR_SECTS(p) * sector_size;
3401 ++ this_sector = first_sector + start_sect(p) * sector_size;
3402 ++ this_size = nr_sects(p) * sector_size;
3403 + put_dev_sector(sect);
3404 + }
3405 + done:
3406 +@@ -197,7 +200,7 @@ done:
3407 +
3408 + static void
3409 + parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
3410 +- u32 offset, u32 size, int origin)
3411 ++ sector_t offset, sector_t size, int origin)
3412 + {
3413 + #ifdef CONFIG_SOLARIS_X86_PARTITION
3414 + Sector sect;
3415 +@@ -244,7 +247,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
3416 + */
3417 + static void
3418 + parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
3419 +- u32 offset, u32 size, int origin, char *flavour,
3420 ++ sector_t offset, sector_t size, int origin, char *flavour,
3421 + int max_partitions)
3422 + {
3423 + Sector sect;
3424 +@@ -263,7 +266,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
3425 + if (le16_to_cpu(l->d_npartitions) < max_partitions)
3426 + max_partitions = le16_to_cpu(l->d_npartitions);
3427 + for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
3428 +- u32 bsd_start, bsd_size;
3429 ++ sector_t bsd_start, bsd_size;
3430 +
3431 + if (state->next == state->limit)
3432 + break;
3433 +@@ -290,7 +293,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
3434 +
3435 + static void
3436 + parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
3437 +- u32 offset, u32 size, int origin)
3438 ++ sector_t offset, sector_t size, int origin)
3439 + {
3440 + #ifdef CONFIG_BSD_DISKLABEL
3441 + parse_bsd(state, bdev, offset, size, origin,
3442 +@@ -300,7 +303,7 @@ parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
3443 +
3444 + static void
3445 + parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
3446 +- u32 offset, u32 size, int origin)
3447 ++ sector_t offset, sector_t size, int origin)
3448 + {
3449 + #ifdef CONFIG_BSD_DISKLABEL
3450 + parse_bsd(state, bdev, offset, size, origin,
3451 +@@ -310,7 +313,7 @@ parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
3452 +
3453 + static void
3454 + parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
3455 +- u32 offset, u32 size, int origin)
3456 ++ sector_t offset, sector_t size, int origin)
3457 + {
3458 + #ifdef CONFIG_BSD_DISKLABEL
3459 + parse_bsd(state, bdev, offset, size, origin,
3460 +@@ -324,7 +327,7 @@ parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
3461 + */
3462 + static void
3463 + parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
3464 +- u32 offset, u32 size, int origin)
3465 ++ sector_t offset, sector_t size, int origin)
3466 + {
3467 + #ifdef CONFIG_UNIXWARE_DISKLABEL
3468 + Sector sect;
3469 +@@ -348,7 +351,8 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
3470 +
3471 + if (p->s_label != UNIXWARE_FS_UNUSED)
3472 + put_partition(state, state->next++,
3473 +- START_SECT(p), NR_SECTS(p));
3474 ++ le32_to_cpu(p->start_sect),
3475 ++ le32_to_cpu(p->nr_sects));
3476 + p++;
3477 + }
3478 + put_dev_sector(sect);
3479 +@@ -363,7 +367,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
3480 + */
3481 + static void
3482 + parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3483 +- u32 offset, u32 size, int origin)
3484 ++ sector_t offset, sector_t size, int origin)
3485 + {
3486 + #ifdef CONFIG_MINIX_SUBPARTITION
3487 + Sector sect;
3488 +@@ -390,7 +394,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3489 + /* add each partition in use */
3490 + if (SYS_IND(p) == MINIX_PARTITION)
3491 + put_partition(state, state->next++,
3492 +- START_SECT(p), NR_SECTS(p));
3493 ++ start_sect(p), nr_sects(p));
3494 + }
3495 + printk(" >\n");
3496 + }
3497 +@@ -401,7 +405,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3498 + static struct {
3499 + unsigned char id;
3500 + void (*parse)(struct parsed_partitions *, struct block_device *,
3501 +- u32, u32, int);
3502 ++ sector_t, sector_t, int);
3503 + } subtypes[] = {
3504 + {FREEBSD_PARTITION, parse_freebsd},
3505 + {NETBSD_PARTITION, parse_netbsd},
3506 +@@ -415,7 +419,7 @@ static struct {
3507 +
3508 + int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3509 + {
3510 +- int sector_size = bdev_logical_block_size(bdev) / 512;
3511 ++ sector_t sector_size = bdev_logical_block_size(bdev) / 512;
3512 + Sector sect;
3513 + unsigned char *data;
3514 + struct partition *p;
3515 +@@ -483,14 +487,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3516 +
3517 + state->next = 5;
3518 + for (slot = 1 ; slot <= 4 ; slot++, p++) {
3519 +- u32 start = START_SECT(p)*sector_size;
3520 +- u32 size = NR_SECTS(p)*sector_size;
3521 ++ sector_t start = start_sect(p)*sector_size;
3522 ++ sector_t size = nr_sects(p)*sector_size;
3523 + if (!size)
3524 + continue;
3525 + if (is_extended_partition(p)) {
3526 +- /* prevent someone doing mkfs or mkswap on an
3527 +- extended partition, but leave room for LILO */
3528 +- put_partition(state, slot, start, size == 1 ? 1 : 2);
3529 ++ /*
3530 ++ * prevent someone doing mkfs or mkswap on an
3531 ++ * extended partition, but leave room for LILO
3532 ++ * FIXME: this uses one logical sector for > 512b
3533 ++ * sector, although it may not be enough/proper.
3534 ++ */
3535 ++ sector_t n = 2;
3536 ++ n = min(size, max(sector_size, n));
3537 ++ put_partition(state, slot, start, n);
3538 ++
3539 + printk(" <");
3540 + parse_extended(state, bdev, start, size);
3541 + printk(" >");
3542 +@@ -513,7 +524,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3543 + unsigned char id = SYS_IND(p);
3544 + int n;
3545 +
3546 +- if (!NR_SECTS(p))
3547 ++ if (!nr_sects(p))
3548 + continue;
3549 +
3550 + for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
3551 +@@ -521,8 +532,8 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3552 +
3553 + if (!subtypes[n].parse)
3554 + continue;
3555 +- subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
3556 +- NR_SECTS(p)*sector_size, slot);
3557 ++ subtypes[n].parse(state, bdev, start_sect(p)*sector_size,
3558 ++ nr_sects(p)*sector_size, slot);
3559 + }
3560 + put_dev_sector(sect);
3561 + return 1;
3562 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
3563 +index 3fc62b0..6e722c1 100644
3564 +--- a/fs/quota/dquot.c
3565 ++++ b/fs/quota/dquot.c
3566 +@@ -225,6 +225,8 @@ static struct hlist_head *dquot_hash;
3567 + struct dqstats dqstats;
3568 + EXPORT_SYMBOL(dqstats);
3569 +
3570 ++static qsize_t inode_get_rsv_space(struct inode *inode);
3571 ++
3572 + static inline unsigned int
3573 + hashfn(const struct super_block *sb, unsigned int id, int type)
3574 + {
3575 +@@ -840,11 +842,14 @@ static int dqinit_needed(struct inode *inode, int type)
3576 + static void add_dquot_ref(struct super_block *sb, int type)
3577 + {
3578 + struct inode *inode, *old_inode = NULL;
3579 ++ int reserved = 0;
3580 +
3581 + spin_lock(&inode_lock);
3582 + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
3583 + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
3584 + continue;
3585 ++ if (unlikely(inode_get_rsv_space(inode) > 0))
3586 ++ reserved = 1;
3587 + if (!atomic_read(&inode->i_writecount))
3588 + continue;
3589 + if (!dqinit_needed(inode, type))
3590 +@@ -865,6 +870,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
3591 + }
3592 + spin_unlock(&inode_lock);
3593 + iput(old_inode);
3594 ++
3595 ++ if (reserved) {
3596 ++ printk(KERN_WARNING "VFS (%s): Writes happened before quota"
3597 ++ " was turned on thus quota information is probably "
3598 ++ "inconsistent. Please run quotacheck(8).\n", sb->s_id);
3599 ++ }
3600 + }
3601 +
3602 + /*
3603 +@@ -978,10 +989,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
3604 + /*
3605 + * Claim reserved quota space
3606 + */
3607 +-static void dquot_claim_reserved_space(struct dquot *dquot,
3608 +- qsize_t number)
3609 ++static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
3610 + {
3611 +- WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
3612 ++ if (dquot->dq_dqb.dqb_rsvspace < number) {
3613 ++ WARN_ON_ONCE(1);
3614 ++ number = dquot->dq_dqb.dqb_rsvspace;
3615 ++ }
3616 + dquot->dq_dqb.dqb_curspace += number;
3617 + dquot->dq_dqb.dqb_rsvspace -= number;
3618 + }
3619 +@@ -989,7 +1002,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot,
3620 + static inline
3621 + void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
3622 + {
3623 +- dquot->dq_dqb.dqb_rsvspace -= number;
3624 ++ if (dquot->dq_dqb.dqb_rsvspace >= number)
3625 ++ dquot->dq_dqb.dqb_rsvspace -= number;
3626 ++ else {
3627 ++ WARN_ON_ONCE(1);
3628 ++ dquot->dq_dqb.dqb_rsvspace = 0;
3629 ++ }
3630 + }
3631 +
3632 + static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
3633 +@@ -1242,6 +1260,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
3634 + return QUOTA_NL_BHARDBELOW;
3635 + return QUOTA_NL_NOWARN;
3636 + }
3637 ++
3638 + /*
3639 + * Initialize quota pointers in inode
3640 + * We do things in a bit complicated way but by that we avoid calling
3641 +@@ -1253,6 +1272,7 @@ int dquot_initialize(struct inode *inode, int type)
3642 + int cnt, ret = 0;
3643 + struct dquot *got[MAXQUOTAS] = { NULL, NULL };
3644 + struct super_block *sb = inode->i_sb;
3645 ++ qsize_t rsv;
3646 +
3647 + /* First test before acquiring mutex - solves deadlocks when we
3648 + * re-enter the quota code and are already holding the mutex */
3649 +@@ -1287,6 +1307,13 @@ int dquot_initialize(struct inode *inode, int type)
3650 + if (!inode->i_dquot[cnt]) {
3651 + inode->i_dquot[cnt] = got[cnt];
3652 + got[cnt] = NULL;
3653 ++ /*
3654 ++ * Make quota reservation system happy if someone
3655 ++ * did a write before quota was turned on
3656 ++ */
3657 ++ rsv = inode_get_rsv_space(inode);
3658 ++ if (unlikely(rsv))
3659 ++ dquot_resv_space(inode->i_dquot[cnt], rsv);
3660 + }
3661 + }
3662 + out_err:
3663 +@@ -1351,28 +1378,30 @@ static qsize_t *inode_reserved_space(struct inode * inode)
3664 + return inode->i_sb->dq_op->get_reserved_space(inode);
3665 + }
3666 +
3667 +-static void inode_add_rsv_space(struct inode *inode, qsize_t number)
3668 ++void inode_add_rsv_space(struct inode *inode, qsize_t number)
3669 + {
3670 + spin_lock(&inode->i_lock);
3671 + *inode_reserved_space(inode) += number;
3672 + spin_unlock(&inode->i_lock);
3673 + }
3674 ++EXPORT_SYMBOL(inode_add_rsv_space);
3675 +
3676 +-
3677 +-static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3678 ++void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3679 + {
3680 + spin_lock(&inode->i_lock);
3681 + *inode_reserved_space(inode) -= number;
3682 + __inode_add_bytes(inode, number);
3683 + spin_unlock(&inode->i_lock);
3684 + }
3685 ++EXPORT_SYMBOL(inode_claim_rsv_space);
3686 +
3687 +-static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3688 ++void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3689 + {
3690 + spin_lock(&inode->i_lock);
3691 + *inode_reserved_space(inode) -= number;
3692 + spin_unlock(&inode->i_lock);
3693 + }
3694 ++EXPORT_SYMBOL(inode_sub_rsv_space);
3695 +
3696 + static qsize_t inode_get_rsv_space(struct inode *inode)
3697 + {
3698 +diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
3699 +index 5032b9a..ad5ec1d 100644
3700 +--- a/include/linux/decompress/mm.h
3701 ++++ b/include/linux/decompress/mm.h
3702 +@@ -14,11 +14,21 @@
3703 +
3704 + /* Code active when included from pre-boot environment: */
3705 +
3706 ++/*
3707 ++ * Some architectures want to ensure there is no local data in their
3708 ++ * pre-boot environment, so that data can arbitarily relocated (via
3709 ++ * GOT references). This is achieved by defining STATIC_RW_DATA to
3710 ++ * be null.
3711 ++ */
3712 ++#ifndef STATIC_RW_DATA
3713 ++#define STATIC_RW_DATA static
3714 ++#endif
3715 ++
3716 + /* A trivial malloc implementation, adapted from
3717 + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
3718 + */
3719 +-static unsigned long malloc_ptr;
3720 +-static int malloc_count;
3721 ++STATIC_RW_DATA unsigned long malloc_ptr;
3722 ++STATIC_RW_DATA int malloc_count;
3723 +
3724 + static void *malloc(int size)
3725 + {
3726 +diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
3727 +index 1822d63..16b92d0 100644
3728 +--- a/include/linux/if_tunnel.h
3729 ++++ b/include/linux/if_tunnel.h
3730 +@@ -2,6 +2,7 @@
3731 + #define _IF_TUNNEL_H_
3732 +
3733 + #include <linux/types.h>
3734 ++#include <asm/byteorder.h>
3735 +
3736 + #ifdef __KERNEL__
3737 + #include <linux/ip.h>
3738 +diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
3739 +index bc0fc79..ece0b1c 100644
3740 +--- a/include/linux/kfifo.h
3741 ++++ b/include/linux/kfifo.h
3742 +@@ -102,8 +102,6 @@ union { \
3743 + unsigned char name##kfifo_buffer[size]; \
3744 + struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer)
3745 +
3746 +-#undef __kfifo_initializer
3747 +-
3748 + extern void kfifo_init(struct kfifo *fifo, void *buffer,
3749 + unsigned int size);
3750 + extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
3751 +diff --git a/include/linux/kvm.h b/include/linux/kvm.h
3752 +index a24de0b..553a388 100644
3753 +--- a/include/linux/kvm.h
3754 ++++ b/include/linux/kvm.h
3755 +@@ -497,6 +497,7 @@ struct kvm_ioeventfd {
3756 + #endif
3757 + #define KVM_CAP_S390_PSW 42
3758 + #define KVM_CAP_PPC_SEGSTATE 43
3759 ++#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
3760 +
3761 + #ifdef KVM_CAP_IRQ_ROUTING
3762 +
3763 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3764 +index 99914e6..03e8d81 100644
3765 +--- a/include/linux/netdevice.h
3766 ++++ b/include/linux/netdevice.h
3767 +@@ -2023,12 +2023,12 @@ static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
3768 + * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
3769 + * ARP on active-backup slaves with arp_validate enabled.
3770 + */
3771 +-static inline int skb_bond_should_drop(struct sk_buff *skb)
3772 ++static inline int skb_bond_should_drop(struct sk_buff *skb,
3773 ++ struct net_device *master)
3774 + {
3775 +- struct net_device *dev = skb->dev;
3776 +- struct net_device *master = dev->master;
3777 +-
3778 + if (master) {
3779 ++ struct net_device *dev = skb->dev;
3780 ++
3781 + if (master->priv_flags & IFF_MASTER_ARPMON)
3782 + dev->last_rx = jiffies;
3783 +
3784 +diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
3785 +index 49d321f..264d83d 100644
3786 +--- a/include/linux/netfilter/nfnetlink.h
3787 ++++ b/include/linux/netfilter/nfnetlink.h
3788 +@@ -76,7 +76,7 @@ extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
3789 + extern int nfnetlink_has_listeners(unsigned int group);
3790 + extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
3791 + int echo, gfp_t flags);
3792 +-extern void nfnetlink_set_err(u32 pid, u32 group, int error);
3793 ++extern int nfnetlink_set_err(u32 pid, u32 group, int error);
3794 + extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
3795 +
3796 + extern void nfnl_lock(void);
3797 +diff --git a/include/linux/netlink.h b/include/linux/netlink.h
3798 +index fde27c0..6eaca5e 100644
3799 +--- a/include/linux/netlink.h
3800 ++++ b/include/linux/netlink.h
3801 +@@ -188,7 +188,7 @@ extern int netlink_has_listeners(struct sock *sk, unsigned int group);
3802 + extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
3803 + extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
3804 + __u32 group, gfp_t allocation);
3805 +-extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
3806 ++extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
3807 + extern int netlink_register_notifier(struct notifier_block *nb);
3808 + extern int netlink_unregister_notifier(struct notifier_block *nb);
3809 +
3810 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
3811 +index c8ea0c7..41f977b 100644
3812 +--- a/include/linux/perf_event.h
3813 ++++ b/include/linux/perf_event.h
3814 +@@ -793,6 +793,13 @@ struct perf_sample_data {
3815 + struct perf_raw_record *raw;
3816 + };
3817 +
3818 ++static inline
3819 ++void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
3820 ++{
3821 ++ data->addr = addr;
3822 ++ data->raw = NULL;
3823 ++}
3824 ++
3825 + extern void perf_output_sample(struct perf_output_handle *handle,
3826 + struct perf_event_header *header,
3827 + struct perf_sample_data *data,
3828 +diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
3829 +index 3ebb231..a529d86 100644
3830 +--- a/include/linux/quotaops.h
3831 ++++ b/include/linux/quotaops.h
3832 +@@ -26,6 +26,10 @@ static inline void writeout_quota_sb(struct super_block *sb, int type)
3833 + sb->s_qcop->quota_sync(sb, type);
3834 + }
3835 +
3836 ++void inode_add_rsv_space(struct inode *inode, qsize_t number);
3837 ++void inode_claim_rsv_space(struct inode *inode, qsize_t number);
3838 ++void inode_sub_rsv_space(struct inode *inode, qsize_t number);
3839 ++
3840 + int dquot_initialize(struct inode *inode, int type);
3841 + int dquot_drop(struct inode *inode);
3842 + struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
3843 +@@ -42,7 +46,6 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number);
3844 + int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
3845 + int dquot_claim_space(struct inode *inode, qsize_t number);
3846 + void dquot_release_reserved_space(struct inode *inode, qsize_t number);
3847 +-qsize_t dquot_get_reserved_space(struct inode *inode);
3848 +
3849 + int dquot_free_space(struct inode *inode, qsize_t number);
3850 + int dquot_free_inode(const struct inode *inode, qsize_t number);
3851 +@@ -199,6 +202,8 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
3852 + if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
3853 + return 1;
3854 + }
3855 ++ else
3856 ++ inode_add_rsv_space(inode, nr);
3857 + return 0;
3858 + }
3859 +
3860 +@@ -221,7 +226,7 @@ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
3861 + if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
3862 + return 1;
3863 + } else
3864 +- inode_add_bytes(inode, nr);
3865 ++ inode_claim_rsv_space(inode, nr);
3866 +
3867 + mark_inode_dirty(inode);
3868 + return 0;
3869 +@@ -235,6 +240,8 @@ void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
3870 + {
3871 + if (sb_any_quota_active(inode->i_sb))
3872 + inode->i_sb->dq_op->release_rsv(inode, nr);
3873 ++ else
3874 ++ inode_sub_rsv_space(inode, nr);
3875 + }
3876 +
3877 + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
3878 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3879 +index ec226a2..28a9617 100644
3880 +--- a/include/linux/skbuff.h
3881 ++++ b/include/linux/skbuff.h
3882 +@@ -190,9 +190,6 @@ struct skb_shared_info {
3883 + atomic_t dataref;
3884 + unsigned short nr_frags;
3885 + unsigned short gso_size;
3886 +-#ifdef CONFIG_HAS_DMA
3887 +- dma_addr_t dma_head;
3888 +-#endif
3889 + /* Warning: this field is not always filled in (UFO)! */
3890 + unsigned short gso_segs;
3891 + unsigned short gso_type;
3892 +@@ -201,9 +198,6 @@ struct skb_shared_info {
3893 + struct sk_buff *frag_list;
3894 + struct skb_shared_hwtstamps hwtstamps;
3895 + skb_frag_t frags[MAX_SKB_FRAGS];
3896 +-#ifdef CONFIG_HAS_DMA
3897 +- dma_addr_t dma_maps[MAX_SKB_FRAGS];
3898 +-#endif
3899 + /* Intermediate layers must ensure that destructor_arg
3900 + * remains valid until skb destructor */
3901 + void * destructor_arg;
3902 +diff --git a/include/linux/tty.h b/include/linux/tty.h
3903 +index 6abfcf5..42f2076 100644
3904 +--- a/include/linux/tty.h
3905 ++++ b/include/linux/tty.h
3906 +@@ -68,6 +68,17 @@ struct tty_buffer {
3907 + unsigned long data[0];
3908 + };
3909 +
3910 ++/*
3911 ++ * We default to dicing tty buffer allocations to this many characters
3912 ++ * in order to avoid multiple page allocations. We know the size of
3913 ++ * tty_buffer itself but it must also be taken into account that the
3914 ++ * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
3915 ++ * logic this must match
3916 ++ */
3917 ++
3918 ++#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
3919 ++
3920 ++
3921 + struct tty_bufhead {
3922 + struct delayed_work work;
3923 + spinlock_t lock;
3924 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3925 +index 0bf3697..f39b303 100644
3926 +--- a/include/net/mac80211.h
3927 ++++ b/include/net/mac80211.h
3928 +@@ -926,6 +926,9 @@ enum ieee80211_tkip_key_type {
3929 + * @IEEE80211_HW_BEACON_FILTER:
3930 + * Hardware supports dropping of irrelevant beacon frames to
3931 + * avoid waking up cpu.
3932 ++ * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
3933 ++ * Hardware can provide ack status reports of Tx frames to
3934 ++ * the stack.
3935 + */
3936 + enum ieee80211_hw_flags {
3937 + IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
3938 +@@ -943,6 +946,7 @@ enum ieee80211_hw_flags {
3939 + IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
3940 + IEEE80211_HW_MFP_CAPABLE = 1<<13,
3941 + IEEE80211_HW_BEACON_FILTER = 1<<14,
3942 ++ IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<15,
3943 + };
3944 +
3945 + /**
3946 +@@ -2258,7 +2262,8 @@ struct rate_control_ops {
3947 + struct ieee80211_sta *sta, void *priv_sta);
3948 + void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
3949 + struct ieee80211_sta *sta,
3950 +- void *priv_sta, u32 changed);
3951 ++ void *priv_sta, u32 changed,
3952 ++ enum nl80211_channel_type oper_chan_type);
3953 + void (*free_sta)(void *priv, struct ieee80211_sta *sta,
3954 + void *priv_sta);
3955 +
3956 +diff --git a/include/net/netlink.h b/include/net/netlink.h
3957 +index a63b219..668ad04 100644
3958 +--- a/include/net/netlink.h
3959 ++++ b/include/net/netlink.h
3960 +@@ -945,7 +945,11 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
3961 + */
3962 + static inline __be64 nla_get_be64(const struct nlattr *nla)
3963 + {
3964 +- return *(__be64 *) nla_data(nla);
3965 ++ __be64 tmp;
3966 ++
3967 ++ nla_memcpy(&tmp, nla, sizeof(tmp));
3968 ++
3969 ++ return tmp;
3970 + }
3971 +
3972 + /**
3973 +diff --git a/include/net/sock.h b/include/net/sock.h
3974 +index 3f1a480..86f2da1 100644
3975 +--- a/include/net/sock.h
3976 ++++ b/include/net/sock.h
3977 +@@ -253,6 +253,8 @@ struct sock {
3978 + struct {
3979 + struct sk_buff *head;
3980 + struct sk_buff *tail;
3981 ++ int len;
3982 ++ int limit;
3983 + } sk_backlog;
3984 + wait_queue_head_t *sk_sleep;
3985 + struct dst_entry *sk_dst_cache;
3986 +@@ -574,8 +576,8 @@ static inline int sk_stream_memory_free(struct sock *sk)
3987 + return sk->sk_wmem_queued < sk->sk_sndbuf;
3988 + }
3989 +
3990 +-/* The per-socket spinlock must be held here. */
3991 +-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3992 ++/* OOB backlog add */
3993 ++static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3994 + {
3995 + if (!sk->sk_backlog.tail) {
3996 + sk->sk_backlog.head = sk->sk_backlog.tail = skb;
3997 +@@ -586,6 +588,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3998 + skb->next = NULL;
3999 + }
4000 +
4001 ++/* The per-socket spinlock must be held here. */
4002 ++static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
4003 ++{
4004 ++ if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
4005 ++ return -ENOBUFS;
4006 ++
4007 ++ __sk_add_backlog(sk, skb);
4008 ++ sk->sk_backlog.len += skb->truesize;
4009 ++ return 0;
4010 ++}
4011 ++
4012 + static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
4013 + {
4014 + return sk->sk_backlog_rcv(sk, skb);
4015 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
4016 +index 60c2770..1e355d8 100644
4017 +--- a/include/net/xfrm.h
4018 ++++ b/include/net/xfrm.h
4019 +@@ -274,7 +274,8 @@ struct xfrm_policy_afinfo {
4020 + struct dst_entry *dst,
4021 + int nfheader_len);
4022 + int (*fill_dst)(struct xfrm_dst *xdst,
4023 +- struct net_device *dev);
4024 ++ struct net_device *dev,
4025 ++ struct flowi *fl);
4026 + };
4027 +
4028 + extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
4029 +diff --git a/init/main.c b/init/main.c
4030 +index 4cb47a1..512ba15 100644
4031 +--- a/init/main.c
4032 ++++ b/init/main.c
4033 +@@ -846,7 +846,7 @@ static int __init kernel_init(void * unused)
4034 + /*
4035 + * init can allocate pages on any node
4036 + */
4037 +- set_mems_allowed(node_possible_map);
4038 ++ set_mems_allowed(node_states[N_HIGH_MEMORY]);
4039 + /*
4040 + * init can run on any cpu.
4041 + */
4042 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
4043 +index c79bd57..04985a7 100644
4044 +--- a/ipc/mqueue.c
4045 ++++ b/ipc/mqueue.c
4046 +@@ -705,7 +705,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
4047 + dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
4048 + if (IS_ERR(dentry)) {
4049 + error = PTR_ERR(dentry);
4050 +- goto out_err;
4051 ++ goto out_putfd;
4052 + }
4053 + mntget(ipc_ns->mq_mnt);
4054 +
4055 +@@ -742,7 +742,6 @@ out:
4056 + mntput(ipc_ns->mq_mnt);
4057 + out_putfd:
4058 + put_unused_fd(fd);
4059 +-out_err:
4060 + fd = error;
4061 + out_upsem:
4062 + mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
4063 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4064 +index ba401fa..5d38bd7 100644
4065 +--- a/kernel/cpuset.c
4066 ++++ b/kernel/cpuset.c
4067 +@@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
4068 + * call to guarantee_online_mems(), as we know no one is changing
4069 + * our task's cpuset.
4070 + *
4071 +- * Hold callback_mutex around the two modifications of our tasks
4072 +- * mems_allowed to synchronize with cpuset_mems_allowed().
4073 +- *
4074 + * While the mm_struct we are migrating is typically from some
4075 + * other task, the task_struct mems_allowed that we are hacking
4076 + * is for our current task, which must allocate new pages for that
4077 +@@ -1391,11 +1388,10 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
4078 +
4079 + if (cs == &top_cpuset) {
4080 + cpumask_copy(cpus_attach, cpu_possible_mask);
4081 +- to = node_possible_map;
4082 + } else {
4083 + guarantee_online_cpus(cs, cpus_attach);
4084 +- guarantee_online_mems(cs, &to);
4085 + }
4086 ++ guarantee_online_mems(cs, &to);
4087 +
4088 + /* do per-task migration stuff possibly for each in the threadgroup */
4089 + cpuset_attach_task(tsk, &to, cs);
4090 +@@ -2090,15 +2086,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
4091 + static int cpuset_track_online_nodes(struct notifier_block *self,
4092 + unsigned long action, void *arg)
4093 + {
4094 ++ nodemask_t oldmems;
4095 ++
4096 + cgroup_lock();
4097 + switch (action) {
4098 + case MEM_ONLINE:
4099 +- case MEM_OFFLINE:
4100 ++ oldmems = top_cpuset.mems_allowed;
4101 + mutex_lock(&callback_mutex);
4102 + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
4103 + mutex_unlock(&callback_mutex);
4104 +- if (action == MEM_OFFLINE)
4105 +- scan_for_empty_cpusets(&top_cpuset);
4106 ++ update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
4107 ++ break;
4108 ++ case MEM_OFFLINE:
4109 ++ /*
4110 ++ * needn't update top_cpuset.mems_allowed explicitly because
4111 ++ * scan_for_empty_cpusets() will update it.
4112 ++ */
4113 ++ scan_for_empty_cpusets(&top_cpuset);
4114 + break;
4115 + default:
4116 + break;
4117 +diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
4118 +index 967e661..4d99512 100644
4119 +--- a/kernel/hw_breakpoint.c
4120 ++++ b/kernel/hw_breakpoint.c
4121 +@@ -489,5 +489,4 @@ struct pmu perf_ops_bp = {
4122 + .enable = arch_install_hw_breakpoint,
4123 + .disable = arch_uninstall_hw_breakpoint,
4124 + .read = hw_breakpoint_pmu_read,
4125 +- .unthrottle = hw_breakpoint_pmu_unthrottle
4126 + };
4127 +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
4128 +index d70394f..71eba24 100644
4129 +--- a/kernel/irq/chip.c
4130 ++++ b/kernel/irq/chip.c
4131 +@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
4132 + if (desc->chip->ack)
4133 + desc->chip->ack(irq);
4134 + }
4135 ++ desc->status |= IRQ_MASKED;
4136 ++}
4137 ++
4138 ++static inline void mask_irq(struct irq_desc *desc, int irq)
4139 ++{
4140 ++ if (desc->chip->mask) {
4141 ++ desc->chip->mask(irq);
4142 ++ desc->status |= IRQ_MASKED;
4143 ++ }
4144 ++}
4145 ++
4146 ++static inline void unmask_irq(struct irq_desc *desc, int irq)
4147 ++{
4148 ++ if (desc->chip->unmask) {
4149 ++ desc->chip->unmask(irq);
4150 ++ desc->status &= ~IRQ_MASKED;
4151 ++ }
4152 + }
4153 +
4154 + /*
4155 +@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
4156 + raw_spin_lock(&desc->lock);
4157 + desc->status &= ~IRQ_INPROGRESS;
4158 +
4159 +- if (unlikely(desc->status & IRQ_ONESHOT))
4160 +- desc->status |= IRQ_MASKED;
4161 +- else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
4162 +- desc->chip->unmask(irq);
4163 ++ if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
4164 ++ unmask_irq(desc, irq);
4165 + out_unlock:
4166 + raw_spin_unlock(&desc->lock);
4167 + }
4168 +@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
4169 + action = desc->action;
4170 + if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
4171 + desc->status |= IRQ_PENDING;
4172 +- if (desc->chip->mask)
4173 +- desc->chip->mask(irq);
4174 ++ mask_irq(desc, irq);
4175 + goto out;
4176 + }
4177 +
4178 +@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
4179 + irqreturn_t action_ret;
4180 +
4181 + if (unlikely(!action)) {
4182 +- desc->chip->mask(irq);
4183 ++ mask_irq(desc, irq);
4184 + goto out_unlock;
4185 + }
4186 +
4187 +@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
4188 + if (unlikely((desc->status &
4189 + (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
4190 + (IRQ_PENDING | IRQ_MASKED))) {
4191 +- desc->chip->unmask(irq);
4192 +- desc->status &= ~IRQ_MASKED;
4193 ++ unmask_irq(desc, irq);
4194 + }
4195 +
4196 + desc->status &= ~IRQ_PENDING;
4197 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4198 +index eb6078c..69a3d7b 100644
4199 +--- a/kernel/irq/manage.c
4200 ++++ b/kernel/irq/manage.c
4201 +@@ -483,8 +483,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
4202 + */
4203 + static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
4204 + {
4205 ++again:
4206 + chip_bus_lock(irq, desc);
4207 + raw_spin_lock_irq(&desc->lock);
4208 ++
4209 ++ /*
4210 ++ * Implausible though it may be we need to protect us against
4211 ++ * the following scenario:
4212 ++ *
4213 ++ * The thread is faster done than the hard interrupt handler
4214 ++ * on the other CPU. If we unmask the irq line then the
4215 ++ * interrupt can come in again and masks the line, leaves due
4216 ++ * to IRQ_INPROGRESS and the irq line is masked forever.
4217 ++ */
4218 ++ if (unlikely(desc->status & IRQ_INPROGRESS)) {
4219 ++ raw_spin_unlock_irq(&desc->lock);
4220 ++ chip_bus_sync_unlock(irq, desc);
4221 ++ cpu_relax();
4222 ++ goto again;
4223 ++ }
4224 ++
4225 + if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
4226 + desc->status &= ~IRQ_MASKED;
4227 + desc->chip->unmask(irq);
4228 +diff --git a/kernel/kthread.c b/kernel/kthread.c
4229 +index fbb6222..84c7f99 100644
4230 +--- a/kernel/kthread.c
4231 ++++ b/kernel/kthread.c
4232 +@@ -219,7 +219,7 @@ int kthreadd(void *unused)
4233 + set_task_comm(tsk, "kthreadd");
4234 + ignore_signals(tsk);
4235 + set_cpus_allowed_ptr(tsk, cpu_all_mask);
4236 +- set_mems_allowed(node_possible_map);
4237 ++ set_mems_allowed(node_states[N_HIGH_MEMORY]);
4238 +
4239 + current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
4240 +
4241 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
4242 +index b707465..32d0ae2 100644
4243 +--- a/kernel/perf_event.c
4244 ++++ b/kernel/perf_event.c
4245 +@@ -4027,8 +4027,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4246 + if (rctx < 0)
4247 + return;
4248 +
4249 +- data.addr = addr;
4250 +- data.raw = NULL;
4251 ++ perf_sample_data_init(&data, addr);
4252 +
4253 + do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4254 +
4255 +@@ -4073,11 +4072,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4256 + struct perf_event *event;
4257 + u64 period;
4258 +
4259 +- event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4260 ++ event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4261 + event->pmu->read(event);
4262 +
4263 +- data.addr = 0;
4264 +- data.raw = NULL;
4265 ++ perf_sample_data_init(&data, 0);
4266 + data.period = event->hw.last_period;
4267 + regs = get_irq_regs();
4268 + /*
4269 +@@ -4241,17 +4239,15 @@ static const struct pmu perf_ops_task_clock = {
4270 + void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4271 + int entry_size)
4272 + {
4273 ++ struct pt_regs *regs = get_irq_regs();
4274 ++ struct perf_sample_data data;
4275 + struct perf_raw_record raw = {
4276 + .size = entry_size,
4277 + .data = record,
4278 + };
4279 +
4280 +- struct perf_sample_data data = {
4281 +- .addr = addr,
4282 +- .raw = &raw,
4283 +- };
4284 +-
4285 +- struct pt_regs *regs = get_irq_regs();
4286 ++ perf_sample_data_init(&data, addr);
4287 ++ data.raw = &raw;
4288 +
4289 + if (!regs)
4290 + regs = task_pt_regs(current);
4291 +@@ -4367,8 +4363,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
4292 + struct perf_sample_data sample;
4293 + struct pt_regs *regs = data;
4294 +
4295 +- sample.raw = NULL;
4296 +- sample.addr = bp->attr.bp_addr;
4297 ++ perf_sample_data_init(&sample, bp->attr.bp_addr);
4298 +
4299 + if (!perf_exclude_event(bp, regs))
4300 + perf_swevent_add(bp, 1, 1, &sample, regs);
4301 +@@ -5251,12 +5246,22 @@ int perf_event_init_task(struct task_struct *child)
4302 + return ret;
4303 + }
4304 +
4305 ++static void __init perf_event_init_all_cpus(void)
4306 ++{
4307 ++ int cpu;
4308 ++ struct perf_cpu_context *cpuctx;
4309 ++
4310 ++ for_each_possible_cpu(cpu) {
4311 ++ cpuctx = &per_cpu(perf_cpu_context, cpu);
4312 ++ __perf_event_init_context(&cpuctx->ctx, NULL);
4313 ++ }
4314 ++}
4315 ++
4316 + static void __cpuinit perf_event_init_cpu(int cpu)
4317 + {
4318 + struct perf_cpu_context *cpuctx;
4319 +
4320 + cpuctx = &per_cpu(perf_cpu_context, cpu);
4321 +- __perf_event_init_context(&cpuctx->ctx, NULL);
4322 +
4323 + spin_lock(&perf_resource_lock);
4324 + cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
4325 +@@ -5327,6 +5332,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
4326 +
4327 + void __init perf_event_init(void)
4328 + {
4329 ++ perf_event_init_all_cpus();
4330 + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4331 + (void *)(long)smp_processor_id());
4332 + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4333 +diff --git a/kernel/sched.c b/kernel/sched.c
4334 +index 00a59b0..7ca9345 100644
4335 +--- a/kernel/sched.c
4336 ++++ b/kernel/sched.c
4337 +@@ -3423,6 +3423,7 @@ struct sd_lb_stats {
4338 + unsigned long max_load;
4339 + unsigned long busiest_load_per_task;
4340 + unsigned long busiest_nr_running;
4341 ++ unsigned long busiest_group_capacity;
4342 +
4343 + int group_imb; /* Is there imbalance in this sd */
4344 + #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4345 +@@ -3742,8 +3743,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4346 + unsigned long load, max_cpu_load, min_cpu_load;
4347 + int i;
4348 + unsigned int balance_cpu = -1, first_idle_cpu = 0;
4349 +- unsigned long sum_avg_load_per_task;
4350 +- unsigned long avg_load_per_task;
4351 ++ unsigned long avg_load_per_task = 0;
4352 +
4353 + if (local_group) {
4354 + balance_cpu = group_first_cpu(group);
4355 +@@ -3752,7 +3752,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4356 + }
4357 +
4358 + /* Tally up the load of all CPUs in the group */
4359 +- sum_avg_load_per_task = avg_load_per_task = 0;
4360 + max_cpu_load = 0;
4361 + min_cpu_load = ~0UL;
4362 +
4363 +@@ -3782,7 +3781,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4364 + sgs->sum_nr_running += rq->nr_running;
4365 + sgs->sum_weighted_load += weighted_cpuload(i);
4366 +
4367 +- sum_avg_load_per_task += cpu_avg_load_per_task(i);
4368 + }
4369 +
4370 + /*
4371 +@@ -3800,7 +3798,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4372 + /* Adjust by relative CPU power of the group */
4373 + sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
4374 +
4375 +-
4376 + /*
4377 + * Consider the group unbalanced when the imbalance is larger
4378 + * than the average weight of two tasks.
4379 +@@ -3810,8 +3807,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4380 + * normalized nr_running number somewhere that negates
4381 + * the hierarchy?
4382 + */
4383 +- avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
4384 +- group->cpu_power;
4385 ++ if (sgs->sum_nr_running)
4386 ++ avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4387 +
4388 + if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
4389 + sgs->group_imb = 1;
4390 +@@ -3880,6 +3877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
4391 + sds->max_load = sgs.avg_load;
4392 + sds->busiest = group;
4393 + sds->busiest_nr_running = sgs.sum_nr_running;
4394 ++ sds->busiest_group_capacity = sgs.group_capacity;
4395 + sds->busiest_load_per_task = sgs.sum_weighted_load;
4396 + sds->group_imb = sgs.group_imb;
4397 + }
4398 +@@ -3902,6 +3900,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4399 + {
4400 + unsigned long tmp, pwr_now = 0, pwr_move = 0;
4401 + unsigned int imbn = 2;
4402 ++ unsigned long scaled_busy_load_per_task;
4403 +
4404 + if (sds->this_nr_running) {
4405 + sds->this_load_per_task /= sds->this_nr_running;
4406 +@@ -3912,8 +3911,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4407 + sds->this_load_per_task =
4408 + cpu_avg_load_per_task(this_cpu);
4409 +
4410 +- if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
4411 +- sds->busiest_load_per_task * imbn) {
4412 ++ scaled_busy_load_per_task = sds->busiest_load_per_task
4413 ++ * SCHED_LOAD_SCALE;
4414 ++ scaled_busy_load_per_task /= sds->busiest->cpu_power;
4415 ++
4416 ++ if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4417 ++ (scaled_busy_load_per_task * imbn)) {
4418 + *imbalance = sds->busiest_load_per_task;
4419 + return;
4420 + }
4421 +@@ -3964,7 +3967,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4422 + static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
4423 + unsigned long *imbalance)
4424 + {
4425 +- unsigned long max_pull;
4426 ++ unsigned long max_pull, load_above_capacity = ~0UL;
4427 ++
4428 ++ sds->busiest_load_per_task /= sds->busiest_nr_running;
4429 ++ if (sds->group_imb) {
4430 ++ sds->busiest_load_per_task =
4431 ++ min(sds->busiest_load_per_task, sds->avg_load);
4432 ++ }
4433 ++
4434 + /*
4435 + * In the presence of smp nice balancing, certain scenarios can have
4436 + * max load less than avg load(as we skip the groups at or below
4437 +@@ -3975,9 +3985,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
4438 + return fix_small_imbalance(sds, this_cpu, imbalance);
4439 + }
4440 +
4441 +- /* Don't want to pull so many tasks that a group would go idle */
4442 +- max_pull = min(sds->max_load - sds->avg_load,
4443 +- sds->max_load - sds->busiest_load_per_task);
4444 ++ if (!sds->group_imb) {
4445 ++ /*
4446 ++ * Don't want to pull so many tasks that a group would go idle.
4447 ++ */
4448 ++ load_above_capacity = (sds->busiest_nr_running -
4449 ++ sds->busiest_group_capacity);
4450 ++
4451 ++ load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
4452 ++
4453 ++ load_above_capacity /= sds->busiest->cpu_power;
4454 ++ }
4455 ++
4456 ++ /*
4457 ++ * We're trying to get all the cpus to the average_load, so we don't
4458 ++ * want to push ourselves above the average load, nor do we wish to
4459 ++ * reduce the max loaded cpu below the average load. At the same time,
4460 ++ * we also don't want to reduce the group load below the group capacity
4461 ++ * (so that we can implement power-savings policies etc). Thus we look
4462 ++ * for the minimum possible imbalance.
4463 ++ * Be careful of negative numbers as they'll appear as very large values
4464 ++ * with unsigned longs.
4465 ++ */
4466 ++ max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4467 +
4468 + /* How much load to actually move to equalise the imbalance */
4469 + *imbalance = min(max_pull * sds->busiest->cpu_power,
4470 +@@ -4045,7 +4075,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
4471 + * 4) This group is more busy than the avg busieness at this
4472 + * sched_domain.
4473 + * 5) The imbalance is within the specified limit.
4474 +- * 6) Any rebalance would lead to ping-pong
4475 + */
4476 + if (balance && !(*balance))
4477 + goto ret;
4478 +@@ -4064,25 +4093,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
4479 + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
4480 + goto out_balanced;
4481 +
4482 +- sds.busiest_load_per_task /= sds.busiest_nr_running;
4483 +- if (sds.group_imb)
4484 +- sds.busiest_load_per_task =
4485 +- min(sds.busiest_load_per_task, sds.avg_load);
4486 +-
4487 +- /*
4488 +- * We're trying to get all the cpus to the average_load, so we don't
4489 +- * want to push ourselves above the average load, nor do we wish to
4490 +- * reduce the max loaded cpu below the average load, as either of these
4491 +- * actions would just result in more rebalancing later, and ping-pong
4492 +- * tasks around. Thus we look for the minimum possible imbalance.
4493 +- * Negative imbalances (*we* are more loaded than anyone else) will
4494 +- * be counted as no imbalance for these purposes -- we can't fix that
4495 +- * by pulling tasks to us. Be careful of negative numbers as they'll
4496 +- * appear as very large values with unsigned longs.
4497 +- */
4498 +- if (sds.max_load <= sds.busiest_load_per_task)
4499 +- goto out_balanced;
4500 +-
4501 + /* Looks like there is an imbalance. Compute it */
4502 + calculate_imbalance(&sds, this_cpu, imbalance);
4503 + return sds.busiest;
4504 +diff --git a/kernel/softlockup.c b/kernel/softlockup.c
4505 +index 0d4c789..4b493f6 100644
4506 +--- a/kernel/softlockup.c
4507 ++++ b/kernel/softlockup.c
4508 +@@ -155,11 +155,11 @@ void softlockup_tick(void)
4509 + * Wake up the high-prio watchdog task twice per
4510 + * threshold timespan.
4511 + */
4512 +- if (now > touch_ts + softlockup_thresh/2)
4513 ++ if (time_after(now - softlockup_thresh/2, touch_ts))
4514 + wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
4515 +
4516 + /* Warn about unreasonable delays: */
4517 +- if (now <= (touch_ts + softlockup_thresh))
4518 ++ if (time_before_eq(now - softlockup_thresh, touch_ts))
4519 + return;
4520 +
4521 + per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
4522 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4523 +index 1370083..0e98497 100644
4524 +--- a/kernel/time/clocksource.c
4525 ++++ b/kernel/time/clocksource.c
4526 +@@ -580,6 +580,10 @@ static inline void clocksource_select(void) { }
4527 + */
4528 + static int __init clocksource_done_booting(void)
4529 + {
4530 ++ mutex_lock(&clocksource_mutex);
4531 ++ curr_clocksource = clocksource_default_clock();
4532 ++ mutex_unlock(&clocksource_mutex);
4533 ++
4534 + finished_booting = 1;
4535 +
4536 + /*
4537 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4538 +index 1e6640f..404c9ba 100644
4539 +--- a/kernel/trace/ftrace.c
4540 ++++ b/kernel/trace/ftrace.c
4541 +@@ -3364,6 +3364,7 @@ void ftrace_graph_init_task(struct task_struct *t)
4542 + {
4543 + /* Make sure we do not use the parent ret_stack */
4544 + t->ret_stack = NULL;
4545 ++ t->curr_ret_stack = -1;
4546 +
4547 + if (ftrace_graph_active) {
4548 + struct ftrace_ret_stack *ret_stack;
4549 +@@ -3373,7 +3374,6 @@ void ftrace_graph_init_task(struct task_struct *t)
4550 + GFP_KERNEL);
4551 + if (!ret_stack)
4552 + return;
4553 +- t->curr_ret_stack = -1;
4554 + atomic_set(&t->tracing_graph_pause, 0);
4555 + atomic_set(&t->trace_overrun, 0);
4556 + t->ftrace_timestamp = 0;
4557 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
4558 +index 8c1b2d2..54191d6 100644
4559 +--- a/kernel/trace/ring_buffer.c
4560 ++++ b/kernel/trace/ring_buffer.c
4561 +@@ -2232,12 +2232,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
4562 + if (ring_buffer_flags != RB_BUFFERS_ON)
4563 + return NULL;
4564 +
4565 +- if (atomic_read(&buffer->record_disabled))
4566 +- return NULL;
4567 +-
4568 + /* If we are tracing schedule, we don't want to recurse */
4569 + resched = ftrace_preempt_disable();
4570 +
4571 ++ if (atomic_read(&buffer->record_disabled))
4572 ++ goto out_nocheck;
4573 ++
4574 + if (trace_recursive_lock())
4575 + goto out_nocheck;
4576 +
4577 +@@ -2469,11 +2469,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
4578 + if (ring_buffer_flags != RB_BUFFERS_ON)
4579 + return -EBUSY;
4580 +
4581 +- if (atomic_read(&buffer->record_disabled))
4582 +- return -EBUSY;
4583 +-
4584 + resched = ftrace_preempt_disable();
4585 +
4586 ++ if (atomic_read(&buffer->record_disabled))
4587 ++ goto out;
4588 ++
4589 + cpu = raw_smp_processor_id();
4590 +
4591 + if (!cpumask_test_cpu(cpu, buffer->cpumask))
4592 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4593 +index eac6875..45cfb6d 100644
4594 +--- a/kernel/trace/trace.c
4595 ++++ b/kernel/trace/trace.c
4596 +@@ -747,10 +747,10 @@ out:
4597 + mutex_unlock(&trace_types_lock);
4598 + }
4599 +
4600 +-static void __tracing_reset(struct trace_array *tr, int cpu)
4601 ++static void __tracing_reset(struct ring_buffer *buffer, int cpu)
4602 + {
4603 + ftrace_disable_cpu();
4604 +- ring_buffer_reset_cpu(tr->buffer, cpu);
4605 ++ ring_buffer_reset_cpu(buffer, cpu);
4606 + ftrace_enable_cpu();
4607 + }
4608 +
4609 +@@ -762,7 +762,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
4610 +
4611 + /* Make sure all commits have finished */
4612 + synchronize_sched();
4613 +- __tracing_reset(tr, cpu);
4614 ++ __tracing_reset(buffer, cpu);
4615 +
4616 + ring_buffer_record_enable(buffer);
4617 + }
4618 +@@ -780,7 +780,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
4619 + tr->time_start = ftrace_now(tr->cpu);
4620 +
4621 + for_each_online_cpu(cpu)
4622 +- __tracing_reset(tr, cpu);
4623 ++ __tracing_reset(buffer, cpu);
4624 +
4625 + ring_buffer_record_enable(buffer);
4626 + }
4627 +@@ -857,6 +857,8 @@ void tracing_start(void)
4628 + goto out;
4629 + }
4630 +
4631 ++ /* Prevent the buffers from switching */
4632 ++ arch_spin_lock(&ftrace_max_lock);
4633 +
4634 + buffer = global_trace.buffer;
4635 + if (buffer)
4636 +@@ -866,6 +868,8 @@ void tracing_start(void)
4637 + if (buffer)
4638 + ring_buffer_record_enable(buffer);
4639 +
4640 ++ arch_spin_unlock(&ftrace_max_lock);
4641 ++
4642 + ftrace_start();
4643 + out:
4644 + spin_unlock_irqrestore(&tracing_start_lock, flags);
4645 +@@ -887,6 +891,9 @@ void tracing_stop(void)
4646 + if (trace_stop_count++)
4647 + goto out;
4648 +
4649 ++ /* Prevent the buffers from switching */
4650 ++ arch_spin_lock(&ftrace_max_lock);
4651 ++
4652 + buffer = global_trace.buffer;
4653 + if (buffer)
4654 + ring_buffer_record_disable(buffer);
4655 +@@ -895,6 +902,8 @@ void tracing_stop(void)
4656 + if (buffer)
4657 + ring_buffer_record_disable(buffer);
4658 +
4659 ++ arch_spin_unlock(&ftrace_max_lock);
4660 ++
4661 + out:
4662 + spin_unlock_irqrestore(&tracing_start_lock, flags);
4663 + }
4664 +@@ -1182,6 +1191,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
4665 + if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
4666 + return;
4667 +
4668 ++ /*
4669 ++ * NMIs can not handle page faults, even with fix ups.
4670 ++ * The save user stack can (and often does) fault.
4671 ++ */
4672 ++ if (unlikely(in_nmi()))
4673 ++ return;
4674 ++
4675 + event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
4676 + sizeof(*entry), flags, pc);
4677 + if (!event)
4678 +@@ -1628,6 +1644,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
4679 +
4680 + ftrace_enable_cpu();
4681 +
4682 ++ iter->leftover = 0;
4683 + for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4684 + ;
4685 +
4686 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4687 +index 290fb5b..0beac93 100644
4688 +--- a/mm/mempolicy.c
4689 ++++ b/mm/mempolicy.c
4690 +@@ -2167,8 +2167,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4691 + char *rest = nodelist;
4692 + while (isdigit(*rest))
4693 + rest++;
4694 +- if (!*rest)
4695 +- err = 0;
4696 ++ if (*rest)
4697 ++ goto out;
4698 + }
4699 + break;
4700 + case MPOL_INTERLEAVE:
4701 +@@ -2177,7 +2177,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4702 + */
4703 + if (!nodelist)
4704 + nodes = node_states[N_HIGH_MEMORY];
4705 +- err = 0;
4706 + break;
4707 + case MPOL_LOCAL:
4708 + /*
4709 +@@ -2187,11 +2186,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4710 + goto out;
4711 + mode = MPOL_PREFERRED;
4712 + break;
4713 +-
4714 +- /*
4715 +- * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
4716 +- * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
4717 +- */
4718 ++ case MPOL_DEFAULT:
4719 ++ /*
4720 ++ * Insist on a empty nodelist
4721 ++ */
4722 ++ if (!nodelist)
4723 ++ err = 0;
4724 ++ goto out;
4725 ++ case MPOL_BIND:
4726 ++ /*
4727 ++ * Insist on a nodelist
4728 ++ */
4729 ++ if (!nodelist)
4730 ++ goto out;
4731 + }
4732 +
4733 + mode_flags = 0;
4734 +@@ -2205,13 +2212,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4735 + else if (!strcmp(flags, "relative"))
4736 + mode_flags |= MPOL_F_RELATIVE_NODES;
4737 + else
4738 +- err = 1;
4739 ++ goto out;
4740 + }
4741 +
4742 + new = mpol_new(mode, mode_flags, &nodes);
4743 + if (IS_ERR(new))
4744 +- err = 1;
4745 +- else {
4746 ++ goto out;
4747 ++
4748 ++ {
4749 + int ret;
4750 + NODEMASK_SCRATCH(scratch);
4751 + if (scratch) {
4752 +@@ -2222,13 +2230,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4753 + ret = -ENOMEM;
4754 + NODEMASK_SCRATCH_FREE(scratch);
4755 + if (ret) {
4756 +- err = 1;
4757 + mpol_put(new);
4758 +- } else if (no_context) {
4759 +- /* save for contextualization */
4760 +- new->w.user_nodemask = nodes;
4761 ++ goto out;
4762 + }
4763 + }
4764 ++ err = 0;
4765 ++ if (no_context) {
4766 ++ /* save for contextualization */
4767 ++ new->w.user_nodemask = nodes;
4768 ++ }
4769 +
4770 + out:
4771 + /* Restore string for error message */
4772 +diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
4773 +index e75a2f3..152760a 100644
4774 +--- a/net/8021q/vlan_core.c
4775 ++++ b/net/8021q/vlan_core.c
4776 +@@ -11,7 +11,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
4777 + if (netpoll_rx(skb))
4778 + return NET_RX_DROP;
4779 +
4780 +- if (skb_bond_should_drop(skb))
4781 ++ if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
4782 + goto drop;
4783 +
4784 + __vlan_hwaccel_put_tag(skb, vlan_tci);
4785 +@@ -82,7 +82,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
4786 + {
4787 + struct sk_buff *p;
4788 +
4789 +- if (skb_bond_should_drop(skb))
4790 ++ if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
4791 + goto drop;
4792 +
4793 + __vlan_hwaccel_put_tag(skb, vlan_tci);
4794 +diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
4795 +index 400efa2..615fecc 100644
4796 +--- a/net/bluetooth/l2cap.c
4797 ++++ b/net/bluetooth/l2cap.c
4798 +@@ -2830,6 +2830,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
4799 + int len = cmd->len - sizeof(*rsp);
4800 + char req[64];
4801 +
4802 ++ if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4803 ++ l2cap_send_disconn_req(conn, sk);
4804 ++ goto done;
4805 ++ }
4806 ++
4807 + /* throw out any old stored conf requests */
4808 + result = L2CAP_CONF_SUCCESS;
4809 + len = l2cap_parse_conf_rsp(sk, rsp->data,
4810 +@@ -3942,16 +3947,24 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
4811 + struct sock *sk;
4812 + struct hlist_node *node;
4813 + char *str = buf;
4814 ++ int size = PAGE_SIZE;
4815 +
4816 + read_lock_bh(&l2cap_sk_list.lock);
4817 +
4818 + sk_for_each(sk, node, &l2cap_sk_list.head) {
4819 + struct l2cap_pinfo *pi = l2cap_pi(sk);
4820 ++ int len;
4821 +
4822 +- str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4823 ++ len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4824 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4825 + sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
4826 + pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
4827 ++
4828 ++ size -= len;
4829 ++ if (size <= 0)
4830 ++ break;
4831 ++
4832 ++ str += len;
4833 + }
4834 +
4835 + read_unlock_bh(&l2cap_sk_list.lock);
4836 +diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
4837 +index 89f4a59..3fe9c7c 100644
4838 +--- a/net/bluetooth/rfcomm/core.c
4839 ++++ b/net/bluetooth/rfcomm/core.c
4840 +@@ -2103,6 +2103,7 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4841 + struct rfcomm_session *s;
4842 + struct list_head *pp, *p;
4843 + char *str = buf;
4844 ++ int size = PAGE_SIZE;
4845 +
4846 + rfcomm_lock();
4847 +
4848 +@@ -2111,11 +2112,21 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4849 + list_for_each(pp, &s->dlcs) {
4850 + struct sock *sk = s->sock->sk;
4851 + struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
4852 ++ int len;
4853 +
4854 +- str += sprintf(str, "%s %s %ld %d %d %d %d\n",
4855 ++ len = snprintf(str, size, "%s %s %ld %d %d %d %d\n",
4856 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4857 + d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
4858 ++
4859 ++ size -= len;
4860 ++ if (size <= 0)
4861 ++ break;
4862 ++
4863 ++ str += len;
4864 + }
4865 ++
4866 ++ if (size <= 0)
4867 ++ break;
4868 + }
4869 +
4870 + rfcomm_unlock();
4871 +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
4872 +index 4b5968d..bc03b50 100644
4873 +--- a/net/bluetooth/rfcomm/sock.c
4874 ++++ b/net/bluetooth/rfcomm/sock.c
4875 +@@ -1066,13 +1066,22 @@ static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf)
4876 + struct sock *sk;
4877 + struct hlist_node *node;
4878 + char *str = buf;
4879 ++ int size = PAGE_SIZE;
4880 +
4881 + read_lock_bh(&rfcomm_sk_list.lock);
4882 +
4883 + sk_for_each(sk, node, &rfcomm_sk_list.head) {
4884 +- str += sprintf(str, "%s %s %d %d\n",
4885 ++ int len;
4886 ++
4887 ++ len = snprintf(str, size, "%s %s %d %d\n",
4888 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4889 + sk->sk_state, rfcomm_pi(sk)->channel);
4890 ++
4891 ++ size -= len;
4892 ++ if (size <= 0)
4893 ++ break;
4894 ++
4895 ++ str += len;
4896 + }
4897 +
4898 + read_unlock_bh(&rfcomm_sk_list.lock);
4899 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
4900 +index dd8f6ec..66cab63 100644
4901 +--- a/net/bluetooth/sco.c
4902 ++++ b/net/bluetooth/sco.c
4903 +@@ -958,13 +958,22 @@ static ssize_t sco_sysfs_show(struct class *dev, char *buf)
4904 + struct sock *sk;
4905 + struct hlist_node *node;
4906 + char *str = buf;
4907 ++ int size = PAGE_SIZE;
4908 +
4909 + read_lock_bh(&sco_sk_list.lock);
4910 +
4911 + sk_for_each(sk, node, &sco_sk_list.head) {
4912 +- str += sprintf(str, "%s %s %d\n",
4913 ++ int len;
4914 ++
4915 ++ len = snprintf(str, size, "%s %s %d\n",
4916 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4917 + sk->sk_state);
4918 ++
4919 ++ size -= len;
4920 ++ if (size <= 0)
4921 ++ break;
4922 ++
4923 ++ str += len;
4924 + }
4925 +
4926 + read_unlock_bh(&sco_sk_list.lock);
4927 +diff --git a/net/core/dev.c b/net/core/dev.c
4928 +index ec87421..f51f940 100644
4929 +--- a/net/core/dev.c
4930 ++++ b/net/core/dev.c
4931 +@@ -2421,6 +2421,7 @@ int netif_receive_skb(struct sk_buff *skb)
4932 + {
4933 + struct packet_type *ptype, *pt_prev;
4934 + struct net_device *orig_dev;
4935 ++ struct net_device *master;
4936 + struct net_device *null_or_orig;
4937 + int ret = NET_RX_DROP;
4938 + __be16 type;
4939 +@@ -2440,11 +2441,12 @@ int netif_receive_skb(struct sk_buff *skb)
4940 +
4941 + null_or_orig = NULL;
4942 + orig_dev = skb->dev;
4943 +- if (orig_dev->master) {
4944 +- if (skb_bond_should_drop(skb))
4945 ++ master = ACCESS_ONCE(orig_dev->master);
4946 ++ if (master) {
4947 ++ if (skb_bond_should_drop(skb, master))
4948 + null_or_orig = orig_dev; /* deliver only exact match */
4949 + else
4950 +- skb->dev = orig_dev->master;
4951 ++ skb->dev = master;
4952 + }
4953 +
4954 + __get_cpu_var(netdev_rx_stat).total++;
4955 +diff --git a/net/core/sock.c b/net/core/sock.c
4956 +index e1f6f22..5779f31 100644
4957 +--- a/net/core/sock.c
4958 ++++ b/net/core/sock.c
4959 +@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
4960 + rc = sk_backlog_rcv(sk, skb);
4961 +
4962 + mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
4963 +- } else
4964 +- sk_add_backlog(sk, skb);
4965 ++ } else if (sk_add_backlog(sk, skb)) {
4966 ++ bh_unlock_sock(sk);
4967 ++ atomic_inc(&sk->sk_drops);
4968 ++ goto discard_and_relse;
4969 ++ }
4970 ++
4971 + bh_unlock_sock(sk);
4972 + out:
4973 + sock_put(sk);
4974 +@@ -1138,6 +1142,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
4975 + sock_lock_init(newsk);
4976 + bh_lock_sock(newsk);
4977 + newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
4978 ++ newsk->sk_backlog.len = 0;
4979 +
4980 + atomic_set(&newsk->sk_rmem_alloc, 0);
4981 + /*
4982 +@@ -1541,6 +1546,12 @@ static void __release_sock(struct sock *sk)
4983 +
4984 + bh_lock_sock(sk);
4985 + } while ((skb = sk->sk_backlog.head) != NULL);
4986 ++
4987 ++ /*
4988 ++ * Doing the zeroing here guarantee we can not loop forever
4989 ++ * while a wild producer attempts to flood us.
4990 ++ */
4991 ++ sk->sk_backlog.len = 0;
4992 + }
4993 +
4994 + /**
4995 +@@ -1873,6 +1884,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
4996 + sk->sk_allocation = GFP_KERNEL;
4997 + sk->sk_rcvbuf = sysctl_rmem_default;
4998 + sk->sk_sndbuf = sysctl_wmem_default;
4999 ++ sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
5000 + sk->sk_state = TCP_CLOSE;
5001 + sk_set_socket(sk, sock);
5002 +
5003 +diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
5004 +index af226a0..0d508c3 100644
5005 +--- a/net/dccp/minisocks.c
5006 ++++ b/net/dccp/minisocks.c
5007 +@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
5008 + * in main socket hash table and lock on listening
5009 + * socket does not protect us more.
5010 + */
5011 +- sk_add_backlog(child, skb);
5012 ++ __sk_add_backlog(child, skb);
5013 + }
5014 +
5015 + bh_unlock_sock(child);
5016 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5017 +index f36ce15..68c1454 100644
5018 +--- a/net/ipv4/ip_gre.c
5019 ++++ b/net/ipv4/ip_gre.c
5020 +@@ -810,11 +810,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
5021 + tunnel->err_count = 0;
5022 + }
5023 +
5024 +- max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
5025 ++ max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
5026 +
5027 + if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
5028 + (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
5029 + struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
5030 ++ if (max_headroom > dev->needed_headroom)
5031 ++ dev->needed_headroom = max_headroom;
5032 + if (!new_skb) {
5033 + ip_rt_put(rt);
5034 + txq->tx_dropped++;
5035 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5036 +index d62b05d..af86e41 100644
5037 +--- a/net/ipv4/route.c
5038 ++++ b/net/ipv4/route.c
5039 +@@ -922,10 +922,8 @@ static void rt_secret_rebuild_oneshot(struct net *net)
5040 + {
5041 + del_timer_sync(&net->ipv4.rt_secret_timer);
5042 + rt_cache_invalidate(net);
5043 +- if (ip_rt_secret_interval) {
5044 +- net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
5045 +- add_timer(&net->ipv4.rt_secret_timer);
5046 +- }
5047 ++ if (ip_rt_secret_interval)
5048 ++ mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
5049 + }
5050 +
5051 + static void rt_emergency_hash_rebuild(struct net *net)
5052 +@@ -1417,7 +1415,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
5053 + dev_hold(rt->u.dst.dev);
5054 + if (rt->idev)
5055 + in_dev_hold(rt->idev);
5056 +- rt->u.dst.obsolete = 0;
5057 ++ rt->u.dst.obsolete = -1;
5058 + rt->u.dst.lastuse = jiffies;
5059 + rt->u.dst.path = &rt->u.dst;
5060 + rt->u.dst.neighbour = NULL;
5061 +@@ -1482,11 +1480,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
5062 + struct dst_entry *ret = dst;
5063 +
5064 + if (rt) {
5065 +- if (dst->obsolete) {
5066 ++ if (dst->obsolete > 0) {
5067 + ip_rt_put(rt);
5068 + ret = NULL;
5069 + } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
5070 +- rt->u.dst.expires) {
5071 ++ (rt->u.dst.expires &&
5072 ++ time_after_eq(jiffies, rt->u.dst.expires))) {
5073 + unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
5074 + rt->fl.oif,
5075 + rt_genid(dev_net(dst->dev)));
5076 +@@ -1702,7 +1701,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
5077 +
5078 + static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
5079 + {
5080 +- return NULL;
5081 ++ if (rt_is_expired((struct rtable *)dst))
5082 ++ return NULL;
5083 ++ return dst;
5084 + }
5085 +
5086 + static void ipv4_dst_destroy(struct dst_entry *dst)
5087 +@@ -1864,7 +1865,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
5088 + if (!rth)
5089 + goto e_nobufs;
5090 +
5091 +- rth->u.dst.output= ip_rt_bug;
5092 ++ rth->u.dst.output = ip_rt_bug;
5093 ++ rth->u.dst.obsolete = -1;
5094 +
5095 + atomic_set(&rth->u.dst.__refcnt, 1);
5096 + rth->u.dst.flags= DST_HOST;
5097 +@@ -2025,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
5098 + rth->fl.oif = 0;
5099 + rth->rt_spec_dst= spec_dst;
5100 +
5101 ++ rth->u.dst.obsolete = -1;
5102 + rth->u.dst.input = ip_forward;
5103 + rth->u.dst.output = ip_output;
5104 + rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
5105 +@@ -2189,6 +2192,7 @@ local_input:
5106 + goto e_nobufs;
5107 +
5108 + rth->u.dst.output= ip_rt_bug;
5109 ++ rth->u.dst.obsolete = -1;
5110 + rth->rt_genid = rt_genid(net);
5111 +
5112 + atomic_set(&rth->u.dst.__refcnt, 1);
5113 +@@ -2415,6 +2419,7 @@ static int __mkroute_output(struct rtable **result,
5114 + rth->rt_spec_dst= fl->fl4_src;
5115 +
5116 + rth->u.dst.output=ip_output;
5117 ++ rth->u.dst.obsolete = -1;
5118 + rth->rt_genid = rt_genid(dev_net(dev_out));
5119 +
5120 + RT_CACHE_STAT_INC(out_slow_tot);
5121 +@@ -3072,22 +3077,20 @@ static void rt_secret_reschedule(int old)
5122 + rtnl_lock();
5123 + for_each_net(net) {
5124 + int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
5125 ++ long time;
5126 +
5127 + if (!new)
5128 + continue;
5129 +
5130 + if (deleted) {
5131 +- long time = net->ipv4.rt_secret_timer.expires - jiffies;
5132 ++ time = net->ipv4.rt_secret_timer.expires - jiffies;
5133 +
5134 + if (time <= 0 || (time += diff) <= 0)
5135 + time = 0;
5136 +-
5137 +- net->ipv4.rt_secret_timer.expires = time;
5138 + } else
5139 +- net->ipv4.rt_secret_timer.expires = new;
5140 ++ time = new;
5141 +
5142 +- net->ipv4.rt_secret_timer.expires += jiffies;
5143 +- add_timer(&net->ipv4.rt_secret_timer);
5144 ++ mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
5145 + }
5146 + rtnl_unlock();
5147 + }
5148 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5149 +index b0a26bb..564a0f8 100644
5150 +--- a/net/ipv4/tcp.c
5151 ++++ b/net/ipv4/tcp.c
5152 +@@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
5153 + if (tp->urg_seq == tp->copied_seq &&
5154 + !sock_flag(sk, SOCK_URGINLINE) &&
5155 + tp->urg_data)
5156 +- target--;
5157 ++ target++;
5158 +
5159 + /* Potential race condition. If read of tp below will
5160 + * escape above sk->sk_state, we can be illegally awaken
5161 +@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
5162 + tp->ucopy.memory = 0;
5163 + }
5164 +
5165 ++#ifdef CONFIG_NET_DMA
5166 ++static void tcp_service_net_dma(struct sock *sk, bool wait)
5167 ++{
5168 ++ dma_cookie_t done, used;
5169 ++ dma_cookie_t last_issued;
5170 ++ struct tcp_sock *tp = tcp_sk(sk);
5171 ++
5172 ++ if (!tp->ucopy.dma_chan)
5173 ++ return;
5174 ++
5175 ++ last_issued = tp->ucopy.dma_cookie;
5176 ++ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5177 ++
5178 ++ do {
5179 ++ if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
5180 ++ last_issued, &done,
5181 ++ &used) == DMA_SUCCESS) {
5182 ++ /* Safe to free early-copied skbs now */
5183 ++ __skb_queue_purge(&sk->sk_async_wait_queue);
5184 ++ break;
5185 ++ } else {
5186 ++ struct sk_buff *skb;
5187 ++ while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
5188 ++ (dma_async_is_complete(skb->dma_cookie, done,
5189 ++ used) == DMA_SUCCESS)) {
5190 ++ __skb_dequeue(&sk->sk_async_wait_queue);
5191 ++ kfree_skb(skb);
5192 ++ }
5193 ++ }
5194 ++ } while (wait);
5195 ++}
5196 ++#endif
5197 ++
5198 + static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
5199 + {
5200 + struct sk_buff *skb;
5201 +@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5202 + /* __ Set realtime policy in scheduler __ */
5203 + }
5204 +
5205 ++#ifdef CONFIG_NET_DMA
5206 ++ if (tp->ucopy.dma_chan)
5207 ++ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5208 ++#endif
5209 + if (copied >= target) {
5210 + /* Do not sleep, just process backlog. */
5211 + release_sock(sk);
5212 +@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5213 + sk_wait_data(sk, &timeo);
5214 +
5215 + #ifdef CONFIG_NET_DMA
5216 ++ tcp_service_net_dma(sk, false); /* Don't block */
5217 + tp->ucopy.wakeup = 0;
5218 + #endif
5219 +
5220 +@@ -1633,6 +1671,9 @@ do_prequeue:
5221 + copied = -EFAULT;
5222 + break;
5223 + }
5224 ++
5225 ++ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5226 ++
5227 + if ((offset + used) == skb->len)
5228 + copied_early = 1;
5229 +
5230 +@@ -1702,27 +1743,9 @@ skip_copy:
5231 + }
5232 +
5233 + #ifdef CONFIG_NET_DMA
5234 +- if (tp->ucopy.dma_chan) {
5235 +- dma_cookie_t done, used;
5236 +-
5237 +- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5238 +-
5239 +- while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
5240 +- tp->ucopy.dma_cookie, &done,
5241 +- &used) == DMA_IN_PROGRESS) {
5242 +- /* do partial cleanup of sk_async_wait_queue */
5243 +- while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
5244 +- (dma_async_is_complete(skb->dma_cookie, done,
5245 +- used) == DMA_SUCCESS)) {
5246 +- __skb_dequeue(&sk->sk_async_wait_queue);
5247 +- kfree_skb(skb);
5248 +- }
5249 +- }
5250 ++ tcp_service_net_dma(sk, true); /* Wait for queue to drain */
5251 ++ tp->ucopy.dma_chan = NULL;
5252 +
5253 +- /* Safe to free early-copied skbs now */
5254 +- __skb_queue_purge(&sk->sk_async_wait_queue);
5255 +- tp->ucopy.dma_chan = NULL;
5256 +- }
5257 + if (tp->ucopy.pinned_list) {
5258 + dma_unpin_iovec_pages(tp->ucopy.pinned_list);
5259 + tp->ucopy.pinned_list = NULL;
5260 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5261 +index 3fddc69..b347d3c 100644
5262 +--- a/net/ipv4/tcp_input.c
5263 ++++ b/net/ipv4/tcp_input.c
5264 +@@ -2499,6 +2499,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
5265 + int err;
5266 + unsigned int mss;
5267 +
5268 ++ if (packets == 0)
5269 ++ return;
5270 ++
5271 + WARN_ON(packets > tp->packets_out);
5272 + if (tp->lost_skb_hint) {
5273 + skb = tp->lost_skb_hint;
5274 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5275 +index 65b8ebf..de935e3 100644
5276 +--- a/net/ipv4/tcp_ipv4.c
5277 ++++ b/net/ipv4/tcp_ipv4.c
5278 +@@ -1677,8 +1677,10 @@ process:
5279 + if (!tcp_prequeue(sk, skb))
5280 + ret = tcp_v4_do_rcv(sk, skb);
5281 + }
5282 +- } else
5283 +- sk_add_backlog(sk, skb);
5284 ++ } else if (sk_add_backlog(sk, skb)) {
5285 ++ bh_unlock_sock(sk);
5286 ++ goto discard_and_relse;
5287 ++ }
5288 + bh_unlock_sock(sk);
5289 +
5290 + sock_put(sk);
5291 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
5292 +index f206ee5..4199bc6 100644
5293 +--- a/net/ipv4/tcp_minisocks.c
5294 ++++ b/net/ipv4/tcp_minisocks.c
5295 +@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
5296 + * in main socket hash table and lock on listening
5297 + * socket does not protect us more.
5298 + */
5299 +- sk_add_backlog(child, skb);
5300 ++ __sk_add_backlog(child, skb);
5301 + }
5302 +
5303 + bh_unlock_sock(child);
5304 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5305 +index 383ce23..dc26654 100644
5306 +--- a/net/ipv4/tcp_output.c
5307 ++++ b/net/ipv4/tcp_output.c
5308 +@@ -2393,13 +2393,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
5309 + struct tcp_extend_values *xvp = tcp_xv(rvp);
5310 + struct inet_request_sock *ireq = inet_rsk(req);
5311 + struct tcp_sock *tp = tcp_sk(sk);
5312 ++ const struct tcp_cookie_values *cvp = tp->cookie_values;
5313 + struct tcphdr *th;
5314 + struct sk_buff *skb;
5315 + struct tcp_md5sig_key *md5;
5316 + int tcp_header_size;
5317 + int mss;
5318 ++ int s_data_desired = 0;
5319 +
5320 +- skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
5321 ++ if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
5322 ++ s_data_desired = cvp->s_data_desired;
5323 ++ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
5324 + if (skb == NULL)
5325 + return NULL;
5326 +
5327 +@@ -2454,16 +2458,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
5328 + TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
5329 +
5330 + if (OPTION_COOKIE_EXTENSION & opts.options) {
5331 +- const struct tcp_cookie_values *cvp = tp->cookie_values;
5332 +-
5333 +- if (cvp != NULL &&
5334 +- cvp->s_data_constant &&
5335 +- cvp->s_data_desired > 0) {
5336 +- u8 *buf = skb_put(skb, cvp->s_data_desired);
5337 ++ if (s_data_desired) {
5338 ++ u8 *buf = skb_put(skb, s_data_desired);
5339 +
5340 + /* copy data directly from the listening socket. */
5341 +- memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
5342 +- TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
5343 ++ memcpy(buf, cvp->s_data_payload, s_data_desired);
5344 ++ TCP_SKB_CB(skb)->end_seq += s_data_desired;
5345 + }
5346 +
5347 + if (opts.hash_size > 0) {
5348 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5349 +index f0126fd..112c611 100644
5350 +--- a/net/ipv4/udp.c
5351 ++++ b/net/ipv4/udp.c
5352 +@@ -1372,8 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
5353 + bh_lock_sock(sk);
5354 + if (!sock_owned_by_user(sk))
5355 + rc = __udp_queue_rcv_skb(sk, skb);
5356 +- else
5357 +- sk_add_backlog(sk, skb);
5358 ++ else if (sk_add_backlog(sk, skb)) {
5359 ++ bh_unlock_sock(sk);
5360 ++ goto drop;
5361 ++ }
5362 + bh_unlock_sock(sk);
5363 +
5364 + return rc;
5365 +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
5366 +index 67107d6..e4a1483 100644
5367 +--- a/net/ipv4/xfrm4_policy.c
5368 ++++ b/net/ipv4/xfrm4_policy.c
5369 +@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
5370 + return 0;
5371 + }
5372 +
5373 +-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5374 ++static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
5375 ++ struct flowi *fl)
5376 + {
5377 + struct rtable *rt = (struct rtable *)xdst->route;
5378 +
5379 +- xdst->u.rt.fl = rt->fl;
5380 ++ xdst->u.rt.fl = *fl;
5381 +
5382 + xdst->u.dst.dev = dev;
5383 + dev_hold(dev);
5384 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5385 +index c2bd74c..6232284 100644
5386 +--- a/net/ipv6/route.c
5387 ++++ b/net/ipv6/route.c
5388 +@@ -897,12 +897,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
5389 + struct rt6_info *rt = (struct rt6_info *) dst;
5390 +
5391 + if (rt) {
5392 +- if (rt->rt6i_flags & RTF_CACHE)
5393 +- ip6_del_rt(rt);
5394 +- else
5395 ++ if (rt->rt6i_flags & RTF_CACHE) {
5396 ++ if (rt6_check_expired(rt)) {
5397 ++ ip6_del_rt(rt);
5398 ++ dst = NULL;
5399 ++ }
5400 ++ } else {
5401 + dst_release(dst);
5402 ++ dst = NULL;
5403 ++ }
5404 + }
5405 +- return NULL;
5406 ++ return dst;
5407 + }
5408 +
5409 + static void ip6_link_failure(struct sk_buff *skb)
5410 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
5411 +index febfd59..548a06e 100644
5412 +--- a/net/ipv6/tcp_ipv6.c
5413 ++++ b/net/ipv6/tcp_ipv6.c
5414 +@@ -1732,8 +1732,10 @@ process:
5415 + if (!tcp_prequeue(sk, skb))
5416 + ret = tcp_v6_do_rcv(sk, skb);
5417 + }
5418 +- } else
5419 +- sk_add_backlog(sk, skb);
5420 ++ } else if (sk_add_backlog(sk, skb)) {
5421 ++ bh_unlock_sock(sk);
5422 ++ goto discard_and_relse;
5423 ++ }
5424 + bh_unlock_sock(sk);
5425 +
5426 + sock_put(sk);
5427 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5428 +index 69ebdbe..d9714d2 100644
5429 +--- a/net/ipv6/udp.c
5430 ++++ b/net/ipv6/udp.c
5431 +@@ -584,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
5432 + bh_lock_sock(sk);
5433 + if (!sock_owned_by_user(sk))
5434 + udpv6_queue_rcv_skb(sk, skb1);
5435 +- else
5436 +- sk_add_backlog(sk, skb1);
5437 ++ else if (sk_add_backlog(sk, skb1)) {
5438 ++ kfree_skb(skb1);
5439 ++ bh_unlock_sock(sk);
5440 ++ goto drop;
5441 ++ }
5442 + bh_unlock_sock(sk);
5443 +- } else {
5444 +- atomic_inc(&sk->sk_drops);
5445 +- UDP6_INC_STATS_BH(sock_net(sk),
5446 +- UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
5447 +- UDP6_INC_STATS_BH(sock_net(sk),
5448 +- UDP_MIB_INERRORS, IS_UDPLITE(sk));
5449 ++ continue;
5450 + }
5451 ++drop:
5452 ++ atomic_inc(&sk->sk_drops);
5453 ++ UDP6_INC_STATS_BH(sock_net(sk),
5454 ++ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
5455 ++ UDP6_INC_STATS_BH(sock_net(sk),
5456 ++ UDP_MIB_INERRORS, IS_UDPLITE(sk));
5457 + }
5458 + }
5459 + /*
5460 +@@ -756,8 +760,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
5461 + bh_lock_sock(sk);
5462 + if (!sock_owned_by_user(sk))
5463 + udpv6_queue_rcv_skb(sk, skb);
5464 +- else
5465 +- sk_add_backlog(sk, skb);
5466 ++ else if (sk_add_backlog(sk, skb)) {
5467 ++ atomic_inc(&sk->sk_drops);
5468 ++ bh_unlock_sock(sk);
5469 ++ sock_put(sk);
5470 ++ goto discard;
5471 ++ }
5472 + bh_unlock_sock(sk);
5473 + sock_put(sk);
5474 + return 0;
5475 +diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
5476 +index dbdc696..ae18165 100644
5477 +--- a/net/ipv6/xfrm6_policy.c
5478 ++++ b/net/ipv6/xfrm6_policy.c
5479 +@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
5480 + return 0;
5481 + }
5482 +
5483 +-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5484 ++static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
5485 ++ struct flowi *fl)
5486 + {
5487 + struct rt6_info *rt = (struct rt6_info*)xdst->route;
5488 +
5489 +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
5490 +index 019c780..86d6985 100644
5491 +--- a/net/llc/llc_c_ac.c
5492 ++++ b/net/llc/llc_c_ac.c
5493 +@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
5494 + llc_conn_state_process(sk, skb);
5495 + else {
5496 + llc_set_backlog_type(skb, LLC_EVENT);
5497 +- sk_add_backlog(sk, skb);
5498 ++ __sk_add_backlog(sk, skb);
5499 + }
5500 + }
5501 + }
5502 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
5503 +index c6bab39..c61ca88 100644
5504 +--- a/net/llc/llc_conn.c
5505 ++++ b/net/llc/llc_conn.c
5506 +@@ -756,7 +756,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
5507 + else {
5508 + dprintk("%s: adding to backlog...\n", __func__);
5509 + llc_set_backlog_type(skb, LLC_PACKET);
5510 +- sk_add_backlog(sk, skb);
5511 ++ if (sk_add_backlog(sk, skb))
5512 ++ goto drop_unlock;
5513 + }
5514 + out:
5515 + bh_unlock_sock(sk);
5516 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
5517 +index 91dc863..3521c17 100644
5518 +--- a/net/mac80211/ieee80211_i.h
5519 ++++ b/net/mac80211/ieee80211_i.h
5520 +@@ -264,6 +264,7 @@ enum ieee80211_sta_flags {
5521 + IEEE80211_STA_DISABLE_11N = BIT(4),
5522 + IEEE80211_STA_CSA_RECEIVED = BIT(5),
5523 + IEEE80211_STA_MFP_ENABLED = BIT(6),
5524 ++ IEEE80211_STA_NULLFUNC_ACKED = BIT(7),
5525 + };
5526 +
5527 + /* flags for MLME request */
5528 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
5529 +index 05a18f4..1a209ac 100644
5530 +--- a/net/mac80211/mlme.c
5531 ++++ b/net/mac80211/mlme.c
5532 +@@ -205,7 +205,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
5533 + sta = sta_info_get(local, bssid);
5534 + if (sta)
5535 + rate_control_rate_update(local, sband, sta,
5536 +- IEEE80211_RC_HT_CHANGED);
5537 ++ IEEE80211_RC_HT_CHANGED,
5538 ++ local->oper_channel_type);
5539 + rcu_read_unlock();
5540 + }
5541 +
5542 +@@ -661,8 +662,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
5543 + } else {
5544 + if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
5545 + ieee80211_send_nullfunc(local, sdata, 1);
5546 +- conf->flags |= IEEE80211_CONF_PS;
5547 +- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5548 ++
5549 ++ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
5550 ++ conf->flags |= IEEE80211_CONF_PS;
5551 ++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5552 ++ }
5553 + }
5554 + }
5555 +
5556 +@@ -753,6 +757,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
5557 + container_of(work, struct ieee80211_local,
5558 + dynamic_ps_enable_work);
5559 + struct ieee80211_sub_if_data *sdata = local->ps_sdata;
5560 ++ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5561 +
5562 + /* can only happen when PS was just disabled anyway */
5563 + if (!sdata)
5564 +@@ -761,11 +766,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
5565 + if (local->hw.conf.flags & IEEE80211_CONF_PS)
5566 + return;
5567 +
5568 +- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
5569 ++ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
5570 ++ (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
5571 + ieee80211_send_nullfunc(local, sdata, 1);
5572 +
5573 +- local->hw.conf.flags |= IEEE80211_CONF_PS;
5574 +- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5575 ++ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
5576 ++ (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
5577 ++ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
5578 ++ local->hw.conf.flags |= IEEE80211_CONF_PS;
5579 ++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5580 ++ }
5581 + }
5582 +
5583 + void ieee80211_dynamic_ps_timer(unsigned long data)
5584 +@@ -2467,6 +2477,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
5585 + list_add(&wk->list, &ifmgd->work_list);
5586 +
5587 + ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
5588 ++ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
5589 +
5590 + for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
5591 + if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
5592 +diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
5593 +index cb9bd1f..3e02ea4 100644
5594 +--- a/net/mac80211/rate.h
5595 ++++ b/net/mac80211/rate.h
5596 +@@ -69,7 +69,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
5597 +
5598 + static inline void rate_control_rate_update(struct ieee80211_local *local,
5599 + struct ieee80211_supported_band *sband,
5600 +- struct sta_info *sta, u32 changed)
5601 ++ struct sta_info *sta, u32 changed,
5602 ++ enum nl80211_channel_type oper_chan_type)
5603 + {
5604 + struct rate_control_ref *ref = local->rate_ctrl;
5605 + struct ieee80211_sta *ista = &sta->sta;
5606 +@@ -77,7 +78,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
5607 +
5608 + if (ref && ref->ops->rate_update)
5609 + ref->ops->rate_update(ref->priv, sband, ista,
5610 +- priv_sta, changed);
5611 ++ priv_sta, changed, oper_chan_type);
5612 + }
5613 +
5614 + static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
5615 +diff --git a/net/mac80211/status.c b/net/mac80211/status.c
5616 +index d78f36c..f5abeec 100644
5617 +--- a/net/mac80211/status.c
5618 ++++ b/net/mac80211/status.c
5619 +@@ -165,6 +165,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
5620 + rcu_read_lock();
5621 +
5622 + sband = local->hw.wiphy->bands[info->band];
5623 ++ fc = hdr->frame_control;
5624 +
5625 + sta = sta_info_get(local, hdr->addr1);
5626 +
5627 +@@ -180,8 +181,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
5628 + return;
5629 + }
5630 +
5631 +- fc = hdr->frame_control;
5632 +-
5633 + if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
5634 + (ieee80211_is_data_qos(fc))) {
5635 + u16 tid, ssn;
5636 +@@ -246,6 +245,20 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
5637 + local->dot11FailedCount++;
5638 + }
5639 +
5640 ++ if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
5641 ++ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
5642 ++ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
5643 ++ local->ps_sdata && !(local->scanning)) {
5644 ++ if (info->flags & IEEE80211_TX_STAT_ACK) {
5645 ++ local->ps_sdata->u.mgd.flags |=
5646 ++ IEEE80211_STA_NULLFUNC_ACKED;
5647 ++ ieee80211_queue_work(&local->hw,
5648 ++ &local->dynamic_ps_enable_work);
5649 ++ } else
5650 ++ mod_timer(&local->dynamic_ps_timer, jiffies +
5651 ++ msecs_to_jiffies(10));
5652 ++ }
5653 ++
5654 + /* this was a transmitted frame, but now we want to reuse it */
5655 + skb_orphan(skb);
5656 +
5657 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
5658 +index 0ffe689..eeac97f 100644
5659 +--- a/net/netfilter/nf_conntrack_netlink.c
5660 ++++ b/net/netfilter/nf_conntrack_netlink.c
5661 +@@ -571,7 +571,8 @@ nla_put_failure:
5662 + nlmsg_failure:
5663 + kfree_skb(skb);
5664 + errout:
5665 +- nfnetlink_set_err(0, group, -ENOBUFS);
5666 ++ if (nfnetlink_set_err(0, group, -ENOBUFS) > 0)
5667 ++ return -ENOBUFS;
5668 + return 0;
5669 + }
5670 + #endif /* CONFIG_NF_CONNTRACK_EVENTS */
5671 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
5672 +index eedc0c1..35fe185 100644
5673 +--- a/net/netfilter/nfnetlink.c
5674 ++++ b/net/netfilter/nfnetlink.c
5675 +@@ -114,9 +114,9 @@ int nfnetlink_send(struct sk_buff *skb, u32 pid,
5676 + }
5677 + EXPORT_SYMBOL_GPL(nfnetlink_send);
5678 +
5679 +-void nfnetlink_set_err(u32 pid, u32 group, int error)
5680 ++int nfnetlink_set_err(u32 pid, u32 group, int error)
5681 + {
5682 +- netlink_set_err(nfnl, pid, group, error);
5683 ++ return netlink_set_err(nfnl, pid, group, error);
5684 + }
5685 + EXPORT_SYMBOL_GPL(nfnetlink_set_err);
5686 +
5687 +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
5688 +index 43e83a4..e460bf9 100644
5689 +--- a/net/netfilter/xt_recent.c
5690 ++++ b/net/netfilter/xt_recent.c
5691 +@@ -260,7 +260,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
5692 + for (i = 0; i < e->nstamps; i++) {
5693 + if (info->seconds && time_after(time, e->stamps[i]))
5694 + continue;
5695 +- if (info->hit_count && ++hits >= info->hit_count) {
5696 ++ if (!info->hit_count || ++hits >= info->hit_count) {
5697 + ret = !ret;
5698 + break;
5699 + }
5700 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5701 +index 4c5972b..0052d3c 100644
5702 +--- a/net/netlink/af_netlink.c
5703 ++++ b/net/netlink/af_netlink.c
5704 +@@ -1093,6 +1093,7 @@ static inline int do_one_set_err(struct sock *sk,
5705 + struct netlink_set_err_data *p)
5706 + {
5707 + struct netlink_sock *nlk = nlk_sk(sk);
5708 ++ int ret = 0;
5709 +
5710 + if (sk == p->exclude_sk)
5711 + goto out;
5712 +@@ -1104,10 +1105,15 @@ static inline int do_one_set_err(struct sock *sk,
5713 + !test_bit(p->group - 1, nlk->groups))
5714 + goto out;
5715 +
5716 ++ if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
5717 ++ ret = 1;
5718 ++ goto out;
5719 ++ }
5720 ++
5721 + sk->sk_err = p->code;
5722 + sk->sk_error_report(sk);
5723 + out:
5724 +- return 0;
5725 ++ return ret;
5726 + }
5727 +
5728 + /**
5729 +@@ -1116,12 +1122,16 @@ out:
5730 + * @pid: the PID of a process that we want to skip (if any)
5731 + * @groups: the broadcast group that will notice the error
5732 + * @code: error code, must be negative (as usual in kernelspace)
5733 ++ *
5734 ++ * This function returns the number of broadcast listeners that have set the
5735 ++ * NETLINK_RECV_NO_ENOBUFS socket option.
5736 + */
5737 +-void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
5738 ++int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
5739 + {
5740 + struct netlink_set_err_data info;
5741 + struct hlist_node *node;
5742 + struct sock *sk;
5743 ++ int ret = 0;
5744 +
5745 + info.exclude_sk = ssk;
5746 + info.pid = pid;
5747 +@@ -1132,9 +1142,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
5748 + read_lock(&nl_table_lock);
5749 +
5750 + sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
5751 +- do_one_set_err(sk, &info);
5752 ++ ret += do_one_set_err(sk, &info);
5753 +
5754 + read_unlock(&nl_table_lock);
5755 ++ return ret;
5756 + }
5757 + EXPORT_SYMBOL(netlink_set_err);
5758 +
5759 +diff --git a/net/sctp/input.c b/net/sctp/input.c
5760 +index c0c973e..3d74b26 100644
5761 +--- a/net/sctp/input.c
5762 ++++ b/net/sctp/input.c
5763 +@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
5764 + const union sctp_addr *peer,
5765 + struct sctp_transport **pt);
5766 +
5767 +-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
5768 ++static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
5769 +
5770 +
5771 + /* Calculate the SCTP checksum of an SCTP packet. */
5772 +@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
5773 + }
5774 +
5775 + if (sock_owned_by_user(sk)) {
5776 ++ if (sctp_add_backlog(sk, skb)) {
5777 ++ sctp_bh_unlock_sock(sk);
5778 ++ sctp_chunk_free(chunk);
5779 ++ skb = NULL; /* sctp_chunk_free already freed the skb */
5780 ++ goto discard_release;
5781 ++ }
5782 + SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
5783 +- sctp_add_backlog(sk, skb);
5784 + } else {
5785 + SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
5786 + sctp_inq_push(&chunk->rcvr->inqueue, chunk);
5787 +@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
5788 + sctp_bh_lock_sock(sk);
5789 +
5790 + if (sock_owned_by_user(sk)) {
5791 +- sk_add_backlog(sk, skb);
5792 +- backloged = 1;
5793 ++ if (sk_add_backlog(sk, skb))
5794 ++ sctp_chunk_free(chunk);
5795 ++ else
5796 ++ backloged = 1;
5797 + } else
5798 + sctp_inq_push(inqueue, chunk);
5799 +
5800 +@@ -362,22 +369,27 @@ done:
5801 + return 0;
5802 + }
5803 +
5804 +-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
5805 ++static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
5806 + {
5807 + struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
5808 + struct sctp_ep_common *rcvr = chunk->rcvr;
5809 ++ int ret;
5810 +
5811 +- /* Hold the assoc/ep while hanging on the backlog queue.
5812 +- * This way, we know structures we need will not disappear from us
5813 +- */
5814 +- if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
5815 +- sctp_association_hold(sctp_assoc(rcvr));
5816 +- else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
5817 +- sctp_endpoint_hold(sctp_ep(rcvr));
5818 +- else
5819 +- BUG();
5820 ++ ret = sk_add_backlog(sk, skb);
5821 ++ if (!ret) {
5822 ++ /* Hold the assoc/ep while hanging on the backlog queue.
5823 ++ * This way, we know structures we need will not disappear
5824 ++ * from us
5825 ++ */
5826 ++ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
5827 ++ sctp_association_hold(sctp_assoc(rcvr));
5828 ++ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
5829 ++ sctp_endpoint_hold(sctp_ep(rcvr));
5830 ++ else
5831 ++ BUG();
5832 ++ }
5833 ++ return ret;
5834 +
5835 +- sk_add_backlog(sk, skb);
5836 + }
5837 +
5838 + /* Handle icmp frag needed error. */
5839 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5840 +index 67fdac9..9bd9d82 100644
5841 +--- a/net/sctp/socket.c
5842 ++++ b/net/sctp/socket.c
5843 +@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
5844 + SCTP_DBG_OBJCNT_INC(sock);
5845 + percpu_counter_inc(&sctp_sockets_allocated);
5846 +
5847 ++ /* Set socket backlog limit. */
5848 ++ sk->sk_backlog.limit = sysctl_sctp_rmem[1];
5849 ++
5850 + local_bh_disable();
5851 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
5852 + local_bh_enable();
5853 +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
5854 +index f7a7f83..50346a6 100644
5855 +--- a/net/sunrpc/auth_gss/auth_gss.c
5856 ++++ b/net/sunrpc/auth_gss/auth_gss.c
5857 +@@ -1273,9 +1273,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
5858 + rqstp->rq_release_snd_buf = priv_release_snd_buf;
5859 + return 0;
5860 + out_free:
5861 +- for (i--; i >= 0; i--) {
5862 +- __free_page(rqstp->rq_enc_pages[i]);
5863 +- }
5864 ++ rqstp->rq_enc_pages_num = i;
5865 ++ priv_release_snd_buf(rqstp);
5866 + out:
5867 + return -EAGAIN;
5868 + }
5869 +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
5870 +index 49278f8..27a2378 100644
5871 +--- a/net/sunrpc/rpc_pipe.c
5872 ++++ b/net/sunrpc/rpc_pipe.c
5873 +@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
5874 + struct dentry *dentry;
5875 +
5876 + dentry = __rpc_lookup_create(parent, name);
5877 ++ if (IS_ERR(dentry))
5878 ++ return dentry;
5879 + if (dentry->d_inode == NULL)
5880 + return dentry;
5881 + dput(dentry);
5882 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5883 +index 4f30336..6bd41a9 100644
5884 +--- a/net/sunrpc/svc_xprt.c
5885 ++++ b/net/sunrpc/svc_xprt.c
5886 +@@ -699,8 +699,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
5887 + spin_unlock_bh(&pool->sp_lock);
5888 +
5889 + len = 0;
5890 +- if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
5891 +- !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5892 ++ if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5893 ++ dprintk("svc_recv: found XPT_CLOSE\n");
5894 ++ svc_delete_xprt(xprt);
5895 ++ } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
5896 + struct svc_xprt *newxpt;
5897 + newxpt = xprt->xpt_ops->xpo_accept(xprt);
5898 + if (newxpt) {
5899 +@@ -726,7 +728,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
5900 + svc_xprt_received(newxpt);
5901 + }
5902 + svc_xprt_received(xprt);
5903 +- } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5904 ++ } else {
5905 + dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
5906 + rqstp, pool->sp_id, xprt,
5907 + atomic_read(&xprt->xpt_ref.refcount));
5908 +@@ -739,11 +741,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
5909 + dprintk("svc: got len=%d\n", len);
5910 + }
5911 +
5912 +- if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5913 +- dprintk("svc_recv: found XPT_CLOSE\n");
5914 +- svc_delete_xprt(xprt);
5915 +- }
5916 +-
5917 + /* No data, incomplete (TCP) read, or accept() */
5918 + if (len == 0 || len == -EAGAIN) {
5919 + rqstp->rq_res.len = 0;
5920 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5921 +index 870929e..528efef 100644
5922 +--- a/net/sunrpc/svcsock.c
5923 ++++ b/net/sunrpc/svcsock.c
5924 +@@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
5925 + return len;
5926 + err_delete:
5927 + set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
5928 ++ svc_xprt_received(&svsk->sk_xprt);
5929 + err_again:
5930 + return -EAGAIN;
5931 + }
5932 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5933 +index 1ea64f0..4b235fc 100644
5934 +--- a/net/tipc/socket.c
5935 ++++ b/net/tipc/socket.c
5936 +@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
5937 + if (!sock_owned_by_user(sk)) {
5938 + res = filter_rcv(sk, buf);
5939 + } else {
5940 +- sk_add_backlog(sk, buf);
5941 +- res = TIPC_OK;
5942 ++ if (sk_add_backlog(sk, buf))
5943 ++ res = TIPC_ERR_OVERLOAD;
5944 ++ else
5945 ++ res = TIPC_OK;
5946 + }
5947 + bh_unlock_sock(sk);
5948 +
5949 +diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
5950 +index 3e1efe5..52e3042 100644
5951 +--- a/net/x25/x25_dev.c
5952 ++++ b/net/x25/x25_dev.c
5953 +@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
5954 + if (!sock_owned_by_user(sk)) {
5955 + queued = x25_process_rx_frame(sk, skb);
5956 + } else {
5957 +- sk_add_backlog(sk, skb);
5958 ++ queued = !sk_add_backlog(sk, skb);
5959 + }
5960 + bh_unlock_sock(sk);
5961 + sock_put(sk);
5962 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5963 +index 0ecb16a..f12dd3d 100644
5964 +--- a/net/xfrm/xfrm_policy.c
5965 ++++ b/net/xfrm/xfrm_policy.c
5966 +@@ -1354,7 +1354,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
5967 + return err;
5968 + }
5969 +
5970 +-static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5971 ++static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
5972 ++ struct flowi *fl)
5973 + {
5974 + struct xfrm_policy_afinfo *afinfo =
5975 + xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
5976 +@@ -1363,7 +1364,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5977 + if (!afinfo)
5978 + return -EINVAL;
5979 +
5980 +- err = afinfo->fill_dst(xdst, dev);
5981 ++ err = afinfo->fill_dst(xdst, dev, fl);
5982 +
5983 + xfrm_policy_put_afinfo(afinfo);
5984 +
5985 +@@ -1468,7 +1469,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
5986 + for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
5987 + struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
5988 +
5989 +- err = xfrm_fill_dst(xdst, dev);
5990 ++ err = xfrm_fill_dst(xdst, dev, fl);
5991 + if (err)
5992 + goto free_dst;
5993 +
5994 +diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
5995 +index d9266ba..4e5f2f7 100644
5996 +--- a/sound/pci/ac97/ac97_patch.c
5997 ++++ b/sound/pci/ac97/ac97_patch.c
5998 +@@ -1867,12 +1867,14 @@ static unsigned int ad1981_jacks_blacklist[] = {
5999 + 0x10140523, /* Thinkpad R40 */
6000 + 0x10140534, /* Thinkpad X31 */
6001 + 0x10140537, /* Thinkpad T41p */
6002 ++ 0x1014053e, /* Thinkpad R40e */
6003 + 0x10140554, /* Thinkpad T42p/R50p */
6004 + 0x10140567, /* Thinkpad T43p 2668-G7U */
6005 + 0x10140581, /* Thinkpad X41-2527 */
6006 + 0x10280160, /* Dell Dimension 2400 */
6007 + 0x104380b0, /* Asus A7V8X-MX */
6008 + 0x11790241, /* Toshiba Satellite A-15 S127 */
6009 ++ 0x1179ff10, /* Toshiba P500 */
6010 + 0x144dc01a, /* Samsung NP-X20C004/SEG */
6011 + 0 /* end */
6012 + };
6013 +diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
6014 +index a312bae..bbaec22 100644
6015 +--- a/sound/pci/cmipci.c
6016 ++++ b/sound/pci/cmipci.c
6017 +@@ -941,13 +941,21 @@ static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci
6018 + struct snd_pcm_substream *substream)
6019 + {
6020 + size_t ptr;
6021 +- unsigned int reg;
6022 ++ unsigned int reg, rem, tries;
6023 ++
6024 + if (!rec->running)
6025 + return 0;
6026 + #if 1 // this seems better..
6027 + reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
6028 +- ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1);
6029 +- ptr >>= rec->shift;
6030 ++ for (tries = 0; tries < 3; tries++) {
6031 ++ rem = snd_cmipci_read_w(cm, reg);
6032 ++ if (rem < rec->dma_size)
6033 ++ goto ok;
6034 ++ }
6035 ++ printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
6036 ++ return SNDRV_PCM_POS_XRUN;
6037 ++ok:
6038 ++ ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
6039 + #else
6040 + reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
6041 + ptr = snd_cmipci_read(cm, reg) - rec->offset;
6042 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6043 +index 6d6e307..9ace8eb 100644
6044 +--- a/sound/pci/hda/hda_intel.c
6045 ++++ b/sound/pci/hda/hda_intel.c
6046 +@@ -2265,8 +2265,10 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
6047 + SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
6048 + SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
6049 + SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
6050 ++ SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
6051 + SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
6052 + SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
6053 ++ SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
6054 + {}
6055 + };
6056 +
6057 +@@ -2354,6 +2356,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
6058 + static struct snd_pci_quirk msi_black_list[] __devinitdata = {
6059 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
6060 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
6061 ++ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
6062 + {}
6063 + };
6064 +
6065 +@@ -2372,6 +2375,13 @@ static void __devinit check_msi(struct azx *chip)
6066 + "hda_intel: msi for device %04x:%04x set to %d\n",
6067 + q->subvendor, q->subdevice, q->value);
6068 + chip->msi = q->value;
6069 ++ return;
6070 ++ }
6071 ++
6072 ++ /* NVidia chipsets seem to cause troubles with MSI */
6073 ++ if (chip->driver_type == AZX_DRIVER_NVIDIA) {
6074 ++ printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
6075 ++ chip->msi = 0;
6076 + }
6077 + }
6078 +
6079 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6080 +index c578c28..71b7a96 100644
6081 +--- a/sound/pci/hda/patch_conexant.c
6082 ++++ b/sound/pci/hda/patch_conexant.c
6083 +@@ -1570,6 +1570,21 @@ static int patch_cxt5047(struct hda_codec *codec)
6084 + #endif
6085 + }
6086 + spec->vmaster_nid = 0x13;
6087 ++
6088 ++ switch (codec->subsystem_id >> 16) {
6089 ++ case 0x103c:
6090 ++ /* HP laptops have really bad sound over 0 dB on NID 0x10.
6091 ++ * Fix max PCM level to 0 dB (originally it has 0x1e steps
6092 ++ * with 0 dB offset 0x17)
6093 ++ */
6094 ++ snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
6095 ++ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
6096 ++ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
6097 ++ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
6098 ++ (1 << AC_AMPCAP_MUTE_SHIFT));
6099 ++ break;
6100 ++ }
6101 ++
6102 + return 0;
6103 + }
6104 +
6105 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6106 +index da34095..a79f841 100644
6107 +--- a/sound/pci/hda/patch_realtek.c
6108 ++++ b/sound/pci/hda/patch_realtek.c
6109 +@@ -407,6 +407,8 @@ static int alc_mux_enum_info(struct snd_kcontrol *kcontrol,
6110 + unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
6111 + if (mux_idx >= spec->num_mux_defs)
6112 + mux_idx = 0;
6113 ++ if (!spec->input_mux[mux_idx].num_items && mux_idx > 0)
6114 ++ mux_idx = 0;
6115 + return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
6116 + }
6117 +
6118 +@@ -435,6 +437,8 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
6119 +
6120 + mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
6121 + imux = &spec->input_mux[mux_idx];
6122 ++ if (!imux->num_items && mux_idx > 0)
6123 ++ imux = &spec->input_mux[0];
6124 +
6125 + type = get_wcaps_type(get_wcaps(codec, nid));
6126 + if (type == AC_WID_AUD_MIX) {
6127 +@@ -6380,7 +6384,7 @@ static struct alc_config_preset alc260_presets[] = {
6128 + .num_dacs = ARRAY_SIZE(alc260_dac_nids),
6129 + .dac_nids = alc260_dac_nids,
6130 + .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
6131 +- .adc_nids = alc260_adc_nids,
6132 ++ .adc_nids = alc260_dual_adc_nids,
6133 + .num_channel_mode = ARRAY_SIZE(alc260_modes),
6134 + .channel_mode = alc260_modes,
6135 + .input_mux = &alc260_capture_source,
6136 +@@ -9097,7 +9101,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
6137 + SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
6138 + SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
6139 + SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
6140 +- SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
6141 ++ SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
6142 +
6143 + {}
6144 + };
6145 +@@ -9941,6 +9945,8 @@ static void alc882_auto_init_input_src(struct hda_codec *codec)
6146 + continue;
6147 + mux_idx = c >= spec->num_mux_defs ? 0 : c;
6148 + imux = &spec->input_mux[mux_idx];
6149 ++ if (!imux->num_items && mux_idx > 0)
6150 ++ imux = &spec->input_mux[0];
6151 + for (idx = 0; idx < conns; idx++) {
6152 + /* if the current connection is the selected one,
6153 + * unmute it as default - otherwise mute it
6154 +diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
6155 +index bdd3b7e..bd498d4 100644
6156 +--- a/tools/perf/Documentation/Makefile
6157 ++++ b/tools/perf/Documentation/Makefile
6158 +@@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
6159 + DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
6160 + DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
6161 +
6162 ++# Make the path relative to DESTDIR, not prefix
6163 ++ifndef DESTDIR
6164 + prefix?=$(HOME)
6165 ++endif
6166 + bindir?=$(prefix)/bin
6167 + htmldir?=$(prefix)/share/doc/perf-doc
6168 + pdfdir?=$(prefix)/share/doc/perf-doc
6169 +@@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man
6170 + man1dir=$(mandir)/man1
6171 + man5dir=$(mandir)/man5
6172 + man7dir=$(mandir)/man7
6173 +-# DESTDIR=
6174 +
6175 + ASCIIDOC=asciidoc
6176 + ASCIIDOC_EXTRA = --unsafe
6177 +diff --git a/tools/perf/Makefile b/tools/perf/Makefile
6178 +index 2e7fa3a..03eb7c9 100644
6179 +--- a/tools/perf/Makefile
6180 ++++ b/tools/perf/Makefile
6181 +@@ -216,7 +216,10 @@ STRIP ?= strip
6182 + # runtime figures out where they are based on the path to the executable.
6183 + # This can help installing the suite in a relocatable way.
6184 +
6185 ++# Make the path relative to DESTDIR, not to prefix
6186 ++ifndef DESTDIR
6187 + prefix = $(HOME)
6188 ++endif
6189 + bindir_relative = bin
6190 + bindir = $(prefix)/$(bindir_relative)
6191 + mandir = share/man
6192 +@@ -233,7 +236,6 @@ sysconfdir = $(prefix)/etc
6193 + ETC_PERFCONFIG = etc/perfconfig
6194 + endif
6195 + lib = lib
6196 +-# DESTDIR=
6197 +
6198 + export prefix bindir sharedir sysconfdir
6199 +
6200 +diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
6201 +index 593ff25..0b1ba36 100644
6202 +--- a/tools/perf/builtin-annotate.c
6203 ++++ b/tools/perf/builtin-annotate.c
6204 +@@ -53,32 +53,20 @@ struct sym_priv {
6205 +
6206 + static const char *sym_hist_filter;
6207 +
6208 +-static int symbol_filter(struct map *map __used, struct symbol *sym)
6209 ++static int sym__alloc_hist(struct symbol *self)
6210 + {
6211 +- if (sym_hist_filter == NULL ||
6212 +- strcmp(sym->name, sym_hist_filter) == 0) {
6213 +- struct sym_priv *priv = symbol__priv(sym);
6214 +- const int size = (sizeof(*priv->hist) +
6215 +- (sym->end - sym->start) * sizeof(u64));
6216 ++ struct sym_priv *priv = symbol__priv(self);
6217 ++ const int size = (sizeof(*priv->hist) +
6218 ++ (self->end - self->start) * sizeof(u64));
6219 +
6220 +- priv->hist = malloc(size);
6221 +- if (priv->hist)
6222 +- memset(priv->hist, 0, size);
6223 +- return 0;
6224 +- }
6225 +- /*
6226 +- * FIXME: We should really filter it out, as we don't want to go thru symbols
6227 +- * we're not interested, and if a DSO ends up with no symbols, delete it too,
6228 +- * but right now the kernel loading routines in symbol.c bail out if no symbols
6229 +- * are found, fix it later.
6230 +- */
6231 +- return 0;
6232 ++ priv->hist = zalloc(size);
6233 ++ return priv->hist == NULL ? -1 : 0;
6234 + }
6235 +
6236 + /*
6237 + * collect histogram counts
6238 + */
6239 +-static void hist_hit(struct hist_entry *he, u64 ip)
6240 ++static int annotate__hist_hit(struct hist_entry *he, u64 ip)
6241 + {
6242 + unsigned int sym_size, offset;
6243 + struct symbol *sym = he->sym;
6244 +@@ -88,11 +76,11 @@ static void hist_hit(struct hist_entry *he, u64 ip)
6245 + he->count++;
6246 +
6247 + if (!sym || !he->map)
6248 +- return;
6249 ++ return 0;
6250 +
6251 + priv = symbol__priv(sym);
6252 +- if (!priv->hist)
6253 +- return;
6254 ++ if (priv->hist == NULL && sym__alloc_hist(sym) < 0)
6255 ++ return -ENOMEM;
6256 +
6257 + sym_size = sym->end - sym->start;
6258 + offset = ip - sym->start;
6259 +@@ -102,7 +90,7 @@ static void hist_hit(struct hist_entry *he, u64 ip)
6260 + he->map->unmap_ip(he->map, ip));
6261 +
6262 + if (offset >= sym_size)
6263 +- return;
6264 ++ return 0;
6265 +
6266 + h = priv->hist;
6267 + h->sum++;
6268 +@@ -114,18 +102,31 @@ static void hist_hit(struct hist_entry *he, u64 ip)
6269 + he->sym->name,
6270 + (void *)(unsigned long)ip, ip - he->sym->start,
6271 + h->ip[offset]);
6272 ++ return 0;
6273 + }
6274 +
6275 + static int perf_session__add_hist_entry(struct perf_session *self,
6276 + struct addr_location *al, u64 count)
6277 + {
6278 +- bool hit;
6279 +- struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL,
6280 +- count, &hit);
6281 +- if (he == NULL)
6282 +- return -ENOMEM;
6283 +- hist_hit(he, al->addr);
6284 +- return 0;
6285 ++ bool hit;
6286 ++ struct hist_entry *he;
6287 ++
6288 ++ if (sym_hist_filter != NULL &&
6289 ++ (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
6290 ++ /* We're only interested in a symbol named sym_hist_filter */
6291 ++ if (al->sym != NULL) {
6292 ++ rb_erase(&al->sym->rb_node,
6293 ++ &al->map->dso->symbols[al->map->type]);
6294 ++ symbol__delete(al->sym);
6295 ++ }
6296 ++ return 0;
6297 ++ }
6298 ++
6299 ++ he = __perf_session__add_hist_entry(self, al, NULL, count, &hit);
6300 ++ if (he == NULL)
6301 ++ return -ENOMEM;
6302 ++
6303 ++ return annotate__hist_hit(he, al->addr);
6304 + }
6305 +
6306 + static int process_sample_event(event_t *event, struct perf_session *session)
6307 +@@ -135,7 +136,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
6308 + dump_printf("(IP, %d): %d: %p\n", event->header.misc,
6309 + event->ip.pid, (void *)(long)event->ip.ip);
6310 +
6311 +- if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
6312 ++ if (event__preprocess_sample(event, session, &al, NULL) < 0) {
6313 + fprintf(stderr, "problem processing %d event, skipping it.\n",
6314 + event->header.type);
6315 + return -1;
6316 +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
6317 +index c1e6774..fa626eb 100644
6318 +--- a/tools/perf/builtin-probe.c
6319 ++++ b/tools/perf/builtin-probe.c
6320 +@@ -48,7 +48,6 @@
6321 + #include "util/probe-event.h"
6322 +
6323 + #define MAX_PATH_LEN 256
6324 +-#define MAX_PROBES 128
6325 +
6326 + /* Session management structure */
6327 + static struct {
6328 +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
6329 +index 4b852c0..7f81ded 100644
6330 +--- a/tools/perf/util/probe-finder.c
6331 ++++ b/tools/perf/util/probe-finder.c
6332 +@@ -544,6 +544,9 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
6333 + }
6334 + free_current_frame_base(pf);
6335 +
6336 ++ if (pp->found == MAX_PROBES)
6337 ++ die("Too many( > %d) probe point found.\n", MAX_PROBES);
6338 ++
6339 + pp->probes[pp->found] = strdup(tmp);
6340 + pp->found++;
6341 + }
6342 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
6343 +index 72547b9..fcb8919 100644
6344 +--- a/tools/perf/util/symbol.c
6345 ++++ b/tools/perf/util/symbol.c
6346 +@@ -149,7 +149,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name)
6347 + return self;
6348 + }
6349 +
6350 +-static void symbol__delete(struct symbol *self)
6351 ++void symbol__delete(struct symbol *self)
6352 + {
6353 + free(((void *)self) - symbol_conf.priv_size);
6354 + }
6355 +diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
6356 +index 8aded23..400227a 100644
6357 +--- a/tools/perf/util/symbol.h
6358 ++++ b/tools/perf/util/symbol.h
6359 +@@ -49,6 +49,8 @@ struct symbol {
6360 + char name[0];
6361 + };
6362 +
6363 ++void symbol__delete(struct symbol *self);
6364 ++
6365 + struct strlist;
6366 +
6367 + struct symbol_conf {