Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2228 - genpatches-2.6/trunk/3.0
Date: Mon, 29 Oct 2012 18:07:13
Message-Id: 20121029180703.6331D21600@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2012-10-29 18:05:59 +0000 (Mon, 29 Oct 2012)
3 New Revision: 2228
4
5 Added:
6 genpatches-2.6/trunk/3.0/1046_linux-3.0.47.patch
7 genpatches-2.6/trunk/3.0/1047_linux-3.0.48.patch
8 genpatches-2.6/trunk/3.0/1048_linux-3.0.49.patch
9 Removed:
10 genpatches-2.6/trunk/3.0/2500_5906-device-DMA-frag-workaround.patch
11 Modified:
12 genpatches-2.6/trunk/3.0/0000_README
13 Log:
14 Linux patches 3.0.47, 3.0.48, and 3.0.49. Removal of a redundant patch (the 5906 short-DMA workaround is included upstream in 3.0.47).
15
16 Modified: genpatches-2.6/trunk/3.0/0000_README
17 ===================================================================
18 --- genpatches-2.6/trunk/3.0/0000_README 2012-10-29 14:35:56 UTC (rev 2227)
19 +++ genpatches-2.6/trunk/3.0/0000_README 2012-10-29 18:05:59 UTC (rev 2228)
20 @@ -219,14 +219,22 @@
21 From: http://www.kernel.org
22 Desc: Linux 3.0.46
23
24 +Patch: 1046_linux-3.0.47.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 3.0.47
27 +
28 +Patch: 1047_linux-3.0.48.patch
29 +From: http://www.kernel.org
30 +Desc: Linux 3.0.48
31 +
32 +Patch: 1048_linux-3.0.49.patch
33 +From: http://www.kernel.org
34 +Desc: Linux 3.0.49
35 +
36 Patch: 1800_fix-zcache-build.patch
37 From: http://bugs.gentoo.org/show_bug.cgi?id=376325
38 Desc: Fix zcache build error
39
40 -Patch: 2500_5906-device-DMA-frag-workaround.patch
41 -From: http://bugs.gentoo.org/show_bug.cgi?id=434992
42 -Desc: 5906 device short DMA fragment workaround
43 -
44 Patch: 2600_Input-ALPS-Move-protocol-information-to-Documentation.patch
45 From: http://bugs.gentoo.org/show_bug.cgi?id=318567
46 Desc: ALPS Touchpad - Move protocol information to Documentation
47
48 Added: genpatches-2.6/trunk/3.0/1046_linux-3.0.47.patch
49 ===================================================================
50 --- genpatches-2.6/trunk/3.0/1046_linux-3.0.47.patch (rev 0)
51 +++ genpatches-2.6/trunk/3.0/1046_linux-3.0.47.patch 2012-10-29 18:05:59 UTC (rev 2228)
52 @@ -0,0 +1,1464 @@
53 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
54 +index aa47be7..397ee05 100644
55 +--- a/Documentation/kernel-parameters.txt
56 ++++ b/Documentation/kernel-parameters.txt
57 +@@ -1764,6 +1764,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
58 +
59 + noresidual [PPC] Don't use residual data on PReP machines.
60 +
61 ++ nordrand [X86] Disable the direct use of the RDRAND
62 ++ instruction even if it is supported by the
63 ++ processor. RDRAND is still available to user
64 ++ space applications.
65 ++
66 + noresume [SWSUSP] Disables resume and restores original swap
67 + space.
68 +
69 +diff --git a/Makefile b/Makefile
70 +index 1cb8c1d..82f6dfe 100644
71 +--- a/Makefile
72 ++++ b/Makefile
73 +@@ -1,6 +1,6 @@
74 + VERSION = 3
75 + PATCHLEVEL = 0
76 +-SUBLEVEL = 46
77 ++SUBLEVEL = 47
78 + EXTRAVERSION =
79 + NAME = Sneaky Weasel
80 +
81 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
82 +index 157781e..17d179c 100644
83 +--- a/arch/arm/Kconfig
84 ++++ b/arch/arm/Kconfig
85 +@@ -1260,6 +1260,16 @@ config PL310_ERRATA_769419
86 + on systems with an outer cache, the store buffer is drained
87 + explicitly.
88 +
89 ++config ARM_ERRATA_775420
90 ++ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
91 ++ depends on CPU_V7
92 ++ help
93 ++ This option enables the workaround for the 775420 Cortex-A9 (r2p2,
94 ++ r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
95 ++ operation aborts with an MMU exception, it might cause the processor
96 ++ to deadlock. This workaround puts DSB before executing ISB if
97 ++ an abort may occur on cache maintenance.
98 ++
99 + endmenu
100 +
101 + source "arch/arm/common/Kconfig"
102 +diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
103 +index 3d5fc41..bf53047 100644
104 +--- a/arch/arm/include/asm/vfpmacros.h
105 ++++ b/arch/arm/include/asm/vfpmacros.h
106 +@@ -28,7 +28,7 @@
107 + ldr \tmp, =elf_hwcap @ may not have MVFR regs
108 + ldr \tmp, [\tmp, #0]
109 + tst \tmp, #HWCAP_VFPv3D16
110 +- ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
111 ++ ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
112 + addne \base, \base, #32*4 @ step over unused register space
113 + #else
114 + VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
115 +@@ -52,7 +52,7 @@
116 + ldr \tmp, =elf_hwcap @ may not have MVFR regs
117 + ldr \tmp, [\tmp, #0]
118 + tst \tmp, #HWCAP_VFPv3D16
119 +- stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
120 ++ stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
121 + addne \base, \base, #32*4 @ step over unused register space
122 + #else
123 + VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
124 +diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
125 +index 1ed1fd3..428b243 100644
126 +--- a/arch/arm/mm/cache-v7.S
127 ++++ b/arch/arm/mm/cache-v7.S
128 +@@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
129 + * isn't mapped, just try the next page.
130 + */
131 + 9001:
132 ++#ifdef CONFIG_ARM_ERRATA_775420
133 ++ dsb
134 ++#endif
135 + mov r12, r12, lsr #12
136 + mov r12, r12, lsl #12
137 + add r12, r12, #4096
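The Kconfig help text above states the fix plainly: put a DSB before the ISB when a cache maintenance operation may abort, which is what the dsb added at the abort label 9001 in v7_coherent_user_range accomplishes. A minimal sketch of that barrier pairing, assuming an ARMv7 target and GCC-style inline assembly (illustrative only, not the kernel's code):

        /* Sketch of the CONFIG_ARM_ERRATA_775420 ordering: drain the aborted
         * cache maintenance operation with DSB before the pipeline flush. */
        static inline void cache_maintenance_barrier(void)
        {
        #ifdef CONFIG_ARM_ERRATA_775420
                asm volatile("dsb" : : : "memory"); /* complete/drain the cache op */
        #endif
                asm volatile("isb" : : : "memory"); /* then resynchronize the pipeline */
        }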
138 +diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
139 +index f4546e9..23817a6 100644
140 +--- a/arch/mips/kernel/kgdb.c
141 ++++ b/arch/mips/kernel/kgdb.c
142 +@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
143 + struct pt_regs *regs = args->regs;
144 + int trap = (regs->cp0_cause & 0x7c) >> 2;
145 +
146 ++#ifdef CONFIG_KPROBES
147 ++ /*
148 ++ * Return immediately if the kprobes fault notifier has set
149 ++ * DIE_PAGE_FAULT.
150 ++ */
151 ++ if (cmd == DIE_PAGE_FAULT)
152 ++ return NOTIFY_DONE;
153 ++#endif /* CONFIG_KPROBES */
154 ++
155 + /* Userspace events, ignore. */
156 + if (user_mode(regs))
157 + return NOTIFY_DONE;
158 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
159 +index 37357a5..a0e9bda 100644
160 +--- a/arch/x86/Kconfig
161 ++++ b/arch/x86/Kconfig
162 +@@ -1451,6 +1451,15 @@ config ARCH_USES_PG_UNCACHED
163 + def_bool y
164 + depends on X86_PAT
165 +
166 ++config ARCH_RANDOM
167 ++ def_bool y
168 ++ prompt "x86 architectural random number generator" if EXPERT
169 ++ ---help---
170 ++ Enable the x86 architectural RDRAND instruction
171 ++ (Intel Bull Mountain technology) to generate random numbers.
172 ++ If supported, this is a high bandwidth, cryptographically
173 ++ secure hardware random number generator.
174 ++
175 + config EFI
176 + bool "EFI runtime service support"
177 + depends on ACPI
178 +diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
179 +new file mode 100644
180 +index 0000000..0d9ec77
181 +--- /dev/null
182 ++++ b/arch/x86/include/asm/archrandom.h
183 +@@ -0,0 +1,75 @@
184 ++/*
185 ++ * This file is part of the Linux kernel.
186 ++ *
187 ++ * Copyright (c) 2011, Intel Corporation
188 ++ * Authors: Fenghua Yu <fenghua.yu@×××××.com>,
189 ++ * H. Peter Anvin <hpa@×××××××××××.com>
190 ++ *
191 ++ * This program is free software; you can redistribute it and/or modify it
192 ++ * under the terms and conditions of the GNU General Public License,
193 ++ * version 2, as published by the Free Software Foundation.
194 ++ *
195 ++ * This program is distributed in the hope it will be useful, but WITHOUT
196 ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
197 ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
198 ++ * more details.
199 ++ *
200 ++ * You should have received a copy of the GNU General Public License along with
201 ++ * this program; if not, write to the Free Software Foundation, Inc.,
202 ++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
203 ++ *
204 ++ */
205 ++
206 ++#ifndef ASM_X86_ARCHRANDOM_H
207 ++#define ASM_X86_ARCHRANDOM_H
208 ++
209 ++#include <asm/processor.h>
210 ++#include <asm/cpufeature.h>
211 ++#include <asm/alternative.h>
212 ++#include <asm/nops.h>
213 ++
214 ++#define RDRAND_RETRY_LOOPS 10
215 ++
216 ++#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
217 ++#ifdef CONFIG_X86_64
218 ++# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
219 ++#else
220 ++# define RDRAND_LONG RDRAND_INT
221 ++#endif
222 ++
223 ++#ifdef CONFIG_ARCH_RANDOM
224 ++
225 ++#define GET_RANDOM(name, type, rdrand, nop) \
226 ++static inline int name(type *v) \
227 ++{ \
228 ++ int ok; \
229 ++ alternative_io("movl $0, %0\n\t" \
230 ++ nop, \
231 ++ "\n1: " rdrand "\n\t" \
232 ++ "jc 2f\n\t" \
233 ++ "decl %0\n\t" \
234 ++ "jnz 1b\n\t" \
235 ++ "2:", \
236 ++ X86_FEATURE_RDRAND, \
237 ++ ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
238 ++ "0" (RDRAND_RETRY_LOOPS)); \
239 ++ return ok; \
240 ++}
241 ++
242 ++#ifdef CONFIG_X86_64
243 ++
244 ++GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
245 ++GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
246 ++
247 ++#else
248 ++
249 ++GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
250 ++GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
251 ++
252 ++#endif /* CONFIG_X86_64 */
253 ++
254 ++#endif /* CONFIG_ARCH_RANDOM */
255 ++
256 ++extern void x86_init_rdrand(struct cpuinfo_x86 *c);
257 ++
258 ++#endif /* ASM_X86_ARCHRANDOM_H */
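GET_RANDOM() above wraps the essential usage pattern: RDRAND can transiently fail with CF=0 while the DRNG reseeds, so callers retry up to RDRAND_RETRY_LOOPS times before giving up. A rough user-space sketch of arch_get_random_long(), assuming GCC on an x86-64 CPU that supports RDRAND (it reuses the same 0x48,0x0f,0xc7,0xf0 byte encoding as the header):

        #include <stdio.h>

        #define RDRAND_RETRY_LOOPS 10

        /* Returns 1 and fills *v on success, 0 if the DRNG kept failing. */
        static int get_random_long(unsigned long *v)
        {
                unsigned char ok;
                int i;

                for (i = 0; i < RDRAND_RETRY_LOOPS; i++) {
                        /* rdrand %rax; CF=1 means a value was delivered */
                        asm volatile(".byte 0x48,0x0f,0xc7,0xf0; setc %1"
                                     : "=a" (*v), "=qm" (ok));
                        if (ok)
                                return 1;
                }
                return 0;
        }

        int main(void)
        {
                unsigned long r;

                if (get_random_long(&r))
                        printf("rdrand: %016lx\n", r);
                return 0;
        }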
259 +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
260 +index 6042981..0e3a82a 100644
261 +--- a/arch/x86/kernel/cpu/Makefile
262 ++++ b/arch/x86/kernel/cpu/Makefile
263 +@@ -15,6 +15,7 @@ CFLAGS_common.o := $(nostackp)
264 + obj-y := intel_cacheinfo.o scattered.o topology.o
265 + obj-y += proc.o capflags.o powerflags.o common.o
266 + obj-y += vmware.o hypervisor.o sched.o mshyperv.o
267 ++obj-y += rdrand.o
268 +
269 + obj-$(CONFIG_X86_32) += bugs.o
270 + obj-$(CONFIG_X86_64) += bugs_64.o
271 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
272 +index 0cb2883..1579ab9 100644
273 +--- a/arch/x86/kernel/cpu/common.c
274 ++++ b/arch/x86/kernel/cpu/common.c
275 +@@ -15,6 +15,7 @@
276 + #include <asm/stackprotector.h>
277 + #include <asm/perf_event.h>
278 + #include <asm/mmu_context.h>
279 ++#include <asm/archrandom.h>
280 + #include <asm/hypervisor.h>
281 + #include <asm/processor.h>
282 + #include <asm/sections.h>
283 +@@ -852,6 +853,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
284 + #endif
285 +
286 + init_hypervisor(c);
287 ++ x86_init_rdrand(c);
288 +
289 + /*
290 + * Clear/Set all flags overriden by options, need do it
291 +diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
292 +new file mode 100644
293 +index 0000000..feca286
294 +--- /dev/null
295 ++++ b/arch/x86/kernel/cpu/rdrand.c
296 +@@ -0,0 +1,73 @@
297 ++/*
298 ++ * This file is part of the Linux kernel.
299 ++ *
300 ++ * Copyright (c) 2011, Intel Corporation
301 ++ * Authors: Fenghua Yu <fenghua.yu@×××××.com>,
302 ++ * H. Peter Anvin <hpa@×××××××××××.com>
303 ++ *
304 ++ * This program is free software; you can redistribute it and/or modify it
305 ++ * under the terms and conditions of the GNU General Public License,
306 ++ * version 2, as published by the Free Software Foundation.
307 ++ *
308 ++ * This program is distributed in the hope it will be useful, but WITHOUT
309 ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
310 ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
311 ++ * more details.
312 ++ *
313 ++ * You should have received a copy of the GNU General Public License along with
314 ++ * this program; if not, write to the Free Software Foundation, Inc.,
315 ++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
316 ++ *
317 ++ */
318 ++
319 ++#include <asm/processor.h>
320 ++#include <asm/archrandom.h>
321 ++#include <asm/sections.h>
322 ++
323 ++static int __init x86_rdrand_setup(char *s)
324 ++{
325 ++ setup_clear_cpu_cap(X86_FEATURE_RDRAND);
326 ++ return 1;
327 ++}
328 ++__setup("nordrand", x86_rdrand_setup);
329 ++
330 ++/* We can't use arch_get_random_long() here since alternatives haven't run */
331 ++static inline int rdrand_long(unsigned long *v)
332 ++{
333 ++ int ok;
334 ++ asm volatile("1: " RDRAND_LONG "\n\t"
335 ++ "jc 2f\n\t"
336 ++ "decl %0\n\t"
337 ++ "jnz 1b\n\t"
338 ++ "2:"
339 ++ : "=r" (ok), "=a" (*v)
340 ++ : "0" (RDRAND_RETRY_LOOPS));
341 ++ return ok;
342 ++}
343 ++
344 ++/*
345 ++ * Force a reseed cycle; we are architecturally guaranteed a reseed
346 ++ * after no more than 512 128-bit chunks of random data. This also
347 ++ * acts as a test of the CPU capability.
348 ++ */
349 ++#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
350 ++
351 ++void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
352 ++{
353 ++#ifdef CONFIG_ARCH_RANDOM
354 ++ unsigned long tmp;
355 ++ int i, count, ok;
356 ++
357 ++ if (!cpu_has(c, X86_FEATURE_RDRAND))
358 ++ return; /* Nothing to do */
359 ++
360 ++ for (count = i = 0; i < RESEED_LOOP; i++) {
361 ++ ok = rdrand_long(&tmp);
362 ++ if (ok)
363 ++ count++;
364 ++ }
365 ++
366 ++ if (count != RESEED_LOOP)
367 ++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
368 ++#endif
369 ++}
370 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
371 +index 8385d1d..9f808af 100644
372 +--- a/arch/x86/xen/enlighten.c
373 ++++ b/arch/x86/xen/enlighten.c
374 +@@ -803,7 +803,16 @@ static void xen_write_cr4(unsigned long cr4)
375 +
376 + native_write_cr4(cr4);
377 + }
378 +-
379 ++#ifdef CONFIG_X86_64
380 ++static inline unsigned long xen_read_cr8(void)
381 ++{
382 ++ return 0;
383 ++}
384 ++static inline void xen_write_cr8(unsigned long val)
385 ++{
386 ++ BUG_ON(val);
387 ++}
388 ++#endif
389 + static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
390 + {
391 + int ret;
392 +@@ -968,6 +977,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
393 + .read_cr4_safe = native_read_cr4_safe,
394 + .write_cr4 = xen_write_cr4,
395 +
396 ++#ifdef CONFIG_X86_64
397 ++ .read_cr8 = xen_read_cr8,
398 ++ .write_cr8 = xen_write_cr8,
399 ++#endif
400 ++
401 + .wbinvd = native_wbinvd,
402 +
403 + .read_msr = native_read_msr_safe,
404 +@@ -975,6 +989,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
405 + .read_tsc = native_read_tsc,
406 + .read_pmc = native_read_pmc,
407 +
408 ++ .read_tscp = native_read_tscp,
409 ++
410 + .iret = xen_iret,
411 + .irq_enable_sysexit = xen_sysexit,
412 + #ifdef CONFIG_X86_64
413 +diff --git a/block/blk-core.c b/block/blk-core.c
414 +index 35ae52d..2f49f43 100644
415 +--- a/block/blk-core.c
416 ++++ b/block/blk-core.c
417 +@@ -524,7 +524,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
418 + q->request_fn = rfn;
419 + q->prep_rq_fn = NULL;
420 + q->unprep_rq_fn = NULL;
421 +- q->queue_flags = QUEUE_FLAG_DEFAULT;
422 ++ q->queue_flags |= QUEUE_FLAG_DEFAULT;
423 +
424 + /* Override internal queue lock with supplied lock pointer */
425 + if (lock)
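The one-character blk-core change is about not clobbering queue flags set before blk_init_allocated_queue() runs: '=' overwrites them, '|=' merges the defaults in. A tiny self-contained illustration with made-up flag values (note that the 3.0.48 patch further down reverts this hunk):

        #include <stdio.h>

        #define FLAG_SET_AT_ALLOC  (1u << 3)           /* e.g. set by the allocator */
        #define QUEUE_FLAG_DEFAULT (1u << 0 | 1u << 1) /* illustrative value */

        int main(void)
        {
                unsigned int assign = FLAG_SET_AT_ALLOC, merge = FLAG_SET_AT_ALLOC;

                assign = QUEUE_FLAG_DEFAULT;    /* '='  loses FLAG_SET_AT_ALLOC */
                merge |= QUEUE_FLAG_DEFAULT;    /* '|=' keeps it */
                printf("assign=%#x merge=%#x\n", assign, merge);
                return 0;
        }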
426 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
427 +index b19a18d..d2519b2 100644
428 +--- a/drivers/acpi/ec.c
429 ++++ b/drivers/acpi/ec.c
430 +@@ -71,9 +71,6 @@ enum ec_command {
431 + #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
432 + #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
433 +
434 +-#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
435 +- per one transaction */
436 +-
437 + enum {
438 + EC_FLAGS_QUERY_PENDING, /* Query is pending */
439 + EC_FLAGS_GPE_STORM, /* GPE storm detected */
440 +@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
441 + module_param(ec_delay, uint, 0644);
442 + MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
443 +
444 ++/*
445 ++ * If the number of false interrupts per one transaction exceeds
446 ++ * this threshold, the driver will assume a GPE storm has happened and
447 ++ * will disable the GPE for normal transactions.
448 ++ */
449 ++static unsigned int ec_storm_threshold __read_mostly = 8;
450 ++module_param(ec_storm_threshold, uint, 0644);
451 ++MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm");
452 ++
453 + /* If we find an EC via the ECDT, we need to keep a ptr to its context */
454 + /* External interfaces use first EC only, so remember */
455 + typedef int (*acpi_ec_query_func) (void *data);
456 +@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
457 + msleep(1);
458 + /* It is safe to enable the GPE outside of the transaction. */
459 + acpi_enable_gpe(NULL, ec->gpe);
460 +- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
461 ++ } else if (t->irq_count > ec_storm_threshold) {
462 + pr_info(PREFIX "GPE storm detected, "
463 + "transactions will use polling mode\n");
464 + set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
465 +@@ -914,6 +920,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
466 + return 0;
467 + }
468 +
469 ++/*
470 ++ * The Clevo M720 notebook actually works OK in IRQ mode if we lift
471 ++ * the GPE storm threshold back to 20.
472 ++ */
473 ++static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
474 ++{
475 ++ pr_debug("Setting the EC GPE storm threshold to 20\n");
476 ++ ec_storm_threshold = 20;
477 ++ return 0;
478 ++}
479 ++
480 + static struct dmi_system_id __initdata ec_dmi_table[] = {
481 + {
482 + ec_skip_dsdt_scan, "Compal JFL92", {
483 +@@ -945,10 +962,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
484 + {
485 + ec_validate_ecdt, "ASUS hardware", {
486 + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
487 ++ {
488 ++ ec_enlarge_storm_threshold, "CLEVO hardware", {
489 ++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
490 ++ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
491 + {},
492 + };
493 +
494 +-
495 + int __init acpi_ec_ecdt_probe(void)
496 + {
497 + acpi_status status;
498 +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
499 +index b85ee76..65b9d6f 100644
500 +--- a/drivers/char/tpm/tpm.c
501 ++++ b/drivers/char/tpm/tpm.c
502 +@@ -1019,17 +1019,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
503 + size_t size, loff_t *off)
504 + {
505 + struct tpm_chip *chip = file->private_data;
506 +- size_t in_size = size, out_size;
507 ++ size_t in_size = size;
508 ++ ssize_t out_size;
509 +
510 + /* cannot perform a write until the read has cleared
511 +- either via tpm_read or a user_read_timer timeout */
512 +- while (atomic_read(&chip->data_pending) != 0)
513 +- msleep(TPM_TIMEOUT);
514 +-
515 +- mutex_lock(&chip->buffer_mutex);
516 ++ either via tpm_read or a user_read_timer timeout.
517 ++ This also prevents split buffered writes from blocking here.
518 ++ */
519 ++ if (atomic_read(&chip->data_pending) != 0)
520 ++ return -EBUSY;
521 +
522 + if (in_size > TPM_BUFSIZE)
523 +- in_size = TPM_BUFSIZE;
524 ++ return -E2BIG;
525 ++
526 ++ mutex_lock(&chip->buffer_mutex);
527 +
528 + if (copy_from_user
529 + (chip->data_buffer, (void __user *) buf, in_size)) {
530 +@@ -1039,6 +1042,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
531 +
532 + /* atomic tpm command send and result receive */
533 + out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
534 ++ if (out_size < 0) {
535 ++ mutex_unlock(&chip->buffer_mutex);
536 ++ return out_size;
537 ++ }
538 +
539 + atomic_set(&chip->data_pending, out_size);
540 + mutex_unlock(&chip->buffer_mutex);
541 +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
542 +index 4799393..b97d4f0 100644
543 +--- a/drivers/firewire/core-cdev.c
544 ++++ b/drivers/firewire/core-cdev.c
545 +@@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
546 + client->bus_reset_closure = a->bus_reset_closure;
547 + if (a->bus_reset != 0) {
548 + fill_bus_reset_event(&bus_reset, client);
549 +- ret = copy_to_user(u64_to_uptr(a->bus_reset),
550 +- &bus_reset, sizeof(bus_reset));
551 ++ /* unaligned size of bus_reset is 36 bytes */
552 ++ ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
553 + }
554 + if (ret == 0 && list_empty(&client->link))
555 + list_add_tail(&client->link, &client->device->client_list);
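The literal 36 above avoids copying struct tail padding to user space: the bus_reset event is a u64 followed by seven u32s (36 data bytes), but 8-byte alignment of the u64 pads sizeof() to 40 on 64-bit kernels, so copying sizeof(bus_reset) would overrun a 36-byte user buffer. A sketch of the layout effect (field names approximate the fw_cdev event, not copied from it):

        #include <stdint.h>
        #include <stdio.h>

        struct bus_reset_event {                /* illustrative layout */
                uint64_t closure;
                uint32_t type, node_id, local_node_id;
                uint32_t bm_node_id, irm_node_id, root_node_id, generation;
        };

        int main(void)
        {
                /* 8 + 7*4 = 36 data bytes; tail padding for the u64 member
                 * typically makes sizeof() report 40 on LP64 targets. */
                printf("data bytes: 36, sizeof: %zu\n",
                       sizeof(struct bus_reset_event));
                return 0;
        }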
556 +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
557 +index 2f46e0c..3ad3cc6 100644
558 +--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
559 ++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
560 +@@ -973,11 +973,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
561 + static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
562 + {
563 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
564 +- struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
565 +- if (tmds) {
566 +- if (tmds->i2c_bus)
567 +- radeon_i2c_destroy(tmds->i2c_bus);
568 +- }
569 ++ /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
570 + kfree(radeon_encoder->enc_priv);
571 + drm_encoder_cleanup(encoder);
572 + kfree(radeon_encoder);
573 +diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
574 +index 85931ca..10a99e4 100644
575 +--- a/drivers/net/tg3.c
576 ++++ b/drivers/net/tg3.c
577 +@@ -13689,8 +13689,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
578 + */
579 + tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
580 +
581 +- if (tg3_flag(tp, 5755_PLUS))
582 +- tg3_flag_set(tp, SHORT_DMA_BUG);
583 ++ if (tg3_flag(tp, 5755_PLUS) ||
584 ++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
585 ++ tg3_flag_set(tp, SHORT_DMA_BUG);
586 + else
587 + tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
588 +
589 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
590 +index de80171..3b2ce7d 100644
591 +--- a/drivers/usb/class/cdc-acm.c
592 ++++ b/drivers/usb/class/cdc-acm.c
593 +@@ -1496,6 +1496,9 @@ static const struct usb_device_id acm_ids[] = {
594 + Maybe we should define a new
595 + quirk for this. */
596 + },
597 ++ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
598 ++ .driver_info = NO_UNION_NORMAL,
599 ++ },
600 + { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
601 + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
602 + },
603 +diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
604 +index 415e9b2..6a7725a 100644
605 +--- a/drivers/video/udlfb.c
606 ++++ b/drivers/video/udlfb.c
607 +@@ -613,7 +613,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
608 + result = fb_sys_write(info, buf, count, ppos);
609 +
610 + if (result > 0) {
611 +- int start = max((int)(offset / info->fix.line_length) - 1, 0);
612 ++ int start = max((int)(offset / info->fix.line_length), 0);
613 + int lines = min((u32)((result / info->fix.line_length) + 1),
614 + (u32)info->var.yres);
615 +
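The udlfb hunk removes an off-by-one in the damage rectangle: the refresh should begin at the line containing the write offset, not one line earlier, since shifting the start down while keeping the same line count can leave the last written line stale. A worked example with an assumed line_length (the real value depends on the video mode):

        #include <stdio.h>

        int main(void)
        {
                int line_length = 1024;                 /* assumed, mode-dependent */
                int offset = 3 * line_length + 100;     /* write lands inside line 3 */

                printf("old start=%d new start=%d\n",
                       offset / line_length - 1,        /* began at line 2 */
                       offset / line_length);           /* begins at line 3 */
                return 0;
        }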
616 +diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
617 +index af8f26b..db1e392 100644
618 +--- a/drivers/video/via/via_clock.c
619 ++++ b/drivers/video/via/via_clock.c
620 +@@ -25,6 +25,7 @@
621 +
622 + #include <linux/kernel.h>
623 + #include <linux/via-core.h>
624 ++#include <asm/olpc.h>
625 + #include "via_clock.h"
626 + #include "global.h"
627 + #include "debug.h"
628 +@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
629 + printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
630 + }
631 +
632 ++static void noop_set_clock_state(u8 state)
633 ++{
634 ++}
635 ++
636 + void via_clock_init(struct via_clock *clock, int gfx_chip)
637 + {
638 + switch (gfx_chip) {
639 +@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
640 + break;
641 +
642 + }
643 ++
644 ++ if (machine_is_olpc()) {
645 ++ /* The OLPC XO-1.5 cannot suspend/resume reliably if the
646 ++ * IGA1/IGA2 clocks are set as on or off (memory rot
647 ++ * occasionally happens during suspend under such
648 ++ * configurations).
649 ++ *
650 ++ * The only known stable scenario is to leave these bits as-is,
651 ++ * which in their default states are documented to enable the
652 ++ * clock only when it is needed.
653 ++ */
654 ++ clock->set_primary_clock_state = noop_set_clock_state;
655 ++ clock->set_secondary_clock_state = noop_set_clock_state;
656 ++ }
657 + }
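The OLPC hunk uses a common kernel idiom: instead of adding machine_is_olpc() checks at every call site, it swaps a no-op into the ops table once at init time. A minimal sketch of the pattern with illustrative names:

        #include <stdio.h>

        struct clock_ops {
                void (*set_primary_clock_state)(unsigned char state);
                void (*set_secondary_clock_state)(unsigned char state);
        };

        static void real_set_clock_state(unsigned char state)
        {
                printf("clock state -> %u\n", state);
        }

        static void noop_set_clock_state(unsigned char state)
        {
                (void)state;                    /* deliberately do nothing */
        }

        static void clock_init(struct clock_ops *ops, int machine_is_quirky)
        {
                ops->set_primary_clock_state = real_set_clock_state;
                ops->set_secondary_clock_state = real_set_clock_state;
                if (machine_is_quirky) {        /* cf. machine_is_olpc() above */
                        ops->set_primary_clock_state = noop_set_clock_state;
                        ops->set_secondary_clock_state = noop_set_clock_state;
                }
        }

        int main(void)
        {
                struct clock_ops ops;

                clock_init(&ops, 1);
                ops.set_primary_clock_state(0); /* silently ignored */
                return 0;
        }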
658 +diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
659 +index f55ae23..790fa63 100644
660 +--- a/fs/autofs4/root.c
661 ++++ b/fs/autofs4/root.c
662 +@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
663 + ino->flags |= AUTOFS_INF_PENDING;
664 + spin_unlock(&sbi->fs_lock);
665 + status = autofs4_mount_wait(dentry);
666 +- if (status)
667 +- return ERR_PTR(status);
668 + spin_lock(&sbi->fs_lock);
669 + ino->flags &= ~AUTOFS_INF_PENDING;
670 ++ if (status) {
671 ++ spin_unlock(&sbi->fs_lock);
672 ++ return ERR_PTR(status);
673 ++ }
674 + }
675 + done:
676 + if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
677 +diff --git a/fs/ceph/export.c b/fs/ceph/export.c
678 +index f67b687..a080779 100644
679 +--- a/fs/ceph/export.c
680 ++++ b/fs/ceph/export.c
681 +@@ -84,7 +84,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
682 + * FIXME: we should try harder by querying the mds for the ino.
683 + */
684 + static struct dentry *__fh_to_dentry(struct super_block *sb,
685 +- struct ceph_nfs_fh *fh)
686 ++ struct ceph_nfs_fh *fh, int fh_len)
687 + {
688 + struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
689 + struct inode *inode;
690 +@@ -92,6 +92,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
691 + struct ceph_vino vino;
692 + int err;
693 +
694 ++ if (fh_len < sizeof(*fh) / 4)
695 ++ return ERR_PTR(-ESTALE);
696 ++
697 + dout("__fh_to_dentry %llx\n", fh->ino);
698 + vino.ino = fh->ino;
699 + vino.snap = CEPH_NOSNAP;
700 +@@ -136,7 +139,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
701 + * convert connectable fh to dentry
702 + */
703 + static struct dentry *__cfh_to_dentry(struct super_block *sb,
704 +- struct ceph_nfs_confh *cfh)
705 ++ struct ceph_nfs_confh *cfh, int fh_len)
706 + {
707 + struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
708 + struct inode *inode;
709 +@@ -144,6 +147,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
710 + struct ceph_vino vino;
711 + int err;
712 +
713 ++ if (fh_len < sizeof(*cfh) / 4)
714 ++ return ERR_PTR(-ESTALE);
715 ++
716 + dout("__cfh_to_dentry %llx (%llx/%x)\n",
717 + cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
718 +
719 +@@ -193,9 +199,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
720 + int fh_len, int fh_type)
721 + {
722 + if (fh_type == 1)
723 +- return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
724 ++ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
725 ++ fh_len);
726 + else
727 +- return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
728 ++ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
729 ++ fh_len);
730 + }
731 +
732 + /*
733 +@@ -216,6 +224,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
734 +
735 + if (fh_type == 1)
736 + return ERR_PTR(-ESTALE);
737 ++ if (fh_len < sizeof(*cfh) / 4)
738 ++ return ERR_PTR(-ESTALE);
739 +
740 + pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
741 + cfh->parent_name_hash);
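These ceph hunks open a series repeated below for gfs2, isofs, reiserfs, xfs, and shmem: an NFS file handle comes off the wire, so its length must be validated before the handle structure is dereferenced. fh_len counts 32-bit words, hence the sizeof(*fh) / 4 comparison. A generic sketch of the check (the struct is illustrative, not the real ceph_nfs_fh):

        #include <stdint.h>
        #include <stdio.h>

        struct nfs_fh_example {
                uint64_t ino;           /* 2 words on the wire */
        };

        /* fh_len is in 32-bit words, so compare against sizeof() / 4. */
        static int fh_is_complete(int fh_len)
        {
                return fh_len >= (int)(sizeof(struct nfs_fh_example) / 4);
        }

        int main(void)
        {
                printf("fh_len=1: %s\n", fh_is_complete(1) ? "ok" : "ESTALE");
                printf("fh_len=2: %s\n", fh_is_complete(2) ? "ok" : "ESTALE");
                return 0;
        }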
742 +diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
743 +index fe9945f..5235d6e 100644
744 +--- a/fs/gfs2/export.c
745 ++++ b/fs/gfs2/export.c
746 +@@ -167,6 +167,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
747 + case GFS2_SMALL_FH_SIZE:
748 + case GFS2_LARGE_FH_SIZE:
749 + case GFS2_OLD_FH_SIZE:
750 ++ if (fh_len < GFS2_SMALL_FH_SIZE)
751 ++ return NULL;
752 + this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
753 + this.no_formal_ino |= be32_to_cpu(fh[1]);
754 + this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
755 +@@ -186,6 +188,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
756 + switch (fh_type) {
757 + case GFS2_LARGE_FH_SIZE:
758 + case GFS2_OLD_FH_SIZE:
759 ++ if (fh_len < GFS2_LARGE_FH_SIZE)
760 ++ return NULL;
761 + parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
762 + parent.no_formal_ino |= be32_to_cpu(fh[5]);
763 + parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
764 +diff --git a/fs/isofs/export.c b/fs/isofs/export.c
765 +index dd4687f..516eb21 100644
766 +--- a/fs/isofs/export.c
767 ++++ b/fs/isofs/export.c
768 +@@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
769 + {
770 + struct isofs_fid *ifid = (struct isofs_fid *)fid;
771 +
772 +- if (fh_type != 2)
773 ++ if (fh_len < 2 || fh_type != 2)
774 + return NULL;
775 +
776 + return isofs_export_iget(sb,
777 +diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
778 +index 72ffa97..dcd23f8 100644
779 +--- a/fs/jbd/commit.c
780 ++++ b/fs/jbd/commit.c
781 +@@ -85,7 +85,12 @@ nope:
782 + static void release_data_buffer(struct buffer_head *bh)
783 + {
784 + if (buffer_freed(bh)) {
785 ++ WARN_ON_ONCE(buffer_dirty(bh));
786 + clear_buffer_freed(bh);
787 ++ clear_buffer_mapped(bh);
788 ++ clear_buffer_new(bh);
789 ++ clear_buffer_req(bh);
790 ++ bh->b_bdev = NULL;
791 + release_buffer_page(bh);
792 + } else
793 + put_bh(bh);
794 +@@ -840,17 +845,35 @@ restart_loop:
795 + * there's no point in keeping a checkpoint record for
796 + * it. */
797 +
798 +- /* A buffer which has been freed while still being
799 +- * journaled by a previous transaction may end up still
800 +- * being dirty here, but we want to avoid writing back
801 +- * that buffer in the future after the "add to orphan"
802 +- * operation been committed, That's not only a performance
803 +- * gain, it also stops aliasing problems if the buffer is
804 +- * left behind for writeback and gets reallocated for another
805 +- * use in a different page. */
806 +- if (buffer_freed(bh) && !jh->b_next_transaction) {
807 +- clear_buffer_freed(bh);
808 +- clear_buffer_jbddirty(bh);
809 ++ /*
810 ++ * A buffer which has been freed while still being journaled by
811 ++ * a previous transaction.
812 ++ */
813 ++ if (buffer_freed(bh)) {
814 ++ /*
815 ++ * If the running transaction is the one containing
816 ++ * "add to orphan" operation (b_next_transaction !=
817 ++ * NULL), we have to wait for that transaction to
818 ++ * commit before we can really get rid of the buffer.
819 ++ * So just clear b_modified to not confuse transaction
820 ++ * credit accounting and refile the buffer to
821 ++ * BJ_Forget of the running transaction. If the just
822 ++ * committed transaction contains "add to orphan"
823 ++ * operation, we can completely invalidate the buffer
824 ++ * now. We are rather thorough in that since the
825 ++ * buffer may still be accessible when blocksize <
826 ++ * pagesize and it is attached to the last partial
827 ++ * page.
828 ++ */
829 ++ jh->b_modified = 0;
830 ++ if (!jh->b_next_transaction) {
831 ++ clear_buffer_freed(bh);
832 ++ clear_buffer_jbddirty(bh);
833 ++ clear_buffer_mapped(bh);
834 ++ clear_buffer_new(bh);
835 ++ clear_buffer_req(bh);
836 ++ bh->b_bdev = NULL;
837 ++ }
838 + }
839 +
840 + if (buffer_jbddirty(bh)) {
841 +diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
842 +index f7ee81a..b0161a6 100644
843 +--- a/fs/jbd/transaction.c
844 ++++ b/fs/jbd/transaction.c
845 +@@ -1837,15 +1837,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
846 + * We're outside-transaction here. Either or both of j_running_transaction
847 + * and j_committing_transaction may be NULL.
848 + */
849 +-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
850 ++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
851 ++ int partial_page)
852 + {
853 + transaction_t *transaction;
854 + struct journal_head *jh;
855 + int may_free = 1;
856 +- int ret;
857 +
858 + BUFFER_TRACE(bh, "entry");
859 +
860 ++retry:
861 + /*
862 + * It is safe to proceed here without the j_list_lock because the
863 + * buffers cannot be stolen by try_to_free_buffers as long as we are
864 +@@ -1873,10 +1874,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
865 + * clear the buffer dirty bit at latest at the moment when the
866 + * transaction marking the buffer as freed in the filesystem
867 + * structures is committed because from that moment on the
868 +- * buffer can be reallocated and used by a different page.
869 ++ * block can be reallocated and used by a different page.
870 + * Since the block hasn't been freed yet but the inode has
871 + * already been added to orphan list, it is safe for us to add
872 + * the buffer to BJ_Forget list of the newest transaction.
873 ++ *
874 ++ * Also we have to clear buffer_mapped flag of a truncated buffer
875 ++ * because the buffer_head may be attached to the page straddling
876 ++ * i_size (can happen only when blocksize < pagesize) and thus the
877 ++ * buffer_head can be reused when the file is extended again. So we end
878 ++ * up keeping around invalidated buffers attached to transactions'
879 ++ * BJ_Forget list just to stop checkpointing code from cleaning up
880 ++ * the transaction this buffer was modified in.
881 + */
882 + transaction = jh->b_transaction;
883 + if (transaction == NULL) {
884 +@@ -1903,13 +1912,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
885 + * committed, the buffer won't be needed any
886 + * longer. */
887 + JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
888 +- ret = __dispose_buffer(jh,
889 ++ may_free = __dispose_buffer(jh,
890 + journal->j_running_transaction);
891 +- journal_put_journal_head(jh);
892 +- spin_unlock(&journal->j_list_lock);
893 +- jbd_unlock_bh_state(bh);
894 +- spin_unlock(&journal->j_state_lock);
895 +- return ret;
896 ++ goto zap_buffer;
897 + } else {
898 + /* There is no currently-running transaction. So the
899 + * orphan record which we wrote for this file must have
900 +@@ -1917,13 +1922,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
901 + * the committing transaction, if it exists. */
902 + if (journal->j_committing_transaction) {
903 + JBUFFER_TRACE(jh, "give to committing trans");
904 +- ret = __dispose_buffer(jh,
905 ++ may_free = __dispose_buffer(jh,
906 + journal->j_committing_transaction);
907 +- journal_put_journal_head(jh);
908 +- spin_unlock(&journal->j_list_lock);
909 +- jbd_unlock_bh_state(bh);
910 +- spin_unlock(&journal->j_state_lock);
911 +- return ret;
912 ++ goto zap_buffer;
913 + } else {
914 + /* The orphan record's transaction has
915 + * committed. We can cleanse this buffer */
916 +@@ -1944,10 +1945,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
917 + }
918 + /*
919 + * The buffer is committing, we simply cannot touch
920 +- * it. So we just set j_next_transaction to the
921 +- * running transaction (if there is one) and mark
922 +- * buffer as freed so that commit code knows it should
923 +- * clear dirty bits when it is done with the buffer.
924 ++ * it. If the page is straddling i_size we have to wait
925 ++ * for commit and try again.
926 ++ */
927 ++ if (partial_page) {
928 ++ tid_t tid = journal->j_committing_transaction->t_tid;
929 ++
930 ++ journal_put_journal_head(jh);
931 ++ spin_unlock(&journal->j_list_lock);
932 ++ jbd_unlock_bh_state(bh);
933 ++ spin_unlock(&journal->j_state_lock);
934 ++ log_wait_commit(journal, tid);
935 ++ goto retry;
936 ++ }
937 ++ /*
938 ++ * OK, buffer won't be reachable after truncate. We just set
939 ++ * j_next_transaction to the running transaction (if there is
940 ++ * one) and mark buffer as freed so that commit code knows it
941 ++ * should clear dirty bits when it is done with the buffer.
942 + */
943 + set_buffer_freed(bh);
944 + if (journal->j_running_transaction && buffer_jbddirty(bh))
945 +@@ -1970,6 +1985,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
946 + }
947 +
948 + zap_buffer:
949 ++ /*
950 ++ * This is tricky. Although the buffer is truncated, it may be reused
951 ++ * if blocksize < pagesize and it is attached to the page straddling
952 ++ * EOF. Since the buffer might have been added to BJ_Forget list of the
953 ++ * running transaction, journal_get_write_access() won't clear
954 ++ * b_modified and credit accounting gets confused. So clear b_modified
955 ++ * here. */
956 ++ jh->b_modified = 0;
957 + journal_put_journal_head(jh);
958 + zap_buffer_no_jh:
959 + spin_unlock(&journal->j_list_lock);
960 +@@ -2018,7 +2041,8 @@ void journal_invalidatepage(journal_t *journal,
961 + if (offset <= curr_off) {
962 + /* This block is wholly outside the truncation point */
963 + lock_buffer(bh);
964 +- may_free &= journal_unmap_buffer(journal, bh);
965 ++ may_free &= journal_unmap_buffer(journal, bh,
966 ++ offset > 0);
967 + unlock_buffer(bh);
968 + }
969 + curr_off = next_off;
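The partial_page flag threaded through journal_unmap_buffer() is just offset > 0: invalidation starting mid-page means the page straddles the new i_size, which can only happen when blocksize < pagesize. A small numeric illustration with assumed 1k blocks in 4k pages:

        #include <stdio.h>

        int main(void)
        {
                unsigned int page_size = 4096, block_size = 1024; /* assumed */
                unsigned int new_i_size = 5120;         /* truncate to mid-page */
                unsigned int offset = new_i_size % page_size;   /* 1024 */

                /* offset > 0: the page keeps one valid 1k block, and the
                 * invalidated buffers behind it could be revived if the file
                 * grows again -- the case the code above waits for commit on. */
                printf("offset=%u partial=%s valid blocks=%u\n",
                       offset, offset ? "yes" : "no", offset / block_size);
                return 0;
        }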
970 +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
971 +index 23d7451..df753a1 100644
972 +--- a/fs/lockd/mon.c
973 ++++ b/fs/lockd/mon.c
974 +@@ -40,6 +40,7 @@ struct nsm_args {
975 + u32 proc;
976 +
977 + char *mon_name;
978 ++ char *nodename;
979 + };
980 +
981 + struct nsm_res {
982 +@@ -93,6 +94,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
983 + .vers = 3,
984 + .proc = NLMPROC_NSM_NOTIFY,
985 + .mon_name = nsm->sm_mon_name,
986 ++ .nodename = utsname()->nodename,
987 + };
988 + struct rpc_message msg = {
989 + .rpc_argp = &args,
990 +@@ -429,7 +431,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
991 + {
992 + __be32 *p;
993 +
994 +- encode_nsm_string(xdr, utsname()->nodename);
995 ++ encode_nsm_string(xdr, argp->nodename);
996 + p = xdr_reserve_space(xdr, 4 + 4 + 4);
997 + *p++ = cpu_to_be32(argp->prog);
998 + *p++ = cpu_to_be32(argp->vers);
999 +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
1000 +index 4fd5bb3..0363aa4 100644
1001 +--- a/fs/reiserfs/inode.c
1002 ++++ b/fs/reiserfs/inode.c
1003 +@@ -1568,8 +1568,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1004 + reiserfs_warning(sb, "reiserfs-13077",
1005 + "nfsd/reiserfs, fhtype=%d, len=%d - odd",
1006 + fh_type, fh_len);
1007 +- fh_type = 5;
1008 ++ fh_type = fh_len;
1009 + }
1010 ++ if (fh_len < 2)
1011 ++ return NULL;
1012 +
1013 + return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
1014 + (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
1015 +@@ -1578,6 +1580,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1016 + struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1017 + int fh_len, int fh_type)
1018 + {
1019 ++ if (fh_type > fh_len)
1020 ++ fh_type = fh_len;
1021 + if (fh_type < 4)
1022 + return NULL;
1023 +
1024 +diff --git a/fs/udf/super.c b/fs/udf/super.c
1025 +index a8e867a..b0c7b53 100644
1026 +--- a/fs/udf/super.c
1027 ++++ b/fs/udf/super.c
1028 +@@ -1316,6 +1316,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1029 + udf_error(sb, __func__, "error loading logical volume descriptor: "
1030 + "Partition table too long (%u > %lu)\n", table_len,
1031 + sb->s_blocksize - sizeof(*lvd));
1032 ++ ret = 1;
1033 + goto out_bh;
1034 + }
1035 +
1036 +@@ -1360,8 +1361,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1037 + UDF_ID_SPARABLE,
1038 + strlen(UDF_ID_SPARABLE))) {
1039 + if (udf_load_sparable_map(sb, map,
1040 +- (struct sparablePartitionMap *)gpm) < 0)
1041 ++ (struct sparablePartitionMap *)gpm) < 0) {
1042 ++ ret = 1;
1043 + goto out_bh;
1044 ++ }
1045 + } else if (!strncmp(upm2->partIdent.ident,
1046 + UDF_ID_METADATA,
1047 + strlen(UDF_ID_METADATA))) {
1048 +diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
1049 +index fed3f3c..844b22b 100644
1050 +--- a/fs/xfs/linux-2.6/xfs_export.c
1051 ++++ b/fs/xfs/linux-2.6/xfs_export.c
1052 +@@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
1053 + struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
1054 + struct inode *inode = NULL;
1055 +
1056 ++ if (fh_len < xfs_fileid_length(fileid_type))
1057 ++ return NULL;
1058 ++
1059 + switch (fileid_type) {
1060 + case FILEID_INO32_GEN_PARENT:
1061 + inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
1062 +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
1063 +index 481f856..15b62bb 100644
1064 +--- a/include/net/ip_vs.h
1065 ++++ b/include/net/ip_vs.h
1066 +@@ -1361,7 +1361,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
1067 + struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1068 +
1069 + if (!ct || !nf_ct_is_untracked(ct)) {
1070 +- nf_reset(skb);
1071 ++ nf_conntrack_put(skb->nfct);
1072 + skb->nfct = &nf_ct_untracked_get()->ct_general;
1073 + skb->nfctinfo = IP_CT_NEW;
1074 + nf_conntrack_get(skb->nfct);
1075 +diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
1076 +index 4283508..3a0feb1 100644
1077 +--- a/include/net/netfilter/nf_conntrack_ecache.h
1078 ++++ b/include/net/netfilter/nf_conntrack_ecache.h
1079 +@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
1080 + u16 ctmask; /* bitmask of ct events to be delivered */
1081 + u16 expmask; /* bitmask of expect events to be delivered */
1082 + u32 pid; /* netlink pid of destroyer */
1083 ++ struct timer_list timeout;
1084 + };
1085 +
1086 + static inline struct nf_conntrack_ecache *
1087 +diff --git a/kernel/module.c b/kernel/module.c
1088 +index b9d0667..a8bd215 100644
1089 +--- a/kernel/module.c
1090 ++++ b/kernel/module.c
1091 +@@ -2605,6 +2605,10 @@ static int check_module_license_and_versions(struct module *mod)
1092 + if (strcmp(mod->name, "driverloader") == 0)
1093 + add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
1094 +
1095 ++ /* lve claims to be GPL but upstream won't provide source */
1096 ++ if (strcmp(mod->name, "lve") == 0)
1097 ++ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
1098 ++
1099 + #ifdef CONFIG_MODVERSIONS
1100 + if ((mod->num_syms && !mod->crcs)
1101 + || (mod->num_gpl_syms && !mod->gpl_crcs)
1102 +diff --git a/kernel/timer.c b/kernel/timer.c
1103 +index 8cff361..27982d9 100644
1104 +--- a/kernel/timer.c
1105 ++++ b/kernel/timer.c
1106 +@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
1107 + #define TVR_SIZE (1 << TVR_BITS)
1108 + #define TVN_MASK (TVN_SIZE - 1)
1109 + #define TVR_MASK (TVR_SIZE - 1)
1110 ++#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
1111 +
1112 + struct tvec {
1113 + struct list_head vec[TVN_SIZE];
1114 +@@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
1115 + vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
1116 + } else {
1117 + int i;
1118 +- /* If the timeout is larger than 0xffffffff on 64-bit
1119 +- * architectures then we use the maximum timeout:
1120 ++ /* If the timeout is larger than MAX_TVAL (on 64-bit
1121 ++ * architectures or with CONFIG_BASE_SMALL=1) then we
1122 ++ * use the maximum timeout.
1123 + */
1124 +- if (idx > 0xffffffffUL) {
1125 +- idx = 0xffffffffUL;
1126 ++ if (idx > MAX_TVAL) {
1127 ++ idx = MAX_TVAL;
1128 + expires = idx + base->timer_jiffies;
1129 + }
1130 + i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
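The new MAX_TVAL clamp matches the wheel's real capacity. With the stock 3.0 values (TVR_BITS = 8, TVN_BITS = 6) it equals the old hard-coded 0xffffffff; with CONFIG_BASE_SMALL=1 the bits shrink (to 6 and 4, an assumption from the 3.0 timer code, not shown in this hunk) and the old constant overflowed the wheel:

        #include <stdio.h>

        int main(void)
        {
                /* MAX_TVAL = (1 << (TVR_BITS + 4*TVN_BITS)) - 1 */
                unsigned long long normal = (1ULL << (8 + 4 * 6)) - 1;
                unsigned long long small  = (1ULL << (6 + 4 * 4)) - 1;

                printf("default:    %#llx (the old hard-coded clamp)\n", normal);
                printf("BASE_SMALL: %#llx (old clamp was too large here)\n", small);
                return 0;
        }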
1131 +diff --git a/mm/shmem.c b/mm/shmem.c
1132 +index fcedf54..769941f 100644
1133 +--- a/mm/shmem.c
1134 ++++ b/mm/shmem.c
1135 +@@ -2348,12 +2348,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1136 + {
1137 + struct inode *inode;
1138 + struct dentry *dentry = NULL;
1139 +- u64 inum = fid->raw[2];
1140 +- inum = (inum << 32) | fid->raw[1];
1141 ++ u64 inum;
1142 +
1143 + if (fh_len < 3)
1144 + return NULL;
1145 +
1146 ++ inum = fid->raw[2];
1147 ++ inum = (inum << 32) | fid->raw[1];
1148 ++
1149 + inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1150 + shmem_match, fid->raw);
1151 + if (inode) {
1152 +diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1153 +index c0e0f76..01890e1 100644
1154 +--- a/net/core/pktgen.c
1155 ++++ b/net/core/pktgen.c
1156 +@@ -2932,7 +2932,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
1157 + sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
1158 + pkt_dev->pkt_overhead;
1159 +
1160 +- if (datalen < sizeof(struct pktgen_hdr)) {
1161 ++ if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
1162 + datalen = sizeof(struct pktgen_hdr);
1163 + if (net_ratelimit())
1164 + pr_info("increased datalen to %d\n", datalen);
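The added datalen < 0 test guards a classic C pitfall: datalen is a signed int, and comparing it directly against an unsigned sizeof() promotes a negative value to a huge unsigned one, so the old "too small" check never fired. A demonstration (the struct is a 16-byte stand-in, not the real pktgen_hdr):

        #include <stdio.h>

        struct pktgen_hdr_example { unsigned int f[4]; };

        int main(void)
        {
                int datalen = -4;       /* headers bigger than the packet */

                /* Old behaviour: -4 converts to a huge unsigned value. */
                if ((unsigned long)datalen < sizeof(struct pktgen_hdr_example))
                        printf("never reached\n");
                else
                        printf("unsigned compare sees %lu\n",
                               (unsigned long)datalen);

                /* Fixed behaviour: check the sign first. */
                if (datalen < 0 ||
                    (unsigned long)datalen < sizeof(struct pktgen_hdr_example))
                        printf("fixed check catches datalen=%d\n", datalen);
                return 0;
        }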
1165 +diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1166 +index de9da21..d7d63f4 100644
1167 +--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1168 ++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1169 +@@ -84,6 +84,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
1170 + *dataoff = nhoff + (iph->ihl << 2);
1171 + *protonum = iph->protocol;
1172 +
1173 ++ /* Check bogus IP headers */
1174 ++ if (*dataoff > skb->len) {
1175 ++ pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
1176 ++ "nhoff %u, ihl %u, skblen %u\n",
1177 ++ nhoff, iph->ihl << 2, skb->len);
1178 ++ return -NF_ACCEPT;
1179 ++ }
1180 ++
1181 + return NF_ACCEPT;
1182 + }
1183 +
1184 +diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
1185 +index e40cf78..cd6881e 100644
1186 +--- a/net/ipv4/netfilter/nf_nat_sip.c
1187 ++++ b/net/ipv4/netfilter/nf_nat_sip.c
1188 +@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
1189 + if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
1190 + hdr, NULL, &matchoff, &matchlen,
1191 + &addr, &port) > 0) {
1192 +- unsigned int matchend, poff, plen, buflen, n;
1193 ++ unsigned int olen, matchend, poff, plen, buflen, n;
1194 + char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
1195 +
1196 + /* We're only interested in headers related to this
1197 +@@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
1198 + goto next;
1199 + }
1200 +
1201 ++ olen = *datalen;
1202 + if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
1203 + &addr, port))
1204 + return NF_DROP;
1205 +
1206 +- matchend = matchoff + matchlen;
1207 ++ matchend = matchoff + matchlen + *datalen - olen;
1208 +
1209 + /* The maddr= parameter (RFC 2361) specifies where to send
1210 + * the reply. */
1211 +@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
1212 + ret = nf_ct_expect_related(rtcp_exp);
1213 + if (ret == 0)
1214 + break;
1215 +- else if (ret != -EBUSY) {
1216 ++ else if (ret == -EBUSY) {
1217 ++ nf_ct_unexpect_related(rtp_exp);
1218 ++ continue;
1219 ++ } else if (ret < 0) {
1220 + nf_ct_unexpect_related(rtp_exp);
1221 + port = 0;
1222 + break;
1223 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
1224 +index 9528ea0..d75eb39 100644
1225 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
1226 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
1227 +@@ -1520,11 +1520,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1228 + {
1229 + struct net_device *dev = ptr;
1230 + struct net *net = dev_net(dev);
1231 ++ struct netns_ipvs *ipvs = net_ipvs(net);
1232 + struct ip_vs_service *svc;
1233 + struct ip_vs_dest *dest;
1234 + unsigned int idx;
1235 +
1236 +- if (event != NETDEV_UNREGISTER)
1237 ++ if (event != NETDEV_UNREGISTER || !ipvs)
1238 + return NOTIFY_DONE;
1239 + IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
1240 + EnterFunction(2);
1241 +@@ -1550,7 +1551,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1242 + }
1243 + }
1244 +
1245 +- list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
1246 ++ list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
1247 + __ip_vs_dev_reset(dest, dev);
1248 + }
1249 + mutex_unlock(&__ip_vs_mutex);
1250 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
1251 +index f7af8b8..dff164e 100644
1252 +--- a/net/netfilter/nf_conntrack_core.c
1253 ++++ b/net/netfilter/nf_conntrack_core.c
1254 +@@ -247,12 +247,15 @@ static void death_by_event(unsigned long ul_conntrack)
1255 + {
1256 + struct nf_conn *ct = (void *)ul_conntrack;
1257 + struct net *net = nf_ct_net(ct);
1258 ++ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
1259 ++
1260 ++ BUG_ON(ecache == NULL);
1261 +
1262 + if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
1263 + /* bad luck, let's retry again */
1264 +- ct->timeout.expires = jiffies +
1265 ++ ecache->timeout.expires = jiffies +
1266 + (random32() % net->ct.sysctl_events_retry_timeout);
1267 +- add_timer(&ct->timeout);
1268 ++ add_timer(&ecache->timeout);
1269 + return;
1270 + }
1271 + /* we've got the event delivered, now it's dying */
1272 +@@ -266,6 +269,9 @@ static void death_by_event(unsigned long ul_conntrack)
1273 + void nf_ct_insert_dying_list(struct nf_conn *ct)
1274 + {
1275 + struct net *net = nf_ct_net(ct);
1276 ++ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
1277 ++
1278 ++ BUG_ON(ecache == NULL);
1279 +
1280 + /* add this conntrack to the dying list */
1281 + spin_lock_bh(&nf_conntrack_lock);
1282 +@@ -273,10 +279,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
1283 + &net->ct.dying);
1284 + spin_unlock_bh(&nf_conntrack_lock);
1285 + /* set a new timer to retry event delivery */
1286 +- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
1287 +- ct->timeout.expires = jiffies +
1288 ++ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
1289 ++ ecache->timeout.expires = jiffies +
1290 + (random32() % net->ct.sysctl_events_retry_timeout);
1291 +- add_timer(&ct->timeout);
1292 ++ add_timer(&ecache->timeout);
1293 + }
1294 + EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
1295 +
1296 +diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
1297 +index cd1e8e0..a3dffab 100644
1298 +--- a/net/netfilter/nf_conntrack_expect.c
1299 ++++ b/net/netfilter/nf_conntrack_expect.c
1300 +@@ -364,23 +364,6 @@ static void evict_oldest_expect(struct nf_conn *master,
1301 + }
1302 + }
1303 +
1304 +-static inline int refresh_timer(struct nf_conntrack_expect *i)
1305 +-{
1306 +- struct nf_conn_help *master_help = nfct_help(i->master);
1307 +- const struct nf_conntrack_expect_policy *p;
1308 +-
1309 +- if (!del_timer(&i->timeout))
1310 +- return 0;
1311 +-
1312 +- p = &rcu_dereference_protected(
1313 +- master_help->helper,
1314 +- lockdep_is_held(&nf_conntrack_lock)
1315 +- )->expect_policy[i->class];
1316 +- i->timeout.expires = jiffies + p->timeout * HZ;
1317 +- add_timer(&i->timeout);
1318 +- return 1;
1319 +-}
1320 +-
1321 + static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1322 + {
1323 + const struct nf_conntrack_expect_policy *p;
1324 +@@ -388,7 +371,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1325 + struct nf_conn *master = expect->master;
1326 + struct nf_conn_help *master_help = nfct_help(master);
1327 + struct net *net = nf_ct_exp_net(expect);
1328 +- struct hlist_node *n;
1329 ++ struct hlist_node *n, *next;
1330 + unsigned int h;
1331 + int ret = 1;
1332 +
1333 +@@ -399,12 +382,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1334 + goto out;
1335 + }
1336 + h = nf_ct_expect_dst_hash(&expect->tuple);
1337 +- hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
1338 ++ hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
1339 + if (expect_matches(i, expect)) {
1340 +- /* Refresh timer: if it's dying, ignore.. */
1341 +- if (refresh_timer(i)) {
1342 +- ret = 0;
1343 +- goto out;
1344 ++ if (del_timer(&i->timeout)) {
1345 ++ nf_ct_unlink_expect(i);
1346 ++ nf_ct_expect_put(i);
1347 ++ break;
1348 + }
1349 + } else if (expect_clash(i, expect)) {
1350 + ret = -EBUSY;
1351 +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
1352 +index 9228ee0d..6092b0c 100644
1353 +--- a/net/netfilter/xt_hashlimit.c
1354 ++++ b/net/netfilter/xt_hashlimit.c
1355 +@@ -392,8 +392,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
1356 + #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
1357 +
1358 + /* Precision saver. */
1359 +-static inline u_int32_t
1360 +-user2credits(u_int32_t user)
1361 ++static u32 user2credits(u32 user)
1362 + {
1363 + /* If multiplying would overflow... */
1364 + if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
1365 +@@ -403,7 +402,7 @@ user2credits(u_int32_t user)
1366 + return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
1367 + }
1368 +
1369 +-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
1370 ++static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
1371 + {
1372 + dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
1373 + if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
1374 +@@ -534,8 +533,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
1375 + dh->rateinfo.prev = jiffies;
1376 + dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
1377 + hinfo->cfg.burst);
1378 +- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
1379 +- hinfo->cfg.burst);
1380 ++ dh->rateinfo.credit_cap = dh->rateinfo.credit;
1381 + dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
1382 + } else {
1383 + /* update expiration timeout */
1384 +diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
1385 +index 32b7a57..a4c1e45 100644
1386 +--- a/net/netfilter/xt_limit.c
1387 ++++ b/net/netfilter/xt_limit.c
1388 +@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
1389 + }
1390 +
1391 + /* Precision saver. */
1392 +-static u_int32_t
1393 +-user2credits(u_int32_t user)
1394 ++static u32 user2credits(u32 user)
1395 + {
1396 + /* If multiplying would overflow... */
1397 + if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
1398 +@@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
1399 +
1400 + /* For SMP, we only want to use one set of state. */
1401 + r->master = priv;
1402 ++ /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
1403 ++ 128. */
1404 ++ priv->prev = jiffies;
1405 ++ priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
1406 + if (r->cost == 0) {
1407 +- /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
1408 +- 128. */
1409 +- priv->prev = jiffies;
1410 +- priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
1411 +- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
1412 ++ r->credit_cap = priv->credit; /* Credits full. */
1413 + r->cost = user2credits(r->avg);
1414 + }
1415 + return 0;
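Both user2credits() hunks keep the same overflow guard while dropping the inline hint: if user * HZ * CREDITS_PER_JIFFY would exceed 32 bits, divide by the scale constant first, at some precision cost. A standalone sketch with placeholder constants (the real HZ, CREDITS_PER_JIFFY, and scale depend on kernel configuration):

        #include <stdint.h>
        #include <stdio.h>

        #define HZ_ASSUMED 100
        #define CPJ        (1u << 16)   /* stand-in for CREDITS_PER_JIFFY */
        #define SCALE      10000u       /* stand-in for XT_LIMIT_SCALE */

        static uint32_t user2credits(uint32_t user)
        {
                /* If multiplying would overflow 32 bits, divide first. */
                if (user > 0xFFFFFFFFu / (HZ_ASSUMED * CPJ))
                        return (user / SCALE) * HZ_ASSUMED * CPJ;
                return (user * HZ_ASSUMED * CPJ) / SCALE;
        }

        int main(void)
        {
                printf("small: %u -> %u\n", 500u, user2credits(500u));
                printf("large: %u -> %u\n", 20000u, user2credits(20000u));
                return 0;
        }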
1416 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1417 +index 554111f..cfd7d15 100644
1418 +--- a/net/sunrpc/xprtsock.c
1419 ++++ b/net/sunrpc/xprtsock.c
1420 +@@ -1015,6 +1015,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
1421 + read_unlock_bh(&sk->sk_callback_lock);
1422 + }
1423 +
1424 ++/*
1425 ++ * Helper function to force a TCP close if the server is sending
1426 ++ * junk and/or it has put us in CLOSE_WAIT
1427 ++ */
1428 ++static void xs_tcp_force_close(struct rpc_xprt *xprt)
1429 ++{
1430 ++ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1431 ++ xprt_force_disconnect(xprt);
1432 ++}
1433 ++
1434 + static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1435 + {
1436 + struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1437 +@@ -1041,7 +1051,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
1438 + /* Sanity check of the record length */
1439 + if (unlikely(transport->tcp_reclen < 8)) {
1440 + dprintk("RPC: invalid TCP record fragment length\n");
1441 +- xprt_force_disconnect(xprt);
1442 ++ xs_tcp_force_close(xprt);
1443 + return;
1444 + }
1445 + dprintk("RPC: reading TCP record fragment of length %d\n",
1446 +@@ -1122,7 +1132,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1447 + break;
1448 + default:
1449 + dprintk("RPC: invalid request message type\n");
1450 +- xprt_force_disconnect(&transport->xprt);
1451 ++ xs_tcp_force_close(&transport->xprt);
1452 + }
1453 + xs_tcp_check_fraghdr(transport);
1454 + }
1455 +@@ -1445,6 +1455,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1456 + static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1457 + {
1458 + smp_mb__before_clear_bit();
1459 ++ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1460 ++ clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1461 + clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1462 + clear_bit(XPRT_CLOSING, &xprt->state);
1463 + smp_mb__after_clear_bit();
1464 +@@ -1502,8 +1514,8 @@ static void xs_tcp_state_change(struct sock *sk)
1465 + break;
1466 + case TCP_CLOSE_WAIT:
1467 + /* The server initiated a shutdown of the socket */
1468 +- xprt_force_disconnect(xprt);
1469 + xprt->connect_cookie++;
1470 ++ xs_tcp_force_close(xprt);
1471 + case TCP_CLOSING:
1472 + /*
1473 + * If the server closed down the connection, make sure that
1474 +@@ -2146,8 +2158,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
1475 + /* We're probably in TIME_WAIT. Get rid of existing socket,
1476 + * and retry
1477 + */
1478 +- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1479 +- xprt_force_disconnect(xprt);
1480 ++ xs_tcp_force_close(xprt);
1481 + break;
1482 + case -ECONNREFUSED:
1483 + case -ECONNRESET:
1484 +diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
1485 +index 7f4d619..11ccc23 100644
1486 +--- a/sound/pci/ac97/ac97_codec.c
1487 ++++ b/sound/pci/ac97/ac97_codec.c
1488 +@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
1489 + tmp.index = ac97->num;
1490 + kctl = snd_ctl_new1(&tmp, ac97);
1491 + }
1492 ++ if (!kctl)
1493 ++ return -ENOMEM;
1494 + if (reg >= AC97_PHONE && reg <= AC97_PCM)
1495 + set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
1496 + else
1497 +diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
1498 +index 15f0161..0800bcc 100644
1499 +--- a/sound/pci/emu10k1/emu10k1_main.c
1500 ++++ b/sound/pci/emu10k1/emu10k1_main.c
1501 +@@ -1415,6 +1415,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
1502 + .ca0108_chip = 1,
1503 + .spk71 = 1,
1504 + .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
1505 ++ /* Tested by Maxim Kachur <mcdebugger@×××××××.ru> 17th Oct 2012. */
1506 ++ /* This is MAEM8986, 0202 is MAEM8980 */
1507 ++ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
1508 ++ .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
1509 ++ .id = "EMU1010",
1510 ++ .emu10k2_chip = 1,
1511 ++ .ca0108_chip = 1,
1512 ++ .spk71 = 1,
1513 ++ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
1514 + /* Tested by James@×××××××××××.uk 8th July 2005. */
1515 + /* This is MAEM8810, 0202 is MAEM8820 */
1516 + {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
1517
1518 Added: genpatches-2.6/trunk/3.0/1047_linux-3.0.48.patch
1519 ===================================================================
1520 --- genpatches-2.6/trunk/3.0/1047_linux-3.0.48.patch (rev 0)
1521 +++ genpatches-2.6/trunk/3.0/1047_linux-3.0.48.patch 2012-10-29 18:05:59 UTC (rev 2228)
1522 @@ -0,0 +1,25 @@
1523 +diff --git a/Makefile b/Makefile
1524 +index 82f6dfe..8dc65e0 100644
1525 +--- a/Makefile
1526 ++++ b/Makefile
1527 +@@ -1,6 +1,6 @@
1528 + VERSION = 3
1529 + PATCHLEVEL = 0
1530 +-SUBLEVEL = 47
1531 ++SUBLEVEL = 48
1532 + EXTRAVERSION =
1533 + NAME = Sneaky Weasel
1534 +
1535 +diff --git a/block/blk-core.c b/block/blk-core.c
1536 +index 2f49f43..35ae52d 100644
1537 +--- a/block/blk-core.c
1538 ++++ b/block/blk-core.c
1539 +@@ -524,7 +524,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
1540 + q->request_fn = rfn;
1541 + q->prep_rq_fn = NULL;
1542 + q->unprep_rq_fn = NULL;
1543 +- q->queue_flags |= QUEUE_FLAG_DEFAULT;
1544 ++ q->queue_flags = QUEUE_FLAG_DEFAULT;
1545 +
1546 + /* Override internal queue lock with supplied lock pointer */
1547 + if (lock)
1548
1549 Added: genpatches-2.6/trunk/3.0/1048_linux-3.0.49.patch
1550 ===================================================================
1551 --- genpatches-2.6/trunk/3.0/1048_linux-3.0.49.patch (rev 0)
1552 +++ genpatches-2.6/trunk/3.0/1048_linux-3.0.49.patch 2012-10-29 18:05:59 UTC (rev 2228)
1553 @@ -0,0 +1,1386 @@
1554 +diff --git a/Makefile b/Makefile
1555 +index 8dc65e0..1c962a1 100644
1556 +--- a/Makefile
1557 ++++ b/Makefile
1558 +@@ -1,6 +1,6 @@
1559 + VERSION = 3
1560 + PATCHLEVEL = 0
1561 +-SUBLEVEL = 48
1562 ++SUBLEVEL = 49
1563 + EXTRAVERSION =
1564 + NAME = Sneaky Weasel
1565 +
1566 +diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
1567 +index 6860d40..904ed63 100644
1568 +--- a/arch/sparc/kernel/perf_event.c
1569 ++++ b/arch/sparc/kernel/perf_event.c
1570 +@@ -513,11 +513,13 @@ static u64 nop_for_index(int idx)
1571 +
1572 + static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
1573 + {
1574 +- u64 val, mask = mask_for_index(idx);
1575 ++ u64 enc, val, mask = mask_for_index(idx);
1576 ++
1577 ++ enc = perf_event_get_enc(cpuc->events[idx]);
1578 +
1579 + val = cpuc->pcr;
1580 + val &= ~mask;
1581 +- val |= hwc->config;
1582 ++ val |= event_encoding(enc, idx);
1583 + cpuc->pcr = val;
1584 +
1585 + pcr_ops->write(cpuc->pcr);
1586 +@@ -1380,8 +1382,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1587 + {
1588 + unsigned long ufp;
1589 +
1590 +- perf_callchain_store(entry, regs->tpc);
1591 +-
1592 + ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1593 + do {
1594 + struct sparc_stackf *usf, sf;
1595 +@@ -1402,8 +1402,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1596 + {
1597 + unsigned long ufp;
1598 +
1599 +- perf_callchain_store(entry, regs->tpc);
1600 +-
1601 + ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
1602 + do {
1603 + struct sparc_stackf32 *usf, sf;
1604 +@@ -1422,6 +1420,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1605 + void
1606 + perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1607 + {
1608 ++ perf_callchain_store(entry, regs->tpc);
1609 ++
1610 ++ if (!current->mm)
1611 ++ return;
1612 ++
1613 + flushw_user();
1614 + if (test_thread_flag(TIF_32BIT))
1615 + perf_callchain_user_32(entry, regs);
1616 +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
1617 +index 908b47a..10c9b36 100644
1618 +--- a/arch/sparc/kernel/sys_sparc_64.c
1619 ++++ b/arch/sparc/kernel/sys_sparc_64.c
1620 +@@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
1621 + {
1622 + int ret;
1623 +
1624 +- if (current->personality == PER_LINUX32 &&
1625 +- personality == PER_LINUX)
1626 +- personality = PER_LINUX32;
1627 ++ if (personality(current->personality) == PER_LINUX32 &&
1628 ++ personality(personality) == PER_LINUX)
1629 ++ personality |= PER_LINUX32;
1630 + ret = sys_personality(personality);
1631 +- if (ret == PER_LINUX32)
1632 +- ret = PER_LINUX;
1633 ++ if (personality(ret) == PER_LINUX32)
1634 ++ ret &= ~PER_LINUX32;
1635 +
1636 + return ret;
1637 + }
1638 +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
1639 +index 1d7e274..7f5f65d 100644
1640 +--- a/arch/sparc/kernel/syscalls.S
1641 ++++ b/arch/sparc/kernel/syscalls.S
1642 +@@ -212,24 +212,20 @@ linux_sparc_syscall:
1643 + 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1644 + ret_sys_call:
1645 + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
1646 +- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
1647 + sra %o0, 0, %o0
1648 + mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
1649 + sllx %g2, 32, %g2
1650 +
1651 +- /* Check if force_successful_syscall_return()
1652 +- * was invoked.
1653 +- */
1654 +- ldub [%g6 + TI_SYS_NOERROR], %l2
1655 +- brnz,a,pn %l2, 80f
1656 +- stb %g0, [%g6 + TI_SYS_NOERROR]
1657 +-
1658 + cmp %o0, -ERESTART_RESTARTBLOCK
1659 + bgeu,pn %xcc, 1f
1660 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
1661 +-80:
1662 ++ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
1663 ++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
1664 ++
1665 ++2:
1666 ++ stb %g0, [%g6 + TI_SYS_NOERROR]
1667 + /* System call success, clear Carry condition code. */
1668 + andn %g3, %g2, %g3
1669 ++3:
1670 + stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
1671 + bne,pn %icc, linux_syscall_trace2
1672 + add %l1, 0x4, %l2 ! npc = npc+4
1673 +@@ -238,20 +234,20 @@ ret_sys_call:
1674 + stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1675 +
1676 + 1:
1677 ++ /* Check if force_successful_syscall_return()
1678 ++ * was invoked.
1679 ++ */
1680 ++ ldub [%g6 + TI_SYS_NOERROR], %l2
1681 ++ brnz,pn %l2, 2b
1682 ++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
1683 + /* System call failure, set Carry condition code.
1684 + * Also, get abs(errno) to return to the process.
1685 + */
1686 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
1687 + sub %g0, %o0, %o0
1688 +- or %g3, %g2, %g3
1689 + stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1690 +- stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
1691 +- bne,pn %icc, linux_syscall_trace2
1692 +- add %l1, 0x4, %l2 ! npc = npc+4
1693 +- stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1694 ++ ba,pt %xcc, 3b
1695 ++ or %g3, %g2, %g3
1696 +
1697 +- b,pt %xcc, rtrap
1698 +- stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1699 + linux_syscall_trace2:
1700 + call syscall_trace_leave
1701 + add %sp, PTREGS_OFF, %o0
1702 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1703 +index 8e073d8..6ff4d78 100644
1704 +--- a/arch/sparc/mm/init_64.c
1705 ++++ b/arch/sparc/mm/init_64.c
1706 +@@ -2118,6 +2118,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
1707 + #ifdef CONFIG_SPARSEMEM_VMEMMAP
1708 + unsigned long vmemmap_table[VMEMMAP_SIZE];
1709 +
1710 ++static long __meminitdata addr_start, addr_end;
1711 ++static int __meminitdata node_start;
1712 ++
1713 + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
1714 + {
1715 + unsigned long vstart = (unsigned long) start;
1716 +@@ -2148,15 +2151,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
1717 +
1718 + *vmem_pp = pte_base | __pa(block);
1719 +
1720 +- printk(KERN_INFO "[%p-%p] page_structs=%lu "
1721 +- "node=%d entry=%lu/%lu\n", start, block, nr,
1722 +- node,
1723 +- addr >> VMEMMAP_CHUNK_SHIFT,
1724 +- VMEMMAP_SIZE);
1725 ++ /* check to see if we have contiguous blocks */
1726 ++ if (addr_end != addr || node_start != node) {
1727 ++ if (addr_start)
1728 ++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
1729 ++ addr_start, addr_end-1, node_start);
1730 ++ addr_start = addr;
1731 ++ node_start = node;
1732 ++ }
1733 ++ addr_end = addr + VMEMMAP_CHUNK;
1734 + }
1735 + }
1736 + return 0;
1737 + }
1738 ++
1739 ++void __meminit vmemmap_populate_print_last(void)
1740 ++{
1741 ++ if (addr_start) {
1742 ++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
1743 ++ addr_start, addr_end-1, node_start);
1744 ++ addr_start = 0;
1745 ++ addr_end = 0;
1746 ++ node_start = 0;
1747 ++ }
1748 ++}
1749 + #endif /* CONFIG_SPARSEMEM_VMEMMAP */
1750 +
1751 + static void prot_init_common(unsigned long page_none,
1752 +diff --git a/arch/tile/Makefile b/arch/tile/Makefile
1753 +index 17acce7..04c637c 100644
1754 +--- a/arch/tile/Makefile
1755 ++++ b/arch/tile/Makefile
1756 +@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
1757 + endif
1758 + endif
1759 +
1760 ++# The tile compiler may emit .eh_frame information for backtracing.
1761 ++# In kernel modules, this causes load failures due to unsupported relocations.
1762 ++KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
1763 ++
1764 + ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
1765 + KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
1766 + endif
1767 +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
1768 +index edb3d46..268b40d 100644
1769 +--- a/arch/x86/kernel/entry_32.S
1770 ++++ b/arch/x86/kernel/entry_32.S
1771 +@@ -1029,7 +1029,7 @@ ENTRY(xen_sysenter_target)
1772 +
1773 + ENTRY(xen_hypervisor_callback)
1774 + CFI_STARTPROC
1775 +- pushl_cfi $0
1776 ++ pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1777 + SAVE_ALL
1778 + TRACE_IRQS_OFF
1779 +
1780 +@@ -1071,14 +1071,16 @@ ENTRY(xen_failsafe_callback)
1781 + 2: mov 8(%esp),%es
1782 + 3: mov 12(%esp),%fs
1783 + 4: mov 16(%esp),%gs
1784 ++ /* EAX == 0 => Category 1 (Bad segment)
1785 ++ EAX != 0 => Category 2 (Bad IRET) */
1786 + testl %eax,%eax
1787 + popl_cfi %eax
1788 + lea 16(%esp),%esp
1789 + CFI_ADJUST_CFA_OFFSET -16
1790 + jz 5f
1791 + addl $16,%esp
1792 +- jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
1793 +-5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
1794 ++ jmp iret_exc
1795 ++5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1796 + SAVE_ALL
1797 + jmp ret_from_exception
1798 + CFI_ENDPROC
1799 +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
1800 +index 8a445a0..dd4dba4 100644
1801 +--- a/arch/x86/kernel/entry_64.S
1802 ++++ b/arch/x86/kernel/entry_64.S
1803 +@@ -1308,7 +1308,7 @@ ENTRY(xen_failsafe_callback)
1804 + CFI_RESTORE r11
1805 + addq $0x30,%rsp
1806 + CFI_ADJUST_CFA_OFFSET -0x30
1807 +- pushq_cfi $0
1808 ++ pushq_cfi $-1 /* orig_ax = -1 => not a system call */
1809 + SAVE_ALL
1810 + jmp error_exit
1811 + CFI_ENDPROC
1812 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
1813 +index afaf384..af19a61 100644
1814 +--- a/arch/x86/kernel/setup.c
1815 ++++ b/arch/x86/kernel/setup.c
1816 +@@ -937,8 +937,21 @@ void __init setup_arch(char **cmdline_p)
1817 +
1818 + #ifdef CONFIG_X86_64
1819 + if (max_pfn > max_low_pfn) {
1820 +- max_pfn_mapped = init_memory_mapping(1UL<<32,
1821 +- max_pfn<<PAGE_SHIFT);
1822 ++ int i;
1823 ++ for (i = 0; i < e820.nr_map; i++) {
1824 ++ struct e820entry *ei = &e820.map[i];
1825 ++
1826 ++ if (ei->addr + ei->size <= 1UL << 32)
1827 ++ continue;
1828 ++
1829 ++ if (ei->type == E820_RESERVED)
1830 ++ continue;
1831 ++
1832 ++ max_pfn_mapped = init_memory_mapping(
1833 ++ ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
1834 ++ ei->addr + ei->size);
1835 ++ }
1836 ++
1837 + /* can we preseve max_low_pfn ?*/
1838 + max_low_pfn = max_pfn;
1839 + }
1840 +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
1841 +index 68894fd..a00c588 100644
1842 +--- a/arch/x86/oprofile/nmi_int.c
1843 ++++ b/arch/x86/oprofile/nmi_int.c
1844 +@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
1845 + val |= counter_config->extra;
1846 + event &= model->event_mask ? model->event_mask : 0xFF;
1847 + val |= event & 0xFF;
1848 +- val |= (event & 0x0F00) << 24;
1849 ++ val |= (u64)(event & 0x0F00) << 24;
1850 +
1851 + return val;
1852 + }
1853 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1854 +index 9a8bebc..feb2d10 100644
1855 +--- a/drivers/edac/amd64_edac.c
1856 ++++ b/drivers/edac/amd64_edac.c
1857 +@@ -161,8 +161,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
1858 + * memory controller and apply to register. Search for the first
1859 + * bandwidth entry that is greater or equal than the setting requested
1860 + * and program that. If at last entry, turn off DRAM scrubbing.
1861 ++ *
1862 ++ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
1863 ++ * by falling back to the last element in scrubrates[].
1864 + */
1865 +- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
1866 ++ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
1867 + /*
1868 + * skip scrub rates which aren't recommended
1869 + * (see F10 BKDG, F3x58)
1870 +@@ -172,12 +175,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
1871 +
1872 + if (scrubrates[i].bandwidth <= new_bw)
1873 + break;
1874 +-
1875 +- /*
1876 +- * if no suitable bandwidth found, turn off DRAM scrubbing
1877 +- * entirely by falling back to the last element in the
1878 +- * scrubrates array.
1879 +- */
1880 + }
1881 +
1882 + scrubval = scrubrates[i].scrubval;
1883 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1884 +index 387b2b3..557e007 100644
1885 +--- a/drivers/gpu/drm/i915/i915_reg.h
1886 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1887 +@@ -3113,6 +3113,11 @@
1888 + #define TRANS_6BPC (2<<5)
1889 + #define TRANS_12BPC (3<<5)
1890 +
1891 ++#define _TRANSA_CHICKEN2 0xf0064
1892 ++#define _TRANSB_CHICKEN2 0xf1064
1893 ++#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
1894 ++#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
1895 ++
1896 + #define SOUTH_CHICKEN2 0xc2004
1897 + #define DPLS_EDP_PPS_FIX_DIS (1<<0)
1898 +
1899 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1900 +index 4b8e235..36d76989 100644
1901 +--- a/drivers/gpu/drm/i915/intel_display.c
1902 ++++ b/drivers/gpu/drm/i915/intel_display.c
1903 +@@ -7584,6 +7584,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
1904 + static void cpt_init_clock_gating(struct drm_device *dev)
1905 + {
1906 + struct drm_i915_private *dev_priv = dev->dev_private;
1907 ++ int pipe;
1908 +
1909 + /*
1910 + * On Ibex Peak and Cougar Point, we need to disable clock
1911 +@@ -7593,6 +7594,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
1912 + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
1913 + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
1914 + DPLS_EDP_PPS_FIX_DIS);
1915 ++ /* Without this, mode sets may fail silently on FDI */
1916 ++ for_each_pipe(pipe)
1917 ++ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
1918 + }
1919 +
1920 + static void ironlake_teardown_rc6(struct drm_device *dev)
1921 +diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
1922 +index c03eb29..9945aaf 100644
1923 +--- a/drivers/media/video/au0828/au0828-video.c
1924 ++++ b/drivers/media/video/au0828/au0828-video.c
1925 +@@ -1697,14 +1697,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
1926 + (AUVI_INPUT(i).audio_setup)(dev, 0);
1927 + }
1928 +
1929 +- videobuf_streamoff(&fh->vb_vidq);
1930 +- res_free(fh, AU0828_RESOURCE_VIDEO);
1931 ++ if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
1932 ++ videobuf_streamoff(&fh->vb_vidq);
1933 ++ res_free(fh, AU0828_RESOURCE_VIDEO);
1934 ++ }
1935 + } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
1936 + dev->vbi_timeout_running = 0;
1937 + del_timer_sync(&dev->vbi_timeout);
1938 +
1939 +- videobuf_streamoff(&fh->vb_vbiq);
1940 +- res_free(fh, AU0828_RESOURCE_VBI);
1941 ++ if (res_check(fh, AU0828_RESOURCE_VBI)) {
1942 ++ videobuf_streamoff(&fh->vb_vbiq);
1943 ++ res_free(fh, AU0828_RESOURCE_VBI);
1944 ++ }
1945 + }
1946 +
1947 + return 0;
1948 +diff --git a/drivers/net/skge.c b/drivers/net/skge.c
1949 +index f4be5c7..b446e7e 100644
1950 +--- a/drivers/net/skge.c
1951 ++++ b/drivers/net/skge.c
1952 +@@ -4097,6 +4097,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
1953 + DMI_MATCH(DMI_BOARD_NAME, "nForce"),
1954 + },
1955 + },
1956 ++ {
1957 ++ .ident = "ASUS P5NSLI",
1958 ++ .matches = {
1959 ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1960 ++ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
1961 ++ },
1962 ++ },
1963 + {}
1964 + };
1965 +
1966 +diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
1967 +index 81af2b3..097fa00 100644
1968 +--- a/drivers/pcmcia/pxa2xx_sharpsl.c
1969 ++++ b/drivers/pcmcia/pxa2xx_sharpsl.c
1970 +@@ -222,7 +222,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
1971 + sharpsl_pcmcia_init_reset(skt);
1972 + }
1973 +
1974 +-static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
1975 ++static struct pcmcia_low_level sharpsl_pcmcia_ops = {
1976 + .owner = THIS_MODULE,
1977 + .hw_init = sharpsl_pcmcia_hw_init,
1978 + .hw_shutdown = sharpsl_pcmcia_hw_shutdown,
1979 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1980 +index 3b2ce7d..b107339 100644
1981 +--- a/drivers/usb/class/cdc-acm.c
1982 ++++ b/drivers/usb/class/cdc-acm.c
1983 +@@ -760,10 +760,6 @@ static const __u32 acm_tty_speed[] = {
1984 + 2500000, 3000000, 3500000, 4000000
1985 + };
1986 +
1987 +-static const __u8 acm_tty_size[] = {
1988 +- 5, 6, 7, 8
1989 +-};
1990 +-
1991 + static void acm_tty_set_termios(struct tty_struct *tty,
1992 + struct ktermios *termios_old)
1993 + {
1994 +@@ -780,7 +776,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
1995 + newline.bParityType = termios->c_cflag & PARENB ?
1996 + (termios->c_cflag & PARODD ? 1 : 2) +
1997 + (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
1998 +- newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
1999 ++ switch (termios->c_cflag & CSIZE) {
2000 ++ case CS5:
2001 ++ newline.bDataBits = 5;
2002 ++ break;
2003 ++ case CS6:
2004 ++ newline.bDataBits = 6;
2005 ++ break;
2006 ++ case CS7:
2007 ++ newline.bDataBits = 7;
2008 ++ break;
2009 ++ case CS8:
2010 ++ default:
2011 ++ newline.bDataBits = 8;
2012 ++ break;
2013 ++ }
2014 + /* FIXME: Needs to clear unsupported bits in the termios */
2015 + acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
2016 +
2017 +@@ -1172,7 +1182,7 @@ made_compressed_probe:
2018 +
2019 + if (usb_endpoint_xfer_int(epwrite))
2020 + usb_fill_int_urb(snd->urb, usb_dev,
2021 +- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
2022 ++ usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
2023 + NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
2024 + else
2025 + usb_fill_bulk_urb(snd->urb, usb_dev,
2026 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2027 +index b455f4c..a44f2d4 100644
2028 +--- a/drivers/usb/host/xhci-mem.c
2029 ++++ b/drivers/usb/host/xhci-mem.c
2030 +@@ -1505,6 +1505,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
2031 + void xhci_mem_cleanup(struct xhci_hcd *xhci)
2032 + {
2033 + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
2034 ++ struct xhci_cd *cur_cd, *next_cd;
2035 + int size;
2036 + int i;
2037 +
2038 +@@ -1525,6 +1526,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
2039 + xhci_ring_free(xhci, xhci->cmd_ring);
2040 + xhci->cmd_ring = NULL;
2041 + xhci_dbg(xhci, "Freed command ring\n");
2042 ++ list_for_each_entry_safe(cur_cd, next_cd,
2043 ++ &xhci->cancel_cmd_list, cancel_cmd_list) {
2044 ++ list_del(&cur_cd->cancel_cmd_list);
2045 ++ kfree(cur_cd);
2046 ++ }
2047 +
2048 + for (i = 1; i < MAX_HC_SLOTS; ++i)
2049 + xhci_free_virt_device(xhci, i);
2050 +@@ -2014,6 +2020,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2051 + xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
2052 + if (!xhci->cmd_ring)
2053 + goto fail;
2054 ++ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
2055 + xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2056 + xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2057 + (unsigned long long)xhci->cmd_ring->first_seg->dma);
2058 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2059 +index 152daca..fd56407 100644
2060 +--- a/drivers/usb/host/xhci-ring.c
2061 ++++ b/drivers/usb/host/xhci-ring.c
2062 +@@ -311,12 +311,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
2063 + /* Ring the host controller doorbell after placing a command on the ring */
2064 + void xhci_ring_cmd_db(struct xhci_hcd *xhci)
2065 + {
2066 ++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
2067 ++ return;
2068 ++
2069 + xhci_dbg(xhci, "// Ding dong!\n");
2070 + xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
2071 + /* Flush PCI posted writes */
2072 + xhci_readl(xhci, &xhci->dba->doorbell[0]);
2073 + }
2074 +
2075 ++static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
2076 ++{
2077 ++ u64 temp_64;
2078 ++ int ret;
2079 ++
2080 ++ xhci_dbg(xhci, "Abort command ring\n");
2081 ++
2082 ++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
2083 ++ xhci_dbg(xhci, "The command ring isn't running, "
2084 ++ "Have the command ring been stopped?\n");
2085 ++ return 0;
2086 ++ }
2087 ++
2088 ++ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2089 ++ if (!(temp_64 & CMD_RING_RUNNING)) {
2090 ++ xhci_dbg(xhci, "Command ring had been stopped\n");
2091 ++ return 0;
2092 ++ }
2093 ++ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
2094 ++ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
2095 ++ &xhci->op_regs->cmd_ring);
2096 ++
2097 ++ /* Section 4.6.1.2 of xHCI 1.0 spec says software should
2098 ++ * time the completion od all xHCI commands, including
2099 ++ * the Command Abort operation. If software doesn't see
2100 ++ * CRR negated in a timely manner (e.g. longer than 5
2101 ++ * seconds), then it should assume that the there are
2102 ++ * larger problems with the xHC and assert HCRST.
2103 ++ */
2104 ++ ret = handshake(xhci, &xhci->op_regs->cmd_ring,
2105 ++ CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
2106 ++ if (ret < 0) {
2107 ++ xhci_err(xhci, "Stopped the command ring failed, "
2108 ++ "maybe the host is dead\n");
2109 ++ xhci->xhc_state |= XHCI_STATE_DYING;
2110 ++ xhci_quiesce(xhci);
2111 ++ xhci_halt(xhci);
2112 ++ return -ESHUTDOWN;
2113 ++ }
2114 ++
2115 ++ return 0;
2116 ++}
2117 ++
2118 ++static int xhci_queue_cd(struct xhci_hcd *xhci,
2119 ++ struct xhci_command *command,
2120 ++ union xhci_trb *cmd_trb)
2121 ++{
2122 ++ struct xhci_cd *cd;
2123 ++ cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
2124 ++ if (!cd)
2125 ++ return -ENOMEM;
2126 ++ INIT_LIST_HEAD(&cd->cancel_cmd_list);
2127 ++
2128 ++ cd->command = command;
2129 ++ cd->cmd_trb = cmd_trb;
2130 ++ list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
2131 ++
2132 ++ return 0;
2133 ++}
2134 ++
2135 ++/*
2136 ++ * Cancel the command which has issue.
2137 ++ *
2138 ++ * Some commands may hang due to waiting for acknowledgement from
2139 ++ * usb device. It is outside of the xHC's ability to control and
2140 ++ * will cause the command ring is blocked. When it occurs software
2141 ++ * should intervene to recover the command ring.
2142 ++ * See Section 4.6.1.1 and 4.6.1.2
2143 ++ */
2144 ++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
2145 ++ union xhci_trb *cmd_trb)
2146 ++{
2147 ++ int retval = 0;
2148 ++ unsigned long flags;
2149 ++
2150 ++ spin_lock_irqsave(&xhci->lock, flags);
2151 ++
2152 ++ if (xhci->xhc_state & XHCI_STATE_DYING) {
2153 ++ xhci_warn(xhci, "Abort the command ring,"
2154 ++ " but the xHCI is dead.\n");
2155 ++ retval = -ESHUTDOWN;
2156 ++ goto fail;
2157 ++ }
2158 ++
2159 ++ /* queue the cmd desriptor to cancel_cmd_list */
2160 ++ retval = xhci_queue_cd(xhci, command, cmd_trb);
2161 ++ if (retval) {
2162 ++ xhci_warn(xhci, "Queuing command descriptor failed.\n");
2163 ++ goto fail;
2164 ++ }
2165 ++
2166 ++ /* abort command ring */
2167 ++ retval = xhci_abort_cmd_ring(xhci);
2168 ++ if (retval) {
2169 ++ xhci_err(xhci, "Abort command ring failed\n");
2170 ++ if (unlikely(retval == -ESHUTDOWN)) {
2171 ++ spin_unlock_irqrestore(&xhci->lock, flags);
2172 ++ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
2173 ++ xhci_dbg(xhci, "xHCI host controller is dead.\n");
2174 ++ return retval;
2175 ++ }
2176 ++ }
2177 ++
2178 ++fail:
2179 ++ spin_unlock_irqrestore(&xhci->lock, flags);
2180 ++ return retval;
2181 ++}
2182 ++
2183 + void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
2184 + unsigned int slot_id,
2185 + unsigned int ep_index,
2186 +@@ -1046,6 +1157,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
2187 + }
2188 + }
2189 +
2190 ++/* Complete the command and detele it from the devcie's command queue.
2191 ++ */
2192 ++static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
2193 ++ struct xhci_command *command, u32 status)
2194 ++{
2195 ++ command->status = status;
2196 ++ list_del(&command->cmd_list);
2197 ++ if (command->completion)
2198 ++ complete(command->completion);
2199 ++ else
2200 ++ xhci_free_command(xhci, command);
2201 ++}
2202 ++
2203 ++
2204 + /* Check to see if a command in the device's command queue matches this one.
2205 + * Signal the completion or free the command, and return 1. Return 0 if the
2206 + * completed command isn't at the head of the command list.
2207 +@@ -1064,15 +1189,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
2208 + if (xhci->cmd_ring->dequeue != command->command_trb)
2209 + return 0;
2210 +
2211 +- command->status = GET_COMP_CODE(le32_to_cpu(event->status));
2212 +- list_del(&command->cmd_list);
2213 +- if (command->completion)
2214 +- complete(command->completion);
2215 +- else
2216 +- xhci_free_command(xhci, command);
2217 ++ xhci_complete_cmd_in_cmd_wait_list(xhci, command,
2218 ++ GET_COMP_CODE(le32_to_cpu(event->status)));
2219 + return 1;
2220 + }
2221 +
2222 ++/*
2223 ++ * Finding the command trb need to be cancelled and modifying it to
2224 ++ * NO OP command. And if the command is in device's command wait
2225 ++ * list, finishing and freeing it.
2226 ++ *
2227 ++ * If we can't find the command trb, we think it had already been
2228 ++ * executed.
2229 ++ */
2230 ++static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
2231 ++{
2232 ++ struct xhci_segment *cur_seg;
2233 ++ union xhci_trb *cmd_trb;
2234 ++ u32 cycle_state;
2235 ++
2236 ++ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
2237 ++ return;
2238 ++
2239 ++ /* find the current segment of command ring */
2240 ++ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
2241 ++ xhci->cmd_ring->dequeue, &cycle_state);
2242 ++
2243 ++ /* find the command trb matched by cd from command ring */
2244 ++ for (cmd_trb = xhci->cmd_ring->dequeue;
2245 ++ cmd_trb != xhci->cmd_ring->enqueue;
2246 ++ next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
2247 ++ /* If the trb is link trb, continue */
2248 ++ if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
2249 ++ continue;
2250 ++
2251 ++ if (cur_cd->cmd_trb == cmd_trb) {
2252 ++
2253 ++ /* If the command in device's command list, we should
2254 ++ * finish it and free the command structure.
2255 ++ */
2256 ++ if (cur_cd->command)
2257 ++ xhci_complete_cmd_in_cmd_wait_list(xhci,
2258 ++ cur_cd->command, COMP_CMD_STOP);
2259 ++
2260 ++ /* get cycle state from the origin command trb */
2261 ++ cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
2262 ++ & TRB_CYCLE;
2263 ++
2264 ++ /* modify the command trb to NO OP command */
2265 ++ cmd_trb->generic.field[0] = 0;
2266 ++ cmd_trb->generic.field[1] = 0;
2267 ++ cmd_trb->generic.field[2] = 0;
2268 ++ cmd_trb->generic.field[3] = cpu_to_le32(
2269 ++ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
2270 ++ break;
2271 ++ }
2272 ++ }
2273 ++}
2274 ++
2275 ++static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
2276 ++{
2277 ++ struct xhci_cd *cur_cd, *next_cd;
2278 ++
2279 ++ if (list_empty(&xhci->cancel_cmd_list))
2280 ++ return;
2281 ++
2282 ++ list_for_each_entry_safe(cur_cd, next_cd,
2283 ++ &xhci->cancel_cmd_list, cancel_cmd_list) {
2284 ++ xhci_cmd_to_noop(xhci, cur_cd);
2285 ++ list_del(&cur_cd->cancel_cmd_list);
2286 ++ kfree(cur_cd);
2287 ++ }
2288 ++}
2289 ++
2290 ++/*
2291 ++ * traversing the cancel_cmd_list. If the command descriptor according
2292 ++ * to cmd_trb is found, the function free it and return 1, otherwise
2293 ++ * return 0.
2294 ++ */
2295 ++static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
2296 ++ union xhci_trb *cmd_trb)
2297 ++{
2298 ++ struct xhci_cd *cur_cd, *next_cd;
2299 ++
2300 ++ if (list_empty(&xhci->cancel_cmd_list))
2301 ++ return 0;
2302 ++
2303 ++ list_for_each_entry_safe(cur_cd, next_cd,
2304 ++ &xhci->cancel_cmd_list, cancel_cmd_list) {
2305 ++ if (cur_cd->cmd_trb == cmd_trb) {
2306 ++ if (cur_cd->command)
2307 ++ xhci_complete_cmd_in_cmd_wait_list(xhci,
2308 ++ cur_cd->command, COMP_CMD_STOP);
2309 ++ list_del(&cur_cd->cancel_cmd_list);
2310 ++ kfree(cur_cd);
2311 ++ return 1;
2312 ++ }
2313 ++ }
2314 ++
2315 ++ return 0;
2316 ++}
2317 ++
2318 ++/*
2319 ++ * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
2320 ++ * trb pointed by the command ring dequeue pointer is the trb we want to
2321 ++ * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
2322 ++ * traverse the cancel_cmd_list to trun the all of the commands according
2323 ++ * to command descriptor to NO-OP trb.
2324 ++ */
2325 ++static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
2326 ++ int cmd_trb_comp_code)
2327 ++{
2328 ++ int cur_trb_is_good = 0;
2329 ++
2330 ++ /* Searching the cmd trb pointed by the command ring dequeue
2331 ++ * pointer in command descriptor list. If it is found, free it.
2332 ++ */
2333 ++ cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
2334 ++ xhci->cmd_ring->dequeue);
2335 ++
2336 ++ if (cmd_trb_comp_code == COMP_CMD_ABORT)
2337 ++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
2338 ++ else if (cmd_trb_comp_code == COMP_CMD_STOP) {
2339 ++ /* traversing the cancel_cmd_list and canceling
2340 ++ * the command according to command descriptor
2341 ++ */
2342 ++ xhci_cancel_cmd_in_cd_list(xhci);
2343 ++
2344 ++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
2345 ++ /*
2346 ++ * ring command ring doorbell again to restart the
2347 ++ * command ring
2348 ++ */
2349 ++ if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
2350 ++ xhci_ring_cmd_db(xhci);
2351 ++ }
2352 ++ return cur_trb_is_good;
2353 ++}
2354 ++
2355 + static void handle_cmd_completion(struct xhci_hcd *xhci,
2356 + struct xhci_event_cmd *event)
2357 + {
2358 +@@ -1098,6 +1352,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
2359 + xhci->error_bitmask |= 1 << 5;
2360 + return;
2361 + }
2362 ++
2363 ++ if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
2364 ++ (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
2365 ++ /* If the return value is 0, we think the trb pointed by
2366 ++ * command ring dequeue pointer is a good trb. The good
2367 ++ * trb means we don't want to cancel the trb, but it have
2368 ++ * been stopped by host. So we should handle it normally.
2369 ++ * Otherwise, driver should invoke inc_deq() and return.
2370 ++ */
2371 ++ if (handle_stopped_cmd_ring(xhci,
2372 ++ GET_COMP_CODE(le32_to_cpu(event->status)))) {
2373 ++ inc_deq(xhci, xhci->cmd_ring, false);
2374 ++ return;
2375 ++ }
2376 ++ }
2377 ++
2378 + switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
2379 + & TRB_TYPE_BITMASK) {
2380 + case TRB_TYPE(TRB_ENABLE_SLOT):
2381 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2382 +index c39ab20..4864b25 100644
2383 +--- a/drivers/usb/host/xhci.c
2384 ++++ b/drivers/usb/host/xhci.c
2385 +@@ -51,7 +51,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
2386 + * handshake done). There are two failure modes: "usec" have passed (major
2387 + * hardware flakeout), or the register reads as all-ones (hardware removed).
2388 + */
2389 +-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
2390 ++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
2391 + u32 mask, u32 done, int usec)
2392 + {
2393 + u32 result;
2394 +@@ -104,8 +104,10 @@ int xhci_halt(struct xhci_hcd *xhci)
2395 +
2396 + ret = handshake(xhci, &xhci->op_regs->status,
2397 + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
2398 +- if (!ret)
2399 ++ if (!ret) {
2400 + xhci->xhc_state |= XHCI_STATE_HALTED;
2401 ++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
2402 ++ }
2403 + return ret;
2404 + }
2405 +
2406 +@@ -390,6 +392,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
2407 + return -ENODEV;
2408 + }
2409 + xhci->shared_hcd->state = HC_STATE_RUNNING;
2410 ++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
2411 +
2412 + if (xhci->quirks & XHCI_NEC_HOST)
2413 + xhci_ring_cmd_db(xhci);
2414 +@@ -1775,6 +1778,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2415 + struct completion *cmd_completion;
2416 + u32 *cmd_status;
2417 + struct xhci_virt_device *virt_dev;
2418 ++ union xhci_trb *cmd_trb;
2419 +
2420 + spin_lock_irqsave(&xhci->lock, flags);
2421 + virt_dev = xhci->devs[udev->slot_id];
2422 +@@ -1817,6 +1821,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2423 + }
2424 + init_completion(cmd_completion);
2425 +
2426 ++ cmd_trb = xhci->cmd_ring->dequeue;
2427 + if (!ctx_change)
2428 + ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2429 + udev->slot_id, must_succeed);
2430 +@@ -1838,14 +1843,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2431 + /* Wait for the configure endpoint command to complete */
2432 + timeleft = wait_for_completion_interruptible_timeout(
2433 + cmd_completion,
2434 +- USB_CTRL_SET_TIMEOUT);
2435 ++ XHCI_CMD_DEFAULT_TIMEOUT);
2436 + if (timeleft <= 0) {
2437 + xhci_warn(xhci, "%s while waiting for %s command\n",
2438 + timeleft == 0 ? "Timeout" : "Signal",
2439 + ctx_change == 0 ?
2440 + "configure endpoint" :
2441 + "evaluate context");
2442 +- /* FIXME cancel the configure endpoint command */
2443 ++ /* cancel the configure endpoint command */
2444 ++ ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2445 ++ if (ret < 0)
2446 ++ return ret;
2447 + return -ETIME;
2448 + }
2449 +
2450 +@@ -2778,8 +2786,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2451 + unsigned long flags;
2452 + int timeleft;
2453 + int ret;
2454 ++ union xhci_trb *cmd_trb;
2455 +
2456 + spin_lock_irqsave(&xhci->lock, flags);
2457 ++ cmd_trb = xhci->cmd_ring->dequeue;
2458 + ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
2459 + if (ret) {
2460 + spin_unlock_irqrestore(&xhci->lock, flags);
2461 +@@ -2791,12 +2801,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2462 +
2463 + /* XXX: how much time for xHC slot assignment? */
2464 + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2465 +- USB_CTRL_SET_TIMEOUT);
2466 ++ XHCI_CMD_DEFAULT_TIMEOUT);
2467 + if (timeleft <= 0) {
2468 + xhci_warn(xhci, "%s while waiting for a slot\n",
2469 + timeleft == 0 ? "Timeout" : "Signal");
2470 +- /* FIXME cancel the enable slot request */
2471 +- return 0;
2472 ++ /* cancel the enable slot request */
2473 ++ return xhci_cancel_cmd(xhci, NULL, cmd_trb);
2474 + }
2475 +
2476 + if (!xhci->slot_id) {
2477 +@@ -2857,6 +2867,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2478 + struct xhci_slot_ctx *slot_ctx;
2479 + struct xhci_input_control_ctx *ctrl_ctx;
2480 + u64 temp_64;
2481 ++ union xhci_trb *cmd_trb;
2482 +
2483 + if (!udev->slot_id) {
2484 + xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
2485 +@@ -2895,6 +2906,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2486 + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2487 +
2488 + spin_lock_irqsave(&xhci->lock, flags);
2489 ++ cmd_trb = xhci->cmd_ring->dequeue;
2490 + ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
2491 + udev->slot_id);
2492 + if (ret) {
2493 +@@ -2907,7 +2919,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2494 +
2495 + /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
2496 + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2497 +- USB_CTRL_SET_TIMEOUT);
2498 ++ XHCI_CMD_DEFAULT_TIMEOUT);
2499 + /* FIXME: From section 4.3.4: "Software shall be responsible for timing
2500 + * the SetAddress() "recovery interval" required by USB and aborting the
2501 + * command on a timeout.
2502 +@@ -2915,7 +2927,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2503 + if (timeleft <= 0) {
2504 + xhci_warn(xhci, "%s while waiting for a slot\n",
2505 + timeleft == 0 ? "Timeout" : "Signal");
2506 +- /* FIXME cancel the address device command */
2507 ++ /* cancel the address device command */
2508 ++ ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
2509 ++ if (ret < 0)
2510 ++ return ret;
2511 + return -ETIME;
2512 + }
2513 +
2514 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2515 +index 21482df..1d72895 100644
2516 +--- a/drivers/usb/host/xhci.h
2517 ++++ b/drivers/usb/host/xhci.h
2518 +@@ -1070,6 +1070,9 @@ union xhci_trb {
2519 + #define TRB_MFINDEX_WRAP 39
2520 + /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
2521 +
2522 ++#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
2523 ++ cpu_to_le32(TRB_TYPE(TRB_LINK)))
2524 ++
2525 + /* Nec vendor-specific command completion event. */
2526 + #define TRB_NEC_CMD_COMP 48
2527 + /* Get NEC firmware revision. */
2528 +@@ -1111,6 +1114,16 @@ struct xhci_td {
2529 + union xhci_trb *last_trb;
2530 + };
2531 +
2532 ++/* xHCI command default timeout value */
2533 ++#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
2534 ++
2535 ++/* command descriptor */
2536 ++struct xhci_cd {
2537 ++ struct list_head cancel_cmd_list;
2538 ++ struct xhci_command *command;
2539 ++ union xhci_trb *cmd_trb;
2540 ++};
2541 ++
2542 + struct xhci_dequeue_state {
2543 + struct xhci_segment *new_deq_seg;
2544 + union xhci_trb *new_deq_ptr;
2545 +@@ -1252,6 +1265,11 @@ struct xhci_hcd {
2546 + /* data structures */
2547 + struct xhci_device_context_array *dcbaa;
2548 + struct xhci_ring *cmd_ring;
2549 ++ unsigned int cmd_ring_state;
2550 ++#define CMD_RING_STATE_RUNNING (1 << 0)
2551 ++#define CMD_RING_STATE_ABORTED (1 << 1)
2552 ++#define CMD_RING_STATE_STOPPED (1 << 2)
2553 ++ struct list_head cancel_cmd_list;
2554 + unsigned int cmd_ring_reserved_trbs;
2555 + struct xhci_ring *event_ring;
2556 + struct xhci_erst erst;
2557 +@@ -1486,6 +1504,8 @@ void xhci_unregister_pci(void);
2558 + #endif
2559 +
2560 + /* xHCI host controller glue */
2561 ++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
2562 ++ u32 mask, u32 done, int usec);
2563 + void xhci_quiesce(struct xhci_hcd *xhci);
2564 + int xhci_halt(struct xhci_hcd *xhci);
2565 + int xhci_reset(struct xhci_hcd *xhci);
2566 +@@ -1568,6 +1588,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
2567 + unsigned int slot_id, unsigned int ep_index,
2568 + struct xhci_dequeue_state *deq_state);
2569 + void xhci_stop_endpoint_command_watchdog(unsigned long arg);
2570 ++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
2571 ++ union xhci_trb *cmd_trb);
2572 + void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
2573 + unsigned int ep_index, unsigned int stream_id);
2574 +
2575 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2576 +index 4e0c118..c334670 100644
2577 +--- a/drivers/usb/serial/option.c
2578 ++++ b/drivers/usb/serial/option.c
2579 +@@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
2580 + .reserved = BIT(5),
2581 + };
2582 +
2583 ++static const struct option_blacklist_info net_intf6_blacklist = {
2584 ++ .reserved = BIT(6),
2585 ++};
2586 ++
2587 + static const struct option_blacklist_info zte_mf626_blacklist = {
2588 + .sendsetup = BIT(0) | BIT(1),
2589 + .reserved = BIT(4),
2590 + };
2591 +
2592 ++static const struct option_blacklist_info zte_1255_blacklist = {
2593 ++ .reserved = BIT(3) | BIT(4),
2594 ++};
2595 ++
2596 + static const struct usb_device_id option_ids[] = {
2597 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
2598 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
2599 +@@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
2600 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
2601 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2602 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
2603 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
2604 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
2605 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
2606 ++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2607 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
2608 ++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2609 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
2610 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
2611 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
2612 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
2613 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
2614 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
2615 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2616 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
2617 ++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2618 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
2619 ++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
2620 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
2621 ++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2622 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
2623 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
2624 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
2625 +@@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
2626 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
2627 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
2628 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2629 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
2630 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
2631 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2632 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
2633 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
2634 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
2635 +@@ -880,9 +895,22 @@ static const struct usb_device_id option_ids[] = {
2636 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
2637 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
2638 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2639 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
2640 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
2641 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
2642 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
2643 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2644 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
2645 ++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
2646 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
2647 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2648 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
2649 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2650 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
2651 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2652 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
2653 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2654 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
2655 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2656 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
2657 ++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
2658 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
2659 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
2660 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
2661 +@@ -998,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
2662 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
2663 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
2664 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
2665 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
2666 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
2667 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2668 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
2669 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
2670 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
2671 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2672 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
2673 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
2674 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
2675 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
2676 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
2677 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
2678 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2679 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
2680 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
2681 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
2682 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
2683 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
2684 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2685 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
2686 ++ .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
2687 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
2688 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2689 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
2690 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
2691 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
2692 +@@ -1054,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
2693 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
2694 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
2695 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
2696 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
2697 ++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
2698 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
2699 + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
2700 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
2701 ++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
2702 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
2703 ++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
2704 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
2705 ++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
2706 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
2707 + 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
2708 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
2709 +@@ -1067,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
2710 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
2711 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
2712 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
2713 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
2714 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
2715 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
2716 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
2717 ++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
2718 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
2719 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2720 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
2721 ++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2722 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
2723 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
2724 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
2725 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
2726 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2727 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
2728 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
2729 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
2730 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
2731 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2732 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
2733 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2734 +
2735 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
2736 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
2737 +diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
2738 +index 36057ce..6e2a2d5 100644
2739 +--- a/fs/lockd/clntxdr.c
2740 ++++ b/fs/lockd/clntxdr.c
2741 +@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
2742 + {
2743 + __be32 *p;
2744 +
2745 +- BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
2746 ++ WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
2747 + p = xdr_reserve_space(xdr, 4);
2748 + *p = stat;
2749 + }
2750 +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
2751 +index df753a1..23d7451 100644
2752 +--- a/fs/lockd/mon.c
2753 ++++ b/fs/lockd/mon.c
2754 +@@ -40,7 +40,6 @@ struct nsm_args {
2755 + u32 proc;
2756 +
2757 + char *mon_name;
2758 +- char *nodename;
2759 + };
2760 +
2761 + struct nsm_res {
2762 +@@ -94,7 +93,6 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
2763 + .vers = 3,
2764 + .proc = NLMPROC_NSM_NOTIFY,
2765 + .mon_name = nsm->sm_mon_name,
2766 +- .nodename = utsname()->nodename,
2767 + };
2768 + struct rpc_message msg = {
2769 + .rpc_argp = &args,
2770 +@@ -431,7 +429,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
2771 + {
2772 + __be32 *p;
2773 +
2774 +- encode_nsm_string(xdr, argp->nodename);
2775 ++ encode_nsm_string(xdr, utsname()->nodename);
2776 + p = xdr_reserve_space(xdr, 4 + 4 + 4);
2777 + *p++ = cpu_to_be32(argp->prog);
2778 + *p++ = cpu_to_be32(argp->vers);
2779 +diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
2780 +index d27aab1..d413af3 100644
2781 +--- a/fs/lockd/svcproc.c
2782 ++++ b/fs/lockd/svcproc.c
2783 +@@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
2784 +
2785 + /* Obtain file pointer. Not used by FREE_ALL call. */
2786 + if (filp != NULL) {
2787 +- if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
2788 ++ error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
2789 ++ if (error != 0)
2790 + goto no_locks;
2791 + *filp = file;
2792 +
2793 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2794 +index 2efce77..69158d5 100644
2795 +--- a/kernel/cgroup.c
2796 ++++ b/kernel/cgroup.c
2797 +@@ -1800,9 +1800,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
2798 + * trading it for newcg is protected by cgroup_mutex, we're safe to drop
2799 + * it here; it will be freed under RCU.
2800 + */
2801 +- put_css_set(oldcg);
2802 +-
2803 + set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
2804 ++ put_css_set(oldcg);
2805 + return 0;
2806 + }
2807 +
2808 +diff --git a/kernel/sys.c b/kernel/sys.c
2809 +index dd29555..84e353b1 100644
2810 +--- a/kernel/sys.c
2811 ++++ b/kernel/sys.c
2812 +@@ -1133,15 +1133,16 @@ DECLARE_RWSEM(uts_sem);
2813 + * Work around broken programs that cannot handle "Linux 3.0".
2814 + * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
2815 + */
2816 +-static int override_release(char __user *release, int len)
2817 ++static int override_release(char __user *release, size_t len)
2818 + {
2819 + int ret = 0;
2820 +- char buf[65];
2821 +
2822 + if (current->personality & UNAME26) {
2823 +- char *rest = UTS_RELEASE;
2824 ++ const char *rest = UTS_RELEASE;
2825 ++ char buf[65] = { 0 };
2826 + int ndots = 0;
2827 + unsigned v;
2828 ++ size_t copy;
2829 +
2830 + while (*rest) {
2831 + if (*rest == '.' && ++ndots >= 3)
2832 +@@ -1151,8 +1152,9 @@ static int override_release(char __user *release, int len)
2833 + rest++;
2834 + }
2835 + v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
2836 +- snprintf(buf, len, "2.6.%u%s", v, rest);
2837 +- ret = copy_to_user(release, buf, len);
2838 ++ copy = clamp_t(size_t, len, 1, sizeof(buf));
2839 ++ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
2840 ++ ret = copy_to_user(release, buf, copy + 1);
2841 + }
2842 + return ret;
2843 + }
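
The sys.c hunk hardens override_release(): previously copy_to_user(release, buf, len) copied a caller-controlled len bytes out of a 65-byte stack buffer, leaking kernel stack memory (reported as CVE-2012-0957). The fix clamps len to the buffer size, uses scnprintf() to learn how much was actually formatted, and copies only that plus the terminator. A userspace sketch of the pattern; clamp_t and the scnprintf stand-in are simplified re-implementations, not the kernel's:

#include <stdio.h>
#include <string.h>

#define clamp_t(type, v, lo, hi)                        \
        ((type)(v) < (type)(lo) ? (type)(lo) :          \
         (type)(v) > (type)(hi) ? (type)(hi) : (type)(v))

/* simplified scnprintf(): returns chars actually stored, not needed */
static size_t scnprintf_sketch(char *buf, size_t size, const char *s)
{
        size_t n = strlen(s);

        if (n >= size)
                n = size - 1;
        memcpy(buf, s, n);
        buf[n] = '\0';
        return n;
}

int main(void)
{
        char user[128];                 /* stands in for the user pointer */
        char buf[65] = { 0 };
        size_t len = 1000;              /* hostile caller-supplied length */
        size_t copy;

        copy = clamp_t(size_t, len, 1, sizeof(buf));
        copy = scnprintf_sketch(buf, copy, "2.6.40");
        memcpy(user, buf, copy + 1);    /* bounded by what was written */
        printf("copied %zu bytes: %s\n", copy + 1, user);
        return 0;
}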
2844 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2845 +index 96bb0a3..eb8857a 100644
2846 +--- a/net/core/neighbour.c
2847 ++++ b/net/core/neighbour.c
2848 +@@ -1313,8 +1313,6 @@ int neigh_resolve_output(struct sk_buff *skb)
2849 + if (!dst)
2850 + goto discard;
2851 +
2852 +- __skb_pull(skb, skb_network_offset(skb));
2853 +-
2854 + if (!neigh_event_send(neigh, skb)) {
2855 + int err;
2856 + struct net_device *dev = neigh->dev;
2857 +@@ -1326,6 +1324,7 @@ int neigh_resolve_output(struct sk_buff *skb)
2858 + neigh_hh_init(neigh, dst, dst->ops->protocol);
2859 +
2860 + do {
2861 ++ __skb_pull(skb, skb_network_offset(skb));
2862 + seq = read_seqbegin(&neigh->ha_lock);
2863 + err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2864 + neigh->ha, NULL, skb->len);
2865 +@@ -1358,9 +1357,8 @@ int neigh_connected_output(struct sk_buff *skb)
2866 + struct net_device *dev = neigh->dev;
2867 + unsigned int seq;
2868 +
2869 +- __skb_pull(skb, skb_network_offset(skb));
2870 +-
2871 + do {
2872 ++ __skb_pull(skb, skb_network_offset(skb));
2873 + seq = read_seqbegin(&neigh->ha_lock);
2874 + err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2875 + neigh->ha, NULL, skb->len);
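
Both neighbour.c hunks move __skb_pull() inside the seqlock retry loop. Each pass calls dev_hard_header(), which pushes a fresh link-layer header onto the skb, so a retry that does not first re-pull the data pointer back to the network header would push a second header onto stale state. A toy model of the invariant, with integers standing in for skb offsets:

#include <stdio.h>

int main(void)
{
        int net_off = 14;       /* offset of the network header */
        int data_off = net_off; /* skb->data, as an integer for the sketch */
        int retries = 2;        /* pretend the seqlock forced one retry */

        do {
                data_off = net_off;     /* __skb_pull() back to L3 ... */
                data_off -= 14;         /* ... dev_hard_header() pushes L2 */
        } while (--retries);

        /* with the pull outside the loop, pass two would have pushed a
         * second header: 14 -> 0 -> -14, running off the buffer head */
        printf("final data offset: %d\n", data_off);
        return 0;
}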
2876 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2877 +index 53a5af6..d645c6f 100644
2878 +--- a/net/ipv4/tcp_ipv4.c
2879 ++++ b/net/ipv4/tcp_ipv4.c
2880 +@@ -651,10 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
2881 + arg.csumoffset = offsetof(struct tcphdr, check) / 2;
2882 + arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
2883 + /* When socket is gone, all binding information is lost.
2884 +- * routing might fail in this case. using iif for oif to
2885 +- * make sure we can deliver it
2886 ++ * routing might fail in this case. No choice here, if we choose to force
2887 ++ * input interface, we will misroute in case of asymmetric route.
2888 + */
2889 +- arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
2890 ++ if (sk)
2891 ++ arg.bound_dev_if = sk->sk_bound_dev_if;
2892 +
2893 + net = dev_net(skb_dst(skb)->dev);
2894 + ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
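
The tcp_ipv4.c hunk changes how a RST for a socketless segment is routed: forcing the output interface to the ingress interface misroutes the reset when routing is asymmetric, so bound_dev_if is now left at zero unless a live socket still pins it. A sketch of the changed initialisation, with an illustrative struct in place of the kernel's:

#include <stdio.h>

struct sock {
        int sk_bound_dev_if;    /* illustrative, not the kernel struct */
};

int main(void)
{
        struct sock live = { .sk_bound_dev_if = 3 };
        struct sock *sk = NULL; /* the socket is already gone */
        int bound_dev_if = 0;   /* 0: let the route lookup choose */

        if (sk)
                bound_dev_if = sk->sk_bound_dev_if;
        printf("bound_dev_if = %d\n", bound_dev_if);

        (void)live;             /* swap in sk = &live for the pinned case */
        return 0;
}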
2895 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2896 +index 848f963..a6d5850 100644
2897 +--- a/net/ipv6/tcp_ipv6.c
2898 ++++ b/net/ipv6/tcp_ipv6.c
2899 +@@ -1060,7 +1060,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
2900 + __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
2901 +
2902 + fl6.flowi6_proto = IPPROTO_TCP;
2903 +- fl6.flowi6_oif = inet6_iif(skb);
2904 ++ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
2905 ++ fl6.flowi6_oif = inet6_iif(skb);
2906 + fl6.fl6_dport = t1->dest;
2907 + fl6.fl6_sport = t1->source;
2908 + security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
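
The tcp_ipv6.c hunk applies the same idea with one exception: a link-local destination is ambiguous without an interface, so only then is flowi6_oif pinned to the ingress device. A sketch using a simplified fe80::/10 prefix test in place of the kernel's ipv6_addr_type():

#include <stdint.h>
#include <stdio.h>

/* fe80::/10 test, a simplified stand-in for ipv6_addr_type() */
static int is_link_local(const uint8_t addr[16])
{
        return addr[0] == 0xfe && (addr[1] & 0xc0) == 0x80;
}

int main(void)
{
        uint8_t dst[16] = { 0xfe, 0x80 };       /* rest zero */
        int ingress_if = 2;
        int oif = 0;                            /* route lookup decides */

        if (is_link_local(dst))
                oif = ingress_if;       /* link-local needs a scope */
        printf("oif = %d\n", oif);
        return 0;
}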
2909 +diff --git a/net/rds/send.c b/net/rds/send.c
2910 +index c803341..f6bdfb0 100644
2911 +--- a/net/rds/send.c
2912 ++++ b/net/rds/send.c
2913 +@@ -1121,7 +1121,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
2914 + rds_stats_inc(s_send_pong);
2915 +
2916 + if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
2917 +- rds_send_xmit(conn);
2918 ++ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
2919 +
2920 + rds_message_put(rm);
2921 + return 0;
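
The rds hunk stops transmitting the pong inline and instead queues the connection's send work, so the transmit runs from worker context rather than deep in the caller's stack and lock state. A pthread-based sketch of the deferral pattern; the kernel uses a workqueue, not a raw thread (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static void *send_worker(void *arg)     /* runs with a clean stack */
{
        (void)arg;
        printf("xmit from worker context\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        /* before: the transmit ran inline, deep in the receive path */
        pthread_create(&t, NULL, send_worker, NULL);    /* after: defer */
        pthread_join(&t, NULL);
        return 0;
}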
2922 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
2923 +index 4530a91..237a2ee 100644
2924 +--- a/net/sunrpc/cache.c
2925 ++++ b/net/sunrpc/cache.c
2926 +@@ -1404,11 +1404,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
2927 + size_t count, loff_t *ppos,
2928 + struct cache_detail *cd)
2929 + {
2930 +- char tbuf[20];
2931 ++ char tbuf[22];
2932 + unsigned long p = *ppos;
2933 + size_t len;
2934 +
2935 +- sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
2936 ++ snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
2937 + len = strlen(tbuf);
2938 + if (p >= len)
2939 + return 0;
2940
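
The final sunrpc hunk is a buffer-sizing fix: ULONG_MAX on 64-bit is 20 digits, so "%lu\n" plus the NUL terminator needs 22 bytes, and the old unchecked sprintf() into a 20-byte buffer could run two bytes past the end. The switch to snprintf() also caps any future format change. The arithmetic, checked in userspace:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        char tbuf[22];
        int n = snprintf(tbuf, sizeof(tbuf), "%lu\n", ULONG_MAX);

        /* 20 digits + '\n' = 21 chars, + NUL = 22 bytes on LP64 */
        printf("worst case needs %d chars + NUL, buffer is %zu\n",
               n, sizeof(tbuf));
        return 0;
}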
2941 Deleted: genpatches-2.6/trunk/3.0/2500_5906-device-DMA-frag-workaround.patch
2942 ===================================================================
2943 --- genpatches-2.6/trunk/3.0/2500_5906-device-DMA-frag-workaround.patch 2012-10-29 14:35:56 UTC (rev 2227)
2944 +++ genpatches-2.6/trunk/3.0/2500_5906-device-DMA-frag-workaround.patch 2012-10-29 18:05:59 UTC (rev 2228)
2945 @@ -1,34 +0,0 @@
2946 -From 94bf3cba3df8cdbc1bae805bf878bb8f0743e317 Mon Sep 17 00:00:00 2001
2947 -From: Mike Pagano <mpagano@g.o>
2948 -Date: Thu, 11 Oct 2012 20:45:26 -0400
2949 -Subject: [PATCH] tg3: Apply short DMA frag workaround to 5906
2950 -
2951 -Backport of b7abee6ef888117f92db370620ebf116a38e3f4d for 3.0.X
2952 -
2953 -tg3: Apply short DMA frag workaround to 5906
2954 -
2955 -5906 devices also need the short DMA fragment workaround. This patch makes the necessary change.
2956 -
2957 -Signed-off-by: Mike Pagano <mpagano@g.o>
2958 ----
2959 - drivers/net/tg3.c | 5 +++--
2960 - 1 files changed, 3 insertions(+), 2 deletions(-)
2961 -
2962 -diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
2963 -index c4ab8a7..45a4cbd 100644
2964 ---- a/drivers/net/tg3.c
2965 -+++ b/drivers/net/tg3.c
2966 -@@ -13685,8 +13685,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
2967 - */
2968 - tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
2969 -
2970 -- if (tg3_flag(tp, 5755_PLUS))
2971 -- tg3_flag_set(tp, SHORT_DMA_BUG);
2972 -+ if (tg3_flag(tp, 5755_PLUS) ||
2973 -+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
2974 -+ tg3_flag_set(tp, SHORT_DMA_BUG);
2975 - else
2976 - tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
2977 -
2978 ---
2979 -1.7.8.6
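
The file deleted above was the interim Gentoo backport of upstream commit b7abee6ef888117f92db370620ebf116a38e3f4d; per the log entry it is dropped as redundant, presumably because the 3.0.47-3.0.49 stable patches added in this revision now carry the fix. For reference, the logic it changed selects which tg3 DMA workaround a chip gets; a sketch with illustrative constants in place of the driver's flags:

#include <stdio.h>

enum asic_rev { REV_5755_PLUS, REV_5906, REV_OTHER };   /* illustrative */

int main(void)
{
        enum asic_rev rev = REV_5906;

        if (rev == REV_5755_PLUS || rev == REV_5906)
                printf("SHORT_DMA_BUG workaround\n");   /* now incl. 5906 */
        else
                printf("40BIT_DMA_LIMIT_BUG workaround\n");
        return 0;
}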