Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1909 - genpatches-2.6/trunk/2.6.38
Date: Mon, 02 May 2011 20:29:07
Message-Id: 20110502202849.08F2520054@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2011-05-02 20:28:48 +0000 (Mon, 02 May 2011)
3 New Revision: 1909
4
5 Added:
6 genpatches-2.6/trunk/2.6.38/1004_linux-2.6.38.5.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.38/0000_README
9 Log:
10 Linux patch 2.6.38.5
11
12 Modified: genpatches-2.6/trunk/2.6.38/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.38/0000_README 2011-04-27 23:16:27 UTC (rev 1908)
15 +++ genpatches-2.6/trunk/2.6.38/0000_README 2011-05-02 20:28:48 UTC (rev 1909)
16 @@ -55,6 +55,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.38.4
19
20 +Patch: 1004_linux-2.6.38.5.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.38.5
23 +
24 Patch: 1700_XEN-PVHVM-compile-issue-fix.patch
25 From: http://bugs.gentoo.org/show_bug.cgi?id=362415
26 Desc: Fix compile issue if XEN is enabled but XEN_PVHVM is disabled
27
28 Added: genpatches-2.6/trunk/2.6.38/1004_linux-2.6.38.5.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.38/1004_linux-2.6.38.5.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.38/1004_linux-2.6.38.5.patch 2011-05-02 20:28:48 UTC (rev 1909)
32 @@ -0,0 +1,1956 @@
33 +diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
34 +index 02b7a03..8b3db1c 100644
35 +--- a/arch/m68k/mm/motorola.c
36 ++++ b/arch/m68k/mm/motorola.c
37 +@@ -300,6 +300,8 @@ void __init paging_init(void)
38 + zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
39 + free_area_init_node(i, zones_size,
40 + m68k_memory[i].addr >> PAGE_SHIFT, NULL);
41 ++ if (node_present_pages(i))
42 ++ node_set_state(i, N_NORMAL_MEMORY);
43 + }
44 + }
45 +
46 +diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
47 +index f4f4d70..7fd8aad 100644
48 +--- a/arch/parisc/mm/init.c
49 ++++ b/arch/parisc/mm/init.c
50 +@@ -266,8 +266,10 @@ static void __init setup_bootmem(void)
51 + }
52 + memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
53 +
54 +- for (i = 0; i < npmem_ranges; i++)
55 ++ for (i = 0; i < npmem_ranges; i++) {
56 ++ node_set_state(i, N_NORMAL_MEMORY);
57 + node_set_online(i);
58 ++ }
59 + #endif
60 +
61 + /*
62 +diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
63 +index 7e9d30d..ab0e041 100644
64 +--- a/arch/s390/kvm/sie64a.S
65 ++++ b/arch/s390/kvm/sie64a.S
66 +@@ -48,10 +48,10 @@ sie_irq_handler:
67 + tm __TI_flags+7(%r2),_TIF_EXIT_SIE
68 + jz 0f
69 + larl %r2,sie_exit # work pending, leave sie
70 +- stg %r2,__LC_RETURN_PSW+8
71 ++ stg %r2,SPI_PSW+8(0,%r15)
72 + br %r14
73 + 0: larl %r2,sie_reenter # re-enter with guest id
74 +- stg %r2,__LC_RETURN_PSW+8
75 ++ stg %r2,SPI_PSW+8(0,%r15)
76 + 1: br %r14
77 +
78 + /*
79 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
80 +index 2c57806..0f900c8 100644
81 +--- a/arch/s390/mm/fault.c
82 ++++ b/arch/s390/mm/fault.c
83 +@@ -558,9 +558,9 @@ static void pfault_interrupt(unsigned int ext_int_code,
84 + * Get the token (= address of the task structure of the affected task).
85 + */
86 + #ifdef CONFIG_64BIT
87 +- tsk = *(struct task_struct **) param64;
88 ++ tsk = (struct task_struct *) param64;
89 + #else
90 +- tsk = *(struct task_struct **) param32;
91 ++ tsk = (struct task_struct *) param32;
92 + #endif
93 +
94 + if (subcode & 0x0080) {
95 +diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
96 +index 804b28d..b1da91c 100644
97 +--- a/arch/um/sys-i386/Makefile
98 ++++ b/arch/um/sys-i386/Makefile
99 +@@ -4,7 +4,7 @@
100 +
101 + obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
102 + ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
103 +- sys_call_table.o tls.o
104 ++ sys_call_table.o tls.o atomic64_cx8_32.o
105 +
106 + obj-$(CONFIG_BINFMT_ELF) += elfcore.o
107 +
108 +diff --git a/arch/um/sys-i386/atomic64_cx8_32.S b/arch/um/sys-i386/atomic64_cx8_32.S
109 +new file mode 100644
110 +index 0000000..1e901d3d
111 +--- /dev/null
112 ++++ b/arch/um/sys-i386/atomic64_cx8_32.S
113 +@@ -0,0 +1,225 @@
114 ++/*
115 ++ * atomic64_t for 586+
116 ++ *
117 ++ * Copied from arch/x86/lib/atomic64_cx8_32.S
118 ++ *
119 ++ * Copyright © 2010 Luca Barbieri
120 ++ *
121 ++ * This program is free software; you can redistribute it and/or modify
122 ++ * it under the terms of the GNU General Public License as published by
123 ++ * the Free Software Foundation; either version 2 of the License, or
124 ++ * (at your option) any later version.
125 ++ *
126 ++ */
127 ++
128 ++#include <linux/linkage.h>
129 ++#include <asm/alternative-asm.h>
130 ++#include <asm/dwarf2.h>
131 ++
132 ++.macro SAVE reg
133 ++ pushl_cfi %\reg
134 ++ CFI_REL_OFFSET \reg, 0
135 ++.endm
136 ++
137 ++.macro RESTORE reg
138 ++ popl_cfi %\reg
139 ++ CFI_RESTORE \reg
140 ++.endm
141 ++
142 ++.macro read64 reg
143 ++ movl %ebx, %eax
144 ++ movl %ecx, %edx
145 ++/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
146 ++ LOCK_PREFIX
147 ++ cmpxchg8b (\reg)
148 ++.endm
149 ++
150 ++ENTRY(atomic64_read_cx8)
151 ++ CFI_STARTPROC
152 ++
153 ++ read64 %ecx
154 ++ ret
155 ++ CFI_ENDPROC
156 ++ENDPROC(atomic64_read_cx8)
157 ++
158 ++ENTRY(atomic64_set_cx8)
159 ++ CFI_STARTPROC
160 ++
161 ++1:
162 ++/* we don't need LOCK_PREFIX since aligned 64-bit writes
163 ++ * are atomic on 586 and newer */
164 ++ cmpxchg8b (%esi)
165 ++ jne 1b
166 ++
167 ++ ret
168 ++ CFI_ENDPROC
169 ++ENDPROC(atomic64_set_cx8)
170 ++
171 ++ENTRY(atomic64_xchg_cx8)
172 ++ CFI_STARTPROC
173 ++
174 ++ movl %ebx, %eax
175 ++ movl %ecx, %edx
176 ++1:
177 ++ LOCK_PREFIX
178 ++ cmpxchg8b (%esi)
179 ++ jne 1b
180 ++
181 ++ ret
182 ++ CFI_ENDPROC
183 ++ENDPROC(atomic64_xchg_cx8)
184 ++
185 ++.macro addsub_return func ins insc
186 ++ENTRY(atomic64_\func\()_return_cx8)
187 ++ CFI_STARTPROC
188 ++ SAVE ebp
189 ++ SAVE ebx
190 ++ SAVE esi
191 ++ SAVE edi
192 ++
193 ++ movl %eax, %esi
194 ++ movl %edx, %edi
195 ++ movl %ecx, %ebp
196 ++
197 ++ read64 %ebp
198 ++1:
199 ++ movl %eax, %ebx
200 ++ movl %edx, %ecx
201 ++ \ins\()l %esi, %ebx
202 ++ \insc\()l %edi, %ecx
203 ++ LOCK_PREFIX
204 ++ cmpxchg8b (%ebp)
205 ++ jne 1b
206 ++
207 ++10:
208 ++ movl %ebx, %eax
209 ++ movl %ecx, %edx
210 ++ RESTORE edi
211 ++ RESTORE esi
212 ++ RESTORE ebx
213 ++ RESTORE ebp
214 ++ ret
215 ++ CFI_ENDPROC
216 ++ENDPROC(atomic64_\func\()_return_cx8)
217 ++.endm
218 ++
219 ++addsub_return add add adc
220 ++addsub_return sub sub sbb
221 ++
222 ++.macro incdec_return func ins insc
223 ++ENTRY(atomic64_\func\()_return_cx8)
224 ++ CFI_STARTPROC
225 ++ SAVE ebx
226 ++
227 ++ read64 %esi
228 ++1:
229 ++ movl %eax, %ebx
230 ++ movl %edx, %ecx
231 ++ \ins\()l $1, %ebx
232 ++ \insc\()l $0, %ecx
233 ++ LOCK_PREFIX
234 ++ cmpxchg8b (%esi)
235 ++ jne 1b
236 ++
237 ++10:
238 ++ movl %ebx, %eax
239 ++ movl %ecx, %edx
240 ++ RESTORE ebx
241 ++ ret
242 ++ CFI_ENDPROC
243 ++ENDPROC(atomic64_\func\()_return_cx8)
244 ++.endm
245 ++
246 ++incdec_return inc add adc
247 ++incdec_return dec sub sbb
248 ++
249 ++ENTRY(atomic64_dec_if_positive_cx8)
250 ++ CFI_STARTPROC
251 ++ SAVE ebx
252 ++
253 ++ read64 %esi
254 ++1:
255 ++ movl %eax, %ebx
256 ++ movl %edx, %ecx
257 ++ subl $1, %ebx
258 ++ sbb $0, %ecx
259 ++ js 2f
260 ++ LOCK_PREFIX
261 ++ cmpxchg8b (%esi)
262 ++ jne 1b
263 ++
264 ++2:
265 ++ movl %ebx, %eax
266 ++ movl %ecx, %edx
267 ++ RESTORE ebx
268 ++ ret
269 ++ CFI_ENDPROC
270 ++ENDPROC(atomic64_dec_if_positive_cx8)
271 ++
272 ++ENTRY(atomic64_add_unless_cx8)
273 ++ CFI_STARTPROC
274 ++ SAVE ebp
275 ++ SAVE ebx
276 ++/* these just push these two parameters on the stack */
277 ++ SAVE edi
278 ++ SAVE esi
279 ++
280 ++ movl %ecx, %ebp
281 ++ movl %eax, %esi
282 ++ movl %edx, %edi
283 ++
284 ++ read64 %ebp
285 ++1:
286 ++ cmpl %eax, 0(%esp)
287 ++ je 4f
288 ++2:
289 ++ movl %eax, %ebx
290 ++ movl %edx, %ecx
291 ++ addl %esi, %ebx
292 ++ adcl %edi, %ecx
293 ++ LOCK_PREFIX
294 ++ cmpxchg8b (%ebp)
295 ++ jne 1b
296 ++
297 ++ movl $1, %eax
298 ++3:
299 ++ addl $8, %esp
300 ++ CFI_ADJUST_CFA_OFFSET -8
301 ++ RESTORE ebx
302 ++ RESTORE ebp
303 ++ ret
304 ++4:
305 ++ cmpl %edx, 4(%esp)
306 ++ jne 2b
307 ++ xorl %eax, %eax
308 ++ jmp 3b
309 ++ CFI_ENDPROC
310 ++ENDPROC(atomic64_add_unless_cx8)
311 ++
312 ++ENTRY(atomic64_inc_not_zero_cx8)
313 ++ CFI_STARTPROC
314 ++ SAVE ebx
315 ++
316 ++ read64 %esi
317 ++1:
318 ++ testl %eax, %eax
319 ++ je 4f
320 ++2:
321 ++ movl %eax, %ebx
322 ++ movl %edx, %ecx
323 ++ addl $1, %ebx
324 ++ adcl $0, %ecx
325 ++ LOCK_PREFIX
326 ++ cmpxchg8b (%esi)
327 ++ jne 1b
328 ++
329 ++ movl $1, %eax
330 ++3:
331 ++ RESTORE ebx
332 ++ ret
333 ++4:
334 ++ testl %edx, %edx
335 ++ jne 2b
336 ++ jmp 3b
337 ++ CFI_ENDPROC
338 ++ENDPROC(atomic64_inc_not_zero_cx8)
339 +diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
340 +index 43085bf..3e7349f 100644
341 +--- a/arch/x86/include/asm/gart.h
342 ++++ b/arch/x86/include/asm/gart.h
343 +@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
344 + * Don't enable translation but enable GART IO and CPU accesses.
345 + * Also, set DISTLBWALKPRB since GART tables memory is UC.
346 + */
347 +- ctl = DISTLBWALKPRB | order << 1;
348 ++ ctl = order << 1;
349 +
350 + pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
351 + }
352 +@@ -83,7 +83,7 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
353 +
354 + /* Enable GART translation for this hammer. */
355 + pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
356 +- ctl |= GARTEN;
357 ++ ctl |= GARTEN | DISTLBWALKPRB;
358 + ctl &= ~(DISGARTCPU | DISGARTIO);
359 + pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
360 + }
361 +diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
362 +index 5955a78..f6a1c23 100644
363 +--- a/arch/x86/kernel/aperture_64.c
364 ++++ b/arch/x86/kernel/aperture_64.c
365 +@@ -500,7 +500,7 @@ out:
366 + * Don't enable translation yet but enable GART IO and CPU
367 + * accesses and set DISTLBWALKPRB since GART table memory is UC.
368 + */
369 +- u32 ctl = DISTLBWALKPRB | aper_order << 1;
370 ++ u32 ctl = aper_order << 1;
371 +
372 + bus = amd_nb_bus_dev_ranges[i].bus;
373 + dev_base = amd_nb_bus_dev_ranges[i].dev_base;
374 +diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
375 +index c01ffa5..197a46f 100644
376 +--- a/arch/x86/kernel/pci-gart_64.c
377 ++++ b/arch/x86/kernel/pci-gart_64.c
378 +@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
379 + #define AGPEXTERN
380 + #endif
381 +
382 ++/* GART can only remap to physical addresses < 1TB */
383 ++#define GART_MAX_PHYS_ADDR (1ULL << 40)
384 ++
385 + /* backdoor interface to AGP driver */
386 + AGPEXTERN int agp_memory_reserved;
387 + AGPEXTERN __u32 *agp_gatt_table;
388 +@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
389 + size_t size, int dir, unsigned long align_mask)
390 + {
391 + unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
392 +- unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
393 ++ unsigned long iommu_page;
394 + int i;
395 +
396 ++ if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
397 ++ return bad_dma_addr;
398 ++
399 ++ iommu_page = alloc_iommu(dev, npages, align_mask);
400 + if (iommu_page == -1) {
401 + if (!nonforced_iommu(dev, phys_mem, size))
402 + return phys_mem;
403 +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
404 +index 41fb691..3655e19 100644
405 +--- a/block/blk-sysfs.c
406 ++++ b/block/blk-sysfs.c
407 +@@ -511,8 +511,10 @@ int blk_register_queue(struct gendisk *disk)
408 + return ret;
409 +
410 + ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
411 +- if (ret < 0)
412 ++ if (ret < 0) {
413 ++ blk_trace_remove_sysfs(dev);
414 + return ret;
415 ++ }
416 +
417 + kobject_uevent(&q->kobj, KOBJ_ADD);
418 +
419 +diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
420 +index ac1a599..fcc13ac 100644
421 +--- a/drivers/acpi/battery.c
422 ++++ b/drivers/acpi/battery.c
423 +@@ -33,6 +33,7 @@
424 + #include <linux/async.h>
425 + #include <linux/dmi.h>
426 + #include <linux/slab.h>
427 ++#include <linux/suspend.h>
428 +
429 + #ifdef CONFIG_ACPI_PROCFS_POWER
430 + #include <linux/proc_fs.h>
431 +@@ -102,6 +103,7 @@ struct acpi_battery {
432 + struct mutex lock;
433 + struct power_supply bat;
434 + struct acpi_device *device;
435 ++ struct notifier_block pm_nb;
436 + unsigned long update_time;
437 + int rate_now;
438 + int capacity_now;
439 +@@ -940,6 +942,21 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
440 + power_supply_changed(&battery->bat);
441 + }
442 +
443 ++static int battery_notify(struct notifier_block *nb,
444 ++ unsigned long mode, void *_unused)
445 ++{
446 ++ struct acpi_battery *battery = container_of(nb, struct acpi_battery,
447 ++ pm_nb);
448 ++ switch (mode) {
449 ++ case PM_POST_SUSPEND:
450 ++ sysfs_remove_battery(battery);
451 ++ sysfs_add_battery(battery);
452 ++ break;
453 ++ }
454 ++
455 ++ return 0;
456 ++}
457 ++
458 + static int acpi_battery_add(struct acpi_device *device)
459 + {
460 + int result = 0;
461 +@@ -972,6 +989,10 @@ static int acpi_battery_add(struct acpi_device *device)
462 + #endif
463 + kfree(battery);
464 + }
465 ++
466 ++ battery->pm_nb.notifier_call = battery_notify;
467 ++ register_pm_notifier(&battery->pm_nb);
468 ++
469 + return result;
470 + }
471 +
472 +@@ -982,6 +1003,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
473 + if (!device || !acpi_driver_data(device))
474 + return -EINVAL;
475 + battery = acpi_driver_data(device);
476 ++ unregister_pm_notifier(&battery->pm_nb);
477 + #ifdef CONFIG_ACPI_PROCFS_POWER
478 + acpi_battery_remove_fs(device);
479 + #endif
480 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
481 +index b99e624..8eee69f 100644
482 +--- a/drivers/acpi/scan.c
483 ++++ b/drivers/acpi/scan.c
484 +@@ -944,6 +944,10 @@ static int acpi_bus_get_flags(struct acpi_device *device)
485 + if (ACPI_SUCCESS(status))
486 + device->flags.lockable = 1;
487 +
488 ++ /* Power resources cannot be power manageable. */
489 ++ if (device->device_type == ACPI_BUS_TYPE_POWER)
490 ++ return 0;
491 ++
492 + /* Presence of _PS0|_PR0 indicates 'power manageable' */
493 + status = acpi_get_handle(device->handle, "_PS0", &temp);
494 + if (ACPI_FAILURE(status))
495 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
496 +index 34e08f6..54c096b 100644
497 +--- a/drivers/ata/ahci.c
498 ++++ b/drivers/ata/ahci.c
499 +@@ -150,7 +150,7 @@ static const struct ata_port_info ahci_port_info[] = {
500 + {
501 + AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
502 + AHCI_HFLAG_YES_NCQ),
503 +- .flags = AHCI_FLAG_COMMON,
504 ++ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
505 + .pio_mask = ATA_PIO4,
506 + .udma_mask = ATA_UDMA6,
507 + .port_ops = &ahci_ops,
508 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
509 +index 26d4523..8498eb5 100644
510 +--- a/drivers/ata/libahci.c
511 ++++ b/drivers/ata/libahci.c
512 +@@ -1897,7 +1897,17 @@ static void ahci_pmp_attach(struct ata_port *ap)
513 + ahci_enable_fbs(ap);
514 +
515 + pp->intr_mask |= PORT_IRQ_BAD_PMP;
516 +- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
517 ++
518 ++ /*
519 ++ * We must not change the port interrupt mask register if the
520 ++ * port is marked frozen, the value in pp->intr_mask will be
521 ++ * restored later when the port is thawed.
522 ++ *
523 ++ * Note that during initialization, the port is marked as
524 ++ * frozen since the irq handler is not yet registered.
525 ++ */
526 ++ if (!(ap->pflags & ATA_PFLAG_FROZEN))
527 ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
528 + }
529 +
530 + static void ahci_pmp_detach(struct ata_port *ap)
531 +@@ -1913,7 +1923,10 @@ static void ahci_pmp_detach(struct ata_port *ap)
532 + writel(cmd, port_mmio + PORT_CMD);
533 +
534 + pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
535 +- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
536 ++
537 ++ /* see comment above in ahci_pmp_attach() */
538 ++ if (!(ap->pflags & ATA_PFLAG_FROZEN))
539 ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
540 + }
541 +
542 + int ahci_port_resume(struct ata_port *ap)
543 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
544 +index d4e52e2..4ccce0f 100644
545 +--- a/drivers/ata/libata-core.c
546 ++++ b/drivers/ata/libata-core.c
547 +@@ -5479,8 +5479,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
548 + ap = kzalloc(sizeof(*ap), GFP_KERNEL);
549 + if (!ap)
550 + return NULL;
551 +-
552 +- ap->pflags |= ATA_PFLAG_INITIALIZING;
553 ++
554 ++ ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
555 + ap->lock = &host->lock;
556 + ap->print_id = -1;
557 + ap->host = host;
558 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
559 +index e16850e..fe18c2d 100644
560 +--- a/drivers/ata/libata-eh.c
561 ++++ b/drivers/ata/libata-eh.c
562 +@@ -3276,6 +3276,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
563 + struct ata_eh_context *ehc = &link->eh_context;
564 + struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
565 + enum ata_lpm_policy old_policy = link->lpm_policy;
566 ++ bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM;
567 + unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
568 + unsigned int err_mask;
569 + int rc;
570 +@@ -3292,7 +3293,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
571 + */
572 + ata_for_each_dev(dev, link, ENABLED) {
573 + bool hipm = ata_id_has_hipm(dev->id);
574 +- bool dipm = ata_id_has_dipm(dev->id);
575 ++ bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
576 +
577 + /* find the first enabled and LPM enabled devices */
578 + if (!link_dev)
579 +@@ -3349,7 +3350,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
580 +
581 + /* host config updated, enable DIPM if transitioning to MIN_POWER */
582 + ata_for_each_dev(dev, link, ENABLED) {
583 +- if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
584 ++ if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
585 ++ ata_id_has_dipm(dev->id)) {
586 + err_mask = ata_dev_set_feature(dev,
587 + SETFEATURES_SATA_ENABLE, SATA_DIPM);
588 + if (err_mask && err_mask != AC_ERR_DEV) {
589 +diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
590 +index 012cba0..b072648 100644
591 +--- a/drivers/char/agp/generic.c
592 ++++ b/drivers/char/agp/generic.c
593 +@@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
594 + struct agp_memory *new;
595 + unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
596 +
597 ++ if (INT_MAX/sizeof(struct page *) < num_agp_pages)
598 ++ return NULL;
599 ++
600 + new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
601 + if (new == NULL)
602 + return NULL;
603 +@@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
604 + int scratch_pages;
605 + struct agp_memory *new;
606 + size_t i;
607 ++ int cur_memory;
608 +
609 + if (!bridge)
610 + return NULL;
611 +
612 +- if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
613 ++ cur_memory = atomic_read(&bridge->current_memory_agp);
614 ++ if ((cur_memory + page_count > bridge->max_memory_agp) ||
615 ++ (cur_memory + page_count < page_count))
616 + return NULL;
617 +
618 + if (type >= AGP_USER_TYPES) {
619 +@@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
620 + return -EINVAL;
621 + }
622 +
623 +- /* AK: could wrap */
624 +- if ((pg_start + mem->page_count) > num_entries)
625 ++ if (((pg_start + mem->page_count) > num_entries) ||
626 ++ ((pg_start + mem->page_count) < pg_start))
627 + return -EINVAL;
628 +
629 + j = pg_start;
630 +@@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
631 + {
632 + size_t i;
633 + struct agp_bridge_data *bridge;
634 +- int mask_type;
635 ++ int mask_type, num_entries;
636 +
637 + bridge = mem->bridge;
638 + if (!bridge)
639 +@@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
640 + if (type != mem->type)
641 + return -EINVAL;
642 +
643 ++ num_entries = agp_num_entries();
644 ++ if (((pg_start + mem->page_count) > num_entries) ||
645 ++ ((pg_start + mem->page_count) < pg_start))
646 ++ return -EINVAL;
647 ++
648 + mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
649 + if (mask_type != 0) {
650 + /* The generic routines know nothing of memory types */
651 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
652 +index 84b164d..838568a 100644
653 +--- a/drivers/char/virtio_console.c
654 ++++ b/drivers/char/virtio_console.c
655 +@@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port)
656 + spin_lock_irq(&pdrvdata_lock);
657 + list_del(&port->cons.list);
658 + spin_unlock_irq(&pdrvdata_lock);
659 +-#if 0
660 +- /*
661 +- * hvc_remove() not called as removing one hvc port
662 +- * results in other hvc ports getting frozen.
663 +- *
664 +- * Once this is resolved in hvc, this functionality
665 +- * will be enabled. Till that is done, the -EPIPE
666 +- * return from get_chars() above will help
667 +- * hvc_console.c to clean up on ports we remove here.
668 +- */
669 + hvc_remove(port->cons.hvc);
670 +-#endif
671 + }
672 +
673 + /* Remove unused data this port might have received. */
674 +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
675 +index 0902d44..4b4b545 100644
676 +--- a/drivers/gpu/drm/Kconfig
677 ++++ b/drivers/gpu/drm/Kconfig
678 +@@ -24,6 +24,7 @@ config DRM_KMS_HELPER
679 + depends on DRM
680 + select FB
681 + select FRAMEBUFFER_CONSOLE if !EXPERT
682 ++ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
683 + help
684 + FB and CRTC helpers for KMS drivers.
685 +
686 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
687 +index 49fb54f..ecf8f94 100644
688 +--- a/drivers/gpu/drm/i915/intel_display.c
689 ++++ b/drivers/gpu/drm/i915/intel_display.c
690 +@@ -5630,36 +5630,6 @@ cleanup_work:
691 + return ret;
692 + }
693 +
694 +-static void intel_crtc_reset(struct drm_crtc *crtc)
695 +-{
696 +- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
697 +-
698 +- /* Reset flags back to the 'unknown' status so that they
699 +- * will be correctly set on the initial modeset.
700 +- */
701 +- intel_crtc->dpms_mode = -1;
702 +-}
703 +-
704 +-static struct drm_crtc_helper_funcs intel_helper_funcs = {
705 +- .dpms = intel_crtc_dpms,
706 +- .mode_fixup = intel_crtc_mode_fixup,
707 +- .mode_set = intel_crtc_mode_set,
708 +- .mode_set_base = intel_pipe_set_base,
709 +- .mode_set_base_atomic = intel_pipe_set_base_atomic,
710 +- .load_lut = intel_crtc_load_lut,
711 +- .disable = intel_crtc_disable,
712 +-};
713 +-
714 +-static const struct drm_crtc_funcs intel_crtc_funcs = {
715 +- .reset = intel_crtc_reset,
716 +- .cursor_set = intel_crtc_cursor_set,
717 +- .cursor_move = intel_crtc_cursor_move,
718 +- .gamma_set = intel_crtc_gamma_set,
719 +- .set_config = drm_crtc_helper_set_config,
720 +- .destroy = intel_crtc_destroy,
721 +- .page_flip = intel_crtc_page_flip,
722 +-};
723 +-
724 + static void intel_sanitize_modesetting(struct drm_device *dev,
725 + int pipe, int plane)
726 + {
727 +@@ -5710,6 +5680,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
728 + }
729 + }
730 +
731 ++static void intel_crtc_reset(struct drm_crtc *crtc)
732 ++{
733 ++ struct drm_device *dev = crtc->dev;
734 ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
735 ++
736 ++ /* Reset flags back to the 'unknown' status so that they
737 ++ * will be correctly set on the initial modeset.
738 ++ */
739 ++ intel_crtc->dpms_mode = -1;
740 ++
741 ++ /* We need to fix up any BIOS configuration that conflicts with
742 ++ * our expectations.
743 ++ */
744 ++ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
745 ++}
746 ++
747 ++static struct drm_crtc_helper_funcs intel_helper_funcs = {
748 ++ .dpms = intel_crtc_dpms,
749 ++ .mode_fixup = intel_crtc_mode_fixup,
750 ++ .mode_set = intel_crtc_mode_set,
751 ++ .mode_set_base = intel_pipe_set_base,
752 ++ .mode_set_base_atomic = intel_pipe_set_base_atomic,
753 ++ .load_lut = intel_crtc_load_lut,
754 ++ .disable = intel_crtc_disable,
755 ++};
756 ++
757 ++static const struct drm_crtc_funcs intel_crtc_funcs = {
758 ++ .reset = intel_crtc_reset,
759 ++ .cursor_set = intel_crtc_cursor_set,
760 ++ .cursor_move = intel_crtc_cursor_move,
761 ++ .gamma_set = intel_crtc_gamma_set,
762 ++ .set_config = drm_crtc_helper_set_config,
763 ++ .destroy = intel_crtc_destroy,
764 ++ .page_flip = intel_crtc_page_flip,
765 ++};
766 ++
767 + static void intel_crtc_init(struct drm_device *dev, int pipe)
768 + {
769 + drm_i915_private_t *dev_priv = dev->dev_private;
770 +@@ -5759,8 +5765,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
771 +
772 + setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
773 + (unsigned long)intel_crtc);
774 +-
775 +- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
776 + }
777 +
778 + int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
779 +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
780 +index fe4a53a..65edb22 100644
781 +--- a/drivers/gpu/drm/i915/intel_tv.c
782 ++++ b/drivers/gpu/drm/i915/intel_tv.c
783 +@@ -1380,7 +1380,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
784 + if (type < 0)
785 + return connector_status_disconnected;
786 +
787 ++ intel_tv->type = type;
788 + intel_tv_find_better_format(connector);
789 ++
790 + return connector_status_connected;
791 + }
792 +
793 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
794 +index 60769d2..7826be0 100644
795 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
796 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
797 +@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
798 + OUT_RING (chan, 0);
799 + }
800 +
801 +- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
802 ++ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
803 + FIRE_RING(chan);
804 + mutex_unlock(&chan->mutex);
805 +
806 + ret = -EBUSY;
807 + for (i = 0; i < 100000; i++) {
808 +- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
809 ++ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
810 + ret = 0;
811 + break;
812 + }
813 +diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
814 +index d71d375..7bd7456 100644
815 +--- a/drivers/gpu/drm/radeon/atom.c
816 ++++ b/drivers/gpu/drm/radeon/atom.c
817 +@@ -135,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
818 + case ATOM_IIO_MOVE_INDEX:
819 + temp &=
820 + ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
821 +- CU8(base + 2));
822 ++ CU8(base + 3));
823 + temp |=
824 + ((index >> CU8(base + 2)) &
825 + (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
826 +@@ -145,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
827 + case ATOM_IIO_MOVE_DATA:
828 + temp &=
829 + ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
830 +- CU8(base + 2));
831 ++ CU8(base + 3));
832 + temp |=
833 + ((data >> CU8(base + 2)) &
834 + (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
835 +@@ -155,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
836 + case ATOM_IIO_MOVE_ATTR:
837 + temp &=
838 + ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
839 +- CU8(base + 2));
840 ++ CU8(base + 3));
841 + temp |=
842 + ((ctx->
843 + io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
844 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
845 +index 0861257..bede31c 100644
846 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
847 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
848 +@@ -532,10 +532,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
849 + else
850 + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
851 +
852 +- if ((rdev->family == CHIP_R600) ||
853 +- (rdev->family == CHIP_RV610) ||
854 +- (rdev->family == CHIP_RV630) ||
855 +- (rdev->family == CHIP_RV670))
856 ++ if (rdev->family < CHIP_RV770)
857 + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
858 + } else {
859 + pll->flags |= RADEON_PLL_LEGACY;
860 +@@ -565,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
861 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
862 + if (ss_enabled) {
863 + if (ss->refdiv) {
864 +- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
865 + pll->flags |= RADEON_PLL_USE_REF_DIV;
866 + pll->reference_div = ss->refdiv;
867 + if (ASIC_IS_AVIVO(rdev))
868 +diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
869 +index 53e6273..c35ab94 100644
870 +--- a/drivers/input/xen-kbdfront.c
871 ++++ b/drivers/input/xen-kbdfront.c
872 +@@ -286,7 +286,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
873 + enum xenbus_state backend_state)
874 + {
875 + struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
876 +- int val;
877 ++ int ret, val;
878 +
879 + switch (backend_state) {
880 + case XenbusStateInitialising:
881 +@@ -299,6 +299,16 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
882 +
883 + case XenbusStateInitWait:
884 + InitWait:
885 ++ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
886 ++ "feature-abs-pointer", "%d", &val);
887 ++ if (ret < 0)
888 ++ val = 0;
889 ++ if (val) {
890 ++ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
891 ++ "request-abs-pointer", "1");
892 ++ if (ret)
893 ++ pr_warning("can't request abs-pointer\n");
894 ++ }
895 + xenbus_switch_state(dev, XenbusStateConnected);
896 + break;
897 +
898 +diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
899 +index 227c020..4f3e3ce 100644
900 +--- a/drivers/media/dvb/b2c2/flexcop-pci.c
901 ++++ b/drivers/media/dvb/b2c2/flexcop-pci.c
902 +@@ -38,7 +38,7 @@ MODULE_PARM_DESC(debug,
903 + DEBSTATUS);
904 +
905 + #define DRIVER_VERSION "0.1"
906 +-#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver"
907 ++#define DRIVER_NAME "flexcop-pci"
908 + #define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@××××.de>"
909 +
910 + struct flexcop_pci {
911 +diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
912 +index a113805..10f86e0 100644
913 +--- a/drivers/net/netxen/netxen_nic.h
914 ++++ b/drivers/net/netxen/netxen_nic.h
915 +@@ -174,7 +174,7 @@
916 +
917 + #define MAX_NUM_CARDS 4
918 +
919 +-#define MAX_BUFFERS_PER_CMD 32
920 ++#define NETXEN_MAX_FRAGS_PER_TX 14
921 + #define MAX_TSO_HEADER_DESC 2
922 + #define MGMT_CMD_DESC_RESV 4
923 + #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
924 +@@ -558,7 +558,7 @@ struct netxen_recv_crb {
925 + */
926 + struct netxen_cmd_buffer {
927 + struct sk_buff *skb;
928 +- struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
929 ++ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
930 + u32 frag_count;
931 + };
932 +
933 +diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
934 +index 33fac32..28139df 100644
935 +--- a/drivers/net/netxen/netxen_nic_main.c
936 ++++ b/drivers/net/netxen/netxen_nic_main.c
937 +@@ -1841,6 +1841,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
938 + struct cmd_desc_type0 *hwdesc, *first_desc;
939 + struct pci_dev *pdev;
940 + int i, k;
941 ++ int delta = 0;
942 ++ struct skb_frag_struct *frag;
943 +
944 + u32 producer;
945 + int frag_count, no_of_desc;
946 +@@ -1848,6 +1850,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
947 +
948 + frag_count = skb_shinfo(skb)->nr_frags + 1;
949 +
950 ++ /* 14 frags supported for normal packet and
951 ++ * 32 frags supported for TSO packet
952 ++ */
953 ++ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
954 ++
955 ++ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
956 ++ frag = &skb_shinfo(skb)->frags[i];
957 ++ delta += frag->size;
958 ++ }
959 ++
960 ++ if (!__pskb_pull_tail(skb, delta))
961 ++ goto drop_packet;
962 ++
963 ++ frag_count = 1 + skb_shinfo(skb)->nr_frags;
964 ++ }
965 + /* 4 fragments per cmd des */
966 + no_of_desc = (frag_count + 3) >> 2;
967 +
968 +diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
969 +index 44e316f..0f136ff 100644
970 +--- a/drivers/net/qlcnic/qlcnic.h
971 ++++ b/drivers/net/qlcnic/qlcnic.h
972 +@@ -99,6 +99,7 @@
973 + #define TX_UDPV6_PKT 0x0c
974 +
975 + /* Tx defines */
976 ++#define QLCNIC_MAX_FRAGS_PER_TX 14
977 + #define MAX_TSO_HEADER_DESC 2
978 + #define MGMT_CMD_DESC_RESV 4
979 + #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
980 +diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
981 +index 37c04b4..92619d7 100644
982 +--- a/drivers/net/qlcnic/qlcnic_main.c
983 ++++ b/drivers/net/qlcnic/qlcnic_main.c
984 +@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
985 + struct cmd_desc_type0 *hwdesc, *first_desc;
986 + struct pci_dev *pdev;
987 + struct ethhdr *phdr;
988 ++ int delta = 0;
989 + int i, k;
990 +
991 + u32 producer;
992 +@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
993 + }
994 +
995 + frag_count = skb_shinfo(skb)->nr_frags + 1;
996 ++ /* 14 frags supported for normal packet and
997 ++ * 32 frags supported for TSO packet
998 ++ */
999 ++ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
1000 ++
1001 ++ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
1002 ++ delta += skb_shinfo(skb)->frags[i].size;
1003 ++
1004 ++ if (!__pskb_pull_tail(skb, delta))
1005 ++ goto drop_packet;
1006 ++
1007 ++ frag_count = 1 + skb_shinfo(skb)->nr_frags;
1008 ++ }
1009 +
1010 + /* 4 fragments per cmd des */
1011 + no_of_desc = (frag_count + 3) >> 2;
1012 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
1013 +index 4ceddbb..038a0cb 100644
1014 +--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
1015 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
1016 +@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
1017 + */
1018 + if (rxsp->status11 & AR_CRCErr)
1019 + rxs->rs_status |= ATH9K_RXERR_CRC;
1020 +- if (rxsp->status11 & AR_PHYErr) {
1021 ++ else if (rxsp->status11 & AR_PHYErr) {
1022 + phyerr = MS(rxsp->status11, AR_PHYErrCode);
1023 + /*
1024 + * If we reach a point here where AR_PostDelimCRCErr is
1025 +@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
1026 + rxs->rs_phyerr = phyerr;
1027 + }
1028 +
1029 +- }
1030 +- if (rxsp->status11 & AR_DecryptCRCErr)
1031 ++ } else if (rxsp->status11 & AR_DecryptCRCErr)
1032 + rxs->rs_status |= ATH9K_RXERR_DECRYPT;
1033 +- if (rxsp->status11 & AR_MichaelErr)
1034 ++ else if (rxsp->status11 & AR_MichaelErr)
1035 + rxs->rs_status |= ATH9K_RXERR_MIC;
1036 ++
1037 + if (rxsp->status11 & AR_KeyMiss)
1038 + rxs->rs_status |= ATH9K_RXERR_DECRYPT;
1039 + }
1040 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1041 +index 7c0a7c4..a3b77ae 100644
1042 +--- a/drivers/net/wireless/ath/ath9k/hw.c
1043 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
1044 +@@ -1218,15 +1218,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1045 + ah->txchainmask = common->tx_chainmask;
1046 + ah->rxchainmask = common->rx_chainmask;
1047 +
1048 +- if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
1049 +- ath9k_hw_abortpcurecv(ah);
1050 +- if (!ath9k_hw_stopdmarecv(ah)) {
1051 +- ath_dbg(common, ATH_DBG_XMIT,
1052 +- "Failed to stop receive dma\n");
1053 +- bChannelChange = false;
1054 +- }
1055 +- }
1056 +-
1057 + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1058 + return -EIO;
1059 +
1060 +diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
1061 +index 2915b11..e9fc97d 100644
1062 +--- a/drivers/net/wireless/ath/ath9k/mac.c
1063 ++++ b/drivers/net/wireless/ath/ath9k/mac.c
1064 +@@ -690,17 +690,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
1065 + rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
1066 +
1067 + if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
1068 ++ /*
1069 ++ * Treat these errors as mutually exclusive to avoid spurious
1070 ++ * extra error reports from the hardware. If a CRC error is
1071 ++ * reported, then decryption and MIC errors are irrelevant,
1072 ++ * the frame is going to be dropped either way
1073 ++ */
1074 + if (ads.ds_rxstatus8 & AR_CRCErr)
1075 + rs->rs_status |= ATH9K_RXERR_CRC;
1076 +- if (ads.ds_rxstatus8 & AR_PHYErr) {
1077 ++ else if (ads.ds_rxstatus8 & AR_PHYErr) {
1078 + rs->rs_status |= ATH9K_RXERR_PHY;
1079 + phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
1080 + rs->rs_phyerr = phyerr;
1081 +- }
1082 +- if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
1083 ++ } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
1084 + rs->rs_status |= ATH9K_RXERR_DECRYPT;
1085 +- if (ads.ds_rxstatus8 & AR_MichaelErr)
1086 ++ else if (ads.ds_rxstatus8 & AR_MichaelErr)
1087 + rs->rs_status |= ATH9K_RXERR_MIC;
1088 ++
1089 + if (ads.ds_rxstatus8 & AR_KeyMiss)
1090 + rs->rs_status |= ATH9K_RXERR_DECRYPT;
1091 + }
1092 +@@ -770,28 +776,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
1093 + }
1094 + EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
1095 +
1096 +-bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
1097 ++bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
1098 + {
1099 + #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
1100 + #define AH_RX_TIME_QUANTUM 100 /* usec */
1101 + struct ath_common *common = ath9k_hw_common(ah);
1102 ++ u32 mac_status, last_mac_status = 0;
1103 + int i;
1104 +
1105 ++ /* Enable access to the DMA observation bus */
1106 ++ REG_WRITE(ah, AR_MACMISC,
1107 ++ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
1108 ++ (AR_MACMISC_MISC_OBS_BUS_1 <<
1109 ++ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
1110 ++
1111 + REG_WRITE(ah, AR_CR, AR_CR_RXD);
1112 +
1113 + /* Wait for rx enable bit to go low */
1114 + for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
1115 + if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
1116 + break;
1117 ++
1118 ++ if (!AR_SREV_9300_20_OR_LATER(ah)) {
1119 ++ mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
1120 ++ if (mac_status == 0x1c0 && mac_status == last_mac_status) {
1121 ++ *reset = true;
1122 ++ break;
1123 ++ }
1124 ++
1125 ++ last_mac_status = mac_status;
1126 ++ }
1127 ++
1128 + udelay(AH_TIME_QUANTUM);
1129 + }
1130 +
1131 + if (i == 0) {
1132 + ath_err(common,
1133 +- "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
1134 ++ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
1135 + AH_RX_STOP_DMA_TIMEOUT / 1000,
1136 + REG_READ(ah, AR_CR),
1137 +- REG_READ(ah, AR_DIAG_SW));
1138 ++ REG_READ(ah, AR_DIAG_SW),
1139 ++ REG_READ(ah, AR_DMADBG_7));
1140 + return false;
1141 + } else {
1142 + return true;
1143 +diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
1144 +index 7512f97..d9cc299 100644
1145 +--- a/drivers/net/wireless/ath/ath9k/mac.h
1146 ++++ b/drivers/net/wireless/ath/ath9k/mac.h
1147 +@@ -692,7 +692,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
1148 + void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
1149 + void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
1150 + void ath9k_hw_abortpcurecv(struct ath_hw *ah);
1151 +-bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
1152 ++bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
1153 + int ath9k_hw_beaconq_setup(struct ath_hw *ah);
1154 +
1155 + /* Interrupt Handling */
1156 +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
1157 +index 3867a2e..89546bc 100644
1158 +--- a/drivers/net/wireless/ath/ath9k/recv.c
1159 ++++ b/drivers/net/wireless/ath/ath9k/recv.c
1160 +@@ -513,12 +513,12 @@ start_recv:
1161 + bool ath_stoprecv(struct ath_softc *sc)
1162 + {
1163 + struct ath_hw *ah = sc->sc_ah;
1164 +- bool stopped;
1165 ++ bool stopped, reset = false;
1166 +
1167 + spin_lock_bh(&sc->rx.rxbuflock);
1168 + ath9k_hw_abortpcurecv(ah);
1169 + ath9k_hw_setrxfilter(ah, 0);
1170 +- stopped = ath9k_hw_stopdmarecv(ah);
1171 ++ stopped = ath9k_hw_stopdmarecv(ah, &reset);
1172 +
1173 + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
1174 + ath_edma_stop_recv(sc);
1175 +@@ -533,7 +533,7 @@ bool ath_stoprecv(struct ath_softc *sc)
1176 + "confusing the DMA engine when we start RX up\n");
1177 + ATH_DBG_WARN_ON_ONCE(!stopped);
1178 + }
1179 +- return stopped;
1180 ++ return stopped || reset;
1181 + }
1182 +
1183 + void ath_flushrecv(struct ath_softc *sc)
1184 +diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
1185 +index 248c670..5c2cfe6 100644
1186 +--- a/drivers/net/wireless/ath/regd_common.h
1187 ++++ b/drivers/net/wireless/ath/regd_common.h
1188 +@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
1189 + {APL9_WORLD, CTL_ETSI, CTL_ETSI},
1190 +
1191 + {APL3_FCCA, CTL_FCC, CTL_FCC},
1192 ++ {APL7_FCCA, CTL_FCC, CTL_FCC},
1193 + {APL1_ETSIC, CTL_FCC, CTL_ETSI},
1194 + {APL2_ETSIC, CTL_FCC, CTL_ETSI},
1195 + {APL2_APLD, CTL_FCC, NO_CTL},
1196 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
1197 +index 65b5834..c2dd4cd 100644
1198 +--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
1199 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
1200 +@@ -74,8 +74,6 @@
1201 + /* RSSI to dBm */
1202 + #define IWL39_RSSI_OFFSET 95
1203 +
1204 +-#define IWL_DEFAULT_TX_POWER 0x0F
1205 +-
1206 + /*
1207 + * EEPROM related constants, enums, and structures.
1208 + */
1209 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
1210 +index 39b6f16..4e7b58b 100644
1211 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
1212 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
1213 +@@ -1823,7 +1823,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1214 +
1215 + /* If we issue a new RXON command which required a tune then we must
1216 + * send a new TXPOWER command or we won't be able to Tx any frames */
1217 +- rc = priv->cfg->ops->lib->send_tx_power(priv);
1218 ++ rc = iwl_set_tx_power(priv, priv->tx_power_next, true);
1219 + if (rc) {
1220 + IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1221 + return rc;
1222 +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
1223 +index 91a9f52..992caa0 100644
1224 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
1225 ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
1226 +@@ -1571,7 +1571,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1227 +
1228 + /* If we issue a new RXON command which required a tune then we must
1229 + * send a new TXPOWER command or we won't be able to Tx any frames */
1230 +- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
1231 ++ ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
1232 + if (ret) {
1233 + IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1234 + return ret;
1235 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1236 +index 6d140bd..ee802fe 100644
1237 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1238 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1239 +@@ -288,10 +288,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1240 + * If we issue a new RXON command which required a tune then we must
1241 + * send a new TXPOWER command or we won't be able to Tx any frames.
1242 + *
1243 +- * FIXME: which RXON requires a tune? Can we optimise this out in
1244 +- * some cases?
1245 ++ * It's expected we set power here if channel is changing.
1246 + */
1247 +- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
1248 ++ ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
1249 + if (ret) {
1250 + IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1251 + return ret;
1252 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
1253 +index c1cfd99..35239f0 100644
1254 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
1255 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
1256 +@@ -3841,12 +3841,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
1257 + priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
1258 + }
1259 +
1260 +- /* Set the tx_power_user_lmt to the lowest power level
1261 +- * this value will get overwritten by channel max power avg
1262 +- * from eeprom */
1263 +- priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
1264 +- priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
1265 +-
1266 + ret = iwl_init_channel_map(priv);
1267 + if (ret) {
1268 + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
1269 +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
1270 +index efbde1f..294e9fc 100644
1271 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c
1272 ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
1273 +@@ -168,6 +168,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
1274 + struct ieee80211_channel *geo_ch;
1275 + struct ieee80211_rate *rates;
1276 + int i = 0;
1277 ++ s8 max_tx_power = 0;
1278 +
1279 + if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
1280 + priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
1281 +@@ -244,8 +245,8 @@ int iwlcore_init_geos(struct iwl_priv *priv)
1282 +
1283 + geo_ch->flags |= ch->ht40_extension_channel;
1284 +
1285 +- if (ch->max_power_avg > priv->tx_power_device_lmt)
1286 +- priv->tx_power_device_lmt = ch->max_power_avg;
1287 ++ if (ch->max_power_avg > max_tx_power)
1288 ++ max_tx_power = ch->max_power_avg;
1289 + } else {
1290 + geo_ch->flags |= IEEE80211_CHAN_DISABLED;
1291 + }
1292 +@@ -258,6 +259,10 @@ int iwlcore_init_geos(struct iwl_priv *priv)
1293 + geo_ch->flags);
1294 + }
1295 +
1296 ++ priv->tx_power_device_lmt = max_tx_power;
1297 ++ priv->tx_power_user_lmt = max_tx_power;
1298 ++ priv->tx_power_next = max_tx_power;
1299 ++
1300 + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
1301 + priv->cfg->sku & IWL_SKU_A) {
1302 + IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
1303 +@@ -1161,6 +1166,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1304 + {
1305 + int ret;
1306 + s8 prev_tx_power;
1307 ++ bool defer;
1308 ++ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1309 +
1310 + lockdep_assert_held(&priv->mutex);
1311 +
1312 +@@ -1188,10 +1195,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1313 + if (!iwl_is_ready_rf(priv))
1314 + return -EIO;
1315 +
1316 +- /* scan complete use tx_power_next, need to be updated */
1317 ++ /* scan complete and commit_rxon use tx_power_next value,
1318 ++ * it always need to be updated for newest request */
1319 + priv->tx_power_next = tx_power;
1320 +- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
1321 +- IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
1322 ++
1323 ++ /* do not set tx power when scanning or channel changing */
1324 ++ defer = test_bit(STATUS_SCANNING, &priv->status) ||
1325 ++ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1326 ++ if (defer && !force) {
1327 ++ IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1328 + return 0;
1329 + }
1330 +
1331 +diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
1332 +index 358cfd7..8b3c127 100644
1333 +--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
1334 ++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
1335 +@@ -724,13 +724,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
1336 + flags & EEPROM_CHANNEL_RADAR))
1337 + ? "" : "not ");
1338 +
1339 +- /* Set the tx_power_user_lmt to the highest power
1340 +- * supported by any channel */
1341 +- if (eeprom_ch_info[ch].max_power_avg >
1342 +- priv->tx_power_user_lmt)
1343 +- priv->tx_power_user_lmt =
1344 +- eeprom_ch_info[ch].max_power_avg;
1345 +-
1346 + ch_info++;
1347 + }
1348 + }
1349 +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
1350 +index 371abbf..64917ed 100644
1351 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
1352 ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
1353 +@@ -94,6 +94,7 @@ MODULE_LICENSE("GPL");
1354 + struct iwl_mod_params iwl3945_mod_params = {
1355 + .sw_crypto = 1,
1356 + .restart_fw = 1,
1357 ++ .disable_hw_scan = 1,
1358 + /* the rest are 0 by default */
1359 + };
1360 +
1361 +@@ -3858,10 +3859,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
1362 + priv->force_reset[IWL_FW_RESET].reset_duration =
1363 + IWL_DELAY_NEXT_FORCE_FW_RELOAD;
1364 +
1365 +-
1366 +- priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
1367 +- priv->tx_power_next = IWL_DEFAULT_TX_POWER;
1368 +-
1369 + if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
1370 + IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
1371 + eeprom->version);
1372 +@@ -3995,8 +3992,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
1373 + * "the hard way", rather than using device's scan.
1374 + */
1375 + if (iwl3945_mod_params.disable_hw_scan) {
1376 +- dev_printk(KERN_DEBUG, &(pdev->dev),
1377 +- "sw scan support is deprecated\n");
1378 ++ IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
1379 + iwl3945_hw_ops.hw_scan = NULL;
1380 + }
1381 +
1382 +@@ -4318,8 +4314,7 @@ MODULE_PARM_DESC(debug, "debug output mask");
1383 + #endif
1384 + module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
1385 + int, S_IRUGO);
1386 +-MODULE_PARM_DESC(disable_hw_scan,
1387 +- "disable hardware scanning (default 0) (deprecated)");
1388 ++MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
1389 + module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
1390 + MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
1391 +
1392 +diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
1393 +index f618b96..2cfdd38 100644
1394 +--- a/drivers/net/wireless/p54/txrx.c
1395 ++++ b/drivers/net/wireless/p54/txrx.c
1396 +@@ -705,7 +705,7 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
1397 + struct p54_tx_info *p54info;
1398 + struct p54_hdr *hdr;
1399 + struct p54_tx_data *txhdr;
1400 +- unsigned int padding, len, extra_len;
1401 ++ unsigned int padding, len, extra_len = 0;
1402 + int i, j, ridx;
1403 + u16 hdr_flags = 0, aid = 0;
1404 + u8 rate, queue = 0, crypt_offset = 0;
1405 +diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
1406 +index 4789f8e..5dc5d3e 100644
1407 +--- a/drivers/pci/intel-iommu.c
1408 ++++ b/drivers/pci/intel-iommu.c
1409 +@@ -1835,7 +1835,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1410 +
1411 + ret = iommu_attach_domain(domain, iommu);
1412 + if (ret) {
1413 +- domain_exit(domain);
1414 ++ free_domain_mem(domain);
1415 + goto error;
1416 + }
1417 +
1418 +@@ -3260,9 +3260,15 @@ static int device_notifier(struct notifier_block *nb,
1419 + if (!domain)
1420 + return 0;
1421 +
1422 +- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
1423 ++ if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
1424 + domain_remove_one_dev_info(domain, pdev);
1425 +
1426 ++ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
1427 ++ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
1428 ++ list_empty(&domain->devices))
1429 ++ domain_exit(domain);
1430 ++ }
1431 ++
1432 + return 0;
1433 + }
1434 +
1435 +@@ -3411,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
1436 + domain->iommu_count--;
1437 + domain_update_iommu_cap(domain);
1438 + spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
1439 ++
1440 ++ spin_lock_irqsave(&iommu->lock, tmp_flags);
1441 ++ clear_bit(domain->id, iommu->domain_ids);
1442 ++ iommu->domains[domain->id] = NULL;
1443 ++ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
1444 + }
1445 +
1446 + spin_unlock_irqrestore(&device_domain_lock, flags);
1447 +@@ -3627,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
1448 +
1449 + pte = dmar_domain->pgd;
1450 + if (dma_pte_present(pte)) {
1451 +- free_pgtable_page(dmar_domain->pgd);
1452 + dmar_domain->pgd = (struct dma_pte *)
1453 + phys_to_virt(dma_pte_addr(pte));
1454 ++ free_pgtable_page(pte);
1455 + }
1456 + dmar_domain->agaw--;
1457 + }
1458 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
1459 +index 114d952..21b1018 100644
1460 +--- a/drivers/platform/x86/ideapad-laptop.c
1461 ++++ b/drivers/platform/x86/ideapad-laptop.c
1462 +@@ -459,6 +459,8 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
1463 + if (test_bit(vpc_bit, &vpc1)) {
1464 + if (vpc_bit == 9)
1465 + ideapad_sync_rfk_state(adevice);
1466 ++ else if (vpc_bit == 4)
1467 ++ read_ec_data(handle, 0x12, &vpc2);
1468 + else
1469 + ideapad_input_report(priv, vpc_bit);
1470 + }
1471 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
1472 +index aa2e5d3..c4b0ef1 100644
1473 +--- a/drivers/tty/n_gsm.c
1474 ++++ b/drivers/tty/n_gsm.c
1475 +@@ -1659,8 +1659,12 @@ static void gsm_queue(struct gsm_mux *gsm)
1476 +
1477 + if ((gsm->control & ~PF) == UI)
1478 + gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
1479 +- /* generate final CRC with received FCS */
1480 +- gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
1481 ++ if (gsm->encoding == 0){
1482 ++ /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
1483 ++ In this case it contain the last piece of data
1484 ++ required to generate final CRC */
1485 ++ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
1486 ++ }
1487 + if (gsm->fcs != GOOD_FCS) {
1488 + gsm->bad_fcs++;
1489 + if (debug & 4)
1490 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
1491 +index dfcf4b1..0d66751 100644
1492 +--- a/drivers/tty/serial/imx.c
1493 ++++ b/drivers/tty/serial/imx.c
1494 +@@ -382,12 +382,13 @@ static void imx_start_tx(struct uart_port *port)
1495 + static irqreturn_t imx_rtsint(int irq, void *dev_id)
1496 + {
1497 + struct imx_port *sport = dev_id;
1498 +- unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
1499 ++ unsigned int val;
1500 + unsigned long flags;
1501 +
1502 + spin_lock_irqsave(&sport->port.lock, flags);
1503 +
1504 + writel(USR1_RTSD, sport->port.membase + USR1);
1505 ++ val = readl(sport->port.membase + USR1) & USR1_RTSS;
1506 + uart_handle_cts_change(&sport->port, !!val);
1507 + wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
1508 +
1509 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1510 +index cc2f73e..b0043fb 100644
1511 +--- a/drivers/virtio/virtio_ring.c
1512 ++++ b/drivers/virtio/virtio_ring.c
1513 +@@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
1514 + /* detach_buf clears data, so grab it now. */
1515 + buf = vq->data[i];
1516 + detach_buf(vq, i);
1517 ++ vq->vring.avail->idx--;
1518 + END_USE(vq);
1519 + return buf;
1520 + }
1521 +diff --git a/fs/file.c b/fs/file.c
1522 +index 0be3447..4c6992d 100644
1523 +--- a/fs/file.c
1524 ++++ b/fs/file.c
1525 +@@ -9,6 +9,7 @@
1526 + #include <linux/module.h>
1527 + #include <linux/fs.h>
1528 + #include <linux/mm.h>
1529 ++#include <linux/mmzone.h>
1530 + #include <linux/time.h>
1531 + #include <linux/sched.h>
1532 + #include <linux/slab.h>
1533 +@@ -39,14 +40,17 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */
1534 + */
1535 + static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
1536 +
1537 +-static inline void *alloc_fdmem(unsigned int size)
1538 ++static void *alloc_fdmem(unsigned int size)
1539 + {
1540 +- void *data;
1541 +-
1542 +- data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
1543 +- if (data != NULL)
1544 +- return data;
1545 +-
1546 ++ /*
1547 ++ * Very large allocations can stress page reclaim, so fall back to
1548 ++ * vmalloc() if the allocation size will be considered "large" by the VM.
1549 ++ */
1550 ++ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
1551 ++ void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
1552 ++ if (data != NULL)
1553 ++ return data;
1554 ++ }
1555 + return vmalloc(size);
1556 + }
1557 +
1558 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1559 +index 0592288..6221640 100644
1560 +--- a/fs/nfs/nfs4state.c
1561 ++++ b/fs/nfs/nfs4state.c
1562 +@@ -1600,7 +1600,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
1563 + int status = 0;
1564 +
1565 + /* Ensure exclusive access to NFSv4 state */
1566 +- for(;;) {
1567 ++ do {
1568 + if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
1569 + /* We're going to have to re-establish a clientid */
1570 + status = nfs4_reclaim_lease(clp);
1571 +@@ -1684,7 +1684,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
1572 + break;
1573 + if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1574 + break;
1575 +- }
1576 ++ } while (atomic_read(&clp->cl_count) > 1);
1577 + return;
1578 + out_error:
1579 + printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
1580 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
1581 +index b68c860..6a2ec50 100644
1582 +--- a/fs/nfs/super.c
1583 ++++ b/fs/nfs/super.c
1584 +@@ -2077,6 +2077,15 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
1585 + if (error < 0)
1586 + goto out;
1587 +
1588 ++ /*
1589 ++ * noac is a special case. It implies -o sync, but that's not
1590 ++ * necessarily reflected in the mtab options. do_remount_sb
1591 ++ * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
1592 ++ * remount options, so we have to explicitly reset it.
1593 ++ */
1594 ++ if (data->flags & NFS_MOUNT_NOAC)
1595 ++ *flags |= MS_SYNCHRONOUS;
1596 ++
1597 + /* compare new mount options with old ones */
1598 + error = nfs_compare_remount_data(nfss, data);
1599 + out:
1600 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1601 +index 96aaaa4..18c356c 100644
1602 +--- a/fs/nfsd/nfs4state.c
1603 ++++ b/fs/nfsd/nfs4state.c
1604 +@@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
1605 + if (atomic_dec_and_test(&fp->fi_delegees)) {
1606 + vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
1607 + fp->fi_lease = NULL;
1608 ++ fput(fp->fi_deleg_file);
1609 + fp->fi_deleg_file = NULL;
1610 + }
1611 + }
1612 +@@ -402,8 +403,8 @@ static void free_generic_stateid(struct nfs4_stateid *stp)
1613 + if (stp->st_access_bmap) {
1614 + oflag = nfs4_access_bmap_to_omode(stp);
1615 + nfs4_file_put_access(stp->st_file, oflag);
1616 +- put_nfs4_file(stp->st_file);
1617 + }
1618 ++ put_nfs4_file(stp->st_file);
1619 + kmem_cache_free(stateid_slab, stp);
1620 + }
1621 +
1622 +diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
1623 +index 77e9b87..c0c590f 100644
1624 +--- a/fs/ubifs/recovery.c
1625 ++++ b/fs/ubifs/recovery.c
1626 +@@ -300,6 +300,32 @@ int ubifs_recover_master_node(struct ubifs_info *c)
1627 + goto out_free;
1628 + }
1629 + memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
1630 ++
1631 ++ /*
1632 ++ * We had to recover the master node, which means there was an
1633 ++ * unclean reboot. However, it is possible that the master node
1634 ++ * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
1635 ++ * E.g., consider the following chain of events:
1636 ++ *
1637 ++ * 1. UBIFS was cleanly unmounted, so the master node is clean
1638 ++ * 2. UBIFS is being mounted R/W and starts changing the master
1639 ++ * node in the first LEB (%UBIFS_MST_LNUM). A power cut happens,
1640 ++ * so this LEB ends up with some amount of garbage at the
1641 ++ * end.
1642 ++ * 3. UBIFS is being mounted R/O. We reach this place and
1643 ++ * recover the master node from the second LEB
1644 ++ * (%UBIFS_MST_LNUM + 1). But we cannot update the media
1645 ++ * because we are being mounted R/O. We have to defer the
1646 ++ * operation.
1647 ++ * 4. However, this master node (@c->mst_node) is marked as
1648 ++ * clean (since step 1). And if we just return, the
1649 ++ * mount code will be confused and won't recover the master
1650 ++ * node when it is re-mounted R/W later.
1651 ++ *
1652 ++ * Thus, we force the recovery by marking the master node as
1653 ++ * dirty.
1654 ++ */
1655 ++ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
1656 + } else {
1657 + /* Write the recovered master node */
1658 + c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
1659 +diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
1660 +index 0f029e1..e94d962 100644
1661 +--- a/fs/ubifs/super.c
1662 ++++ b/fs/ubifs/super.c
1663 +@@ -1643,15 +1643,27 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1664 + if (err)
1665 + goto out;
1666 +
1667 ++ dbg_gen("re-mounted read-write");
1668 ++ c->remounting_rw = 0;
1669 ++
1670 + if (c->need_recovery) {
1671 + c->need_recovery = 0;
1672 + ubifs_msg("deferred recovery completed");
1673 ++ } else {
1674 ++ /*
1675 ++ * Do not run the debugging space check if we were doing
1676 ++ * recovery, because when we saved the information we had the
1677 ++ * file-system in a state where the TNC and lprops had been
1678 ++ * modified in memory, but all the I/O operations (including a
1679 ++ * commit) were deferred. So the file-system was in
1680 ++ * "non-committed" state. Now the file-system is in committed
1681 ++ * state, and of course the amount of free space will change
1682 ++ * because, for example, the old index size was imprecise.
1683 ++ */
1684 ++ err = dbg_check_space_info(c);
1685 + }
1686 +
1687 +- dbg_gen("re-mounted read-write");
1688 +- c->remounting_rw = 0;
1689 + c->always_chk_crc = 0;
1690 +- err = dbg_check_space_info(c);
1691 + mutex_unlock(&c->umount_mutex);
1692 + return err;
1693 +
1694 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
1695 +index df29c8f..8847c8c 100644
1696 +--- a/include/linux/huge_mm.h
1697 ++++ b/include/linux/huge_mm.h
1698 +@@ -117,7 +117,7 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
1699 + unsigned long end,
1700 + long adjust_next)
1701 + {
1702 +- if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1703 ++ if (!vma->anon_vma || vma->vm_ops)
1704 + return;
1705 + __vma_adjust_trans_huge(vma, start, end, adjust_next);
1706 + }
1707 +diff --git a/include/linux/libata.h b/include/linux/libata.h
1708 +index c9c5d7a..1f00080 100644
1709 +--- a/include/linux/libata.h
1710 ++++ b/include/linux/libata.h
1711 +@@ -203,6 +203,7 @@ enum {
1712 + * management */
1713 + ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
1714 + * led */
1715 ++ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
1716 +
1717 + /* bits 24:31 of ap->flags are reserved for LLD specific flags */
1718 +
1719 +diff --git a/include/linux/mm.h b/include/linux/mm.h
1720 +index c67adb4..248c946 100644
1721 +--- a/include/linux/mm.h
1722 ++++ b/include/linux/mm.h
1723 +@@ -137,7 +137,8 @@ extern unsigned int kobjsize(const void *objp);
1724 + #define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
1725 +
1726 + /*
1727 +- * special vmas that are non-mergable, non-mlock()able
1728 ++ * Special vmas that are non-mergeable, non-mlock()able.
1729 ++ * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
1730 + */
1731 + #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
1732 +
1733 +diff --git a/init/Kconfig b/init/Kconfig
1734 +index be788c0..47dd02f 100644
1735 +--- a/init/Kconfig
1736 ++++ b/init/Kconfig
1737 +@@ -1209,6 +1209,7 @@ config SLAB
1738 + per cpu and per node queues.
1739 +
1740 + config SLUB
1741 ++ depends on BROKEN || NUMA || !DISCONTIGMEM
1742 + bool "SLUB (Unqueued Allocator)"
1743 + help
1744 + SLUB is a slab allocator that minimizes cache line usage
1745 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1746 +index 8f76561..56cac93 100644
1747 +--- a/mm/huge_memory.c
1748 ++++ b/mm/huge_memory.c
1749 +@@ -1400,6 +1400,9 @@ out:
1750 + return ret;
1751 + }
1752 +
1753 ++#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
1754 ++ VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1755 ++
1756 + int hugepage_madvise(struct vm_area_struct *vma,
1757 + unsigned long *vm_flags, int advice)
1758 + {
1759 +@@ -1408,11 +1411,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
1760 + /*
1761 + * Be somewhat over-protective like KSM for now!
1762 + */
1763 +- if (*vm_flags & (VM_HUGEPAGE |
1764 +- VM_SHARED | VM_MAYSHARE |
1765 +- VM_PFNMAP | VM_IO | VM_DONTEXPAND |
1766 +- VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1767 +- VM_MIXEDMAP | VM_SAO))
1768 ++ if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1769 + return -EINVAL;
1770 + *vm_flags &= ~VM_NOHUGEPAGE;
1771 + *vm_flags |= VM_HUGEPAGE;
1772 +@@ -1428,11 +1427,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
1773 + /*
1774 + * Be somewhat over-protective like KSM for now!
1775 + */
1776 +- if (*vm_flags & (VM_NOHUGEPAGE |
1777 +- VM_SHARED | VM_MAYSHARE |
1778 +- VM_PFNMAP | VM_IO | VM_DONTEXPAND |
1779 +- VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1780 +- VM_MIXEDMAP | VM_SAO))
1781 ++ if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1782 + return -EINVAL;
1783 + *vm_flags &= ~VM_HUGEPAGE;
1784 + *vm_flags |= VM_NOHUGEPAGE;
1785 +@@ -1566,10 +1561,14 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1786 + * page fault if needed.
1787 + */
1788 + return 0;
1789 +- if (vma->vm_file || vma->vm_ops)
1790 ++ if (vma->vm_ops)
1791 + /* khugepaged not yet working on file or special mappings */
1792 + return 0;
1793 +- VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1794 ++ /*
1795 ++ * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1796 ++ * true too; verify it here.
1797 ++ */
1798 ++ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1799 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1800 + hend = vma->vm_end & HPAGE_PMD_MASK;
1801 + if (hstart < hend)
1802 +@@ -1818,12 +1817,15 @@ static void collapse_huge_page(struct mm_struct *mm,
1803 + (vma->vm_flags & VM_NOHUGEPAGE))
1804 + goto out;
1805 +
1806 +- /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1807 +- if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1808 ++ if (!vma->anon_vma || vma->vm_ops)
1809 + goto out;
1810 + if (is_vma_temporary_stack(vma))
1811 + goto out;
1812 +- VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1813 ++ /*
1814 ++ * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1815 ++ * true too; verify it here.
1816 ++ */
1817 ++ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1818 +
1819 + pgd = pgd_offset(mm, address);
1820 + if (!pgd_present(*pgd))
1821 +@@ -2056,13 +2058,16 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1822 + progress++;
1823 + continue;
1824 + }
1825 +- /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1826 +- if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1827 ++ if (!vma->anon_vma || vma->vm_ops)
1828 + goto skip;
1829 + if (is_vma_temporary_stack(vma))
1830 + goto skip;
1831 +-
1832 +- VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1833 ++ /*
1834 ++ * If is_pfn_mapping() is true, is_linear_pfn_mapping()
1835 ++ * must be true too; verify it here.
1836 ++ */
1837 ++ VM_BUG_ON(is_linear_pfn_mapping(vma) ||
1838 ++ vma->vm_flags & VM_NO_THP);
1839 +
1840 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1841 + hend = vma->vm_end & HPAGE_PMD_MASK;
1842 +diff --git a/mm/memory.c b/mm/memory.c
1843 +index f17746a..ab88d09 100644
1844 +--- a/mm/memory.c
1845 ++++ b/mm/memory.c
1846 +@@ -3332,7 +3332,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1847 + * run pte_offset_map on the pmd, if an huge pmd could
1848 + * materialize from under us from a different thread.
1849 + */
1850 +- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
1851 ++ if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
1852 + return VM_FAULT_OOM;
1853 + /* if an huge pmd materialized from under us just retry later */
1854 + if (unlikely(pmd_trans_huge(*pmd)))
1855 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1856 +index ea16f72..49ea0cc 100644
1857 +--- a/mm/oom_kill.c
1858 ++++ b/mm/oom_kill.c
1859 +@@ -172,10 +172,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
1860 +
1861 + /*
1862 + * The baseline for the badness score is the proportion of RAM that each
1863 +- * task's rss and swap space use.
1864 ++ * task's rss, pagetable and swap space use.
1865 + */
1866 +- points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 /
1867 +- totalpages;
1868 ++ points = get_mm_rss(p->mm) + p->mm->nr_ptes;
1869 ++ points += get_mm_counter(p->mm, MM_SWAPENTS);
1870 ++
1871 ++ points *= 1000;
1872 ++ points /= totalpages;
1873 + task_unlock(p);
1874 +
1875 + /*
1876 +diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
1877 +index 659326c..006ad81 100644
1878 +--- a/scripts/kconfig/conf.c
1879 ++++ b/scripts/kconfig/conf.c
1880 +@@ -332,7 +332,7 @@ static int conf_choice(struct menu *menu)
1881 + }
1882 + if (!child)
1883 + continue;
1884 +- if (line[strlen(line) - 1] == '?') {
1885 ++ if (line[0] && line[strlen(line) - 1] == '?') {
1886 + print_help(child);
1887 + continue;
1888 + }
1889 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1890 +index da7cdca..8ff0223 100644
1891 +--- a/sound/pci/hda/patch_realtek.c
1892 ++++ b/sound/pci/hda/patch_realtek.c
1893 +@@ -14945,6 +14945,23 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
1894 + alc_write_coef_idx(codec, 0x1e, coef | 0x80);
1895 + }
1896 +
1897 ++static void alc271_fixup_dmic(struct hda_codec *codec,
1898 ++ const struct alc_fixup *fix, int action)
1899 ++{
1900 ++ static struct hda_verb verbs[] = {
1901 ++ {0x20, AC_VERB_SET_COEF_INDEX, 0x0d},
1902 ++ {0x20, AC_VERB_SET_PROC_COEF, 0x4000},
1903 ++ {}
1904 ++ };
1905 ++ unsigned int cfg;
1906 ++
1907 ++ if (strcmp(codec->chip_name, "ALC271X"))
1908 ++ return;
1909 ++ cfg = snd_hda_codec_get_pincfg(codec, 0x12);
1910 ++ if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
1911 ++ snd_hda_sequence_write(codec, verbs);
1912 ++}
1913 ++
1914 + enum {
1915 + ALC269_FIXUP_SONY_VAIO,
1916 + ALC275_FIXUP_SONY_VAIO_GPIO2,
1917 +@@ -14953,6 +14970,7 @@ enum {
1918 + ALC269_FIXUP_ASUS_G73JW,
1919 + ALC269_FIXUP_LENOVO_EAPD,
1920 + ALC275_FIXUP_SONY_HWEQ,
1921 ++ ALC271_FIXUP_DMIC,
1922 + };
1923 +
1924 + static const struct alc_fixup alc269_fixups[] = {
1925 +@@ -15006,7 +15024,11 @@ static const struct alc_fixup alc269_fixups[] = {
1926 + .v.func = alc269_fixup_hweq,
1927 + .chained = true,
1928 + .chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2
1929 +- }
1930 ++ },
1931 ++ [ALC271_FIXUP_DMIC] = {
1932 ++ .type = ALC_FIXUP_FUNC,
1933 ++ .v.func = alc271_fixup_dmic,
1934 ++ },
1935 + };
1936 +
1937 + static struct snd_pci_quirk alc269_fixup_tbl[] = {
1938 +@@ -15015,6 +15037,7 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = {
1939 + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
1940 + SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
1941 + SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
1942 ++ SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
1943 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
1944 + SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
1945 + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
1946 +diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
1947 +index f7cd346..f5ccdbf 100644
1948 +--- a/sound/soc/codecs/jz4740.c
1949 ++++ b/sound/soc/codecs/jz4740.c
1950 +@@ -308,8 +308,6 @@ static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
1951 + snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
1952 + ARRAY_SIZE(jz4740_codec_dapm_routes));
1953 +
1954 +- snd_soc_dapm_new_widgets(codec);
1955 +-
1956 + jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1957 +
1958 + return 0;
1959 +diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
1960 +index 5168927..d365f43 100644
1961 +--- a/sound/soc/codecs/wm_hubs.c
1962 ++++ b/sound/soc/codecs/wm_hubs.c
1963 +@@ -739,12 +739,12 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
1964 +
1965 + { "SPKL", "Input Switch", "MIXINL" },
1966 + { "SPKL", "IN1LP Switch", "IN1LP" },
1967 +- { "SPKL", "Output Switch", "Left Output Mixer" },
1968 ++ { "SPKL", "Output Switch", "Left Output PGA" },
1969 + { "SPKL", NULL, "TOCLK" },
1970 +
1971 + { "SPKR", "Input Switch", "MIXINR" },
1972 + { "SPKR", "IN1RP Switch", "IN1RP" },
1973 +- { "SPKR", "Output Switch", "Right Output Mixer" },
1974 ++ { "SPKR", "Output Switch", "Right Output PGA" },
1975 + { "SPKR", NULL, "TOCLK" },
1976 +
1977 + { "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
1978 +@@ -766,8 +766,8 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
1979 + { "SPKOUTRP", NULL, "SPKR Driver" },
1980 + { "SPKOUTRN", NULL, "SPKR Driver" },
1981 +
1982 +- { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
1983 +- { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
1984 ++ { "Left Headphone Mux", "Mixer", "Left Output PGA" },
1985 ++ { "Right Headphone Mux", "Mixer", "Right Output PGA" },
1986 +
1987 + { "Headphone PGA", NULL, "Left Headphone Mux" },
1988 + { "Headphone PGA", NULL, "Right Headphone Mux" },