From: "Tom Wijsman (tomwij)" <tomwij@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2306 - genpatches-2.6/trunk/3.4
Date: Mon, 04 Mar 2013 18:30:12
Message-Id: 20130304183008.381EA2171D@flycatcher.gentoo.org
1 Author: tomwij
2 Date: 2013-03-04 18:29:04 +0000 (Mon, 04 Mar 2013)
3 New Revision: 2306
4
5 Added:
6 genpatches-2.6/trunk/3.4/1034_linux-3.4.35.patch
7 Modified:
8 genpatches-2.6/trunk/3.4/0000_README
9 Log:
10 Linux patch 3.4.35.
11
12 Modified: genpatches-2.6/trunk/3.4/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/3.4/0000_README 2013-03-04 00:14:10 UTC (rev 2305)
15 +++ genpatches-2.6/trunk/3.4/0000_README 2013-03-04 18:29:04 UTC (rev 2306)
16 @@ -175,6 +175,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 3.4.34
19
20 +Patch: 1034_linux-3.4.35.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 3.4.35
23 +
24 Patch: 1700_correct-bnx2-firware-ver-mips.patch
25 From: https://bugs.gentoo.org/show_bug.cgi?id=424609
26 Desc: Correct firmware version for bnx2 on mips
27
28 Added: genpatches-2.6/trunk/3.4/1034_linux-3.4.35.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/3.4/1034_linux-3.4.35.patch (rev 0)
31 +++ genpatches-2.6/trunk/3.4/1034_linux-3.4.35.patch 2013-03-04 18:29:04 UTC (rev 2306)
32 @@ -0,0 +1,1383 @@
33 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
34 +index c1601e5..753d18a 100644
35 +--- a/Documentation/kernel-parameters.txt
36 ++++ b/Documentation/kernel-parameters.txt
37 +@@ -557,6 +557,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
38 + UART at the specified I/O port or MMIO address,
39 + switching to the matching ttyS device later. The
40 + options are the same as for ttyS, above.
41 ++ hvc<n> Use the hypervisor console device <n>. This is for
42 ++ both Xen and PowerPC hypervisors.
43 +
44 + If the device connected to the port is not a TTY but a braille
45 + device, prepend "brl," before the device type, for instance
46 +@@ -742,6 +744,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
47 +
48 + earlyprintk= [X86,SH,BLACKFIN]
49 + earlyprintk=vga
50 ++ earlyprintk=xen
51 + earlyprintk=serial[,ttySn[,baudrate]]
52 + earlyprintk=ttySn[,baudrate]
53 + earlyprintk=dbgp[debugController#]
54 +@@ -759,6 +762,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
55 + The VGA output is eventually overwritten by the real
56 + console.
57 +
58 ++ The xen output can only be used by Xen PV guests.
59 ++
60 + ekgdboc= [X86,KGDB] Allow early kernel console debugging
61 + ekgdboc=kbd
62 +
63 +diff --git a/Makefile b/Makefile
64 +index 250be36..282e8da 100644
65 +--- a/Makefile
66 ++++ b/Makefile
67 +@@ -1,6 +1,6 @@
68 + VERSION = 3
69 + PATCHLEVEL = 4
70 +-SUBLEVEL = 34
71 ++SUBLEVEL = 35
72 + EXTRAVERSION =
73 + NAME = Saber-toothed Squirrel
74 +
75 +diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
76 +index 48d9d4e..992f442 100644
77 +--- a/arch/x86/kernel/head.c
78 ++++ b/arch/x86/kernel/head.c
79 +@@ -5,8 +5,6 @@
80 + #include <asm/setup.h>
81 + #include <asm/bios_ebda.h>
82 +
83 +-#define BIOS_LOWMEM_KILOBYTES 0x413
84 +-
85 + /*
86 + * The BIOS places the EBDA/XBDA at the top of conventional
87 + * memory, and usually decreases the reported amount of
88 +@@ -16,17 +14,30 @@
89 + * chipset: reserve a page before VGA to prevent PCI prefetch
90 + * into it (errata #56). Usually the page is reserved anyways,
91 + * unless you have no PS/2 mouse plugged in.
92 ++ *
93 ++ * This functions is deliberately very conservative. Losing
94 ++ * memory in the bottom megabyte is rarely a problem, as long
95 ++ * as we have enough memory to install the trampoline. Using
96 ++ * memory that is in use by the BIOS or by some DMA device
97 ++ * the BIOS didn't shut down *is* a big problem.
98 + */
99 ++
100 ++#define BIOS_LOWMEM_KILOBYTES 0x413
101 ++#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
102 ++#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
103 ++
104 + void __init reserve_ebda_region(void)
105 + {
106 + unsigned int lowmem, ebda_addr;
107 +
108 +- /* To determine the position of the EBDA and the */
109 +- /* end of conventional memory, we need to look at */
110 +- /* the BIOS data area. In a paravirtual environment */
111 +- /* that area is absent. We'll just have to assume */
112 +- /* that the paravirt case can handle memory setup */
113 +- /* correctly, without our help. */
114 ++ /*
115 ++ * To determine the position of the EBDA and the
116 ++ * end of conventional memory, we need to look at
117 ++ * the BIOS data area. In a paravirtual environment
118 ++ * that area is absent. We'll just have to assume
119 ++ * that the paravirt case can handle memory setup
120 ++ * correctly, without our help.
121 ++ */
122 + if (paravirt_enabled())
123 + return;
124 +
125 +@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
126 + /* start of EBDA area */
127 + ebda_addr = get_bios_ebda();
128 +
129 +- /* Fixup: bios puts an EBDA in the top 64K segment */
130 +- /* of conventional memory, but does not adjust lowmem. */
131 +- if ((lowmem - ebda_addr) <= 0x10000)
132 +- lowmem = ebda_addr;
133 ++ /*
134 ++ * Note: some old Dells seem to need 4k EBDA without
135 ++ * reporting so, so just consider the memory above 0x9f000
136 ++ * to be off limits (bugzilla 2990).
137 ++ */
138 ++
139 ++ /* If the EBDA address is below 128K, assume it is bogus */
140 ++ if (ebda_addr < INSANE_CUTOFF)
141 ++ ebda_addr = LOWMEM_CAP;
142 +
143 +- /* Fixup: bios does not report an EBDA at all. */
144 +- /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
145 +- if ((ebda_addr == 0) && (lowmem >= 0x9f000))
146 +- lowmem = 0x9f000;
147 ++ /* If lowmem is less than 128K, assume it is bogus */
148 ++ if (lowmem < INSANE_CUTOFF)
149 ++ lowmem = LOWMEM_CAP;
150 +
151 +- /* Paranoia: should never happen, but... */
152 +- if ((lowmem == 0) || (lowmem >= 0x100000))
153 +- lowmem = 0x9f000;
154 ++ /* Use the lower of the lowmem and EBDA markers as the cutoff */
155 ++ lowmem = min(lowmem, ebda_addr);
156 ++ lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
157 +
158 + /* reserve all memory between lowmem and the 1MB mark */
159 + memblock_reserve(lowmem, 0x100000 - lowmem);
160 +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
161 +index 3705bb0..1e40637 100644
162 +--- a/arch/x86/platform/efi/efi.c
163 ++++ b/arch/x86/platform/efi/efi.c
164 +@@ -84,9 +84,10 @@ int efi_enabled(int facility)
165 + }
166 + EXPORT_SYMBOL(efi_enabled);
167 +
168 ++static bool disable_runtime = false;
169 + static int __init setup_noefi(char *arg)
170 + {
171 +- clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
172 ++ disable_runtime = true;
173 + return 0;
174 + }
175 + early_param("noefi", setup_noefi);
176 +@@ -733,7 +734,7 @@ void __init efi_init(void)
177 + if (!efi_is_native())
178 + pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
179 + else {
180 +- if (efi_runtime_init())
181 ++ if (disable_runtime || efi_runtime_init())
182 + return;
183 + set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
184 + }
185 +diff --git a/block/genhd.c b/block/genhd.c
186 +index 9cf5583..60108d9 100644
187 +--- a/block/genhd.c
188 ++++ b/block/genhd.c
189 +@@ -25,7 +25,7 @@ static DEFINE_MUTEX(block_class_lock);
190 + struct kobject *block_depr;
191 +
192 + /* for extended dynamic devt allocation, currently only one major is used */
193 +-#define MAX_EXT_DEVT (1 << MINORBITS)
194 ++#define NR_EXT_DEVT (1 << MINORBITS)
195 +
196 + /* For extended devt allocation. ext_devt_mutex prevents look up
197 + * results from going away underneath its user.
198 +@@ -420,17 +420,18 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
199 + do {
200 + if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
201 + return -ENOMEM;
202 ++ mutex_lock(&ext_devt_mutex);
203 + rc = idr_get_new(&ext_devt_idr, part, &idx);
204 ++ if (!rc && idx >= NR_EXT_DEVT) {
205 ++ idr_remove(&ext_devt_idr, idx);
206 ++ rc = -EBUSY;
207 ++ }
208 ++ mutex_unlock(&ext_devt_mutex);
209 + } while (rc == -EAGAIN);
210 +
211 + if (rc)
212 + return rc;
213 +
214 +- if (idx > MAX_EXT_DEVT) {
215 +- idr_remove(&ext_devt_idr, idx);
216 +- return -EBUSY;
217 +- }
218 +-
219 + *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
220 + return 0;
221 + }
222 +@@ -644,7 +645,6 @@ void del_gendisk(struct gendisk *disk)
223 + disk_part_iter_exit(&piter);
224 +
225 + invalidate_partition(disk, 0);
226 +- blk_free_devt(disk_to_dev(disk)->devt);
227 + set_capacity(disk, 0);
228 + disk->flags &= ~GENHD_FL_UP;
229 +
230 +@@ -662,6 +662,7 @@ void del_gendisk(struct gendisk *disk)
231 + if (!sysfs_deprecated)
232 + sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
233 + device_del(disk_to_dev(disk));
234 ++ blk_free_devt(disk_to_dev(disk)->devt);
235 + }
236 + EXPORT_SYMBOL(del_gendisk);
237 +
238 +diff --git a/block/partition-generic.c b/block/partition-generic.c
239 +index 6df5d69..7b8b8d1 100644
240 +--- a/block/partition-generic.c
241 ++++ b/block/partition-generic.c
242 +@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
243 + if (!part)
244 + return;
245 +
246 +- blk_free_devt(part_devt(part));
247 + rcu_assign_pointer(ptbl->part[partno], NULL);
248 + rcu_assign_pointer(ptbl->last_lookup, NULL);
249 + kobject_put(part->holder_dir);
250 + device_del(part_to_dev(part));
251 ++ blk_free_devt(part_devt(part));
252 +
253 + hd_struct_put(part);
254 + }
255 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
256 +index aa0a904..189c704 100644
257 +--- a/drivers/acpi/sleep.c
258 ++++ b/drivers/acpi/sleep.c
259 +@@ -186,6 +186,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
260 + },
261 + {
262 + .callback = init_nvs_nosave,
263 ++ .ident = "Sony Vaio VGN-FW41E_H",
264 ++ .matches = {
265 ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
266 ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
267 ++ },
268 ++ },
269 ++ {
270 ++ .callback = init_nvs_nosave,
271 + .ident = "Sony Vaio VGN-FW21E",
272 + .matches = {
273 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
274 +diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
275 +index 3c809bf..88f6908 100644
276 +--- a/drivers/ata/ata_piix.c
277 ++++ b/drivers/ata/ata_piix.c
278 +@@ -331,6 +331,23 @@ static const struct pci_device_id piix_pci_tbl[] = {
279 + { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
280 + /* SATA Controller IDE (DH89xxCC) */
281 + { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
282 ++ /* SATA Controller IDE (Avoton) */
283 ++ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
284 ++ /* SATA Controller IDE (Avoton) */
285 ++ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
286 ++ /* SATA Controller IDE (Avoton) */
287 ++ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
288 ++ /* SATA Controller IDE (Avoton) */
289 ++ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
290 ++ /* SATA Controller IDE (Wellsburg) */
291 ++ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
292 ++ /* SATA Controller IDE (Wellsburg) */
293 ++ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
294 ++ /* SATA Controller IDE (Wellsburg) */
295 ++ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
296 ++ /* SATA Controller IDE (Wellsburg) */
297 ++ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
298 ++
299 + { } /* terminate list */
300 + };
301 +
302 +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
303 +index c6decb9..73d8c92 100644
304 +--- a/drivers/block/xen-blkback/blkback.c
305 ++++ b/drivers/block/xen-blkback/blkback.c
306 +@@ -623,7 +623,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
307 + goto fail_response;
308 + }
309 +
310 +- preq.dev = req->u.rw.handle;
311 + preq.sector_number = req->u.rw.sector_number;
312 + preq.nr_sects = 0;
313 +
314 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
315 +index 4f66171..a155254 100644
316 +--- a/drivers/block/xen-blkback/xenbus.c
317 ++++ b/drivers/block/xen-blkback/xenbus.c
318 +@@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
319 + be->blkif = NULL;
320 + }
321 +
322 ++ kfree(be->mode);
323 + kfree(be);
324 + dev_set_drvdata(&dev->dev, NULL);
325 + return 0;
326 +@@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch,
327 + = container_of(watch, struct backend_info, backend_watch);
328 + struct xenbus_device *dev = be->dev;
329 + int cdrom = 0;
330 ++ unsigned long handle;
331 + char *device_type;
332 +
333 + DPRINTK("");
334 +@@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch,
335 + return;
336 + }
337 +
338 +- if ((be->major || be->minor) &&
339 +- ((be->major != major) || (be->minor != minor))) {
340 +- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
341 +- be->major, be->minor, major, minor);
342 ++ if (be->major | be->minor) {
343 ++ if (be->major != major || be->minor != minor)
344 ++ pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
345 ++ be->major, be->minor, major, minor);
346 + return;
347 + }
348 +
349 +@@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch,
350 + kfree(device_type);
351 + }
352 +
353 +- if (be->major == 0 && be->minor == 0) {
354 +- /* Front end dir is a number, which is used as the handle. */
355 +-
356 +- char *p = strrchr(dev->otherend, '/') + 1;
357 +- long handle;
358 +- err = strict_strtoul(p, 0, &handle);
359 +- if (err)
360 +- return;
361 ++ /* Front end dir is a number, which is used as the handle. */
362 ++ err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
363 ++ if (err)
364 ++ return;
365 +
366 +- be->major = major;
367 +- be->minor = minor;
368 ++ be->major = major;
369 ++ be->minor = minor;
370 +
371 +- err = xen_vbd_create(be->blkif, handle, major, minor,
372 +- (NULL == strchr(be->mode, 'w')), cdrom);
373 +- if (err) {
374 +- be->major = 0;
375 +- be->minor = 0;
376 +- xenbus_dev_fatal(dev, err, "creating vbd structure");
377 +- return;
378 +- }
379 ++ err = xen_vbd_create(be->blkif, handle, major, minor,
380 ++ !strchr(be->mode, 'w'), cdrom);
381 +
382 ++ if (err)
383 ++ xenbus_dev_fatal(dev, err, "creating vbd structure");
384 ++ else {
385 + err = xenvbd_sysfs_addif(dev);
386 + if (err) {
387 + xen_vbd_free(&be->blkif->vbd);
388 +- be->major = 0;
389 +- be->minor = 0;
390 + xenbus_dev_fatal(dev, err, "creating sysfs entries");
391 +- return;
392 + }
393 ++ }
394 +
395 ++ if (err) {
396 ++ kfree(be->mode);
397 ++ be->mode = NULL;
398 ++ be->major = 0;
399 ++ be->minor = 0;
400 ++ } else {
401 + /* We're potentially connected now */
402 + xen_update_blkif_status(be->blkif);
403 + }
404 +diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
405 +index 68109e9..04ebeaf 100644
406 +--- a/drivers/firewire/core-device.c
407 ++++ b/drivers/firewire/core-device.c
408 +@@ -999,6 +999,10 @@ static void fw_device_init(struct work_struct *work)
409 + ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
410 + idr_get_new(&fw_device_idr, device, &minor) :
411 + -ENOMEM;
412 ++ if (minor >= 1 << MINORBITS) {
413 ++ idr_remove(&fw_device_idr, minor);
414 ++ minor = -ENOSPC;
415 ++ }
416 + up_write(&fw_device_rwsem);
417 +
418 + if (ret < 0)
419 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
420 +index 1a92a27..ff73d60 100644
421 +--- a/drivers/hid/hid-core.c
422 ++++ b/drivers/hid/hid-core.c
423 +@@ -1922,6 +1922,7 @@ static const struct hid_device_id hid_ignore_list[] = {
424 + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
425 + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
426 + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
427 ++ { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
428 + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
429 + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
430 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
431 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
432 +index 5583899..02f4664 100644
433 +--- a/drivers/hid/hid-ids.h
434 ++++ b/drivers/hid/hid-ids.h
435 +@@ -520,6 +520,9 @@
436 + #define USB_VENDOR_ID_MADCATZ 0x0738
437 + #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
438 +
439 ++#define USB_VENDOR_ID_MASTERKIT 0x16c0
440 ++#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
441 ++
442 + #define USB_VENDOR_ID_MCC 0x09db
443 + #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
444 + #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
445 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
446 +index ef0ae93..b573f80 100644
447 +--- a/drivers/iommu/amd_iommu_init.c
448 ++++ b/drivers/iommu/amd_iommu_init.c
449 +@@ -1572,8 +1572,6 @@ int __init amd_iommu_init_hardware(void)
450 + if (amd_iommu_pd_alloc_bitmap == NULL)
451 + goto free;
452 +
453 +- /* init the device table */
454 +- init_device_table();
455 +
456 + /*
457 + * let all alias entries point to itself
458 +@@ -1655,6 +1653,7 @@ out:
459 + */
460 + static int __init amd_iommu_init(void)
461 + {
462 ++ struct amd_iommu *iommu;
463 + int ret = 0;
464 +
465 + ret = amd_iommu_init_hardware();
466 +@@ -1673,6 +1672,12 @@ static int __init amd_iommu_init(void)
467 + if (ret)
468 + goto free;
469 +
470 ++ /* init the device table */
471 ++ init_device_table();
472 ++
473 ++ for_each_iommu(iommu)
474 ++ iommu_flush_all_caches(iommu);
475 ++
476 + amd_iommu_init_api();
477 +
478 + x86_platform.iommu_shutdown = disable_iommus;
479 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
480 +index cabc19c..cec1f8c 100644
481 +--- a/drivers/media/rc/rc-main.c
482 ++++ b/drivers/media/rc/rc-main.c
483 +@@ -778,8 +778,10 @@ static ssize_t show_protocols(struct device *device,
484 + } else if (dev->raw) {
485 + enabled = dev->raw->enabled_protocols;
486 + allowed = ir_raw_get_allowed_protocols();
487 +- } else
488 ++ } else {
489 ++ mutex_unlock(&dev->lock);
490 + return -ENODEV;
491 ++ }
492 +
493 + IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
494 + (long long)allowed,
495 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
496 +index 63089cc..9284bca 100644
497 +--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
498 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
499 +@@ -938,6 +938,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
500 + AR_PHY_CL_TAB_1,
501 + AR_PHY_CL_TAB_2 };
502 +
503 ++ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
504 ++
505 + if (rtt) {
506 + if (!ar9003_hw_rtt_restore(ah, chan))
507 + run_rtt_cal = true;
508 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
509 +index 600aca9..f86ee0c 100644
510 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
511 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
512 +@@ -543,7 +543,7 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
513 + udelay(synthDelay + BASE_ACTIVATE_DELAY);
514 + }
515 +
516 +-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
517 ++void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
518 + {
519 + switch (rx) {
520 + case 0x5:
521 +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
522 +index f8e1fbb..d5c5dca 100644
523 +--- a/drivers/net/wireless/ath/ath9k/hw.h
524 ++++ b/drivers/net/wireless/ath/ath9k/hw.h
525 +@@ -1014,6 +1014,7 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
526 + int ar9003_paprd_init_table(struct ath_hw *ah);
527 + bool ar9003_paprd_is_done(struct ath_hw *ah);
528 + void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
529 ++void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
530 +
531 + /* Hardware family op attach helpers */
532 + void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
533 +diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
534 +index d8bb993..2a6bf76 100644
535 +--- a/drivers/power/ab8500_btemp.c
536 ++++ b/drivers/power/ab8500_btemp.c
537 +@@ -1115,7 +1115,7 @@ static void __exit ab8500_btemp_exit(void)
538 + platform_driver_unregister(&ab8500_btemp_driver);
539 + }
540 +
541 +-subsys_initcall_sync(ab8500_btemp_init);
542 ++device_initcall(ab8500_btemp_init);
543 + module_exit(ab8500_btemp_exit);
544 +
545 + MODULE_LICENSE("GPL v2");
546 +diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
547 +index 804b88c..d8cd151 100644
548 +--- a/drivers/power/abx500_chargalg.c
549 ++++ b/drivers/power/abx500_chargalg.c
550 +@@ -1698,7 +1698,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
551 + static struct attribute abx500_chargalg_en_charger = \
552 + {
553 + .name = "chargalg",
554 +- .mode = S_IWUGO,
555 ++ .mode = S_IWUSR,
556 + };
557 +
558 + static struct attribute *abx500_chargalg_chg[] = {
559 +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
560 +index cf67ce5..3799cf1 100644
561 +--- a/drivers/staging/comedi/comedi_fops.c
562 ++++ b/drivers/staging/comedi/comedi_fops.c
563 +@@ -1577,7 +1577,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
564 +
565 + mask = 0;
566 + read_subdev = comedi_get_read_subdevice(dev_file_info);
567 +- if (read_subdev) {
568 ++ if (read_subdev && read_subdev->async) {
569 + poll_wait(file, &read_subdev->async->wait_head, wait);
570 + if (!read_subdev->busy
571 + || comedi_buf_read_n_available(read_subdev->async) > 0
572 +@@ -1587,7 +1587,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
573 + }
574 + }
575 + write_subdev = comedi_get_write_subdevice(dev_file_info);
576 +- if (write_subdev) {
577 ++ if (write_subdev && write_subdev->async) {
578 + poll_wait(file, &write_subdev->async->wait_head, wait);
579 + comedi_buf_write_alloc(write_subdev->async,
580 + write_subdev->async->prealloc_bufsz);
581 +@@ -1629,7 +1629,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
582 + }
583 +
584 + s = comedi_get_write_subdevice(dev_file_info);
585 +- if (s == NULL) {
586 ++ if (s == NULL || s->async == NULL) {
587 + retval = -EIO;
588 + goto done;
589 + }
590 +@@ -1740,7 +1740,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
591 + }
592 +
593 + s = comedi_get_read_subdevice(dev_file_info);
594 +- if (s == NULL) {
595 ++ if (s == NULL || s->async == NULL) {
596 + retval = -EIO;
597 + goto done;
598 + }
599 +diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
600 +index 721b2be..0517a23 100644
601 +--- a/drivers/staging/comedi/drivers/ni_labpc.c
602 ++++ b/drivers/staging/comedi/drivers/ni_labpc.c
603 +@@ -1264,7 +1264,9 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
604 + else
605 + channel = CR_CHAN(cmd->chanlist[0]);
606 + /* munge channel bits for differential / scan disabled mode */
607 +- if (labpc_ai_scan_mode(cmd) != MODE_SINGLE_CHAN && aref == AREF_DIFF)
608 ++ if ((labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN ||
609 ++ labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN_INTERVAL) &&
610 ++ aref == AREF_DIFF)
611 + channel *= 2;
612 + devpriv->command1_bits |= ADC_CHAN_BITS(channel);
613 + devpriv->command1_bits |= thisboard->ai_range_code[range];
614 +@@ -1280,21 +1282,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
615 + devpriv->write_byte(devpriv->command1_bits,
616 + dev->iobase + COMMAND1_REG);
617 + }
618 +- /* setup any external triggering/pacing (command4 register) */
619 +- devpriv->command4_bits = 0;
620 +- if (cmd->convert_src != TRIG_EXT)
621 +- devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
622 +- /* XXX should discard first scan when using interval scanning
623 +- * since manual says it is not synced with scan clock */
624 +- if (labpc_use_continuous_mode(cmd) == 0) {
625 +- devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
626 +- if (cmd->scan_begin_src == TRIG_EXT)
627 +- devpriv->command4_bits |= EXT_SCAN_EN_BIT;
628 +- }
629 +- /* single-ended/differential */
630 +- if (aref == AREF_DIFF)
631 +- devpriv->command4_bits |= ADC_DIFF_BIT;
632 +- devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
633 +
634 + devpriv->write_byte(cmd->chanlist_len,
635 + dev->iobase + INTERVAL_COUNT_REG);
636 +@@ -1374,6 +1361,22 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
637 + devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT;
638 + devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG);
639 +
640 ++ /* setup any external triggering/pacing (command4 register) */
641 ++ devpriv->command4_bits = 0;
642 ++ if (cmd->convert_src != TRIG_EXT)
643 ++ devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
644 ++ /* XXX should discard first scan when using interval scanning
645 ++ * since manual says it is not synced with scan clock */
646 ++ if (labpc_use_continuous_mode(cmd) == 0) {
647 ++ devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
648 ++ if (cmd->scan_begin_src == TRIG_EXT)
649 ++ devpriv->command4_bits |= EXT_SCAN_EN_BIT;
650 ++ }
651 ++ /* single-ended/differential */
652 ++ if (aref == AREF_DIFF)
653 ++ devpriv->command4_bits |= ADC_DIFF_BIT;
654 ++ devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
655 ++
656 + /* startup acquisition */
657 +
658 + /* command2 reg */
659 +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
660 +index 4ecf9d6..79d9865 100644
661 +--- a/drivers/target/target_core_device.c
662 ++++ b/drivers/target/target_core_device.c
663 +@@ -1483,24 +1483,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
664 +
665 + struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
666 + struct se_portal_group *tpg,
667 ++ struct se_node_acl *nacl,
668 + u32 mapped_lun,
669 +- char *initiatorname,
670 + int *ret)
671 + {
672 + struct se_lun_acl *lacl;
673 +- struct se_node_acl *nacl;
674 +
675 +- if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
676 ++ if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
677 + pr_err("%s InitiatorName exceeds maximum size.\n",
678 + tpg->se_tpg_tfo->get_fabric_name());
679 + *ret = -EOVERFLOW;
680 + return NULL;
681 + }
682 +- nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
683 +- if (!nacl) {
684 +- *ret = -EINVAL;
685 +- return NULL;
686 +- }
687 + lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
688 + if (!lacl) {
689 + pr_err("Unable to allocate memory for struct se_lun_acl.\n");
690 +@@ -1511,7 +1505,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
691 + INIT_LIST_HEAD(&lacl->lacl_list);
692 + lacl->mapped_lun = mapped_lun;
693 + lacl->se_lun_nacl = nacl;
694 +- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
695 ++ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
696 ++ nacl->initiatorname);
697 +
698 + return lacl;
699 + }
700 +diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
701 +index 817ba7c..6b79ee7 100644
702 +--- a/drivers/target/target_core_fabric_configfs.c
703 ++++ b/drivers/target/target_core_fabric_configfs.c
704 +@@ -356,9 +356,17 @@ static struct config_group *target_fabric_make_mappedlun(
705 + ret = -EINVAL;
706 + goto out;
707 + }
708 ++ if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
709 ++ pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
710 ++ "-1: %u for Target Portal Group: %u\n", mapped_lun,
711 ++ TRANSPORT_MAX_LUNS_PER_TPG-1,
712 ++ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
713 ++ ret = -EINVAL;
714 ++ goto out;
715 ++ }
716 +
717 +- lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
718 +- config_item_name(acl_ci), &ret);
719 ++ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
720 ++ mapped_lun, &ret);
721 + if (!lacl) {
722 + ret = -EINVAL;
723 + goto out;
724 +diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
725 +index 21c0563..17179b1 100644
726 +--- a/drivers/target/target_core_internal.h
727 ++++ b/drivers/target/target_core_internal.h
728 +@@ -61,7 +61,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
729 + int core_dev_del_lun(struct se_portal_group *, u32);
730 + struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
731 + struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
732 +- u32, char *, int *);
733 ++ struct se_node_acl *, u32, int *);
734 + int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
735 + struct se_lun_acl *, u32, u32);
736 + int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
737 +diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
738 +index ba537b6..0e17fa3 100644
739 +--- a/drivers/target/target_core_tpg.c
740 ++++ b/drivers/target/target_core_tpg.c
741 +@@ -114,16 +114,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
742 + struct se_node_acl *acl;
743 +
744 + spin_lock_irq(&tpg->acl_node_lock);
745 +- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
746 +- if (!strcmp(acl->initiatorname, initiatorname) &&
747 +- !acl->dynamic_node_acl) {
748 +- spin_unlock_irq(&tpg->acl_node_lock);
749 +- return acl;
750 +- }
751 +- }
752 ++ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
753 + spin_unlock_irq(&tpg->acl_node_lock);
754 +
755 +- return NULL;
756 ++ return acl;
757 + }
758 +
759 + /* core_tpg_add_node_to_devs():
760 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
761 +index 51e4c1e..1a9e2a9 100644
762 +--- a/drivers/vhost/vhost.c
763 ++++ b/drivers/vhost/vhost.c
764 +@@ -1074,7 +1074,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
765 + }
766 + _iov = iov + ret;
767 + size = reg->memory_size - addr + reg->guest_phys_addr;
768 +- _iov->iov_len = min((u64)len, size);
769 ++ _iov->iov_len = min((u64)len - s, size);
770 + _iov->iov_base = (void __user *)(unsigned long)
771 + (reg->userspace_addr + addr - reg->guest_phys_addr);
772 + s += size;
773 +diff --git a/fs/direct-io.c b/fs/direct-io.c
774 +index f4aadd1..29c4fda 100644
775 +--- a/fs/direct-io.c
776 ++++ b/fs/direct-io.c
777 +@@ -305,9 +305,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
778 + dio->end_io(dio->iocb, offset, transferred,
779 + dio->private, ret, is_async);
780 + } else {
781 ++ inode_dio_done(dio->inode);
782 + if (is_async)
783 + aio_complete(dio->iocb, ret, 0);
784 +- inode_dio_done(dio->inode);
785 + }
786 +
787 + return ret;
788 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
789 +index df76291..3e1018a 100644
790 +--- a/fs/ext4/balloc.c
791 ++++ b/fs/ext4/balloc.c
792 +@@ -326,7 +326,7 @@ err_out:
793 + return 0;
794 + }
795 + /**
796 +- * ext4_read_block_bitmap()
797 ++ * ext4_read_block_bitmap_nowait()
798 + * @sb: super block
799 + * @block_group: given block group
800 + *
801 +@@ -422,6 +422,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
802 + struct buffer_head *bh;
803 +
804 + bh = ext4_read_block_bitmap_nowait(sb, block_group);
805 ++ if (!bh)
806 ++ return NULL;
807 + if (ext4_wait_block_bitmap(sb, block_group, bh)) {
808 + put_bh(bh);
809 + return NULL;
810 +@@ -447,11 +449,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
811 +
812 + free_clusters = percpu_counter_read_positive(fcc);
813 + dirty_clusters = percpu_counter_read_positive(dcc);
814 +- root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
815 ++
816 ++ /*
817 ++ * r_blocks_count should always be multiple of the cluster ratio so
818 ++ * we are safe to do a plane bit shift only.
819 ++ */
820 ++ root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
821 +
822 + if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
823 + EXT4_FREECLUSTERS_WATERMARK) {
824 +- free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
825 ++ free_clusters = percpu_counter_sum_positive(fcc);
826 + dirty_clusters = percpu_counter_sum_positive(dcc);
827 + }
828 + /* Check whether we have space after accounting for current
829 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
830 +index e77c4fe..3122ece 100644
831 +--- a/fs/ext4/mballoc.c
832 ++++ b/fs/ext4/mballoc.c
833 +@@ -4126,7 +4126,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
834 + /* The max size of hash table is PREALLOC_TB_SIZE */
835 + order = PREALLOC_TB_SIZE - 1;
836 + /* Add the prealloc space to lg */
837 +- rcu_read_lock();
838 ++ spin_lock(&lg->lg_prealloc_lock);
839 + list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
840 + pa_inode_list) {
841 + spin_lock(&tmp_pa->pa_lock);
842 +@@ -4150,12 +4150,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
843 + if (!added)
844 + list_add_tail_rcu(&pa->pa_inode_list,
845 + &lg->lg_prealloc_list[order]);
846 +- rcu_read_unlock();
847 ++ spin_unlock(&lg->lg_prealloc_lock);
848 +
849 + /* Now trim the list to be not more than 8 elements */
850 + if (lg_prealloc_count > 8) {
851 + ext4_mb_discard_lg_preallocations(sb, lg,
852 +- order, lg_prealloc_count);
853 ++ order, lg_prealloc_count);
854 + return;
855 + }
856 + return ;
857 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
858 +index e88748e..e712a8c 100644
859 +--- a/fs/ext4/xattr.c
860 ++++ b/fs/ext4/xattr.c
861 +@@ -495,7 +495,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
862 + error = ext4_handle_dirty_metadata(handle, inode, bh);
863 + if (IS_SYNC(inode))
864 + ext4_handle_sync(handle);
865 +- dquot_free_block(inode, 1);
866 ++ dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
867 + ea_bdebug(bh, "refcount now=%d; releasing",
868 + le32_to_cpu(BHDR(bh)->h_refcount));
869 + }
870 +@@ -784,7 +784,8 @@ inserted:
871 + else {
872 + /* The old block is released after updating
873 + the inode. */
874 +- error = dquot_alloc_block(inode, 1);
875 ++ error = dquot_alloc_block(inode,
876 ++ EXT4_C2B(EXT4_SB(sb), 1));
877 + if (error)
878 + goto cleanup;
879 + error = ext4_journal_get_write_access(handle,
880 +@@ -880,7 +881,7 @@ cleanup:
881 + return error;
882 +
883 + cleanup_dquot:
884 +- dquot_free_block(inode, 1);
885 ++ dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
886 + goto cleanup;
887 +
888 + bad_block:
889 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
890 +index bc43832..d48478a 100644
891 +--- a/fs/fuse/dir.c
892 ++++ b/fs/fuse/dir.c
893 +@@ -645,7 +645,14 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
894 +
895 + spin_lock(&fc->lock);
896 + fi->attr_version = ++fc->attr_version;
897 +- drop_nlink(inode);
898 ++ /*
899 ++ * If i_nlink == 0 then unlink doesn't make sense, yet this can
900 ++ * happen if userspace filesystem is careless. It would be
901 ++ * difficult to enforce correct nlink usage so just ignore this
902 ++ * condition here
903 ++ */
904 ++ if (inode->i_nlink > 0)
905 ++ drop_nlink(inode);
906 + spin_unlock(&fc->lock);
907 + fuse_invalidate_attr(inode);
908 + fuse_invalidate_attr(dir);
909 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
910 +index 537731e..abd785e 100644
911 +--- a/fs/nfsd/nfs4state.c
912 ++++ b/fs/nfsd/nfs4state.c
913 +@@ -1053,6 +1053,8 @@ free_client(struct nfs4_client *clp)
914 + put_group_info(clp->cl_cred.cr_group_info);
915 + kfree(clp->cl_principal);
916 + kfree(clp->cl_name.data);
917 ++ idr_remove_all(&clp->cl_stateids);
918 ++ idr_destroy(&clp->cl_stateids);
919 + kfree(clp);
920 + }
921 +
922 +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
923 +index 6577432..340bd02 100644
924 +--- a/fs/ocfs2/aops.c
925 ++++ b/fs/ocfs2/aops.c
926 +@@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
927 + level = ocfs2_iocb_rw_locked_level(iocb);
928 + ocfs2_rw_unlock(inode, level);
929 +
930 ++ inode_dio_done(inode);
931 + if (is_async)
932 + aio_complete(iocb, ret, 0);
933 +- inode_dio_done(inode);
934 + }
935 +
936 + /*
937 +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
938 +index f169da4..b7e74b5 100644
939 +--- a/fs/ocfs2/suballoc.c
940 ++++ b/fs/ocfs2/suballoc.c
941 +@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
942 + * cluster groups will be staying in cache for the duration of
943 + * this operation.
944 + */
945 +- ac->ac_allow_chain_relink = 0;
946 ++ ac->ac_disable_chain_relink = 1;
947 +
948 + /* Claim the first region */
949 + status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
950 +@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
951 + * Do this *after* figuring out how many bits we're taking out
952 + * of our target group.
953 + */
954 +- if (ac->ac_allow_chain_relink &&
955 ++ if (!ac->ac_disable_chain_relink &&
956 + (prev_group_bh) &&
957 + (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
958 + status = ocfs2_relink_block_group(handle, alloc_inode,
959 +@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
960 +
961 + victim = ocfs2_find_victim_chain(cl);
962 + ac->ac_chain = victim;
963 +- ac->ac_allow_chain_relink = 1;
964 +
965 + status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
966 + res, &bits_left);
967 +@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
968 + * searching each chain in order. Don't allow chain relinking
969 + * because we only calculate enough journal credits for one
970 + * relink per alloc. */
971 +- ac->ac_allow_chain_relink = 0;
972 ++ ac->ac_disable_chain_relink = 1;
973 + for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
974 + if (i == victim)
975 + continue;
976 +diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
977 +index b8afabf..a36d0aa 100644
978 +--- a/fs/ocfs2/suballoc.h
979 ++++ b/fs/ocfs2/suballoc.h
980 +@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
981 +
982 + /* these are used by the chain search */
983 + u16 ac_chain;
984 +- int ac_allow_chain_relink;
985 ++ int ac_disable_chain_relink;
986 + group_search_t *ac_group_search;
987 +
988 + u64 ac_last_group;
989 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
990 +index 0ba9ea1..2e3ea30 100644
991 +--- a/fs/ocfs2/xattr.c
992 ++++ b/fs/ocfs2/xattr.c
993 +@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
994 + struct buffer_head *dir_bh = NULL;
995 +
996 + ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
997 +- if (!ret) {
998 ++ if (ret) {
999 + mlog_errno(ret);
1000 + goto leave;
1001 + }
1002 +diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
1003 +index 82c585f..4a66a5c 100644
1004 +--- a/fs/pstore/platform.c
1005 ++++ b/fs/pstore/platform.c
1006 +@@ -88,6 +88,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
1007 + }
1008 + }
1009 +
1010 ++bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
1011 ++{
1012 ++ /*
1013 ++ * In case of NMI path, pstore shouldn't be blocked
1014 ++ * regardless of reason.
1015 ++ */
1016 ++ if (in_nmi())
1017 ++ return true;
1018 ++
1019 ++ switch (reason) {
1020 ++ /* In panic case, other cpus are stopped by smp_send_stop(). */
1021 ++ case KMSG_DUMP_PANIC:
1022 ++ /* Emergency restart shouldn't be blocked by spin lock. */
1023 ++ case KMSG_DUMP_EMERG:
1024 ++ return true;
1025 ++ default:
1026 ++ return false;
1027 ++ }
1028 ++}
1029 ++EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
1030 ++
1031 + /*
1032 + * callback from kmsg_dump. (s2,l2) has the most recently
1033 + * written bytes, older bytes are in (s1,l1). Save as much
1034 +@@ -111,10 +132,12 @@ static void pstore_dump(struct kmsg_dumper *dumper,
1035 +
1036 + why = get_reason_str(reason);
1037 +
1038 +- if (in_nmi()) {
1039 +- is_locked = spin_trylock(&psinfo->buf_lock);
1040 +- if (!is_locked)
1041 +- pr_err("pstore dump routine blocked in NMI, may corrupt error record\n");
1042 ++ if (pstore_cannot_block_path(reason)) {
1043 ++ is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
1044 ++ if (!is_locked) {
1045 ++ pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
1046 ++ , in_nmi() ? "NMI" : why);
1047 ++ }
1048 + } else
1049 + spin_lock_irqsave(&psinfo->buf_lock, flags);
1050 + oopscount++;
1051 +@@ -145,9 +168,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
1052 + total += l1_cpy + l2_cpy;
1053 + part++;
1054 + }
1055 +- if (in_nmi()) {
1056 ++ if (pstore_cannot_block_path(reason)) {
1057 + if (is_locked)
1058 +- spin_unlock(&psinfo->buf_lock);
1059 ++ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
1060 + } else
1061 + spin_unlock_irqrestore(&psinfo->buf_lock, flags);
1062 + }
1063 +diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
1064 +index da64e15..6cdabb4 100644
1065 +--- a/include/linux/auto_fs.h
1066 ++++ b/include/linux/auto_fs.h
1067 +@@ -31,25 +31,16 @@
1068 + #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
1069 +
1070 + /*
1071 +- * Architectures where both 32- and 64-bit binaries can be executed
1072 +- * on 64-bit kernels need this. This keeps the structure format
1073 +- * uniform, and makes sure the wait_queue_token isn't too big to be
1074 +- * passed back down to the kernel.
1075 +- *
1076 +- * This assumes that on these architectures:
1077 +- * mode 32 bit 64 bit
1078 +- * -------------------------
1079 +- * int 32 bit 32 bit
1080 +- * long 32 bit 64 bit
1081 +- *
1082 +- * If so, 32-bit user-space code should be backwards compatible.
1083 ++ * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
1084 ++ * back to the kernel via ioctl from userspace. On architectures where 32- and
1085 ++ * 64-bit userspace binaries can be executed it's important that the size of
1086 ++ * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
1087 ++ * do not break the binary ABI interface by changing the structure size.
1088 + */
1089 +-
1090 +-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
1091 +- || defined(__powerpc__) || defined(__s390__)
1092 +-typedef unsigned int autofs_wqt_t;
1093 +-#else
1094 ++#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
1095 + typedef unsigned long autofs_wqt_t;
1096 ++#else
1097 ++typedef unsigned int autofs_wqt_t;
1098 + #endif
1099 +
1100 + /* Packet types */
1101 +diff --git a/include/linux/pstore.h b/include/linux/pstore.h
1102 +index e1461e1..318cca1 100644
1103 +--- a/include/linux/pstore.h
1104 ++++ b/include/linux/pstore.h
1105 +@@ -54,12 +54,18 @@ struct pstore_info {
1106 +
1107 + #ifdef CONFIG_PSTORE
1108 + extern int pstore_register(struct pstore_info *);
1109 ++extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
1110 + #else
1111 + static inline int
1112 + pstore_register(struct pstore_info *psi)
1113 + {
1114 + return -ENODEV;
1115 + }
1116 ++static inline bool
1117 ++pstore_cannot_block_path(enum kmsg_dump_reason reason)
1118 ++{
1119 ++ return false;
1120 ++}
1121 + #endif
1122 +
1123 + #endif /*_LINUX_PSTORE_H*/
1124 +diff --git a/include/linux/quota.h b/include/linux/quota.h
1125 +index c09fa04..ffd8607 100644
1126 +--- a/include/linux/quota.h
1127 ++++ b/include/linux/quota.h
1128 +@@ -417,6 +417,7 @@ struct quota_module_name {
1129 + #define INIT_QUOTA_MODULE_NAMES {\
1130 + {QFMT_VFS_OLD, "quota_v1"},\
1131 + {QFMT_VFS_V0, "quota_v2"},\
1132 ++ {QFMT_VFS_V1, "quota_v2"},\
1133 + {0, NULL}}
1134 +
1135 + #endif /* __KERNEL__ */
1136 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1137 +index a5dccd4..a4c47d1b 100644
1138 +--- a/kernel/cgroup.c
1139 ++++ b/kernel/cgroup.c
1140 +@@ -378,12 +378,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
1141 + struct cgroup *cgrp = link->cgrp;
1142 + list_del(&link->cg_link_list);
1143 + list_del(&link->cgrp_link_list);
1144 ++
1145 ++ /*
1146 ++ * We may not be holding cgroup_mutex, and if cgrp->count is
1147 ++ * dropped to 0 the cgroup can be destroyed at any time, hence
1148 ++ * rcu_read_lock is used to keep it alive.
1149 ++ */
1150 ++ rcu_read_lock();
1151 + if (atomic_dec_and_test(&cgrp->count) &&
1152 + notify_on_release(cgrp)) {
1153 + if (taskexit)
1154 + set_bit(CGRP_RELEASABLE, &cgrp->flags);
1155 + check_for_release(cgrp);
1156 + }
1157 ++ rcu_read_unlock();
1158 +
1159 + kfree(link);
1160 + }
1161 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
1162 +index 5fc1570..8fe6f6b6 100644
1163 +--- a/kernel/cpuset.c
1164 ++++ b/kernel/cpuset.c
1165 +@@ -2479,8 +2479,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
1166 +
1167 + dentry = task_cs(tsk)->css.cgroup->dentry;
1168 + spin_lock(&cpuset_buffer_lock);
1169 +- snprintf(cpuset_name, CPUSET_NAME_LEN,
1170 +- dentry ? (const char *)dentry->d_name.name : "/");
1171 ++
1172 ++ if (!dentry) {
1173 ++ strcpy(cpuset_name, "/");
1174 ++ } else {
1175 ++ spin_lock(&dentry->d_lock);
1176 ++ strlcpy(cpuset_name, (const char *)dentry->d_name.name,
1177 ++ CPUSET_NAME_LEN);
1178 ++ spin_unlock(&dentry->d_lock);
1179 ++ }
1180 ++
1181 + nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
1182 + tsk->mems_allowed);
1183 + printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
1184 +diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
1185 +index 69185ae..e885be1 100644
1186 +--- a/kernel/posix-timers.c
1187 ++++ b/kernel/posix-timers.c
1188 +@@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
1189 + {
1190 + struct k_itimer *timr;
1191 +
1192 ++ /*
1193 ++ * timer_t could be any type >= int and we want to make sure any
1194 ++ * @timer_id outside positive int range fails lookup.
1195 ++ */
1196 ++ if ((unsigned long long)timer_id > INT_MAX)
1197 ++ return NULL;
1198 ++
1199 + rcu_read_lock();
1200 + timr = idr_find(&posix_timers_id, (int)timer_id);
1201 + if (timr) {
1202 +diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
1203 +index a650694..9f9aa32 100644
1204 +--- a/kernel/sysctl_binary.c
1205 ++++ b/kernel/sysctl_binary.c
1206 +@@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
1207 +
1208 + /* Convert the decnet address to binary */
1209 + result = -EIO;
1210 +- nodep = strchr(buf, '.') + 1;
1211 ++ nodep = strchr(buf, '.');
1212 + if (!nodep)
1213 + goto out;
1214 ++ ++nodep;
1215 +
1216 + area = simple_strtoul(buf, NULL, 10);
1217 + node = simple_strtoul(nodep, NULL, 10);
1218 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1219 +index 6b194d8..4a86e64 100644
1220 +--- a/kernel/trace/ftrace.c
1221 ++++ b/kernel/trace/ftrace.c
1222 +@@ -3841,37 +3841,51 @@ static void ftrace_init_module(struct module *mod,
1223 + ftrace_process_locs(mod, start, end);
1224 + }
1225 +
1226 +-static int ftrace_module_notify(struct notifier_block *self,
1227 +- unsigned long val, void *data)
1228 ++static int ftrace_module_notify_enter(struct notifier_block *self,
1229 ++ unsigned long val, void *data)
1230 + {
1231 + struct module *mod = data;
1232 +
1233 +- switch (val) {
1234 +- case MODULE_STATE_COMING:
1235 ++ if (val == MODULE_STATE_COMING)
1236 + ftrace_init_module(mod, mod->ftrace_callsites,
1237 + mod->ftrace_callsites +
1238 + mod->num_ftrace_callsites);
1239 +- break;
1240 +- case MODULE_STATE_GOING:
1241 ++ return 0;
1242 ++}
1243 ++
1244 ++static int ftrace_module_notify_exit(struct notifier_block *self,
1245 ++ unsigned long val, void *data)
1246 ++{
1247 ++ struct module *mod = data;
1248 ++
1249 ++ if (val == MODULE_STATE_GOING)
1250 + ftrace_release_mod(mod);
1251 +- break;
1252 +- }
1253 +
1254 + return 0;
1255 + }
1256 + #else
1257 +-static int ftrace_module_notify(struct notifier_block *self,
1258 +- unsigned long val, void *data)
1259 ++static int ftrace_module_notify_enter(struct notifier_block *self,
1260 ++ unsigned long val, void *data)
1261 ++{
1262 ++ return 0;
1263 ++}
1264 ++static int ftrace_module_notify_exit(struct notifier_block *self,
1265 ++ unsigned long val, void *data)
1266 + {
1267 + return 0;
1268 + }
1269 + #endif /* CONFIG_MODULES */
1270 +
1271 +-struct notifier_block ftrace_module_nb = {
1272 +- .notifier_call = ftrace_module_notify,
1273 ++struct notifier_block ftrace_module_enter_nb = {
1274 ++ .notifier_call = ftrace_module_notify_enter,
1275 + .priority = INT_MAX, /* Run before anything that can use kprobes */
1276 + };
1277 +
1278 ++struct notifier_block ftrace_module_exit_nb = {
1279 ++ .notifier_call = ftrace_module_notify_exit,
1280 ++ .priority = INT_MIN, /* Run after anything that can remove kprobes */
1281 ++};
1282 ++
1283 + extern unsigned long __start_mcount_loc[];
1284 + extern unsigned long __stop_mcount_loc[];
1285 +
1286 +@@ -3903,9 +3917,13 @@ void __init ftrace_init(void)
1287 + __start_mcount_loc,
1288 + __stop_mcount_loc);
1289 +
1290 +- ret = register_module_notifier(&ftrace_module_nb);
1291 ++ ret = register_module_notifier(&ftrace_module_enter_nb);
1292 ++ if (ret)
1293 ++ pr_warning("Failed to register trace ftrace module enter notifier\n");
1294 ++
1295 ++ ret = register_module_notifier(&ftrace_module_exit_nb);
1296 + if (ret)
1297 +- pr_warning("Failed to register trace ftrace module notifier\n");
1298 ++ pr_warning("Failed to register trace ftrace module exit notifier\n");
1299 +
1300 + set_ftrace_early_filters();
1301 +
1302 +diff --git a/lib/idr.c b/lib/idr.c
1303 +index 4046e29..e90d2d0 100644
1304 +--- a/lib/idr.c
1305 ++++ b/lib/idr.c
1306 +@@ -625,7 +625,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
1307 + return p;
1308 + }
1309 +
1310 +- id += 1 << n;
1311 ++ /*
1312 ++ * Proceed to the next layer at the current level. Unlike
1313 ++ * idr_for_each(), @id isn't guaranteed to be aligned to
1314 ++ * layer boundary at this point and adding 1 << n may
1315 ++ * incorrectly skip IDs. Make sure we jump to the
1316 ++ * beginning of the next layer using round_up().
1317 ++ */
1318 ++ id = round_up(id + 1, 1 << n);
1319 + while (n < fls(id)) {
1320 + n += IDR_BITS;
1321 + p = *--paa;
1322 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
1323 +index fd9b288..aec7dbb 100644
1324 +--- a/net/sunrpc/svc_xprt.c
1325 ++++ b/net/sunrpc/svc_xprt.c
1326 +@@ -817,7 +817,6 @@ static void svc_age_temp_xprts(unsigned long closure)
1327 + struct svc_serv *serv = (struct svc_serv *)closure;
1328 + struct svc_xprt *xprt;
1329 + struct list_head *le, *next;
1330 +- LIST_HEAD(to_be_aged);
1331 +
1332 + dprintk("svc_age_temp_xprts\n");
1333 +
1334 +@@ -838,25 +837,15 @@ static void svc_age_temp_xprts(unsigned long closure)
1335 + if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
1336 + test_bit(XPT_BUSY, &xprt->xpt_flags))
1337 + continue;
1338 +- svc_xprt_get(xprt);
1339 +- list_move(le, &to_be_aged);
1340 ++ list_del_init(le);
1341 + set_bit(XPT_CLOSE, &xprt->xpt_flags);
1342 + set_bit(XPT_DETACHED, &xprt->xpt_flags);
1343 +- }
1344 +- spin_unlock_bh(&serv->sv_lock);
1345 +-
1346 +- while (!list_empty(&to_be_aged)) {
1347 +- le = to_be_aged.next;
1348 +- /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
1349 +- list_del_init(le);
1350 +- xprt = list_entry(le, struct svc_xprt, xpt_list);
1351 +-
1352 + dprintk("queuing xprt %p for closing\n", xprt);
1353 +
1354 + /* a thread will dequeue and close it soon */
1355 + svc_xprt_enqueue(xprt);
1356 +- svc_xprt_put(xprt);
1357 + }
1358 ++ spin_unlock_bh(&serv->sv_lock);
1359 +
1360 + mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
1361 + }
1362 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1363 +index 02a6e3f..fa2ce0c 100644
1364 +--- a/sound/pci/hda/patch_hdmi.c
1365 ++++ b/sound/pci/hda/patch_hdmi.c
1366 +@@ -1244,6 +1244,9 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
1367 +
1368 + if (pcmdev > 0)
1369 + sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
1370 ++ if (!is_jack_detectable(codec, per_pin->pin_nid))
1371 ++ strncat(hdmi_str, " Phantom",
1372 ++ sizeof(hdmi_str) - strlen(hdmi_str) - 1);
1373 +
1374 + return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0);
1375 + }
1376 +diff --git a/tools/perf/Makefile b/tools/perf/Makefile
1377 +index c3dd3d4..2db7ba0 100644
1378 +--- a/tools/perf/Makefile
1379 ++++ b/tools/perf/Makefile
1380 +@@ -241,13 +241,13 @@ $(OUTPUT)util/parse-events-flex.c: util/parse-events.l
1381 + $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
1382 +
1383 + $(OUTPUT)util/parse-events-bison.c: util/parse-events.y
1384 +- $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
1385 ++ $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
1386 +
1387 + $(OUTPUT)util/pmu-flex.c: util/pmu.l
1388 + $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
1389 +
1390 + $(OUTPUT)util/pmu-bison.c: util/pmu.y
1391 +- $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
1392 ++ $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
1393 +
1394 + $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
1395 + $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
1396 +diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
1397 +index d9637da..3f35ea3 100644
1398 +--- a/tools/perf/util/parse-events.y
1399 ++++ b/tools/perf/util/parse-events.y
1400 +@@ -1,5 +1,4 @@
1401 +
1402 +-%name-prefix "parse_events_"
1403 + %parse-param {struct list_head *list_all}
1404 + %parse-param {struct list_head *list_event}
1405 + %parse-param {int *idx}
1406 +diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
1407 +index 20ea77e..522943f 100644
1408 +--- a/tools/perf/util/pmu.y
1409 ++++ b/tools/perf/util/pmu.y
1410 +@@ -1,5 +1,4 @@
1411 +
1412 +-%name-prefix "perf_pmu_"
1413 + %parse-param {struct list_head *format}
1414 + %parse-param {char *name}
1415 +