Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.4 commit in: /
Date: Mon, 21 Sep 2015 15:57:27
Message-Id: 1442851035.37749aac60489f971a0be77982f7550a80209d39.mpagano@gentoo
1 commit: 37749aac60489f971a0be77982f7550a80209d39
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Sep 21 15:57:15 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Sep 21 15:57:15 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37749aac
7
8 Linux patch 3.4.109
9
10 0000_README | 4 +
11 1108_linux-3.4.109.patch | 5116 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5120 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 98c0230..0fcce0d 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -471,6 +471,10 @@ Patch: 1107_linux-3.4.108.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.4.108
21
22 +Patch: 1108_linux-3.4.109.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.4.109
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1108_linux-3.4.109.patch b/1108_linux-3.4.109.patch
31 new file mode 100644
32 index 0000000..47549c8
33 --- /dev/null
34 +++ b/1108_linux-3.4.109.patch
35 @@ -0,0 +1,5116 @@
36 +diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt
37 +index c67077cbeb80..e1a3d59bbe0f 100644
38 +--- a/Documentation/networking/rds.txt
39 ++++ b/Documentation/networking/rds.txt
40 +@@ -62,11 +62,10 @@ Socket Interface
41 + ================
42 +
43 + AF_RDS, PF_RDS, SOL_RDS
44 +- These constants haven't been assigned yet, because RDS isn't in
45 +- mainline yet. Currently, the kernel module assigns some constant
46 +- and publishes it to user space through two sysctl files
47 +- /proc/sys/net/rds/pf_rds
48 +- /proc/sys/net/rds/sol_rds
49 ++ AF_RDS and PF_RDS are the domain type to be used with socket(2)
50 ++ to create RDS sockets. SOL_RDS is the socket-level to be used
51 ++ with setsockopt(2) and getsockopt(2) for RDS specific socket
52 ++ options.
53 +
54 + fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
55 + This creates a new, unbound RDS socket.
56 +diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
57 +index d97bccf46147..d4647168eba0 100644
58 +--- a/Documentation/pinctrl.txt
59 ++++ b/Documentation/pinctrl.txt
60 +@@ -72,7 +72,6 @@ static struct pinctrl_desc foo_desc = {
61 + .name = "foo",
62 + .pins = foo_pins,
63 + .npins = ARRAY_SIZE(foo_pins),
64 +- .maxpin = 63,
65 + .owner = THIS_MODULE,
66 + };
67 +
68 +@@ -166,8 +165,8 @@ static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
69 + }
70 +
71 + static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
72 +- unsigned ** const pins,
73 +- unsigned * const num_pins)
74 ++ const unsigned **pins,
75 ++ unsigned *num_pins)
76 + {
77 + *pins = (unsigned *) foo_groups[selector].pins;
78 + *num_pins = foo_groups[selector].num_pins;
79 +@@ -1043,7 +1042,7 @@ The semantics of the pinctrl APIs are:
80 +
81 + Usually the pin control core handled the get/put pair and call out to the
82 + device drivers bookkeeping operations, like checking available functions and
83 +-the associated pins, whereas the enable/disable pass on to the pin controller
84 ++the associated pins, whereas select_state pass on to the pin controller
85 + driver which takes care of activating and/or deactivating the mux setting by
86 + quickly poking some registers.
87 +
88 +@@ -1089,8 +1088,9 @@ function, but with different named in the mapping as described under
89 + "Advanced mapping" above. So that for an SPI device, we have two states named
90 + "pos-A" and "pos-B".
91 +
92 +-This snippet first muxes the function in the pins defined by group A, enables
93 +-it, disables and releases it, and muxes it in on the pins defined by group B:
94 ++This snippet first initializes a state object for both groups (in foo_probe()),
95 ++then muxes the function in the pins defined by group A, and finally muxes it in
96 ++on the pins defined by group B:
97 +
98 + #include <linux/pinctrl/consumer.h>
99 +
100 +diff --git a/Makefile b/Makefile
101 +index 5056e1bba7e8..7337720d6599 100644
102 +--- a/Makefile
103 ++++ b/Makefile
104 +@@ -1,6 +1,6 @@
105 + VERSION = 3
106 + PATCHLEVEL = 4
107 +-SUBLEVEL = 108
108 ++SUBLEVEL = 109
109 + EXTRAVERSION =
110 + NAME = Saber-toothed Squirrel
111 +
112 +diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
113 +index bc5e7d5ddd54..9cc8ed2310e7 100644
114 +--- a/arch/arm/boot/dts/imx27.dtsi
115 ++++ b/arch/arm/boot/dts/imx27.dtsi
116 +@@ -208,7 +208,7 @@
117 +
118 + fec: fec@1002b000 {
119 + compatible = "fsl,imx27-fec";
120 +- reg = <0x1002b000 0x4000>;
121 ++ reg = <0x1002b000 0x1000>;
122 + interrupts = <50>;
123 + status = "disabled";
124 + };
125 +diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
126 +index 38050b1c4800..dfc980ba7416 100644
127 +--- a/arch/arm/include/asm/elf.h
128 ++++ b/arch/arm/include/asm/elf.h
129 +@@ -116,7 +116,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
130 + the loader. We need to make sure that it is out of the way of the program
131 + that it will "exec", and that there is sufficient room for the brk. */
132 +
133 +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
134 ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
135 +
136 + /* When the program starts, a1 contains a pointer to a function to be
137 + registered with atexit, as per the SVR4 ABI. A value of 0 means we
138 +diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
139 +index fe2d1f80ef50..2f8b17163a51 100644
140 +--- a/arch/arm/mach-pxa/Kconfig
141 ++++ b/arch/arm/mach-pxa/Kconfig
142 +@@ -718,4 +718,13 @@ config PXA_HAVE_ISA_IRQS
143 + config PXA310_ULPI
144 + bool
145 +
146 ++config PXA_SYSTEMS_CPLDS
147 ++ tristate "Motherboard cplds"
148 ++ default ARCH_LUBBOCK || MACH_MAINSTONE
149 ++ help
150 ++ This driver supports the Lubbock and Mainstone multifunction chip
151 ++ found on the pxa25x development platform system (Lubbock) and pxa27x
152 ++ development platform system (Mainstone). This IO board supports the
153 ++ interrupts handling, ethernet controller, flash chips, etc ...
154 ++
155 + endif
156 +diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
157 +index be0f7df8685c..318c0d756bc7 100644
158 +--- a/arch/arm/mach-pxa/Makefile
159 ++++ b/arch/arm/mach-pxa/Makefile
160 +@@ -103,4 +103,5 @@ led-$(CONFIG_ARCH_PXA_IDP) += leds-idp.o
161 +
162 + obj-$(CONFIG_LEDS) += $(led-y)
163 +
164 ++obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
165 + obj-$(CONFIG_TOSA_BT) += tosa-bt.o
166 +diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
167 +index 2a086e8373eb..b20b97e1df4c 100644
168 +--- a/arch/arm/mach-pxa/include/mach/lubbock.h
169 ++++ b/arch/arm/mach-pxa/include/mach/lubbock.h
170 +@@ -35,7 +35,9 @@
171 + #define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100)
172 +
173 + /* Board specific IRQs */
174 +-#define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x))
175 ++#define LUBBOCK_NR_IRQS IRQ_BOARD_START
176 ++
177 ++#define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x))
178 + #define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0)
179 + #define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1)
180 + #define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */
181 +@@ -45,8 +47,7 @@
182 + #define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */
183 + #define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6)
184 +
185 +-#define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16)
186 +-#define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55)
187 ++#define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32)
188 +
189 + #ifndef __ASSEMBLY__
190 + extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set);
191 +diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h
192 +index 1bfc4e822a41..e82a7d31104e 100644
193 +--- a/arch/arm/mach-pxa/include/mach/mainstone.h
194 ++++ b/arch/arm/mach-pxa/include/mach/mainstone.h
195 +@@ -120,7 +120,9 @@
196 + #define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */
197 +
198 + /* board specific IRQs */
199 +-#define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x))
200 ++#define MAINSTONE_NR_IRQS IRQ_BOARD_START
201 ++
202 ++#define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x))
203 + #define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0)
204 + #define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1)
205 + #define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2)
206 +@@ -136,6 +138,4 @@
207 + #define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14)
208 + #define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15)
209 +
210 +-#define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16)
211 +-
212 + #endif
213 +diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
214 +index 6bb3f47b1f14..799f31f2c7ff 100644
215 +--- a/arch/arm/mach-pxa/lubbock.c
216 ++++ b/arch/arm/mach-pxa/lubbock.c
217 +@@ -12,6 +12,7 @@
218 + * published by the Free Software Foundation.
219 + */
220 + #include <linux/gpio.h>
221 ++#include <linux/gpio/machine.h>
222 + #include <linux/module.h>
223 + #include <linux/kernel.h>
224 + #include <linux/init.h>
225 +@@ -120,84 +121,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
226 + }
227 + EXPORT_SYMBOL(lubbock_set_misc_wr);
228 +
229 +-static unsigned long lubbock_irq_enabled;
230 +-
231 +-static void lubbock_mask_irq(struct irq_data *d)
232 +-{
233 +- int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
234 +- LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
235 +-}
236 +-
237 +-static void lubbock_unmask_irq(struct irq_data *d)
238 +-{
239 +- int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
240 +- /* the irq can be acknowledged only if deasserted, so it's done here */
241 +- LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
242 +- LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
243 +-}
244 +-
245 +-static struct irq_chip lubbock_irq_chip = {
246 +- .name = "FPGA",
247 +- .irq_ack = lubbock_mask_irq,
248 +- .irq_mask = lubbock_mask_irq,
249 +- .irq_unmask = lubbock_unmask_irq,
250 +-};
251 +-
252 +-static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
253 +-{
254 +- unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
255 +- do {
256 +- /* clear our parent irq */
257 +- desc->irq_data.chip->irq_ack(&desc->irq_data);
258 +- if (likely(pending)) {
259 +- irq = LUBBOCK_IRQ(0) + __ffs(pending);
260 +- generic_handle_irq(irq);
261 +- }
262 +- pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
263 +- } while (pending);
264 +-}
265 +-
266 +-static void __init lubbock_init_irq(void)
267 +-{
268 +- int irq;
269 +-
270 +- pxa25x_init_irq();
271 +-
272 +- /* setup extra lubbock irqs */
273 +- for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
274 +- irq_set_chip_and_handler(irq, &lubbock_irq_chip,
275 +- handle_level_irq);
276 +- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
277 +- }
278 +-
279 +- irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
280 +- irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
281 +-}
282 +-
283 +-#ifdef CONFIG_PM
284 +-
285 +-static void lubbock_irq_resume(void)
286 +-{
287 +- LUB_IRQ_MASK_EN = lubbock_irq_enabled;
288 +-}
289 +-
290 +-static struct syscore_ops lubbock_irq_syscore_ops = {
291 +- .resume = lubbock_irq_resume,
292 +-};
293 +-
294 +-static int __init lubbock_irq_device_init(void)
295 +-{
296 +- if (machine_is_lubbock()) {
297 +- register_syscore_ops(&lubbock_irq_syscore_ops);
298 +- return 0;
299 +- }
300 +- return -ENODEV;
301 +-}
302 +-
303 +-device_initcall(lubbock_irq_device_init);
304 +-
305 +-#endif
306 +-
307 + static int lubbock_udc_is_connected(void)
308 + {
309 + return (LUB_MISC_RD & (1 << 9)) == 0;
310 +@@ -380,11 +303,38 @@ static struct platform_device lubbock_flash_device[2] = {
311 + },
312 + };
313 +
314 ++static struct resource lubbock_cplds_resources[] = {
315 ++ [0] = {
316 ++ .start = LUBBOCK_FPGA_PHYS + 0xc0,
317 ++ .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1,
318 ++ .flags = IORESOURCE_MEM,
319 ++ },
320 ++ [1] = {
321 ++ .start = PXA_GPIO_TO_IRQ(0),
322 ++ .end = PXA_GPIO_TO_IRQ(0),
323 ++ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
324 ++ },
325 ++ [2] = {
326 ++ .start = LUBBOCK_IRQ(0),
327 ++ .end = LUBBOCK_IRQ(6),
328 ++ .flags = IORESOURCE_IRQ,
329 ++ },
330 ++};
331 ++
332 ++static struct platform_device lubbock_cplds_device = {
333 ++ .name = "pxa_cplds_irqs",
334 ++ .id = -1,
335 ++ .resource = &lubbock_cplds_resources[0],
336 ++ .num_resources = 3,
337 ++};
338 ++
339 ++
340 + static struct platform_device *devices[] __initdata = {
341 + &sa1111_device,
342 + &smc91x_device,
343 + &lubbock_flash_device[0],
344 + &lubbock_flash_device[1],
345 ++ &lubbock_cplds_device,
346 + };
347 +
348 + static struct pxafb_mode_info sharp_lm8v31_mode = {
349 +@@ -553,7 +503,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
350 + /* Maintainer: MontaVista Software Inc. */
351 + .map_io = lubbock_map_io,
352 + .nr_irqs = LUBBOCK_NR_IRQS,
353 +- .init_irq = lubbock_init_irq,
354 ++ .init_irq = pxa25x_init_irq,
355 + .handle_irq = pxa25x_handle_irq,
356 + .timer = &pxa_timer,
357 + .init_machine = lubbock_init,
358 +diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
359 +index 1aebaf719462..3d679dbc3cc6 100644
360 +--- a/arch/arm/mach-pxa/mainstone.c
361 ++++ b/arch/arm/mach-pxa/mainstone.c
362 +@@ -13,6 +13,7 @@
363 + * published by the Free Software Foundation.
364 + */
365 + #include <linux/gpio.h>
366 ++#include <linux/gpio/machine.h>
367 + #include <linux/init.h>
368 + #include <linux/platform_device.h>
369 + #include <linux/syscore_ops.h>
370 +@@ -120,92 +121,6 @@ static unsigned long mainstone_pin_config[] = {
371 + GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
372 + };
373 +
374 +-static unsigned long mainstone_irq_enabled;
375 +-
376 +-static void mainstone_mask_irq(struct irq_data *d)
377 +-{
378 +- int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
379 +- MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
380 +-}
381 +-
382 +-static void mainstone_unmask_irq(struct irq_data *d)
383 +-{
384 +- int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
385 +- /* the irq can be acknowledged only if deasserted, so it's done here */
386 +- MST_INTSETCLR &= ~(1 << mainstone_irq);
387 +- MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
388 +-}
389 +-
390 +-static struct irq_chip mainstone_irq_chip = {
391 +- .name = "FPGA",
392 +- .irq_ack = mainstone_mask_irq,
393 +- .irq_mask = mainstone_mask_irq,
394 +- .irq_unmask = mainstone_unmask_irq,
395 +-};
396 +-
397 +-static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
398 +-{
399 +- unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
400 +- do {
401 +- /* clear useless edge notification */
402 +- desc->irq_data.chip->irq_ack(&desc->irq_data);
403 +- if (likely(pending)) {
404 +- irq = MAINSTONE_IRQ(0) + __ffs(pending);
405 +- generic_handle_irq(irq);
406 +- }
407 +- pending = MST_INTSETCLR & mainstone_irq_enabled;
408 +- } while (pending);
409 +-}
410 +-
411 +-static void __init mainstone_init_irq(void)
412 +-{
413 +- int irq;
414 +-
415 +- pxa27x_init_irq();
416 +-
417 +- /* setup extra Mainstone irqs */
418 +- for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
419 +- irq_set_chip_and_handler(irq, &mainstone_irq_chip,
420 +- handle_level_irq);
421 +- if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
422 +- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
423 +- else
424 +- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
425 +- }
426 +- set_irq_flags(MAINSTONE_IRQ(8), 0);
427 +- set_irq_flags(MAINSTONE_IRQ(12), 0);
428 +-
429 +- MST_INTMSKENA = 0;
430 +- MST_INTSETCLR = 0;
431 +-
432 +- irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
433 +- irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
434 +-}
435 +-
436 +-#ifdef CONFIG_PM
437 +-
438 +-static void mainstone_irq_resume(void)
439 +-{
440 +- MST_INTMSKENA = mainstone_irq_enabled;
441 +-}
442 +-
443 +-static struct syscore_ops mainstone_irq_syscore_ops = {
444 +- .resume = mainstone_irq_resume,
445 +-};
446 +-
447 +-static int __init mainstone_irq_device_init(void)
448 +-{
449 +- if (machine_is_mainstone())
450 +- register_syscore_ops(&mainstone_irq_syscore_ops);
451 +-
452 +- return 0;
453 +-}
454 +-
455 +-device_initcall(mainstone_irq_device_init);
456 +-
457 +-#endif
458 +-
459 +-
460 + static struct resource smc91x_resources[] = {
461 + [0] = {
462 + .start = (MST_ETH_PHYS + 0x300),
463 +@@ -483,11 +398,37 @@ static struct platform_device mst_gpio_keys_device = {
464 + },
465 + };
466 +
467 ++static struct resource mst_cplds_resources[] = {
468 ++ [0] = {
469 ++ .start = MST_FPGA_PHYS + 0xc0,
470 ++ .end = MST_FPGA_PHYS + 0xe0 - 1,
471 ++ .flags = IORESOURCE_MEM,
472 ++ },
473 ++ [1] = {
474 ++ .start = PXA_GPIO_TO_IRQ(0),
475 ++ .end = PXA_GPIO_TO_IRQ(0),
476 ++ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
477 ++ },
478 ++ [2] = {
479 ++ .start = MAINSTONE_IRQ(0),
480 ++ .end = MAINSTONE_IRQ(15),
481 ++ .flags = IORESOURCE_IRQ,
482 ++ },
483 ++};
484 ++
485 ++static struct platform_device mst_cplds_device = {
486 ++ .name = "pxa_cplds_irqs",
487 ++ .id = -1,
488 ++ .resource = &mst_cplds_resources[0],
489 ++ .num_resources = 3,
490 ++};
491 ++
492 + static struct platform_device *platform_devices[] __initdata = {
493 + &smc91x_device,
494 + &mst_flash_device[0],
495 + &mst_flash_device[1],
496 + &mst_gpio_keys_device,
497 ++ &mst_cplds_device,
498 + };
499 +
500 + static struct pxaohci_platform_data mainstone_ohci_platform_data = {
501 +@@ -618,7 +559,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
502 + .atag_offset = 0x100, /* BLOB boot parameter setting */
503 + .map_io = mainstone_map_io,
504 + .nr_irqs = MAINSTONE_NR_IRQS,
505 +- .init_irq = mainstone_init_irq,
506 ++ .init_irq = pxa27x_init_irq,
507 + .handle_irq = pxa27x_handle_irq,
508 + .timer = &pxa_timer,
509 + .init_machine = mainstone_init,
510 +diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
511 +new file mode 100644
512 +index 000000000000..f1aeb54fabe3
513 +--- /dev/null
514 ++++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
515 +@@ -0,0 +1,200 @@
516 ++/*
517 ++ * Intel Reference Systems cplds
518 ++ *
519 ++ * Copyright (C) 2014 Robert Jarzmik
520 ++ *
521 ++ * This program is free software; you can redistribute it and/or modify
522 ++ * it under the terms of the GNU General Public License as published by
523 ++ * the Free Software Foundation; either version 2 of the License, or
524 ++ * (at your option) any later version.
525 ++ *
526 ++ * Cplds motherboard driver, supporting lubbock and mainstone SoC board.
527 ++ */
528 ++
529 ++#include <linux/bitops.h>
530 ++#include <linux/gpio.h>
531 ++#include <linux/gpio/consumer.h>
532 ++#include <linux/interrupt.h>
533 ++#include <linux/io.h>
534 ++#include <linux/irq.h>
535 ++#include <linux/irqdomain.h>
536 ++#include <linux/mfd/core.h>
537 ++#include <linux/module.h>
538 ++#include <linux/of_platform.h>
539 ++
540 ++#define FPGA_IRQ_MASK_EN 0x0
541 ++#define FPGA_IRQ_SET_CLR 0x10
542 ++
543 ++#define CPLDS_NB_IRQ 32
544 ++
545 ++struct cplds {
546 ++ void __iomem *base;
547 ++ int irq;
548 ++ unsigned int irq_mask;
549 ++ struct gpio_desc *gpio0;
550 ++ struct irq_domain *irqdomain;
551 ++};
552 ++
553 ++static irqreturn_t cplds_irq_handler(int in_irq, void *d)
554 ++{
555 ++ struct cplds *fpga = d;
556 ++ unsigned long pending;
557 ++ unsigned int bit;
558 ++
559 ++ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
560 ++ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
561 ++ generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
562 ++
563 ++ return IRQ_HANDLED;
564 ++}
565 ++
566 ++static void cplds_irq_mask_ack(struct irq_data *d)
567 ++{
568 ++ struct cplds *fpga = irq_data_get_irq_chip_data(d);
569 ++ unsigned int cplds_irq = irqd_to_hwirq(d);
570 ++ unsigned int set, bit = BIT(cplds_irq);
571 ++
572 ++ fpga->irq_mask &= ~bit;
573 ++ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
574 ++ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
575 ++ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
576 ++}
577 ++
578 ++static void cplds_irq_unmask(struct irq_data *d)
579 ++{
580 ++ struct cplds *fpga = irq_data_get_irq_chip_data(d);
581 ++ unsigned int cplds_irq = irqd_to_hwirq(d);
582 ++ unsigned int bit = BIT(cplds_irq);
583 ++
584 ++ fpga->irq_mask |= bit;
585 ++ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
586 ++}
587 ++
588 ++static struct irq_chip cplds_irq_chip = {
589 ++ .name = "pxa_cplds",
590 ++ .irq_mask_ack = cplds_irq_mask_ack,
591 ++ .irq_unmask = cplds_irq_unmask,
592 ++ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
593 ++};
594 ++
595 ++static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq,
596 ++ irq_hw_number_t hwirq)
597 ++{
598 ++ struct cplds *fpga = d->host_data;
599 ++
600 ++ irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq);
601 ++ irq_set_chip_data(irq, fpga);
602 ++
603 ++ return 0;
604 ++}
605 ++
606 ++static const struct irq_domain_ops cplds_irq_domain_ops = {
607 ++ .xlate = irq_domain_xlate_twocell,
608 ++ .map = cplds_irq_domain_map,
609 ++};
610 ++
611 ++static int cplds_resume(struct platform_device *pdev)
612 ++{
613 ++ struct cplds *fpga = platform_get_drvdata(pdev);
614 ++
615 ++ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
616 ++
617 ++ return 0;
618 ++}
619 ++
620 ++static int cplds_probe(struct platform_device *pdev)
621 ++{
622 ++ struct resource *res;
623 ++ struct cplds *fpga;
624 ++ int ret;
625 ++ unsigned int base_irq = 0;
626 ++ unsigned long irqflags = 0;
627 ++
628 ++ fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
629 ++ if (!fpga)
630 ++ return -ENOMEM;
631 ++
632 ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
633 ++ if (res) {
634 ++ fpga->irq = (unsigned int)res->start;
635 ++ irqflags = res->flags;
636 ++ }
637 ++ if (!fpga->irq)
638 ++ return -ENODEV;
639 ++
640 ++ base_irq = platform_get_irq(pdev, 1);
641 ++ if (base_irq < 0)
642 ++ base_irq = 0;
643 ++
644 ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
645 ++ fpga->base = devm_ioremap_resource(&pdev->dev, res);
646 ++ if (IS_ERR(fpga->base))
647 ++ return PTR_ERR(fpga->base);
648 ++
649 ++ platform_set_drvdata(pdev, fpga);
650 ++
651 ++ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
652 ++ writel(0, fpga->base + FPGA_IRQ_SET_CLR);
653 ++
654 ++ ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
655 ++ irqflags, dev_name(&pdev->dev), fpga);
656 ++ if (ret == -ENOSYS)
657 ++ return -EPROBE_DEFER;
658 ++
659 ++ if (ret) {
660 ++ dev_err(&pdev->dev, "couldn't request main irq%d: %d\n",
661 ++ fpga->irq, ret);
662 ++ return ret;
663 ++ }
664 ++
665 ++ irq_set_irq_wake(fpga->irq, 1);
666 ++ fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
667 ++ CPLDS_NB_IRQ,
668 ++ &cplds_irq_domain_ops, fpga);
669 ++ if (!fpga->irqdomain)
670 ++ return -ENODEV;
671 ++
672 ++ if (base_irq) {
673 ++ ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
674 ++ CPLDS_NB_IRQ);
675 ++ if (ret) {
676 ++ dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
677 ++ base_irq, base_irq + CPLDS_NB_IRQ);
678 ++ return ret;
679 ++ }
680 ++ }
681 ++
682 ++ return 0;
683 ++}
684 ++
685 ++static int cplds_remove(struct platform_device *pdev)
686 ++{
687 ++ struct cplds *fpga = platform_get_drvdata(pdev);
688 ++
689 ++ irq_set_chip_and_handler(fpga->irq, NULL, NULL);
690 ++
691 ++ return 0;
692 ++}
693 ++
694 ++static const struct of_device_id cplds_id_table[] = {
695 ++ { .compatible = "intel,lubbock-cplds-irqs", },
696 ++ { .compatible = "intel,mainstone-cplds-irqs", },
697 ++ { }
698 ++};
699 ++MODULE_DEVICE_TABLE(of, cplds_id_table);
700 ++
701 ++static struct platform_driver cplds_driver = {
702 ++ .driver = {
703 ++ .name = "pxa_cplds_irqs",
704 ++ .of_match_table = of_match_ptr(cplds_id_table),
705 ++ },
706 ++ .probe = cplds_probe,
707 ++ .remove = cplds_remove,
708 ++ .resume = cplds_resume,
709 ++};
710 ++
711 ++module_platform_driver(cplds_driver);
712 ++
713 ++MODULE_DESCRIPTION("PXA Cplds interrupts driver");
714 ++MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@××××.fr>");
715 ++MODULE_LICENSE("GPL");
716 +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
717 +index 62135849f48b..ad941453340a 100644
718 +--- a/arch/arm/net/bpf_jit_32.c
719 ++++ b/arch/arm/net/bpf_jit_32.c
720 +@@ -52,6 +52,7 @@
721 + #define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
722 +
723 + #define FLAG_NEED_X_RESET (1 << 0)
724 ++#define FLAG_IMM_OVERFLOW (1 << 1)
725 +
726 + struct jit_ctx {
727 + const struct sk_filter *skf;
728 +@@ -286,6 +287,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
729 + /* PC in ARM mode == address of the instruction + 8 */
730 + imm = offset - (8 + ctx->idx * 4);
731 +
732 ++ if (imm & ~0xfff) {
733 ++ /*
734 ++ * literal pool is too far, signal it into flags. we
735 ++ * can only detect it on the second pass unfortunately.
736 ++ */
737 ++ ctx->flags |= FLAG_IMM_OVERFLOW;
738 ++ return 0;
739 ++ }
740 ++
741 + return imm;
742 + }
743 +
744 +@@ -817,6 +827,14 @@ b_epilogue:
745 + default:
746 + return -1;
747 + }
748 ++
749 ++ if (ctx->flags & FLAG_IMM_OVERFLOW)
750 ++ /*
751 ++ * this instruction generated an overflow when
752 ++ * trying to access the literal pool, so
753 ++ * delegate this filter to the kernel interpreter.
754 ++ */
755 ++ return -1;
756 + }
757 +
758 + /* compute offsets only during the first pass */
759 +@@ -876,7 +894,14 @@ void bpf_jit_compile(struct sk_filter *fp)
760 +
761 + ctx.idx = 0;
762 + build_prologue(&ctx);
763 +- build_body(&ctx);
764 ++ if (build_body(&ctx) < 0) {
765 ++#if __LINUX_ARM_ARCH__ < 7
766 ++ if (ctx.imm_count)
767 ++ kfree(ctx.imms);
768 ++#endif
769 ++ bpf_jit_binary_free(header);
770 ++ goto out;
771 ++ }
772 + build_epilogue(&ctx);
773 +
774 + flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
775 +diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
776 +index 356ee84cad95..04845aaf5985 100644
777 +--- a/arch/c6x/kernel/time.c
778 ++++ b/arch/c6x/kernel/time.c
779 +@@ -49,7 +49,7 @@ u64 sched_clock(void)
780 + return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
781 + }
782 +
783 +-void time_init(void)
784 ++void __init time_init(void)
785 + {
786 + u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
787 +
788 +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
789 +index a5aa43d07c8e..9cd8cbf9bd43 100644
790 +--- a/arch/mips/kernel/irq.c
791 ++++ b/arch/mips/kernel/irq.c
792 +@@ -110,7 +110,7 @@ void __init init_IRQ(void)
793 + #endif
794 + }
795 +
796 +-#ifdef DEBUG_STACKOVERFLOW
797 ++#ifdef CONFIG_DEBUG_STACKOVERFLOW
798 + static inline void check_stack_overflow(void)
799 + {
800 + unsigned long sp;
801 +diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
802 +index 5bf34ec89669..2ca17353fc0c 100644
803 +--- a/arch/mips/power/hibernate.S
804 ++++ b/arch/mips/power/hibernate.S
805 +@@ -31,6 +31,8 @@ LEAF(swsusp_arch_suspend)
806 + END(swsusp_arch_suspend)
807 +
808 + LEAF(swsusp_arch_resume)
809 ++ /* Avoid TLB mismatch during and after kernel resume */
810 ++ jal local_flush_tlb_all
811 + PTR_L t0, restore_pblist
812 + 0:
813 + PTR_L t1, PBE_ADDRESS(t0) /* source */
814 +@@ -44,7 +46,6 @@ LEAF(swsusp_arch_resume)
815 + bne t1, t3, 1b
816 + PTR_L t0, PBE_NEXT(t0)
817 + bnez t0, 0b
818 +- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
819 + PTR_LA t0, saved_regs
820 + PTR_L ra, PT_R31(t0)
821 + PTR_L sp, PT_R29(t0)
822 +diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
823 +index b4437e8a7a8f..334254c5292e 100644
824 +--- a/arch/powerpc/kernel/cacheinfo.c
825 ++++ b/arch/powerpc/kernel/cacheinfo.c
826 +@@ -62,12 +62,22 @@ struct cache_type_info {
827 + };
828 +
829 + /* These are used to index the cache_type_info array. */
830 +-#define CACHE_TYPE_UNIFIED 0
831 +-#define CACHE_TYPE_INSTRUCTION 1
832 +-#define CACHE_TYPE_DATA 2
833 ++#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
834 ++#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
835 ++#define CACHE_TYPE_INSTRUCTION 2
836 ++#define CACHE_TYPE_DATA 3
837 +
838 + static const struct cache_type_info cache_type_info[] = {
839 + {
840 ++ /* Embedded systems that use cache-size, cache-block-size,
841 ++ * etc. for the Unified (typically L2) cache. */
842 ++ .name = "Unified",
843 ++ .size_prop = "cache-size",
844 ++ .line_size_props = { "cache-line-size",
845 ++ "cache-block-size", },
846 ++ .nr_sets_prop = "cache-sets",
847 ++ },
848 ++ {
849 + /* PowerPC Processor binding says the [di]-cache-*
850 + * must be equal on unified caches, so just use
851 + * d-cache properties. */
852 +@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
853 + {
854 + struct cache *iter;
855 +
856 +- if (cache->type == CACHE_TYPE_UNIFIED)
857 ++ if (cache->type == CACHE_TYPE_UNIFIED ||
858 ++ cache->type == CACHE_TYPE_UNIFIED_D)
859 + return cache;
860 +
861 + list_for_each_entry(iter, &cache_list, list)
862 +@@ -324,15 +335,27 @@ static bool cache_node_is_unified(const struct device_node *np)
863 + return of_get_property(np, "cache-unified", NULL);
864 + }
865 +
866 +-static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
867 ++/*
868 ++ * Unified caches can have two different sets of tags. Most embedded
869 ++ * use cache-size, etc. for the unified cache size, but open firmware systems
870 ++ * use d-cache-size, etc. Check on initialization for which type we have, and
871 ++ * return the appropriate structure type. Assume it's embedded if it isn't
872 ++ * open firmware. If it's yet a 3rd type, then there will be missing entries
873 ++ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
874 ++ * to be extended further.
875 ++ */
876 ++static int cache_is_unified_d(const struct device_node *np)
877 + {
878 +- struct cache *cache;
879 ++ return of_get_property(np,
880 ++ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
881 ++ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
882 ++}
883 +
884 ++static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
885 ++{
886 + pr_debug("creating L%d ucache for %s\n", level, node->full_name);
887 +
888 +- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
889 +-
890 +- return cache;
891 ++ return new_cache(cache_is_unified_d(node), level, node);
892 + }
893 +
894 + static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
895 +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
896 +index 7703569b5d4f..6be807d29b1d 100644
897 +--- a/arch/powerpc/kernel/vmlinux.lds.S
898 ++++ b/arch/powerpc/kernel/vmlinux.lds.S
899 +@@ -213,6 +213,7 @@ SECTIONS
900 + *(.opd)
901 + }
902 +
903 ++ . = ALIGN(256);
904 + .got : AT(ADDR(.got) - LOAD_OFFSET) {
905 + __toc_start = .;
906 + *(.got)
907 +diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
908 +index e8a18d1cc7c9..a9bd794652c9 100644
909 +--- a/arch/powerpc/perf/callchain.c
910 ++++ b/arch/powerpc/perf/callchain.c
911 +@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
912 + sp = regs->gpr[1];
913 + perf_callchain_store(entry, next_ip);
914 +
915 +- for (;;) {
916 ++ while (entry->nr < PERF_MAX_STACK_DEPTH) {
917 + fp = (unsigned long __user *) sp;
918 + if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
919 + return;
920 +diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
921 +index 0f1b706506ed..27672762746b 100644
922 +--- a/arch/powerpc/platforms/pseries/dlpar.c
923 ++++ b/arch/powerpc/platforms/pseries/dlpar.c
924 +@@ -416,6 +416,12 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
925 + goto out;
926 + }
927 +
928 ++ rc = dlpar_acquire_drc(drc_index);
929 ++ if (rc) {
930 ++ rc = -EINVAL;
931 ++ goto out;
932 ++ }
933 ++
934 + dn = dlpar_configure_connector(drc_index);
935 + if (!dn) {
936 + rc = -EINVAL;
937 +@@ -436,13 +442,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
938 + kfree(dn->full_name);
939 + dn->full_name = cpu_name;
940 +
941 +- rc = dlpar_acquire_drc(drc_index);
942 +- if (rc) {
943 +- dlpar_free_cc_nodes(dn);
944 +- rc = -EINVAL;
945 +- goto out;
946 +- }
947 +-
948 + rc = dlpar_attach_node(dn);
949 + if (rc) {
950 + dlpar_release_drc(drc_index);
951 +diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
952 +index b1bd170f24b1..c2dac2e0e56a 100644
953 +--- a/arch/s390/crypto/ghash_s390.c
954 ++++ b/arch/s390/crypto/ghash_s390.c
955 +@@ -16,11 +16,12 @@
956 + #define GHASH_DIGEST_SIZE 16
957 +
958 + struct ghash_ctx {
959 +- u8 icv[16];
960 +- u8 key[16];
961 ++ u8 key[GHASH_BLOCK_SIZE];
962 + };
963 +
964 + struct ghash_desc_ctx {
965 ++ u8 icv[GHASH_BLOCK_SIZE];
966 ++ u8 key[GHASH_BLOCK_SIZE];
967 + u8 buffer[GHASH_BLOCK_SIZE];
968 + u32 bytes;
969 + };
970 +@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
971 + static int ghash_init(struct shash_desc *desc)
972 + {
973 + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
974 ++ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
975 +
976 + memset(dctx, 0, sizeof(*dctx));
977 ++ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
978 +
979 + return 0;
980 + }
981 +@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
982 + }
983 +
984 + memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
985 +- memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
986 +
987 + return 0;
988 + }
989 +@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
990 + const u8 *src, unsigned int srclen)
991 + {
992 + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
993 +- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
994 + unsigned int n;
995 + u8 *buf = dctx->buffer;
996 + int ret;
997 +@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
998 + src += n;
999 +
1000 + if (!dctx->bytes) {
1001 +- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
1002 ++ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
1003 + GHASH_BLOCK_SIZE);
1004 + BUG_ON(ret != GHASH_BLOCK_SIZE);
1005 + }
1006 +@@ -78,7 +79,7 @@ static int ghash_update(struct shash_desc *desc,
1007 +
1008 + n = srclen & ~(GHASH_BLOCK_SIZE - 1);
1009 + if (n) {
1010 +- ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
1011 ++ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
1012 + BUG_ON(ret != n);
1013 + src += n;
1014 + srclen -= n;
1015 +@@ -92,7 +93,7 @@ static int ghash_update(struct shash_desc *desc,
1016 + return 0;
1017 + }
1018 +
1019 +-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
1020 ++static int ghash_flush(struct ghash_desc_ctx *dctx)
1021 + {
1022 + u8 *buf = dctx->buffer;
1023 + int ret;
1024 +@@ -102,20 +103,19 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
1025 +
1026 + memset(pos, 0, dctx->bytes);
1027 +
1028 +- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
1029 ++ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
1030 + BUG_ON(ret != GHASH_BLOCK_SIZE);
1031 +- }
1032 +
1033 +- dctx->bytes = 0;
1034 ++ dctx->bytes = 0;
1035 ++ }
1036 + }
1037 +
1038 + static int ghash_final(struct shash_desc *desc, u8 *dst)
1039 + {
1040 + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
1041 +- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
1042 +
1043 +- ghash_flush(ctx, dctx);
1044 +- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
1045 ++ ghash_flush(dctx);
1046 ++ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
1047 +
1048 + return 0;
1049 + }
1050 +diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
1051 +index aa1494d0e380..42068041733c 100644
1052 +--- a/arch/s390/kernel/suspend.c
1053 ++++ b/arch/s390/kernel/suspend.c
1054 +@@ -9,6 +9,8 @@
1055 + #include <linux/pfn.h>
1056 + #include <linux/suspend.h>
1057 + #include <linux/mm.h>
1058 ++#include <asm/ipl.h>
1059 ++#include <asm/sections.h>
1060 + #include <asm/ctl_reg.h>
1061 +
1062 + /*
1063 +@@ -137,6 +139,8 @@ int pfn_is_nosave(unsigned long pfn)
1064 + {
1065 + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
1066 + unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
1067 ++ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
1068 ++ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
1069 +
1070 + /* Always save lowcore pages (LC protection might be enabled). */
1071 + if (pfn <= LC_PAGES)
1072 +@@ -144,6 +148,8 @@ int pfn_is_nosave(unsigned long pfn)
1073 + if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
1074 + return 1;
1075 + /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
1076 ++ if (pfn >= stext_pfn && pfn <= eshared_pfn)
1077 ++ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
1078 + if (tprot(PFN_PHYS(pfn)))
1079 + return 1;
1080 + return 0;
1081 +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
1082 +index e5a45dbd26ac..f641458e86b4 100644
1083 +--- a/arch/s390/kvm/priv.c
1084 ++++ b/arch/s390/kvm/priv.c
1085 +@@ -218,6 +218,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
1086 + for (n = mem->count - 1; n > 0 ; n--)
1087 + memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
1088 +
1089 ++ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
1090 + mem->vm[0].cpus_total = cpus;
1091 + mem->vm[0].cpus_configured = cpus;
1092 + mem->vm[0].cpus_standby = 0;
1093 +diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
1094 +index 257d9cca214f..1262fb6a9df6 100644
1095 +--- a/arch/x86/include/asm/i387.h
1096 ++++ b/arch/x86/include/asm/i387.h
1097 +@@ -23,8 +23,32 @@ extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
1098 + extern void math_state_restore(void);
1099 +
1100 + extern bool irq_fpu_usable(void);
1101 +-extern void kernel_fpu_begin(void);
1102 +-extern void kernel_fpu_end(void);
1103 ++
1104 ++/*
1105 ++ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
1106 ++ * and they don't touch the preempt state on their own.
1107 ++ * If you enable preemption after __kernel_fpu_begin(), preempt notifier
1108 ++ * should call the __kernel_fpu_end() to prevent the kernel/user FPU
1109 ++ * state from getting corrupted. KVM for example uses this model.
1110 ++ *
1111 ++ * All other cases use kernel_fpu_begin/end() which disable preemption
1112 ++ * during kernel FPU usage.
1113 ++ */
1114 ++extern void __kernel_fpu_begin(void);
1115 ++extern void __kernel_fpu_end(void);
1116 ++
1117 ++static inline void kernel_fpu_begin(void)
1118 ++{
1119 ++ WARN_ON_ONCE(!irq_fpu_usable());
1120 ++ preempt_disable();
1121 ++ __kernel_fpu_begin();
1122 ++}
1123 ++
1124 ++static inline void kernel_fpu_end(void)
1125 ++{
1126 ++ __kernel_fpu_end();
1127 ++ preempt_enable();
1128 ++}
1129 +
1130 + /*
1131 + * Some instructions like VIA's padlock instructions generate a spurious
1132 +diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
1133 +index f229b13a5f30..0c5482257fc3 100644
1134 +--- a/arch/x86/include/asm/iommu_table.h
1135 ++++ b/arch/x86/include/asm/iommu_table.h
1136 +@@ -79,11 +79,12 @@ struct iommu_table_entry {
1137 + * d). Similar to the 'init', except that this gets called from pci_iommu_init
1138 + * where we do have a memory allocator.
1139 + *
1140 +- * The standard vs the _FINISH differs in that the _FINISH variant will
1141 +- * continue detecting other IOMMUs in the call list after the
1142 +- * the detection routine returns a positive number. The _FINISH will
1143 +- * stop the execution chain. Both will still call the 'init' and
1144 +- * 'late_init' functions if they are set.
1145 ++ * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
1146 ++ * in that the former will continue detecting other IOMMUs in the call
1147 ++ * list after the detection routine returns a positive number, while the
1148 ++ * latter will stop the execution chain upon first successful detection.
1149 ++ * Both variants will still call the 'init' and 'late_init' functions if
1150 ++ * they are set.
1151 + */
1152 + #define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \
1153 + __IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
1154 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
1155 +index 0d2db0e7caf4..9eeaed48d0bf 100644
1156 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
1157 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
1158 +@@ -652,11 +652,14 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
1159 + static int mce_no_way_out(struct mce *m, char **msg)
1160 + {
1161 + int i;
1162 ++ char *tmp;
1163 +
1164 + for (i = 0; i < banks; i++) {
1165 + m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
1166 +- if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
1167 ++ if (mce_severity(m, tolerant, &tmp) >= MCE_PANIC_SEVERITY) {
1168 ++ *msg = tmp;
1169 + return 1;
1170 ++ }
1171 + }
1172 + return 0;
1173 + }
1174 +diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
1175 +index 6610e811fb39..7aa728d72b60 100644
1176 +--- a/arch/x86/kernel/i387.c
1177 ++++ b/arch/x86/kernel/i387.c
1178 +@@ -77,29 +77,26 @@ bool irq_fpu_usable(void)
1179 + }
1180 + EXPORT_SYMBOL(irq_fpu_usable);
1181 +
1182 +-void kernel_fpu_begin(void)
1183 ++void __kernel_fpu_begin(void)
1184 + {
1185 + struct task_struct *me = current;
1186 +
1187 +- WARN_ON_ONCE(!irq_fpu_usable());
1188 +- preempt_disable();
1189 + if (__thread_has_fpu(me)) {
1190 + __save_init_fpu(me);
1191 + __thread_clear_has_fpu(me);
1192 +- /* We do 'stts()' in kernel_fpu_end() */
1193 ++ /* We do 'stts()' in __kernel_fpu_end() */
1194 + } else {
1195 + percpu_write(fpu_owner_task, NULL);
1196 + clts();
1197 + }
1198 + }
1199 +-EXPORT_SYMBOL(kernel_fpu_begin);
1200 ++EXPORT_SYMBOL(__kernel_fpu_begin);
1201 +
1202 +-void kernel_fpu_end(void)
1203 ++void __kernel_fpu_end(void)
1204 + {
1205 + stts();
1206 +- preempt_enable();
1207 + }
1208 +-EXPORT_SYMBOL(kernel_fpu_end);
1209 ++EXPORT_SYMBOL(__kernel_fpu_end);
1210 +
1211 + void unlazy_fpu(struct task_struct *tsk)
1212 + {
1213 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1214 +index 84f4bca0ca2c..2da1a8c9173e 100644
1215 +--- a/arch/x86/kvm/mmu.c
1216 ++++ b/arch/x86/kvm/mmu.c
1217 +@@ -3658,7 +3658,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1218 + ++vcpu->kvm->stat.mmu_pte_write;
1219 + kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
1220 +
1221 +- mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
1222 ++ mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
1223 + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
1224 + if (detect_write_misaligned(sp, gpa, bytes) ||
1225 + detect_write_flooding(sp)) {
1226 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1227 +index 2eb4e5af8816..4ad0d7171d6c 100644
1228 +--- a/arch/x86/kvm/vmx.c
1229 ++++ b/arch/x86/kvm/vmx.c
1230 +@@ -1455,8 +1455,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1231 + #ifdef CONFIG_X86_64
1232 + wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1233 + #endif
1234 +- if (user_has_fpu())
1235 +- clts();
1236 ++ /*
1237 ++ * If the FPU is not active (through the host task or
1238 ++ * the guest vcpu), then restore the cr0.TS bit.
1239 ++ */
1240 ++ if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
1241 ++ stts();
1242 + load_gdt(&__get_cpu_var(host_gdt));
1243 + }
1244 +
1245 +@@ -3633,7 +3637,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
1246 + struct desc_ptr dt;
1247 + unsigned long cr4;
1248 +
1249 +- vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
1250 ++ vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
1251 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
1252 +
1253 + /* Save the most likely value for this task's CR4 in the VMCS. */
1254 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1255 +index 318a2454366f..4ad2b7bb382e 100644
1256 +--- a/arch/x86/kvm/x86.c
1257 ++++ b/arch/x86/kvm/x86.c
1258 +@@ -5907,7 +5907,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
1259 + */
1260 + kvm_put_guest_xcr0(vcpu);
1261 + vcpu->guest_fpu_loaded = 1;
1262 +- unlazy_fpu(current);
1263 ++ __kernel_fpu_begin();
1264 + fpu_restore_checking(&vcpu->arch.guest_fpu);
1265 + trace_kvm_fpu(1);
1266 + }
1267 +@@ -5921,6 +5921,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
1268 +
1269 + vcpu->guest_fpu_loaded = 0;
1270 + fpu_save_init(&vcpu->arch.guest_fpu);
1271 ++ __kernel_fpu_end();
1272 + ++vcpu->stat.fpu_reload;
1273 + kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
1274 + trace_kvm_fpu(0);
1275 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
1276 +index 0597f95b6da6..95f9934e8050 100644
1277 +--- a/arch/x86/net/bpf_jit_comp.c
1278 ++++ b/arch/x86/net/bpf_jit_comp.c
1279 +@@ -155,7 +155,12 @@ void bpf_jit_compile(struct sk_filter *fp)
1280 + }
1281 + cleanup_addr = proglen; /* epilogue address */
1282 +
1283 +- for (pass = 0; pass < 10; pass++) {
1284 ++ /* JITed image shrinks with every pass and the loop iterates
1285 ++ * until the image stops shrinking. Very large bpf programs
1286 ++ * may converge on the last pass. In such case do one more
1287 ++ * pass to emit the final image
1288 ++ */
1289 ++ for (pass = 0; pass < 10 || image; pass++) {
1290 + u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
1291 + /* no prologue/epilogue for trivial filters (RET something) */
1292 + proglen = 0;
1293 +diff --git a/block/genhd.c b/block/genhd.c
1294 +index 7a2a8dc9bc5f..618ca1aaa858 100644
1295 +--- a/block/genhd.c
1296 ++++ b/block/genhd.c
1297 +@@ -420,13 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
1298 + do {
1299 + if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
1300 + return -ENOMEM;
1301 +- spin_lock(&ext_devt_lock);
1302 ++ spin_lock_bh(&ext_devt_lock);
1303 + rc = idr_get_new(&ext_devt_idr, part, &idx);
1304 + if (!rc && idx >= NR_EXT_DEVT) {
1305 + idr_remove(&ext_devt_idr, idx);
1306 + rc = -EBUSY;
1307 + }
1308 +- spin_unlock(&ext_devt_lock);
1309 ++ spin_unlock_bh(&ext_devt_lock);
1310 + } while (rc == -EAGAIN);
1311 +
1312 + if (rc)
1313 +@@ -451,9 +451,9 @@ void blk_free_devt(dev_t devt)
1314 + return;
1315 +
1316 + if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
1317 +- spin_lock(&ext_devt_lock);
1318 ++ spin_lock_bh(&ext_devt_lock);
1319 + idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
1320 +- spin_unlock(&ext_devt_lock);
1321 ++ spin_unlock_bh(&ext_devt_lock);
1322 + }
1323 + }
1324 +
1325 +@@ -684,13 +684,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
1326 + } else {
1327 + struct hd_struct *part;
1328 +
1329 +- spin_lock(&ext_devt_lock);
1330 ++ spin_lock_bh(&ext_devt_lock);
1331 + part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
1332 + if (part && get_disk(part_to_disk(part))) {
1333 + *partno = part->partno;
1334 + disk = part_to_disk(part);
1335 + }
1336 +- spin_unlock(&ext_devt_lock);
1337 ++ spin_unlock_bh(&ext_devt_lock);
1338 + }
1339 +
1340 + return disk;
1341 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1342 +index 6fc61eb07b7b..1338f1f79582 100644
1343 +--- a/drivers/ata/ahci.c
1344 ++++ b/drivers/ata/ahci.c
1345 +@@ -67,6 +67,7 @@ enum board_ids {
1346 + board_ahci_yes_fbs,
1347 +
1348 + /* board IDs for specific chipsets in alphabetical order */
1349 ++ board_ahci_avn,
1350 + board_ahci_mcp65,
1351 + board_ahci_mcp77,
1352 + board_ahci_mcp89,
1353 +@@ -85,6 +86,8 @@ enum board_ids {
1354 + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
1355 + static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1356 + unsigned long deadline);
1357 ++static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
1358 ++ unsigned long deadline);
1359 + static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1360 + unsigned long deadline);
1361 + #ifdef CONFIG_PM
1362 +@@ -106,6 +109,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
1363 + .hardreset = ahci_p5wdh_hardreset,
1364 + };
1365 +
1366 ++static struct ata_port_operations ahci_avn_ops = {
1367 ++ .inherits = &ahci_ops,
1368 ++ .hardreset = ahci_avn_hardreset,
1369 ++};
1370 ++
1371 + static const struct ata_port_info ahci_port_info[] = {
1372 + /* by features */
1373 + [board_ahci] =
1374 +@@ -154,6 +162,12 @@ static const struct ata_port_info ahci_port_info[] = {
1375 + .port_ops = &ahci_ops,
1376 + },
1377 + /* by chipsets */
1378 ++ [board_ahci_avn] = {
1379 ++ .flags = AHCI_FLAG_COMMON,
1380 ++ .pio_mask = ATA_PIO4,
1381 ++ .udma_mask = ATA_UDMA6,
1382 ++ .port_ops = &ahci_avn_ops,
1383 ++ },
1384 + [board_ahci_mcp65] =
1385 + {
1386 + AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
1387 +@@ -300,14 +314,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1388 + { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
1389 + { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
1390 + { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
1391 +- { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
1392 +- { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
1393 +- { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
1394 +- { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
1395 +- { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
1396 +- { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
1397 +- { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
1398 +- { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
1399 ++ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
1400 ++ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
1401 ++ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
1402 ++ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
1403 ++ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
1404 ++ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
1405 ++ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
1406 ++ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
1407 + { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
1408 + { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
1409 + { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
1410 +@@ -671,6 +685,78 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1411 + return rc;
1412 + }
1413 +
1414 ++/*
1415 ++ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
1416 ++ *
1417 ++ * It has been observed with some SSDs that the timing of events in the
1418 ++ * link synchronization phase can leave the port in a state that can not
1419 ++ * be recovered by a SATA-hard-reset alone. The failing signature is
1420 ++ * SStatus.DET stuck at 1 ("Device presence detected but Phy
1421 ++ * communication not established"). It was found that unloading and
1422 ++ * reloading the driver when this problem occurs allows the drive
1423 ++ * connection to be recovered (DET advanced to 0x3). The critical
1424 ++ * component of reloading the driver is that the port state machines are
1425 ++ * reset by bouncing "port enable" in the AHCI PCS configuration
1426 ++ * register. So, reproduce that effect by bouncing a port whenever we
1427 ++ * see DET==1 after a reset.
1428 ++ */
1429 ++static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
1430 ++ unsigned long deadline)
1431 ++{
1432 ++ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1433 ++ struct ata_port *ap = link->ap;
1434 ++ struct ahci_port_priv *pp = ap->private_data;
1435 ++ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1436 ++ unsigned long tmo = deadline - jiffies;
1437 ++ struct ata_taskfile tf;
1438 ++ bool online;
1439 ++ int rc, i;
1440 ++
1441 ++ DPRINTK("ENTER\n");
1442 ++
1443 ++ ahci_stop_engine(ap);
1444 ++
1445 ++ for (i = 0; i < 2; i++) {
1446 ++ u16 val;
1447 ++ u32 sstatus;
1448 ++ int port = ap->port_no;
1449 ++ struct ata_host *host = ap->host;
1450 ++ struct pci_dev *pdev = to_pci_dev(host->dev);
1451 ++
1452 ++ /* clear D2H reception area to properly wait for D2H FIS */
1453 ++ ata_tf_init(link->device, &tf);
1454 ++ tf.command = ATA_BUSY;
1455 ++ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1456 ++
1457 ++ rc = sata_link_hardreset(link, timing, deadline, &online,
1458 ++ ahci_check_ready);
1459 ++
1460 ++ if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
1461 ++ (sstatus & 0xf) != 1)
1462 ++ break;
1463 ++
1464 ++ ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
1465 ++ port);
1466 ++
1467 ++ pci_read_config_word(pdev, 0x92, &val);
1468 ++ val &= ~(1 << port);
1469 ++ pci_write_config_word(pdev, 0x92, val);
1470 ++ ata_msleep(ap, 1000);
1471 ++ val |= 1 << port;
1472 ++ pci_write_config_word(pdev, 0x92, val);
1473 ++ deadline += tmo;
1474 ++ }
1475 ++
1476 ++ ahci_start_engine(ap);
1477 ++
1478 ++ if (online)
1479 ++ *class = ahci_dev_classify(ap);
1480 ++
1481 ++ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1482 ++ return rc;
1483 ++}
1484 ++
1485 ++
1486 + #ifdef CONFIG_PM
1487 + static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1488 + {
1489 +diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
1490 +index c2594ddf25b0..57eb1c212a4c 100644
1491 +--- a/drivers/ata/ahci.h
1492 ++++ b/drivers/ata/ahci.h
1493 +@@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
1494 + extern struct ata_port_operations ahci_ops;
1495 + extern struct ata_port_operations ahci_pmp_retry_srst_ops;
1496 +
1497 ++unsigned int ahci_dev_classify(struct ata_port *ap);
1498 + void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1499 + u32 opts);
1500 + void ahci_save_initial_config(struct device *dev,
1501 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
1502 +index 60f41cd2c330..30cdba79d7ae 100644
1503 +--- a/drivers/ata/libahci.c
1504 ++++ b/drivers/ata/libahci.c
1505 +@@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)
1506 + }
1507 + }
1508 +
1509 +-static unsigned int ahci_dev_classify(struct ata_port *ap)
1510 ++unsigned int ahci_dev_classify(struct ata_port *ap)
1511 + {
1512 + void __iomem *port_mmio = ahci_port_base(ap);
1513 + struct ata_taskfile tf;
1514 +@@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
1515 +
1516 + return ata_dev_classify(&tf);
1517 + }
1518 ++EXPORT_SYMBOL_GPL(ahci_dev_classify);
1519 +
1520 + void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1521 + u32 opts)
1522 +@@ -1670,8 +1671,7 @@ static void ahci_port_intr(struct ata_port *ap)
1523 + if (unlikely(resetting))
1524 + status &= ~PORT_IRQ_BAD_PMP;
1525 +
1526 +- /* if LPM is enabled, PHYRDY doesn't mean anything */
1527 +- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
1528 ++ if (sata_lpm_ignore_phy_events(&ap->link)) {
1529 + status &= ~PORT_IRQ_PHYRDY;
1530 + ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
1531 + }
1532 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1533 +index adaf994abb79..0a6767b9939c 100644
1534 +--- a/drivers/ata/libata-core.c
1535 ++++ b/drivers/ata/libata-core.c
1536 +@@ -6657,6 +6657,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
1537 + return tmp;
1538 + }
1539 +
1540 ++/**
1541 ++ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
1542 ++ * @link: Link receiving the event
1543 ++ *
1544 ++ * Test whether the received PHY event has to be ignored or not.
1545 ++ *
1546 ++ * LOCKING:
1547 ++ * None:
1548 ++ *
1549 ++ * RETURNS:
1550 ++ * True if the event has to be ignored.
1551 ++ */
1552 ++bool sata_lpm_ignore_phy_events(struct ata_link *link)
1553 ++{
1554 ++ unsigned long lpm_timeout = link->last_lpm_change +
1555 ++ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
1556 ++
1557 ++ /* if LPM is enabled, PHYRDY doesn't mean anything */
1558 ++ if (link->lpm_policy > ATA_LPM_MAX_POWER)
1559 ++ return true;
1560 ++
1561 ++ /* ignore the first PHY event after the LPM policy changed
1562 ++ * as it is might be spurious
1563 ++ */
1564 ++ if ((link->flags & ATA_LFLAG_CHANGED) &&
1565 ++ time_before(jiffies, lpm_timeout))
1566 ++ return true;
1567 ++
1568 ++ return false;
1569 ++}
1570 ++EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
1571 ++
1572 + /*
1573 + * Dummy port_ops
1574 + */
1575 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1576 +index 37fb4d6069a2..033b8cd497ac 100644
1577 +--- a/drivers/ata/libata-eh.c
1578 ++++ b/drivers/ata/libata-eh.c
1579 +@@ -3424,6 +3424,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
1580 + }
1581 + }
1582 +
1583 ++ link->last_lpm_change = jiffies;
1584 ++ link->flags |= ATA_LFLAG_CHANGED;
1585 ++
1586 + return 0;
1587 +
1588 + fail:
1589 +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
1590 +index 8a084bffd068..52be498c2d1c 100644
1591 +--- a/drivers/bluetooth/ath3k.c
1592 ++++ b/drivers/bluetooth/ath3k.c
1593 +@@ -64,6 +64,7 @@ static struct usb_device_id ath3k_table[] = {
1594 + /* Atheros AR3011 with sflash firmware*/
1595 + { USB_DEVICE(0x0489, 0xE027) },
1596 + { USB_DEVICE(0x0489, 0xE03D) },
1597 ++ { USB_DEVICE(0x04F2, 0xAFF1) },
1598 + { USB_DEVICE(0x0930, 0x0215) },
1599 + { USB_DEVICE(0x0CF3, 0x3002) },
1600 + { USB_DEVICE(0x0CF3, 0xE019) },
1601 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1602 +index 7c0b21ebd33b..f8a58db55055 100644
1603 +--- a/drivers/bluetooth/btusb.c
1604 ++++ b/drivers/bluetooth/btusb.c
1605 +@@ -142,6 +142,7 @@ static struct usb_device_id blacklist_table[] = {
1606 + /* Atheros 3011 with sflash firmware */
1607 + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
1608 + { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
1609 ++ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
1610 + { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
1611 + { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
1612 + { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
1613 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1614 +index bdecba5de3a4..e53994978ce8 100644
1615 +--- a/drivers/char/ipmi/ipmi_si_intf.c
1616 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
1617 +@@ -2668,7 +2668,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
1618 + smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1619 + schedule_timeout_uninterruptible(1);
1620 + smi_result = smi_info->handlers->event(
1621 +- smi_info->si_sm, 100);
1622 ++ smi_info->si_sm, jiffies_to_usecs(1));
1623 + } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
1624 + smi_result = smi_info->handlers->event(
1625 + smi_info->si_sm, 0);
1626 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1627 +index b1f1d105e8c7..e1c744d7370a 100644
1628 +--- a/drivers/gpu/drm/i915/i915_gem.c
1629 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1630 +@@ -1779,9 +1779,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1631 + uint32_t seqno;
1632 + int i;
1633 +
1634 +- if (list_empty(&ring->request_list))
1635 +- return;
1636 +-
1637 + WARN_ON(i915_verify_lists(ring->dev));
1638 +
1639 + seqno = ring->get_seqno(ring);
1640 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1641 +index d51c08da3f69..af6790ccb8a9 100644
1642 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
1643 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1644 +@@ -318,8 +318,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
1645 + misc |= ATOM_COMPOSITESYNC;
1646 + if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1647 + misc |= ATOM_INTERLACE;
1648 +- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1649 ++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
1650 + misc |= ATOM_DOUBLE_CLOCK_MODE;
1651 ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1652 ++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
1653 +
1654 + args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
1655 + args.ucCRTC = radeon_crtc->crtc_id;
1656 +@@ -362,8 +364,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
1657 + misc |= ATOM_COMPOSITESYNC;
1658 + if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1659 + misc |= ATOM_INTERLACE;
1660 +- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1661 ++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
1662 + misc |= ATOM_DOUBLE_CLOCK_MODE;
1663 ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1664 ++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
1665 +
1666 + args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
1667 + args.ucCRTC = radeon_crtc->crtc_id;
1668 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1669 +index c5fe79e67ed9..db4df97b7872 100644
1670 +--- a/drivers/gpu/drm/radeon/evergreen.c
1671 ++++ b/drivers/gpu/drm/radeon/evergreen.c
1672 +@@ -1079,7 +1079,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
1673 + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1674 + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1675 + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1676 +- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1677 ++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
1678 + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1679 + WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1680 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1681 +diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1682 +index 461262eee79a..1f451796407b 100644
1683 +--- a/drivers/gpu/drm/radeon/ni.c
1684 ++++ b/drivers/gpu/drm/radeon/ni.c
1685 +@@ -1075,7 +1075,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
1686 + L2_CACHE_BIGK_FRAGMENT_SIZE(6));
1687 + /* setup context0 */
1688 + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1689 +- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1690 ++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
1691 + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1692 + WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1693 + (u32)(rdev->dummy_page.addr >> 12));
1694 +diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1695 +index 9c7062d970ee..d441aed782ad 100644
1696 +--- a/drivers/gpu/drm/radeon/r600.c
1697 ++++ b/drivers/gpu/drm/radeon/r600.c
1698 +@@ -930,7 +930,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
1699 + WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1700 + WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1701 + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1702 +- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1703 ++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
1704 + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1705 + WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1706 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1707 +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
1708 +index f3ee36036487..d66d2cdf4f0a 100644
1709 +--- a/drivers/gpu/drm/radeon/radeon_cs.c
1710 ++++ b/drivers/gpu/drm/radeon/radeon_cs.c
1711 +@@ -49,7 +49,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
1712 + if (p->relocs_ptr == NULL) {
1713 + return -ENOMEM;
1714 + }
1715 +- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
1716 ++ p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
1717 + if (p->relocs == NULL) {
1718 + return -ENOMEM;
1719 + }
1720 +@@ -324,7 +324,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
1721 + }
1722 + }
1723 + kfree(parser->track);
1724 +- kfree(parser->relocs);
1725 ++ drm_free_large(parser->relocs);
1726 + kfree(parser->relocs_ptr);
1727 + for (i = 0; i < parser->nchunks; i++) {
1728 + kfree(parser->chunks[i].kdata);
1729 +diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
1730 +index 1ec1255520ad..3358730be78b 100644
1731 +--- a/drivers/gpu/drm/radeon/rv770.c
1732 ++++ b/drivers/gpu/drm/radeon/rv770.c
1733 +@@ -158,7 +158,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
1734 + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1735 + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1736 + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1737 +- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1738 ++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
1739 + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1740 + WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1741 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1742 +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1743 +index 068b21f0d7df..3b6e641decd0 100644
1744 +--- a/drivers/gpu/drm/radeon/si.c
1745 ++++ b/drivers/gpu/drm/radeon/si.c
1746 +@@ -2537,7 +2537,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
1747 + L2_CACHE_BIGK_FRAGMENT_SIZE(0));
1748 + /* setup context0 */
1749 + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1750 +- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1751 ++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
1752 + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1753 + WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1754 + (u32)(rdev->dummy_page.addr >> 12));
1755 +@@ -2555,7 +2555,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
1756 + */
1757 + /* set vm size, must be a multiple of 4 */
1758 + WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
1759 +- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
1760 ++ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
1761 + for (i = 1; i < 16; i++) {
1762 + if (i < 8)
1763 + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
1764 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
1765 +index 3c8b2c473b81..980ef7e174fb 100644
1766 +--- a/drivers/hv/channel.c
1767 ++++ b/drivers/hv/channel.c
1768 +@@ -177,7 +177,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
1769 + GFP_KERNEL);
1770 + if (!open_info) {
1771 + err = -ENOMEM;
1772 +- goto error0;
1773 ++ goto error_gpadl;
1774 + }
1775 +
1776 + init_completion(&open_info->waitevent);
1777 +@@ -193,7 +193,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
1778 +
1779 + if (userdatalen > MAX_USER_DEFINED_BYTES) {
1780 + err = -EINVAL;
1781 +- goto error0;
1782 ++ goto error_gpadl;
1783 + }
1784 +
1785 + if (userdatalen)
1786 +@@ -234,6 +234,9 @@ error1:
1787 + list_del(&open_info->msglistentry);
1788 + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1789 +
1790 ++error_gpadl:
1791 ++ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
1792 ++
1793 + error0:
1794 + free_pages((unsigned long)out,
1795 + get_order(send_ringbuffer_size + recv_ringbuffer_size));
1796 +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
1797 +index 9ffbfc575a0c..bcf67af0a07c 100644
1798 +--- a/drivers/hv/channel_mgmt.c
1799 ++++ b/drivers/hv/channel_mgmt.c
1800 +@@ -531,7 +531,7 @@ int vmbus_request_offers(void)
1801 + {
1802 + struct vmbus_channel_message_header *msg;
1803 + struct vmbus_channel_msginfo *msginfo;
1804 +- int ret, t;
1805 ++ int ret;
1806 +
1807 + msginfo = kmalloc(sizeof(*msginfo) +
1808 + sizeof(struct vmbus_channel_message_header),
1809 +@@ -539,8 +539,6 @@ int vmbus_request_offers(void)
1810 + if (!msginfo)
1811 + return -ENOMEM;
1812 +
1813 +- init_completion(&msginfo->waitevent);
1814 +-
1815 + msg = (struct vmbus_channel_message_header *)msginfo->msg;
1816 +
1817 + msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1818 +@@ -554,14 +552,6 @@ int vmbus_request_offers(void)
1819 + goto cleanup;
1820 + }
1821 +
1822 +- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
1823 +- if (t == 0) {
1824 +- ret = -ETIMEDOUT;
1825 +- goto cleanup;
1826 +- }
1827 +-
1828 +-
1829 +-
1830 + cleanup:
1831 + kfree(msginfo);
1832 +
1833 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1834 +index 67432e200c63..8987a9ac0309 100644
1835 +--- a/drivers/infiniband/core/cma.c
1836 ++++ b/drivers/infiniband/core/cma.c
1837 +@@ -759,36 +759,43 @@ static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
1838 + return 0;
1839 + }
1840 +
1841 ++static __be16 ss_get_port(const struct sockaddr_storage *ss)
1842 ++{
1843 ++ if (ss->ss_family == AF_INET)
1844 ++ return ((struct sockaddr_in *)ss)->sin_port;
1845 ++ else if (ss->ss_family == AF_INET6)
1846 ++ return ((struct sockaddr_in6 *)ss)->sin6_port;
1847 ++ BUG();
1848 ++}
1849 ++
1850 + static void cma_save_net_info(struct rdma_addr *addr,
1851 + struct rdma_addr *listen_addr,
1852 + u8 ip_ver, __be16 port,
1853 + union cma_ip_addr *src, union cma_ip_addr *dst)
1854 + {
1855 +- struct sockaddr_in *listen4, *ip4;
1856 +- struct sockaddr_in6 *listen6, *ip6;
1857 ++ struct sockaddr_in *ip4;
1858 ++ struct sockaddr_in6 *ip6;
1859 +
1860 + switch (ip_ver) {
1861 + case 4:
1862 +- listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
1863 + ip4 = (struct sockaddr_in *) &addr->src_addr;
1864 +- ip4->sin_family = listen4->sin_family;
1865 ++ ip4->sin_family = AF_INET;
1866 + ip4->sin_addr.s_addr = dst->ip4.addr;
1867 +- ip4->sin_port = listen4->sin_port;
1868 ++ ip4->sin_port = ss_get_port(&listen_addr->src_addr);
1869 +
1870 + ip4 = (struct sockaddr_in *) &addr->dst_addr;
1871 +- ip4->sin_family = listen4->sin_family;
1872 ++ ip4->sin_family = AF_INET;
1873 + ip4->sin_addr.s_addr = src->ip4.addr;
1874 + ip4->sin_port = port;
1875 + break;
1876 + case 6:
1877 +- listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
1878 + ip6 = (struct sockaddr_in6 *) &addr->src_addr;
1879 +- ip6->sin6_family = listen6->sin6_family;
1880 ++ ip6->sin6_family = AF_INET6;
1881 + ip6->sin6_addr = dst->ip6;
1882 +- ip6->sin6_port = listen6->sin6_port;
1883 ++ ip6->sin6_port = ss_get_port(&listen_addr->src_addr);
1884 +
1885 + ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
1886 +- ip6->sin6_family = listen6->sin6_family;
1887 ++ ip6->sin6_family = AF_INET6;
1888 + ip6->sin6_addr = src->ip6;
1889 + ip6->sin6_port = port;
1890 + break;
1891 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1892 +index d0254beb6d90..c1fef27010d4 100644
1893 +--- a/drivers/infiniband/core/umem.c
1894 ++++ b/drivers/infiniband/core/umem.c
1895 +@@ -94,6 +94,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
1896 + if (dmasync)
1897 + dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
1898 +
1899 ++ if (!size)
1900 ++ return ERR_PTR(-EINVAL);
1901 ++
1902 + /*
1903 + * If the combination of the addr and size requested for this memory
1904 + * region causes an integer overflow, return error.
1905 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1906 +index 3a7848966627..b53548c153be 100644
1907 +--- a/drivers/infiniband/hw/mlx4/qp.c
1908 ++++ b/drivers/infiniband/hw/mlx4/qp.c
1909 +@@ -1670,8 +1670,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1910 +
1911 + memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
1912 +
1913 +- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1914 +- wr->wr.ud.hlen);
1915 ++ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
1916 + *lso_seg_len = halign;
1917 + return 0;
1918 + }
1919 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1920 +index f2c2ffedeff5..8eeff9e13319 100644
1921 +--- a/drivers/input/mouse/elantech.c
1922 ++++ b/drivers/input/mouse/elantech.c
1923 +@@ -313,7 +313,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
1924 + unsigned int x2, unsigned int y2)
1925 + {
1926 + elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
1927 +- elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
1928 ++ elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
1929 + }
1930 +
1931 + /*
1932 +@@ -783,6 +783,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
1933 + }
1934 +
1935 + /*
1936 ++ * This writes the reg_07 value again to the hardware at the end of every
1937 ++ * set_rate call because the register loses its value. reg_07 allows setting
1938 ++ * absolute mode on v4 hardware
1939 ++ */
1940 ++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
1941 ++ unsigned int rate)
1942 ++{
1943 ++ struct elantech_data *etd = psmouse->private;
1944 ++
1945 ++ etd->original_set_rate(psmouse, rate);
1946 ++ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
1947 ++ psmouse_err(psmouse, "restoring reg_07 failed\n");
1948 ++}
1949 ++
1950 ++/*
1951 + * Put the touchpad into absolute mode
1952 + */
1953 + static int elantech_set_absolute_mode(struct psmouse *psmouse)
1954 +@@ -980,6 +995,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1955 + * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
1956 + * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
1957 + * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
1958 ++ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
1959 ++ * Asus X750JN 0x381f17 10, 14, 0e clickpad
1960 + * Asus UX31 0x361f00 20, 15, 0e clickpad
1961 + * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1962 + * Avatar AVIU-145A2 0x361f00 ? clickpad
1963 +@@ -1219,10 +1236,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
1964 + return true;
1965 +
1966 + /*
1967 +- * Some models have a revision higher then 20. Meaning param[2] may
1968 +- * be 10 or 20, skip the rates check for these.
1969 ++ * Some hw_version >= 4 models have a revision higher than 20. Meaning
1970 ++ * that param[2] may be 10 or 20, skip the rates check for these.
1971 + */
1972 +- if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
1973 ++ if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
1974 ++ param[2] < 40)
1975 + return true;
1976 +
1977 + for (i = 0; i < ARRAY_SIZE(rates); i++)
1978 +@@ -1427,6 +1445,11 @@ int elantech_init(struct psmouse *psmouse)
1979 + goto init_fail;
1980 + }
1981 +
1982 ++ if (etd->fw_version == 0x381f17) {
1983 ++ etd->original_set_rate = psmouse->set_rate;
1984 ++ psmouse->set_rate = elantech_set_rate_restore_reg_07;
1985 ++ }
1986 ++
1987 + if (elantech_set_input_params(psmouse)) {
1988 + psmouse_err(psmouse, "failed to query touchpad range.\n");
1989 + goto init_fail;
1990 +diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
1991 +index 46db3be45ac9..4d1b220cbdfc 100644
1992 +--- a/drivers/input/mouse/elantech.h
1993 ++++ b/drivers/input/mouse/elantech.h
1994 +@@ -137,6 +137,7 @@ struct elantech_data {
1995 + struct finger_pos mt[ETP_MAX_FINGERS];
1996 + unsigned char parity[256];
1997 + int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
1998 ++ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
1999 + };
2000 +
2001 + #ifdef CONFIG_MOUSE_PS2_ELANTECH
2002 +diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
2003 +index b5fdcb78a75b..34842e5e8b55 100644
2004 +--- a/drivers/lguest/core.c
2005 ++++ b/drivers/lguest/core.c
2006 +@@ -171,7 +171,7 @@ static void unmap_switcher(void)
2007 + bool lguest_address_ok(const struct lguest *lg,
2008 + unsigned long addr, unsigned long len)
2009 + {
2010 +- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
2011 ++ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
2012 + }
2013 +
2014 + /*
2015 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2016 +index 926989d6419f..a1bd68885c7c 100644
2017 +--- a/drivers/md/dm-crypt.c
2018 ++++ b/drivers/md/dm-crypt.c
2019 +@@ -782,11 +782,10 @@ static int crypt_convert(struct crypt_config *cc,
2020 +
2021 + switch (r) {
2022 + /* async */
2023 ++ case -EINPROGRESS:
2024 + case -EBUSY:
2025 + wait_for_completion(&ctx->restart);
2026 + INIT_COMPLETION(ctx->restart);
2027 +- /* fall through*/
2028 +- case -EINPROGRESS:
2029 + this_cc->req = NULL;
2030 + ctx->sector++;
2031 + continue;
2032 +@@ -1195,10 +1194,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
2033 + struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
2034 + struct crypt_config *cc = io->target->private;
2035 +
2036 +- if (error == -EINPROGRESS) {
2037 +- complete(&ctx->restart);
2038 ++ if (error == -EINPROGRESS)
2039 + return;
2040 +- }
2041 +
2042 + if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2043 + error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
2044 +@@ -1209,12 +1206,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
2045 + mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
2046 +
2047 + if (!atomic_dec_and_test(&ctx->pending))
2048 +- return;
2049 ++ goto done;
2050 +
2051 + if (bio_data_dir(io->base_bio) == READ)
2052 + kcryptd_crypt_read_done(io);
2053 + else
2054 + kcryptd_crypt_write_io_submit(io, 1);
2055 ++done:
2056 ++ if (!completion_done(&ctx->restart))
2057 ++ complete(&ctx->restart);
2058 + }
2059 +
2060 + static void kcryptd_crypt(struct work_struct *work)
2061 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2062 +index 17e2f526457c..83dba060525b 100644
2063 +--- a/drivers/md/md.c
2064 ++++ b/drivers/md/md.c
2065 +@@ -5431,9 +5431,9 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
2066 + int err = -ENOMEM;
2067 +
2068 + if (md_allow_write(mddev))
2069 +- file = kmalloc(sizeof(*file), GFP_NOIO);
2070 ++ file = kzalloc(sizeof(*file), GFP_NOIO);
2071 + else
2072 +- file = kmalloc(sizeof(*file), GFP_KERNEL);
2073 ++ file = kzalloc(sizeof(*file), GFP_KERNEL);
2074 +
2075 + if (!file)
2076 + goto out;
2077 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2078 +index c276ad09ace9..7a218e81eb39 100644
2079 +--- a/drivers/md/raid5.c
2080 ++++ b/drivers/md/raid5.c
2081 +@@ -1622,7 +1622,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2082 +
2083 + conf->slab_cache = sc;
2084 + conf->active_name = 1-conf->active_name;
2085 +- conf->pool_size = newsize;
2086 ++ if (!err)
2087 ++ conf->pool_size = newsize;
2088 + return err;
2089 + }
2090 +
2091 +diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
2092 +index 9729b92fbfdd..f8449d534a25 100644
2093 +--- a/drivers/memstick/core/mspro_block.c
2094 ++++ b/drivers/memstick/core/mspro_block.c
2095 +@@ -760,7 +760,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
2096 +
2097 + if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
2098 + if (msb->data_dir == READ) {
2099 +- for (cnt = 0; cnt < msb->current_seg; cnt++)
2100 ++ for (cnt = 0; cnt < msb->current_seg; cnt++) {
2101 + t_len += msb->req_sg[cnt].length
2102 + / msb->page_size;
2103 +
2104 +@@ -768,6 +768,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
2105 + t_len += msb->current_page - 1;
2106 +
2107 + t_len *= msb->page_size;
2108 ++ }
2109 + }
2110 + } else
2111 + t_len = blk_rq_bytes(msb->block_req);
2112 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
2113 +index ba821fe70bca..eef126126478 100644
2114 +--- a/drivers/mmc/core/core.c
2115 ++++ b/drivers/mmc/core/core.c
2116 +@@ -2370,6 +2370,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
2117 + switch (mode) {
2118 + case PM_HIBERNATION_PREPARE:
2119 + case PM_SUSPEND_PREPARE:
2120 ++ case PM_RESTORE_PREPARE:
2121 +
2122 + spin_lock_irqsave(&host->lock, flags);
2123 + host->rescan_disable = 1;
2124 +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
2125 +index e6f08d945709..c300cc4dcda0 100644
2126 +--- a/drivers/mmc/host/atmel-mci.c
2127 ++++ b/drivers/mmc/host/atmel-mci.c
2128 +@@ -1125,7 +1125,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2129 +
2130 + if (ios->clock) {
2131 + unsigned int clock_min = ~0U;
2132 +- u32 clkdiv;
2133 ++ int clkdiv;
2134 +
2135 + spin_lock_bh(&host->lock);
2136 + if (!host->mode_reg) {
2137 +@@ -1150,7 +1150,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2138 + /* Calculate clock divider */
2139 + if (host->caps.has_odd_clk_div) {
2140 + clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
2141 +- if (clkdiv > 511) {
2142 ++ if (clkdiv < 0) {
2143 ++ dev_warn(&mmc->class_dev,
2144 ++ "clock %u too fast; using %lu\n",
2145 ++ clock_min, host->bus_hz / 2);
2146 ++ clkdiv = 0;
2147 ++ } else if (clkdiv > 511) {
2148 + dev_warn(&mmc->class_dev,
2149 + "clock %u too slow; using %lu\n",
2150 + clock_min, host->bus_hz / (511 + 2));
2151 +diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
2152 +index ad76592fb2f4..7ac2c05783f3 100644
2153 +--- a/drivers/mtd/ubi/cdev.c
2154 ++++ b/drivers/mtd/ubi/cdev.c
2155 +@@ -475,7 +475,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
2156 + /* Validate the request */
2157 + err = -EINVAL;
2158 + if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
2159 +- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
2160 ++ req.bytes < 0 || req.bytes > vol->usable_leb_size)
2161 + break;
2162 + if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
2163 + req.dtype != UBI_UNKNOWN)
2164 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2165 +index 2455d620d96b..9abc0ea6702e 100644
2166 +--- a/drivers/mtd/ubi/eba.c
2167 ++++ b/drivers/mtd/ubi/eba.c
2168 +@@ -1261,7 +1261,8 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
2169 + * during re-size.
2170 + */
2171 + ubi_scan_move_to_list(sv, seb, &si->erase);
2172 +- vol->eba_tbl[seb->lnum] = seb->pnum;
2173 ++ else
2174 ++ vol->eba_tbl[seb->lnum] = seb->pnum;
2175 + }
2176 + }
2177 +
2178 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2179 +index 7c1a9bf8ac86..284d144ff5a6 100644
2180 +--- a/drivers/mtd/ubi/wl.c
2181 ++++ b/drivers/mtd/ubi/wl.c
2182 +@@ -666,7 +666,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2183 + int cancel)
2184 + {
2185 + int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
2186 +- int vol_id = -1, uninitialized_var(lnum);
2187 ++ int vol_id = -1, lnum = -1;
2188 + struct ubi_wl_entry *e1, *e2;
2189 + struct ubi_vid_hdr *vid_hdr;
2190 +
2191 +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
2192 +index 8d8908d2a9b1..b7a7524c8693 100644
2193 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
2194 ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
2195 +@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
2196 + static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
2197 + struct e1000_rx_ring *rx_ring,
2198 + int *work_done, int work_to_do);
2199 ++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
2200 ++ struct e1000_rx_ring *rx_ring,
2201 ++ int cleaned_count)
2202 ++{
2203 ++}
2204 + static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
2205 + struct e1000_rx_ring *rx_ring,
2206 + int cleaned_count);
2207 +@@ -3545,8 +3550,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
2208 + msleep(1);
2209 + /* e1000_down has a dependency on max_frame_size */
2210 + hw->max_frame_size = max_frame;
2211 +- if (netif_running(netdev))
2212 ++ if (netif_running(netdev)) {
2213 ++ /* prevent buffers from being reallocated */
2214 ++ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
2215 + e1000_down(adapter);
2216 ++ }
2217 +
2218 + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2219 + * means we reserve 2 more, this pushes us to allocate from the next
2220 +diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2221 +index 2c4cdcecbe39..091c85f461a1 100644
2222 +--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2223 ++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2224 +@@ -357,6 +357,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
2225 + {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
2226 + {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
2227 + {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
2228 ++ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
2229 + {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
2230 + {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
2231 + {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
2232 +diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
2233 +index 2b6faa069e9a..9b6cb583883a 100644
2234 +--- a/drivers/net/wireless/rtlwifi/usb.c
2235 ++++ b/drivers/net/wireless/rtlwifi/usb.c
2236 +@@ -119,7 +119,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
2237 +
2238 + do {
2239 + status = usb_control_msg(udev, pipe, request, reqtype, value,
2240 +- index, pdata, len, 0); /*max. timeout*/
2241 ++ index, pdata, len, 1000);
2242 + if (status < 0) {
2243 + /* firmware download is checksumed, don't retry */
2244 + if ((value >= FW_8192C_START_ADDRESS &&
2245 +diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
2246 +index 1887e2f166a4..67938889f5b6 100644
2247 +--- a/drivers/platform/x86/compal-laptop.c
2248 ++++ b/drivers/platform/x86/compal-laptop.c
2249 +@@ -1047,7 +1047,13 @@ static int __devinit compal_probe(struct platform_device *pdev)
2250 +
2251 + /* Power supply */
2252 + initialize_power_supply_data(data);
2253 +- power_supply_register(&compal_device->dev, &data->psy);
2254 ++ err = power_supply_register(&compal_device->dev, &data->psy);
2255 ++ if (err < 0) {
2256 ++ hwmon_device_unregister(data->hwmon_dev);
2257 ++ sysfs_remove_group(&pdev->dev.kobj,
2258 ++ &compal_attribute_group);
2259 ++ kfree(data);
2260 ++ }
2261 +
2262 + platform_set_drvdata(pdev, data);
2263 +
2264 +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
2265 +index 3868ab2397c6..fb37df690b9a 100644
2266 +--- a/drivers/scsi/3w-9xxx.c
2267 ++++ b/drivers/scsi/3w-9xxx.c
2268 +@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
2269 + static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
2270 + static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
2271 + static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
2272 +-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
2273 +
2274 + /* Functions */
2275 +
2276 +@@ -1352,11 +1351,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
2277 + }
2278 +
2279 + /* Now complete the io */
2280 ++ scsi_dma_unmap(cmd);
2281 ++ cmd->scsi_done(cmd);
2282 + tw_dev->state[request_id] = TW_S_COMPLETED;
2283 + twa_free_request_id(tw_dev, request_id);
2284 + tw_dev->posted_request_count--;
2285 +- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2286 +- twa_unmap_scsi_data(tw_dev, request_id);
2287 + }
2288 +
2289 + /* Check for valid status after each drain */
2290 +@@ -1414,26 +1413,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
2291 + }
2292 + } /* End twa_load_sgl() */
2293 +
2294 +-/* This function will perform a pci-dma mapping for a scatter gather list */
2295 +-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
2296 +-{
2297 +- int use_sg;
2298 +- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2299 +-
2300 +- use_sg = scsi_dma_map(cmd);
2301 +- if (!use_sg)
2302 +- return 0;
2303 +- else if (use_sg < 0) {
2304 +- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
2305 +- return 0;
2306 +- }
2307 +-
2308 +- cmd->SCp.phase = TW_PHASE_SGLIST;
2309 +- cmd->SCp.have_data_in = use_sg;
2310 +-
2311 +- return use_sg;
2312 +-} /* End twa_map_scsi_sg_data() */
2313 +-
2314 + /* This function will poll for a response interrupt of a request */
2315 + static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
2316 + {
2317 +@@ -1612,9 +1591,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
2318 + (tw_dev->state[i] != TW_S_INITIAL) &&
2319 + (tw_dev->state[i] != TW_S_COMPLETED)) {
2320 + if (tw_dev->srb[i]) {
2321 +- tw_dev->srb[i]->result = (DID_RESET << 16);
2322 +- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
2323 +- twa_unmap_scsi_data(tw_dev, i);
2324 ++ struct scsi_cmnd *cmd = tw_dev->srb[i];
2325 ++
2326 ++ cmd->result = (DID_RESET << 16);
2327 ++ scsi_dma_unmap(cmd);
2328 ++ cmd->scsi_done(cmd);
2329 + }
2330 + }
2331 + }
2332 +@@ -1793,21 +1774,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
2333 + /* Save the scsi command for use by the ISR */
2334 + tw_dev->srb[request_id] = SCpnt;
2335 +
2336 +- /* Initialize phase to zero */
2337 +- SCpnt->SCp.phase = TW_PHASE_INITIAL;
2338 +-
2339 + retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
2340 + switch (retval) {
2341 + case SCSI_MLQUEUE_HOST_BUSY:
2342 ++ scsi_dma_unmap(SCpnt);
2343 + twa_free_request_id(tw_dev, request_id);
2344 +- twa_unmap_scsi_data(tw_dev, request_id);
2345 + break;
2346 + case 1:
2347 +- tw_dev->state[request_id] = TW_S_COMPLETED;
2348 +- twa_free_request_id(tw_dev, request_id);
2349 +- twa_unmap_scsi_data(tw_dev, request_id);
2350 + SCpnt->result = (DID_ERROR << 16);
2351 ++ scsi_dma_unmap(SCpnt);
2352 + done(SCpnt);
2353 ++ tw_dev->state[request_id] = TW_S_COMPLETED;
2354 ++ twa_free_request_id(tw_dev, request_id);
2355 + retval = 0;
2356 + }
2357 + out:
2358 +@@ -1875,8 +1853,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
2359 + command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
2360 + command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
2361 + } else {
2362 +- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
2363 +- if (sg_count == 0)
2364 ++ sg_count = scsi_dma_map(srb);
2365 ++ if (sg_count < 0)
2366 + goto out;
2367 +
2368 + scsi_for_each_sg(srb, sg, sg_count, i) {
2369 +@@ -1991,15 +1969,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
2370 + return(table[index].text);
2371 + } /* End twa_string_lookup() */
2372 +
2373 +-/* This function will perform a pci-dma unmap */
2374 +-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
2375 +-{
2376 +- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2377 +-
2378 +- if (cmd->SCp.phase == TW_PHASE_SGLIST)
2379 +- scsi_dma_unmap(cmd);
2380 +-} /* End twa_unmap_scsi_data() */
2381 +-
2382 + /* This function gets called when a disk is coming on-line */
2383 + static int twa_slave_configure(struct scsi_device *sdev)
2384 + {
2385 +diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
2386 +index 040f7214e5b7..0fdc83cfa0e1 100644
2387 +--- a/drivers/scsi/3w-9xxx.h
2388 ++++ b/drivers/scsi/3w-9xxx.h
2389 +@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
2390 + #define TW_CURRENT_DRIVER_BUILD 0
2391 + #define TW_CURRENT_DRIVER_BRANCH 0
2392 +
2393 +-/* Phase defines */
2394 +-#define TW_PHASE_INITIAL 0
2395 +-#define TW_PHASE_SINGLE 1
2396 +-#define TW_PHASE_SGLIST 2
2397 +-
2398 + /* Misc defines */
2399 + #define TW_9550SX_DRAIN_COMPLETED 0xFFFF
2400 + #define TW_SECTOR_SIZE 512
2401 +diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
2402 +index 13e39e1fdfe2..c555ccb119d1 100644
2403 +--- a/drivers/scsi/3w-sas.c
2404 ++++ b/drivers/scsi/3w-sas.c
2405 +@@ -303,26 +303,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
2406 + return 0;
2407 + } /* End twl_post_command_packet() */
2408 +
2409 +-/* This function will perform a pci-dma mapping for a scatter gather list */
2410 +-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
2411 +-{
2412 +- int use_sg;
2413 +- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2414 +-
2415 +- use_sg = scsi_dma_map(cmd);
2416 +- if (!use_sg)
2417 +- return 0;
2418 +- else if (use_sg < 0) {
2419 +- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
2420 +- return 0;
2421 +- }
2422 +-
2423 +- cmd->SCp.phase = TW_PHASE_SGLIST;
2424 +- cmd->SCp.have_data_in = use_sg;
2425 +-
2426 +- return use_sg;
2427 +-} /* End twl_map_scsi_sg_data() */
2428 +-
2429 + /* This function hands scsi cdb's to the firmware */
2430 + static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
2431 + {
2432 +@@ -370,8 +350,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
2433 + if (!sglistarg) {
2434 + /* Map sglist from scsi layer to cmd packet */
2435 + if (scsi_sg_count(srb)) {
2436 +- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
2437 +- if (sg_count == 0)
2438 ++ sg_count = scsi_dma_map(srb);
2439 ++ if (sg_count <= 0)
2440 + goto out;
2441 +
2442 + scsi_for_each_sg(srb, sg, sg_count, i) {
2443 +@@ -1116,15 +1096,6 @@ out:
2444 + return retval;
2445 + } /* End twl_initialize_device_extension() */
2446 +
2447 +-/* This function will perform a pci-dma unmap */
2448 +-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
2449 +-{
2450 +- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2451 +-
2452 +- if (cmd->SCp.phase == TW_PHASE_SGLIST)
2453 +- scsi_dma_unmap(cmd);
2454 +-} /* End twl_unmap_scsi_data() */
2455 +-
2456 + /* This function will handle attention interrupts */
2457 + static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
2458 + {
2459 +@@ -1265,11 +1236,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
2460 + }
2461 +
2462 + /* Now complete the io */
2463 ++ scsi_dma_unmap(cmd);
2464 ++ cmd->scsi_done(cmd);
2465 + tw_dev->state[request_id] = TW_S_COMPLETED;
2466 + twl_free_request_id(tw_dev, request_id);
2467 + tw_dev->posted_request_count--;
2468 +- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2469 +- twl_unmap_scsi_data(tw_dev, request_id);
2470 + }
2471 +
2472 + /* Check for another response interrupt */
2473 +@@ -1414,10 +1385,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
2474 + if ((tw_dev->state[i] != TW_S_FINISHED) &&
2475 + (tw_dev->state[i] != TW_S_INITIAL) &&
2476 + (tw_dev->state[i] != TW_S_COMPLETED)) {
2477 +- if (tw_dev->srb[i]) {
2478 +- tw_dev->srb[i]->result = (DID_RESET << 16);
2479 +- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
2480 +- twl_unmap_scsi_data(tw_dev, i);
2481 ++ struct scsi_cmnd *cmd = tw_dev->srb[i];
2482 ++
2483 ++ if (cmd) {
2484 ++ cmd->result = (DID_RESET << 16);
2485 ++ scsi_dma_unmap(cmd);
2486 ++ cmd->scsi_done(cmd);
2487 + }
2488 + }
2489 + }
2490 +@@ -1521,9 +1494,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
2491 + /* Save the scsi command for use by the ISR */
2492 + tw_dev->srb[request_id] = SCpnt;
2493 +
2494 +- /* Initialize phase to zero */
2495 +- SCpnt->SCp.phase = TW_PHASE_INITIAL;
2496 +-
2497 + retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
2498 + if (retval) {
2499 + tw_dev->state[request_id] = TW_S_COMPLETED;
2500 +diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
2501 +index d474892701d4..fec6449c7595 100644
2502 +--- a/drivers/scsi/3w-sas.h
2503 ++++ b/drivers/scsi/3w-sas.h
2504 +@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
2505 + #define TW_CURRENT_DRIVER_BUILD 0
2506 + #define TW_CURRENT_DRIVER_BRANCH 0
2507 +
2508 +-/* Phase defines */
2509 +-#define TW_PHASE_INITIAL 0
2510 +-#define TW_PHASE_SGLIST 2
2511 +-
2512 + /* Misc defines */
2513 + #define TW_SECTOR_SIZE 512
2514 + #define TW_MAX_UNITS 32
2515 +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
2516 +index 7fe96ff60c58..86bc5f9212d6 100644
2517 +--- a/drivers/scsi/3w-xxxx.c
2518 ++++ b/drivers/scsi/3w-xxxx.c
2519 +@@ -1283,32 +1283,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
2520 + return 0;
2521 + } /* End tw_initialize_device_extension() */
2522 +
2523 +-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
2524 +-{
2525 +- int use_sg;
2526 +-
2527 +- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
2528 +-
2529 +- use_sg = scsi_dma_map(cmd);
2530 +- if (use_sg < 0) {
2531 +- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
2532 +- return 0;
2533 +- }
2534 +-
2535 +- cmd->SCp.phase = TW_PHASE_SGLIST;
2536 +- cmd->SCp.have_data_in = use_sg;
2537 +-
2538 +- return use_sg;
2539 +-} /* End tw_map_scsi_sg_data() */
2540 +-
2541 +-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
2542 +-{
2543 +- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
2544 +-
2545 +- if (cmd->SCp.phase == TW_PHASE_SGLIST)
2546 +- scsi_dma_unmap(cmd);
2547 +-} /* End tw_unmap_scsi_data() */
2548 +-
2549 + /* This function will reset a device extension */
2550 + static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
2551 + {
2552 +@@ -1331,8 +1305,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
2553 + srb = tw_dev->srb[i];
2554 + if (srb != NULL) {
2555 + srb->result = (DID_RESET << 16);
2556 +- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
2557 +- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
2558 ++ scsi_dma_unmap(srb);
2559 ++ srb->scsi_done(srb);
2560 + }
2561 + }
2562 + }
2563 +@@ -1779,8 +1753,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
2564 + command_packet->byte8.io.lba = lba;
2565 + command_packet->byte6.block_count = num_sectors;
2566 +
2567 +- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
2568 +- if (!use_sg)
2569 ++ use_sg = scsi_dma_map(srb);
2570 ++ if (use_sg <= 0)
2571 + return 1;
2572 +
2573 + scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
2574 +@@ -1967,9 +1941,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
2575 + /* Save the scsi command for use by the ISR */
2576 + tw_dev->srb[request_id] = SCpnt;
2577 +
2578 +- /* Initialize phase to zero */
2579 +- SCpnt->SCp.phase = TW_PHASE_INITIAL;
2580 +-
2581 + switch (*command) {
2582 + case READ_10:
2583 + case READ_6:
2584 +@@ -2196,12 +2167,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
2585 +
2586 + /* Now complete the io */
2587 + if ((error != TW_ISR_DONT_COMPLETE)) {
2588 ++ scsi_dma_unmap(tw_dev->srb[request_id]);
2589 ++ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2590 + tw_dev->state[request_id] = TW_S_COMPLETED;
2591 + tw_state_request_finish(tw_dev, request_id);
2592 + tw_dev->posted_request_count--;
2593 +- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2594 +-
2595 +- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
2596 + }
2597 + }
2598 +
2599 +diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
2600 +index 49dcf03c631a..1d31858766ce 100644
2601 +--- a/drivers/scsi/3w-xxxx.h
2602 ++++ b/drivers/scsi/3w-xxxx.h
2603 +@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
2604 + #define TW_AEN_SMART_FAIL 0x000F
2605 + #define TW_AEN_SBUF_FAIL 0x0024
2606 +
2607 +-/* Phase defines */
2608 +-#define TW_PHASE_INITIAL 0
2609 +-#define TW_PHASE_SINGLE 1
2610 +-#define TW_PHASE_SGLIST 2
2611 +-
2612 + /* Misc defines */
2613 + #define TW_ALIGNMENT_6000 64 /* 64 bytes */
2614 + #define TW_ALIGNMENT_7000 4 /* 4 bytes */
2615 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
2616 +index bfd87fab39aa..3e0f71c155a3 100644
2617 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
2618 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
2619 +@@ -1426,11 +1426,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2620 + fp_possible = io_info.fpOkForIo;
2621 + }
2622 +
2623 +- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
2624 ++ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
2625 + id by default, not CPU group id, otherwise all MSI-X queues won't
2626 + be utilized */
2627 + cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
2628 +- smp_processor_id() % instance->msix_vectors : 0;
2629 ++ raw_smp_processor_id() % instance->msix_vectors : 0;
2630 +
2631 + if (fp_possible) {
2632 + megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
2633 +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2634 +index cf8dfab9489f..28a635c66f3e 100644
2635 +--- a/drivers/scsi/scsi_devinfo.c
2636 ++++ b/drivers/scsi/scsi_devinfo.c
2637 +@@ -222,6 +222,7 @@ static struct {
2638 + {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2639 + {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2640 + {"Promise", "", NULL, BLIST_SPARSELUN},
2641 ++ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
2642 + {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
2643 + {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
2644 + {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
2645 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2646 +index 348840e80921..37abf4fdc8d7 100644
2647 +--- a/drivers/scsi/scsi_scan.c
2648 ++++ b/drivers/scsi/scsi_scan.c
2649 +@@ -888,6 +888,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
2650 + */
2651 + if (*bflags & BLIST_MAX_512)
2652 + blk_queue_max_hw_sectors(sdev->request_queue, 512);
2653 ++ /*
2654 ++ * Max 1024 sector transfer length for targets that report incorrect
2655 ++ * max/optimal lengths and relied on the old block layer safe default
2656 ++ */
2657 ++ else if (*bflags & BLIST_MAX_1024)
2658 ++ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
2659 +
2660 + /*
2661 + * Some devices may not want to have a start command automatically
2662 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2663 +index 7f6746a642e6..97aa6c647da9 100644
2664 +--- a/drivers/scsi/sd.c
2665 ++++ b/drivers/scsi/sd.c
2666 +@@ -1423,6 +1423,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
2667 + {
2668 + u64 start_lba = blk_rq_pos(scmd->request);
2669 + u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
2670 ++ u64 factor = scmd->device->sector_size / 512;
2671 + u64 bad_lba;
2672 + int info_valid;
2673 + /*
2674 +@@ -1444,16 +1445,10 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
2675 + if (scsi_bufflen(scmd) <= scmd->device->sector_size)
2676 + return 0;
2677 +
2678 +- if (scmd->device->sector_size < 512) {
2679 +- /* only legitimate sector_size here is 256 */
2680 +- start_lba <<= 1;
2681 +- end_lba <<= 1;
2682 +- } else {
2683 +- /* be careful ... don't want any overflows */
2684 +- u64 factor = scmd->device->sector_size / 512;
2685 +- do_div(start_lba, factor);
2686 +- do_div(end_lba, factor);
2687 +- }
2688 ++ /* be careful ... don't want any overflows */
2689 ++ factor = scmd->device->sector_size / 512;
2690 ++ do_div(start_lba, factor);
2691 ++ do_div(end_lba, factor);
2692 +
2693 + /* The bad lba was reported incorrectly, we have no idea where
2694 + * the error is.
2695 +@@ -1984,8 +1979,7 @@ got_data:
2696 + if (sector_size != 512 &&
2697 + sector_size != 1024 &&
2698 + sector_size != 2048 &&
2699 +- sector_size != 4096 &&
2700 +- sector_size != 256) {
2701 ++ sector_size != 4096) {
2702 + sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2703 + sector_size);
2704 + /*
2705 +@@ -2034,8 +2028,6 @@ got_data:
2706 + sdkp->capacity <<= 2;
2707 + else if (sector_size == 1024)
2708 + sdkp->capacity <<= 1;
2709 +- else if (sector_size == 256)
2710 +- sdkp->capacity >>= 1;
2711 +
2712 + blk_queue_physical_block_size(sdp->request_queue,
2713 + sdkp->physical_block_size);
2714 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2715 +index a0df0fbf2db9..4ae776995e2a 100644
2716 +--- a/drivers/scsi/storvsc_drv.c
2717 ++++ b/drivers/scsi/storvsc_drv.c
2718 +@@ -610,21 +610,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
2719 + if (bounce_sgl[j].length == PAGE_SIZE) {
2720 + /* full..move to next entry */
2721 + sg_kunmap_atomic(bounce_addr);
2722 ++ bounce_addr = 0;
2723 + j++;
2724 ++ }
2725 +
2726 +- /* if we need to use another bounce buffer */
2727 +- if (srclen || i != orig_sgl_count - 1)
2728 +- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
2729 ++ /* if we need to use another bounce buffer */
2730 ++ if (srclen && bounce_addr == 0)
2731 ++ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
2732 +
2733 +- } else if (srclen == 0 && i == orig_sgl_count - 1) {
2734 +- /* unmap the last bounce that is < PAGE_SIZE */
2735 +- sg_kunmap_atomic(bounce_addr);
2736 +- }
2737 + }
2738 +
2739 + sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
2740 + }
2741 +
2742 ++ if (bounce_addr)
2743 ++ sg_kunmap_atomic(bounce_addr);
2744 ++
2745 + local_irq_restore(flags);
2746 +
2747 + return total_copied;
2748 +diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
2749 +index 66bd576bb5e9..ca61576dc46a 100644
2750 +--- a/drivers/staging/ozwpan/ozusbsvc1.c
2751 ++++ b/drivers/staging/ozwpan/ozusbsvc1.c
2752 +@@ -314,7 +314,11 @@ void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
2753 + struct oz_multiple_fixed *body =
2754 + (struct oz_multiple_fixed *)data_hdr;
2755 + u8 *data = body->data;
2756 +- int n = (len - sizeof(struct oz_multiple_fixed)+1)
2757 ++ unsigned int n;
2758 ++ if (!body->unit_size ||
2759 ++ len < sizeof(struct oz_multiple_fixed) - 1)
2760 ++ break;
2761 ++ n = (len - (sizeof(struct oz_multiple_fixed) - 1))
2762 + / body->unit_size;
2763 + while (n--) {
2764 + oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
2765 +@@ -376,10 +380,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
2766 + case OZ_GET_DESC_RSP: {
2767 + struct oz_get_desc_rsp *body =
2768 + (struct oz_get_desc_rsp *)usb_hdr;
2769 +- int data_len = elt->length -
2770 +- sizeof(struct oz_get_desc_rsp) + 1;
2771 +- u16 offs = le16_to_cpu(get_unaligned(&body->offset));
2772 +- u16 total_size =
2773 ++ u16 offs, total_size;
2774 ++ u8 data_len;
2775 ++
2776 ++ if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
2777 ++ break;
2778 ++ data_len = elt->length -
2779 ++ (sizeof(struct oz_get_desc_rsp) - 1);
2780 ++ offs = le16_to_cpu(get_unaligned(&body->offset));
2781 ++ total_size =
2782 + le16_to_cpu(get_unaligned(&body->total_size));
2783 + oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
2784 + oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
2785 +diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
2786 +index 6183573f112f..5d4b4f07f4b8 100644
2787 +--- a/drivers/staging/panel/panel.c
2788 ++++ b/drivers/staging/panel/panel.c
2789 +@@ -273,11 +273,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
2790 + * LCD types
2791 + */
2792 + #define LCD_TYPE_NONE 0
2793 +-#define LCD_TYPE_OLD 1
2794 +-#define LCD_TYPE_KS0074 2
2795 +-#define LCD_TYPE_HANTRONIX 3
2796 +-#define LCD_TYPE_NEXCOM 4
2797 +-#define LCD_TYPE_CUSTOM 5
2798 ++#define LCD_TYPE_CUSTOM 1
2799 ++#define LCD_TYPE_OLD 2
2800 ++#define LCD_TYPE_KS0074 3
2801 ++#define LCD_TYPE_HANTRONIX 4
2802 ++#define LCD_TYPE_NEXCOM 5
2803 +
2804 + /*
2805 + * keypad types
2806 +@@ -455,8 +455,7 @@ MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
2807 + static int lcd_type = -1;
2808 + module_param(lcd_type, int, 0000);
2809 + MODULE_PARM_DESC(lcd_type,
2810 +- "LCD type: 0=none, 1=old //, 2=serial ks0074, "
2811 +- "3=hantronix //, 4=nexcom //, 5=compiled-in");
2812 ++ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
2813 +
2814 + static int lcd_proto = -1;
2815 + module_param(lcd_proto, int, 0000);
2816 +diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
2817 +index 808267456eed..ec7e71c1e86c 100644
2818 +--- a/drivers/target/target_core_pscsi.c
2819 ++++ b/drivers/target/target_core_pscsi.c
2820 +@@ -567,6 +567,7 @@ static struct se_device *pscsi_create_virtdevice(
2821 + " pdv_host_id: %d\n", pdv->pdv_host_id);
2822 + return ERR_PTR(-EINVAL);
2823 + }
2824 ++ pdv->pdv_lld_host = sh;
2825 + }
2826 + } else {
2827 + if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
2828 +@@ -653,6 +654,8 @@ static void pscsi_free_device(void *p)
2829 + if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
2830 + (phv->phv_lld_host != NULL))
2831 + scsi_host_put(phv->phv_lld_host);
2832 ++ else if (pdv->pdv_lld_host)
2833 ++ scsi_host_put(pdv->pdv_lld_host);
2834 +
2835 + if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
2836 + scsi_device_put(sd);
2837 +diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
2838 +index 43f1c419e8e5..6c23c9443dd4 100644
2839 +--- a/drivers/target/target_core_pscsi.h
2840 ++++ b/drivers/target/target_core_pscsi.h
2841 +@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
2842 + int pdv_lun_id;
2843 + struct block_device *pdv_bd;
2844 + struct scsi_device *pdv_sd;
2845 ++ struct Scsi_Host *pdv_lld_host;
2846 + struct se_hba *pdv_se_hba;
2847 + } ____cacheline_aligned;
2848 +
2849 +diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
2850 +index 944eaeb8e0cf..160b1f38c5df 100644
2851 +--- a/drivers/tty/hvc/hvc_xen.c
2852 ++++ b/drivers/tty/hvc/hvc_xen.c
2853 +@@ -290,7 +290,7 @@ static int xen_initial_domain_console_init(void)
2854 + return -ENOMEM;
2855 + }
2856 +
2857 +- info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
2858 ++ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
2859 + info->vtermno = HVC_COOKIE;
2860 +
2861 + spin_lock(&xencons_lock);
2862 +@@ -300,11 +300,27 @@ static int xen_initial_domain_console_init(void)
2863 + return 0;
2864 + }
2865 +
2866 ++static void xen_console_update_evtchn(struct xencons_info *info)
2867 ++{
2868 ++ if (xen_hvm_domain()) {
2869 ++ uint64_t v;
2870 ++ int err;
2871 ++
2872 ++ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
2873 ++ if (!err && v)
2874 ++ info->evtchn = v;
2875 ++ } else
2876 ++ info->evtchn = xen_start_info->console.domU.evtchn;
2877 ++}
2878 ++
2879 + void xen_console_resume(void)
2880 + {
2881 + struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
2882 +- if (info != NULL && info->irq)
2883 ++ if (info != NULL && info->irq) {
2884 ++ if (!xen_initial_domain())
2885 ++ xen_console_update_evtchn(info);
2886 + rebind_evtchn_irq(info->evtchn, info->irq);
2887 ++ }
2888 + }
2889 +
2890 + static void xencons_disconnect_backend(struct xencons_info *info)
2891 +diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
2892 +index 6563cad09293..746e771c2789 100644
2893 +--- a/drivers/tty/serial/of_serial.c
2894 ++++ b/drivers/tty/serial/of_serial.c
2895 +@@ -192,7 +192,6 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
2896 + { .compatible = "ibm,qpace-nwp-serial",
2897 + .data = (void *)PORT_NWPSERIAL, },
2898 + #endif
2899 +- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
2900 + { /* end of list */ },
2901 + };
2902 +
2903 +diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
2904 +index 6cd414341d5e..d9706e734b76 100644
2905 +--- a/drivers/tty/serial/uartlite.c
2906 ++++ b/drivers/tty/serial/uartlite.c
2907 +@@ -573,7 +573,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
2908 +
2909 + static int __devinit ulite_probe(struct platform_device *pdev)
2910 + {
2911 +- struct resource *res, *res2;
2912 ++ struct resource *res;
2913 ++ int irq;
2914 + int id = pdev->id;
2915 + #ifdef CONFIG_OF
2916 + const __be32 *prop;
2917 +@@ -587,11 +588,11 @@ static int __devinit ulite_probe(struct platform_device *pdev)
2918 + if (!res)
2919 + return -ENODEV;
2920 +
2921 +- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2922 +- if (!res2)
2923 +- return -ENODEV;
2924 ++ irq = platform_get_irq(pdev, 0);
2925 ++ if (irq <= 0)
2926 ++ return -ENXIO;
2927 +
2928 +- return ulite_assign(&pdev->dev, id, res->start, res2->start);
2929 ++ return ulite_assign(&pdev->dev, id, res->start, irq);
2930 + }
2931 +
2932 + static int __devexit ulite_remove(struct platform_device *pdev)
2933 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
2934 +index b627363352e5..778c39a85962 100644
2935 +--- a/drivers/tty/serial/xilinx_uartps.c
2936 ++++ b/drivers/tty/serial/xilinx_uartps.c
2937 +@@ -941,9 +941,9 @@ static struct uart_driver xuartps_uart_driver = {
2938 + **/
2939 + static int __devinit xuartps_probe(struct platform_device *pdev)
2940 + {
2941 +- int rc;
2942 ++ int rc, irq;
2943 + struct uart_port *port;
2944 +- struct resource *res, *res2;
2945 ++ struct resource *res;
2946 + int clk = 0;
2947 +
2948 + #ifdef CONFIG_OF
2949 +@@ -964,9 +964,9 @@ static int __devinit xuartps_probe(struct platform_device *pdev)
2950 + if (!res)
2951 + return -ENODEV;
2952 +
2953 +- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2954 +- if (!res2)
2955 +- return -ENODEV;
2956 ++ irq = platform_get_irq(pdev, 0);
2957 ++ if (irq <= 0)
2958 ++ return -ENXIO;
2959 +
2960 + /* Initialize the port structure */
2961 + port = xuartps_get_port();
2962 +@@ -980,7 +980,7 @@ static int __devinit xuartps_probe(struct platform_device *pdev)
2963 + * and triggers invocation of the config_port() entry point.
2964 + */
2965 + port->mapbase = res->start;
2966 +- port->irq = res2->start;
2967 ++ port->irq = irq;
2968 + port->dev = &pdev->dev;
2969 + port->uartclk = clk;
2970 + dev_set_drvdata(&pdev->dev, port);
2971 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2972 +index e5fa34e5423f..c65c93959f89 100644
2973 +--- a/drivers/usb/class/cdc-wdm.c
2974 ++++ b/drivers/usb/class/cdc-wdm.c
2975 +@@ -268,7 +268,7 @@ static void wdm_int_callback(struct urb *urb)
2976 + case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
2977 + dev_dbg(&desc->intf->dev,
2978 + "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
2979 +- dr->wIndex, dr->wLength);
2980 ++ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
2981 + break;
2982 +
2983 + case USB_CDC_NOTIFY_NETWORK_CONNECTION:
2984 +@@ -281,7 +281,9 @@ static void wdm_int_callback(struct urb *urb)
2985 + clear_bit(WDM_POLL_RUNNING, &desc->flags);
2986 + dev_err(&desc->intf->dev,
2987 + "unknown notification %d received: index %d len %d\n",
2988 +- dr->bNotificationType, dr->wIndex, dr->wLength);
2989 ++ dr->bNotificationType,
2990 ++ le16_to_cpu(dr->wIndex),
2991 ++ le16_to_cpu(dr->wLength));
2992 + goto exit;
2993 + }
2994 +
2995 +@@ -425,7 +427,7 @@ static ssize_t wdm_write
2996 + USB_RECIP_INTERFACE);
2997 + req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
2998 + req->wValue = 0;
2999 +- req->wIndex = desc->inum;
3000 ++ req->wIndex = desc->inum; /* already converted */
3001 + req->wLength = cpu_to_le16(count);
3002 + set_bit(WDM_IN_USE, &desc->flags);
3003 + desc->outbuf = buf;
3004 +@@ -438,7 +440,7 @@ static ssize_t wdm_write
3005 + dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
3006 + } else {
3007 + dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
3008 +- req->wIndex);
3009 ++ le16_to_cpu(req->wIndex));
3010 + }
3011 + out:
3012 + usb_autopm_put_interface(desc->intf);
3013 +@@ -782,7 +784,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
3014 + desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
3015 + desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
3016 + desc->irq->wValue = 0;
3017 +- desc->irq->wIndex = desc->inum;
3018 ++ desc->irq->wIndex = desc->inum; /* already converted */
3019 + desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
3020 +
3021 + usb_fill_control_urb(
3022 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3023 +index e47a4e12b297..5e93425424f6 100644
3024 +--- a/drivers/usb/host/xhci-ring.c
3025 ++++ b/drivers/usb/host/xhci-ring.c
3026 +@@ -2129,8 +2129,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
3027 + break;
3028 + case COMP_DEV_ERR:
3029 + case COMP_STALL:
3030 ++ frame->status = -EPROTO;
3031 ++ skip_td = true;
3032 ++ break;
3033 + case COMP_TX_ERR:
3034 + frame->status = -EPROTO;
3035 ++ if (event_trb != td->last_trb)
3036 ++ return 0;
3037 + skip_td = true;
3038 + break;
3039 + case COMP_STOP:
3040 +@@ -2738,7 +2743,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
3041 + xhci_halt(xhci);
3042 + hw_died:
3043 + spin_unlock(&xhci->lock);
3044 +- return -ESHUTDOWN;
3045 ++ return IRQ_HANDLED;
3046 + }
3047 +
3048 + /*
3049 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3050 +index 80b3d8559b09..855f084a9a32 100644
3051 +--- a/drivers/usb/host/xhci.h
3052 ++++ b/drivers/usb/host/xhci.h
3053 +@@ -1233,7 +1233,7 @@ union xhci_trb {
3054 + * since the command ring is 64-byte aligned.
3055 + * It must also be greater than 16.
3056 + */
3057 +-#define TRBS_PER_SEGMENT 64
3058 ++#define TRBS_PER_SEGMENT 256
3059 + /* Allow two commands + a link TRB, along with any reserved command TRBs */
3060 + #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
3061 + #define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
3062 +diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
3063 +index a2b4008dc069..d3481c497be9 100644
3064 +--- a/drivers/usb/musb/musb_core.c
3065 ++++ b/drivers/usb/musb/musb_core.c
3066 +@@ -1544,16 +1544,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
3067 + (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
3068 + musb->int_usb, musb->int_tx, musb->int_rx);
3069 +
3070 +- /* the core can interrupt us for multiple reasons; docs have
3071 +- * a generic interrupt flowchart to follow
3072 ++ /**
3073 ++ * According to Mentor Graphics' documentation, flowchart on page 98,
3074 ++ * IRQ should be handled as follows:
3075 ++ *
3076 ++ * . Resume IRQ
3077 ++ * . Session Request IRQ
3078 ++ * . VBUS Error IRQ
3079 ++ * . Suspend IRQ
3080 ++ * . Connect IRQ
3081 ++ * . Disconnect IRQ
3082 ++ * . Reset/Babble IRQ
3083 ++ * . SOF IRQ (we're not using this one)
3084 ++ * . Endpoint 0 IRQ
3085 ++ * . TX Endpoints
3086 ++ * . RX Endpoints
3087 ++ *
3088 ++ * We will be following that flowchart in order to avoid any problems
3089 ++ * that might arise with internal Finite State Machine.
3090 + */
3091 ++
3092 + if (musb->int_usb)
3093 + retval |= musb_stage0_irq(musb, musb->int_usb,
3094 + devctl, power);
3095 +
3096 +- /* "stage 1" is handling endpoint irqs */
3097 +-
3098 +- /* handle endpoint 0 first */
3099 + if (musb->int_tx & 1) {
3100 + if (devctl & MUSB_DEVCTL_HM)
3101 + retval |= musb_h_ep0_irq(musb);
3102 +@@ -1561,43 +1575,37 @@ irqreturn_t musb_interrupt(struct musb *musb)
3103 + retval |= musb_g_ep0_irq(musb);
3104 + }
3105 +
3106 +- /* RX on endpoints 1-15 */
3107 +- reg = musb->int_rx >> 1;
3108 ++ reg = musb->int_tx >> 1;
3109 + ep_num = 1;
3110 + while (reg) {
3111 + if (reg & 1) {
3112 +- /* musb_ep_select(musb->mregs, ep_num); */
3113 +- /* REVISIT just retval = ep->rx_irq(...) */
3114 + retval = IRQ_HANDLED;
3115 + if (devctl & MUSB_DEVCTL_HM) {
3116 + if (is_host_capable())
3117 +- musb_host_rx(musb, ep_num);
3118 ++ musb_host_tx(musb, ep_num);
3119 + } else {
3120 + if (is_peripheral_capable())
3121 +- musb_g_rx(musb, ep_num);
3122 ++ musb_g_tx(musb, ep_num);
3123 + }
3124 + }
3125 +-
3126 + reg >>= 1;
3127 + ep_num++;
3128 + }
3129 +
3130 +- /* TX on endpoints 1-15 */
3131 +- reg = musb->int_tx >> 1;
3132 ++ reg = musb->int_rx >> 1;
3133 + ep_num = 1;
3134 + while (reg) {
3135 + if (reg & 1) {
3136 +- /* musb_ep_select(musb->mregs, ep_num); */
3137 +- /* REVISIT just retval |= ep->tx_irq(...) */
3138 + retval = IRQ_HANDLED;
3139 + if (devctl & MUSB_DEVCTL_HM) {
3140 + if (is_host_capable())
3141 +- musb_host_tx(musb, ep_num);
3142 ++ musb_host_rx(musb, ep_num);
3143 + } else {
3144 + if (is_peripheral_capable())
3145 +- musb_g_tx(musb, ep_num);
3146 ++ musb_g_rx(musb, ep_num);
3147 + }
3148 + }
3149 ++
3150 + reg >>= 1;
3151 + ep_num++;
3152 + }
3153 +diff --git a/drivers/usb/otg/otg_fsm.c b/drivers/usb/otg/otg_fsm.c
3154 +index ade131a8ae5e..e87edda1fe22 100644
3155 +--- a/drivers/usb/otg/otg_fsm.c
3156 ++++ b/drivers/usb/otg/otg_fsm.c
3157 +@@ -138,9 +138,9 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
3158 + break;
3159 + case OTG_STATE_B_PERIPHERAL:
3160 + otg_chrg_vbus(fsm, 0);
3161 +- otg_loc_conn(fsm, 1);
3162 + otg_loc_sof(fsm, 0);
3163 + otg_set_protocol(fsm, PROTO_GADGET);
3164 ++ otg_loc_conn(fsm, 1);
3165 + break;
3166 + case OTG_STATE_B_WAIT_ACON:
3167 + otg_chrg_vbus(fsm, 0);
3168 +@@ -200,10 +200,10 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
3169 +
3170 + break;
3171 + case OTG_STATE_A_PERIPHERAL:
3172 +- otg_loc_conn(fsm, 1);
3173 + otg_loc_sof(fsm, 0);
3174 + otg_set_protocol(fsm, PROTO_GADGET);
3175 + otg_drv_vbus(fsm, 1);
3176 ++ otg_loc_conn(fsm, 1);
3177 + break;
3178 + case OTG_STATE_A_WAIT_VFALL:
3179 + otg_drv_vbus(fsm, 0);
3180 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
3181 +index 5c289fc2bc5a..29bf38309252 100644
3182 +--- a/drivers/usb/serial/cp210x.c
3183 ++++ b/drivers/usb/serial/cp210x.c
3184 +@@ -133,6 +133,8 @@ static const struct usb_device_id id_table[] = {
3185 + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
3186 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
3187 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
3188 ++ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
3189 ++ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
3190 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
3191 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
3192 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
3193 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3194 +index 06abee68cb4f..1e4899c2d5f1 100644
3195 +--- a/drivers/usb/serial/ftdi_sio.c
3196 ++++ b/drivers/usb/serial/ftdi_sio.c
3197 +@@ -723,6 +723,7 @@ static struct usb_device_id id_table_combined [] = {
3198 + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
3199 + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
3200 + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
3201 ++ { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
3202 + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
3203 + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
3204 + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
3205 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3206 +index 105b9826d8d6..1fee973f100a 100644
3207 +--- a/drivers/usb/serial/ftdi_sio_ids.h
3208 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
3209 +@@ -155,6 +155,7 @@
3210 + #define XSENS_AWINDA_STATION_PID 0x0101
3211 + #define XSENS_AWINDA_DONGLE_PID 0x0102
3212 + #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
3213 ++#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
3214 + #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
3215 +
3216 + /* Xsens devices using FTDI VID */
3217 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3218 +index 3e450b4e9c5f..723ed876f318 100644
3219 +--- a/drivers/usb/serial/pl2303.c
3220 ++++ b/drivers/usb/serial/pl2303.c
3221 +@@ -67,7 +67,6 @@ static const struct usb_device_id id_table[] = {
3222 + { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
3223 + { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
3224 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
3225 +- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
3226 + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) },
3227 + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) },
3228 + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) },
3229 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
3230 +index 71fd9da1d6e7..e3b7af8adfb7 100644
3231 +--- a/drivers/usb/serial/pl2303.h
3232 ++++ b/drivers/usb/serial/pl2303.h
3233 +@@ -62,10 +62,6 @@
3234 + #define ALCATEL_VENDOR_ID 0x11f7
3235 + #define ALCATEL_PRODUCT_ID 0x02df
3236 +
3237 +-/* Samsung I330 phone cradle */
3238 +-#define SAMSUNG_VENDOR_ID 0x04e8
3239 +-#define SAMSUNG_PRODUCT_ID 0x8001
3240 +-
3241 + #define SIEMENS_VENDOR_ID 0x11f5
3242 + #define SIEMENS_PRODUCT_ID_SX1 0x0001
3243 + #define SIEMENS_PRODUCT_ID_X65 0x0003
3244 +diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
3245 +index b3afd19341f1..c5cf350ea8ce 100644
3246 +--- a/drivers/usb/serial/visor.c
3247 ++++ b/drivers/usb/serial/visor.c
3248 +@@ -101,7 +101,7 @@ static struct usb_device_id id_table [] = {
3249 + .driver_info = (kernel_ulong_t)&palm_os_4_probe },
3250 + { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
3251 + .driver_info = (kernel_ulong_t)&palm_os_4_probe },
3252 +- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
3253 ++ { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
3254 + .driver_info = (kernel_ulong_t)&palm_os_4_probe },
3255 + { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
3256 + .driver_info = (kernel_ulong_t)&palm_os_4_probe },
3257 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
3258 +index 25174beb8d8d..d0ecaf9ff415 100644
3259 +--- a/drivers/usb/storage/unusual_devs.h
3260 ++++ b/drivers/usb/storage/unusual_devs.h
3261 +@@ -752,6 +752,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
3262 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3263 + US_FL_GO_SLOW ),
3264 +
3265 ++/* Reported by Christian Schaller <cschalle@××××××.com> */
3266 ++UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000,
3267 ++ "LaCie",
3268 ++ "External HDD",
3269 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
3270 ++ US_FL_NO_WP_DETECT ),
3271 ++
3272 + /* Submitted by Joel Bourquard <numlock@××××××××.ch>
3273 + * Some versions of this device need the SubClass and Protocol overrides
3274 + * while others don't.
3275 +diff --git a/drivers/xen/events.c b/drivers/xen/events.c
3276 +index 9161f06564eb..d6e2deee7bf0 100644
3277 +--- a/drivers/xen/events.c
3278 ++++ b/drivers/xen/events.c
3279 +@@ -563,8 +563,8 @@ static unsigned int __startup_pirq(unsigned int irq)
3280 + pirq_query_unmask(irq);
3281 +
3282 + evtchn_to_irq[evtchn] = irq;
3283 +- bind_evtchn_to_cpu(evtchn, 0);
3284 + info->evtchn = evtchn;
3285 ++ bind_evtchn_to_cpu(evtchn, 0);
3286 +
3287 + out:
3288 + unmask_evtchn(evtchn);
3289 +@@ -906,7 +906,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
3290 + return rc;
3291 + }
3292 +
3293 +-int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
3294 ++int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
3295 + {
3296 + struct evtchn_bind_virq bind_virq;
3297 + int evtchn, irq, ret;
3298 +@@ -920,8 +920,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
3299 + if (irq == -1)
3300 + goto out;
3301 +
3302 +- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
3303 +- handle_percpu_irq, "virq");
3304 ++ if (percpu)
3305 ++ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
3306 ++ handle_percpu_irq, "virq");
3307 ++ else
3308 ++ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
3309 ++ handle_edge_irq, "virq");
3310 +
3311 + bind_virq.virq = virq;
3312 + bind_virq.vcpu = cpu;
3313 +@@ -1042,7 +1046,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
3314 + {
3315 + int irq, retval;
3316 +
3317 +- irq = bind_virq_to_irq(virq, cpu);
3318 ++ irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
3319 + if (irq < 0)
3320 + return irq;
3321 + retval = request_irq(irq, handler, irqflags, devname, dev_id);
3322 +diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
3323 +index cf38e159131a..08d90e25abf0 100644
3324 +--- a/firmware/ihex2fw.c
3325 ++++ b/firmware/ihex2fw.c
3326 +@@ -86,6 +86,7 @@ int main(int argc, char **argv)
3327 + case 'j':
3328 + include_jump = 1;
3329 + break;
3330 ++ default:
3331 + return usage();
3332 + }
3333 + }
3334 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
3335 +index 6dacccef7790..a181b58cedd5 100644
3336 +--- a/fs/binfmt_elf.c
3337 ++++ b/fs/binfmt_elf.c
3338 +@@ -742,6 +742,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3339 + i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
3340 + int elf_prot = 0, elf_flags;
3341 + unsigned long k, vaddr;
3342 ++ unsigned long total_size = 0;
3343 +
3344 + if (elf_ppnt->p_type != PT_LOAD)
3345 + continue;
3346 +@@ -805,10 +806,16 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3347 + #else
3348 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
3349 + #endif
3350 ++ total_size = total_mapping_size(elf_phdata,
3351 ++ loc->elf_ex.e_phnum);
3352 ++ if (!total_size) {
3353 ++ error = -EINVAL;
3354 ++ goto out_free_dentry;
3355 ++ }
3356 + }
3357 +
3358 + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
3359 +- elf_prot, elf_flags, 0);
3360 ++ elf_prot, elf_flags, total_size);
3361 + if (BAD_ADDR(error)) {
3362 + send_sig(SIGKILL, current, 0);
3363 + retval = IS_ERR((void *)error) ?
3364 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3365 +index cf6e7f4a4f4c..f4576dc0cf29 100644
3366 +--- a/fs/btrfs/extent-tree.c
3367 ++++ b/fs/btrfs/extent-tree.c
3368 +@@ -5939,12 +5939,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
3369 + return -ENOSPC;
3370 + }
3371 +
3372 +- if (btrfs_test_opt(root, DISCARD))
3373 +- ret = btrfs_discard_extent(root, start, len, NULL);
3374 +-
3375 + if (pin)
3376 + pin_down_extent(root, cache, start, len, 1);
3377 + else {
3378 ++ if (btrfs_test_opt(root, DISCARD))
3379 ++ ret = btrfs_discard_extent(root, start, len, NULL);
3380 + btrfs_add_free_space(cache, start, len);
3381 + btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
3382 + }
3383 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3384 +index 3a65f4343416..d46c48187636 100644
3385 +--- a/fs/btrfs/ioctl.c
3386 ++++ b/fs/btrfs/ioctl.c
3387 +@@ -2374,6 +2374,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
3388 + if (off + len == src->i_size)
3389 + len = ALIGN(src->i_size, bs) - off;
3390 +
3391 ++ if (len == 0) {
3392 ++ ret = 0;
3393 ++ goto out_unlock;
3394 ++ }
3395 ++
3396 + /* verify the end result is block aligned */
3397 + if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
3398 + !IS_ALIGNED(destoff, bs))
3399 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
3400 +index e7a5659087e6..0ee73d128162 100644
3401 +--- a/fs/btrfs/xattr.c
3402 ++++ b/fs/btrfs/xattr.c
3403 +@@ -310,21 +310,40 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
3404 + /*
3405 + * Check if the attribute is in a supported namespace.
3406 + *
3407 +- * This applied after the check for the synthetic attributes in the system
3408 ++ * This is applied after the check for the synthetic attributes in the system
3409 + * namespace.
3410 + */
3411 +-static bool btrfs_is_valid_xattr(const char *name)
3412 ++static int btrfs_is_valid_xattr(const char *name)
3413 + {
3414 +- return !strncmp(name, XATTR_SECURITY_PREFIX,
3415 +- XATTR_SECURITY_PREFIX_LEN) ||
3416 +- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
3417 +- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
3418 +- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
3419 ++ int len = strlen(name);
3420 ++ int prefixlen = 0;
3421 ++
3422 ++ if (!strncmp(name, XATTR_SECURITY_PREFIX,
3423 ++ XATTR_SECURITY_PREFIX_LEN))
3424 ++ prefixlen = XATTR_SECURITY_PREFIX_LEN;
3425 ++ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
3426 ++ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
3427 ++ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
3428 ++ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
3429 ++ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
3430 ++ prefixlen = XATTR_USER_PREFIX_LEN;
3431 ++ else
3432 ++ return -EOPNOTSUPP;
3433 ++
3434 ++ /*
3435 ++ * The name cannot consist of just prefix
3436 ++ */
3437 ++ if (len <= prefixlen)
3438 ++ return -EINVAL;
3439 ++
3440 ++ return 0;
3441 + }
3442 +
3443 + ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
3444 + void *buffer, size_t size)
3445 + {
3446 ++ int ret;
3447 ++
3448 + /*
3449 + * If this is a request for a synthetic attribute in the system.*
3450 + * namespace use the generic infrastructure to resolve a handler
3451 +@@ -333,8 +352,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
3452 + if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
3453 + return generic_getxattr(dentry, name, buffer, size);
3454 +
3455 +- if (!btrfs_is_valid_xattr(name))
3456 +- return -EOPNOTSUPP;
3457 ++ ret = btrfs_is_valid_xattr(name);
3458 ++ if (ret)
3459 ++ return ret;
3460 + return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
3461 + }
3462 +
3463 +@@ -342,6 +362,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
3464 + size_t size, int flags)
3465 + {
3466 + struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
3467 ++ int ret;
3468 +
3469 + /*
3470 + * The permission on security.* and system.* is not checked
3471 +@@ -358,8 +379,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
3472 + if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
3473 + return generic_setxattr(dentry, name, value, size, flags);
3474 +
3475 +- if (!btrfs_is_valid_xattr(name))
3476 +- return -EOPNOTSUPP;
3477 ++ ret = btrfs_is_valid_xattr(name);
3478 ++ if (ret)
3479 ++ return ret;
3480 +
3481 + if (size == 0)
3482 + value = ""; /* empty EA, do not remove */
3483 +@@ -371,6 +393,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
3484 + int btrfs_removexattr(struct dentry *dentry, const char *name)
3485 + {
3486 + struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
3487 ++ int ret;
3488 +
3489 + /*
3490 + * The permission on security.* and system.* is not checked
3491 +@@ -387,8 +410,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
3492 + if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
3493 + return generic_removexattr(dentry, name);
3494 +
3495 +- if (!btrfs_is_valid_xattr(name))
3496 +- return -EOPNOTSUPP;
3497 ++ ret = btrfs_is_valid_xattr(name);
3498 ++ if (ret)
3499 ++ return ret;
3500 +
3501 + return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
3502 + XATTR_REPLACE);
3503 +diff --git a/fs/dcache.c b/fs/dcache.c
3504 +index 8038a780696f..d071ea768057 100644
3505 +--- a/fs/dcache.c
3506 ++++ b/fs/dcache.c
3507 +@@ -1204,13 +1204,13 @@ ascend:
3508 + /* might go back up the wrong parent if we have had a rename */
3509 + if (!locked && read_seqretry(&rename_lock, seq))
3510 + goto rename_retry;
3511 +- next = child->d_child.next;
3512 +- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
3513 ++ /* go into the first sibling still alive */
3514 ++ do {
3515 ++ next = child->d_child.next;
3516 + if (next == &this_parent->d_subdirs)
3517 + goto ascend;
3518 + child = list_entry(next, struct dentry, d_child);
3519 +- next = next->next;
3520 +- }
3521 ++ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
3522 + rcu_read_unlock();
3523 + goto resume;
3524 + }
3525 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3526 +index 4296a6f800a0..bbe09a975003 100644
3527 +--- a/fs/ext4/extents.c
3528 ++++ b/fs/ext4/extents.c
3529 +@@ -320,7 +320,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
3530 + ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
3531 + ext4_lblk_t last = lblock + len - 1;
3532 +
3533 +- if (lblock > last)
3534 ++ if (len == 0 || lblock > last)
3535 + return 0;
3536 + return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
3537 + }
3538 +@@ -4365,13 +4365,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3539 + struct ext4_map_blocks map;
3540 + unsigned int credits, blkbits = inode->i_blkbits;
3541 +
3542 +- /*
3543 +- * currently supporting (pre)allocate mode for extent-based
3544 +- * files _only_
3545 +- */
3546 +- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3547 +- return -EOPNOTSUPP;
3548 +-
3549 + /* Return error if mode is not supported */
3550 + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3551 + return -EOPNOTSUPP;
3552 +@@ -4392,6 +4385,15 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3553 + */
3554 + credits = ext4_chunk_trans_blocks(inode, max_blocks);
3555 + mutex_lock(&inode->i_mutex);
3556 ++
3557 ++ /*
3558 ++ * We only support preallocation for extent-based files only
3559 ++ */
3560 ++ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3561 ++ mutex_unlock(&inode->i_mutex);
3562 ++ return -EOPNOTSUPP;
3563 ++ }
3564 ++
3565 + ret = inode_newsize_ok(inode, (len + offset));
3566 + if (ret) {
3567 + mutex_unlock(&inode->i_mutex);
3568 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3569 +index dc5852301da7..e1e1d467c6f9 100644
3570 +--- a/fs/ext4/namei.c
3571 ++++ b/fs/ext4/namei.c
3572 +@@ -1469,7 +1469,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
3573 + struct inode *inode)
3574 + {
3575 + struct inode *dir = dentry->d_parent->d_inode;
3576 +- struct buffer_head *bh;
3577 ++ struct buffer_head *bh = NULL;
3578 + struct ext4_dir_entry_2 *de;
3579 + struct super_block *sb;
3580 + int retval;
3581 +@@ -1484,7 +1484,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
3582 + if (is_dx(dir)) {
3583 + retval = ext4_dx_add_entry(handle, dentry, inode);
3584 + if (!retval || (retval != ERR_BAD_DX_DIR))
3585 +- return retval;
3586 ++ goto out;
3587 + ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
3588 + dx_fallback++;
3589 + ext4_mark_inode_dirty(handle, dir);
3590 +@@ -1495,14 +1495,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
3591 + if(!bh)
3592 + return retval;
3593 + retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
3594 +- if (retval != -ENOSPC) {
3595 +- brelse(bh);
3596 +- return retval;
3597 +- }
3598 ++ if (retval != -ENOSPC)
3599 ++ goto out;
3600 +
3601 + if (blocks == 1 && !dx_fallback &&
3602 +- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
3603 +- return make_indexed_dir(handle, dentry, inode, bh);
3604 ++ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
3605 ++ retval = make_indexed_dir(handle, dentry, inode, bh);
3606 ++ bh = NULL; /* make_indexed_dir releases bh */
3607 ++ goto out;
3608 ++ }
3609 + brelse(bh);
3610 + }
3611 + bh = ext4_append(handle, dir, &block, &retval);
3612 +@@ -1512,6 +1513,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
3613 + de->inode = 0;
3614 + de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
3615 + retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
3616 ++out:
3617 + brelse(bh);
3618 + if (retval == 0)
3619 + ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
3620 +diff --git a/fs/fhandle.c b/fs/fhandle.c
3621 +index a48e4a139be1..f7c18e97d852 100644
3622 +--- a/fs/fhandle.c
3623 ++++ b/fs/fhandle.c
3624 +@@ -198,8 +198,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
3625 + goto out_err;
3626 + }
3627 + /* copy the full handle */
3628 +- if (copy_from_user(handle, ufh,
3629 +- sizeof(struct file_handle) +
3630 ++ *handle = f_handle;
3631 ++ if (copy_from_user(&handle->f_handle,
3632 ++ &ufh->f_handle,
3633 + f_handle.handle_bytes)) {
3634 + retval = -EFAULT;
3635 + goto out_handle;
3636 +diff --git a/fs/namei.c b/fs/namei.c
3637 +index bdcd70544fce..9c4b9b811d7b 100644
3638 +--- a/fs/namei.c
3639 ++++ b/fs/namei.c
3640 +@@ -1314,7 +1314,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
3641 + }
3642 + if (should_follow_link(inode, follow)) {
3643 + if (nd->flags & LOOKUP_RCU) {
3644 +- if (unlikely(unlazy_walk(nd, path->dentry))) {
3645 ++ if (unlikely(nd->path.mnt != path->mnt ||
3646 ++ unlazy_walk(nd, path->dentry))) {
3647 + terminate_walk(nd);
3648 + return -ECHILD;
3649 + }
3650 +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
3651 +index 38a44c679a0a..0184e91eb4fa 100644
3652 +--- a/fs/nfs/callback.c
3653 ++++ b/fs/nfs/callback.c
3654 +@@ -156,6 +156,7 @@ nfs41_callback_svc(void *vrqstp)
3655 + struct rpc_rqst, rq_bc_list);
3656 + list_del(&req->rq_bc_list);
3657 + spin_unlock_bh(&serv->sv_cb_lock);
3658 ++ finish_wait(&serv->sv_cb_waitq, &wq);
3659 + dprintk("Invoking bc_svc_process()\n");
3660 + error = bc_svc_process(serv, req, rqstp);
3661 + dprintk("bc_svc_process() returned w/ error code= %d\n",
3662 +@@ -163,8 +164,9 @@ nfs41_callback_svc(void *vrqstp)
3663 + } else {
3664 + spin_unlock_bh(&serv->sv_cb_lock);
3665 + schedule();
3666 ++ finish_wait(&serv->sv_cb_waitq, &wq);
3667 + }
3668 +- finish_wait(&serv->sv_cb_waitq, &wq);
3669 ++ flush_signals(current);
3670 + }
3671 + return 0;
3672 + }
3673 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3674 +index a4b87c69fbbb..6143a1ea63b1 100644
3675 +--- a/fs/nfsd/nfs4state.c
3676 ++++ b/fs/nfsd/nfs4state.c
3677 +@@ -3364,10 +3364,17 @@ static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_sess
3678 + return nfserr_old_stateid;
3679 + }
3680 +
3681 ++static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
3682 ++{
3683 ++ if (ols->st_stateowner->so_is_open_owner &&
3684 ++ !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3685 ++ return nfserr_bad_stateid;
3686 ++ return nfs_ok;
3687 ++}
3688 ++
3689 + __be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3690 + {
3691 + struct nfs4_stid *s;
3692 +- struct nfs4_ol_stateid *ols;
3693 + __be32 status;
3694 +
3695 + if (STALE_STATEID(stateid))
3696 +@@ -3381,11 +3388,7 @@ __be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3697 + return status;
3698 + if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3699 + return nfs_ok;
3700 +- ols = openlockstateid(s);
3701 +- if (ols->st_stateowner->so_is_open_owner
3702 +- && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3703 +- return nfserr_bad_stateid;
3704 +- return nfs_ok;
3705 ++ return nfsd4_check_openowner_confirmed(openlockstateid(s));
3706 + }
3707 +
3708 + static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
3709 +@@ -3452,8 +3455,8 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3710 + status = nfs4_check_fh(current_fh, stp);
3711 + if (status)
3712 + goto out;
3713 +- if (stp->st_stateowner->so_is_open_owner
3714 +- && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3715 ++ status = nfsd4_check_openowner_confirmed(stp);
3716 ++ if (status)
3717 + goto out;
3718 + status = nfs4_check_openmode(stp, flags);
3719 + if (status)
3720 +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
3721 +index 4db777d3dc05..c8bc7ea199f5 100644
3722 +--- a/fs/nfsd/nfsctl.c
3723 ++++ b/fs/nfsd/nfsctl.c
3724 +@@ -1163,15 +1163,15 @@ static int __init init_nfsd(void)
3725 + int retval;
3726 + printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@×××××××××.de).\n");
3727 +
3728 +- retval = register_cld_notifier();
3729 +- if (retval)
3730 +- return retval;
3731 + retval = register_pernet_subsys(&nfsd_net_ops);
3732 + if (retval < 0)
3733 +- goto out_unregister_notifier;
3734 +- retval = nfsd4_init_slabs();
3735 ++ return retval;
3736 ++ retval = register_cld_notifier();
3737 + if (retval)
3738 + goto out_unregister_pernet;
3739 ++ retval = nfsd4_init_slabs();
3740 ++ if (retval)
3741 ++ goto out_unregister_notifier;
3742 + nfs4_state_init();
3743 + retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
3744 + if (retval)
3745 +@@ -1209,10 +1209,10 @@ out_free_stat:
3746 + nfsd_fault_inject_cleanup();
3747 + out_free_slabs:
3748 + nfsd4_free_slabs();
3749 +-out_unregister_pernet:
3750 +- unregister_pernet_subsys(&nfsd_net_ops);
3751 + out_unregister_notifier:
3752 + unregister_cld_notifier();
3753 ++out_unregister_pernet:
3754 ++ unregister_pernet_subsys(&nfsd_net_ops);
3755 + return retval;
3756 + }
3757 +
3758 +@@ -1228,8 +1228,8 @@ static void __exit exit_nfsd(void)
3759 + nfsd4_free_slabs();
3760 + nfsd_fault_inject_cleanup();
3761 + unregister_filesystem(&nfsd_fs_type);
3762 +- unregister_pernet_subsys(&nfsd_net_ops);
3763 + unregister_cld_notifier();
3764 ++ unregister_pernet_subsys(&nfsd_net_ops);
3765 + }
3766 +
3767 + MODULE_AUTHOR("Olaf Kirch <okir@×××××××××.de>");
3768 +diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
3769 +index ecdbae19a766..090d8ce25bd1 100644
3770 +--- a/fs/nilfs2/btree.c
3771 ++++ b/fs/nilfs2/btree.c
3772 +@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
3773 + nchildren = nilfs_btree_node_get_nchildren(node);
3774 +
3775 + if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
3776 +- level > NILFS_BTREE_LEVEL_MAX ||
3777 ++ level >= NILFS_BTREE_LEVEL_MAX ||
3778 + nchildren < 0 ||
3779 + nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
3780 + pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
3781 +diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
3782 +index dbc372e2f71d..7ba6ac187edd 100644
3783 +--- a/fs/ocfs2/dlm/dlmmaster.c
3784 ++++ b/fs/ocfs2/dlm/dlmmaster.c
3785 +@@ -729,6 +729,19 @@ lookup:
3786 + if (tmpres) {
3787 + spin_unlock(&dlm->spinlock);
3788 + spin_lock(&tmpres->spinlock);
3789 ++
3790 ++ /*
3791 ++ * Right after dlm spinlock was released, dlm_thread could have
3792 ++ * purged the lockres. Check if lockres got unhashed. If so
3793 ++ * start over.
3794 ++ */
3795 ++ if (hlist_unhashed(&tmpres->hash_node)) {
3796 ++ spin_unlock(&tmpres->spinlock);
3797 ++ dlm_lockres_put(tmpres);
3798 ++ tmpres = NULL;
3799 ++ goto lookup;
3800 ++ }
3801 ++
3802 + /* Wait on the thread that is mastering the resource */
3803 + if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
3804 + __dlm_wait_on_lockres(tmpres);
3805 +diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
3806 +index dbc842222589..798dedcd44ef 100644
3807 +--- a/fs/omfs/inode.c
3808 ++++ b/fs/omfs/inode.c
3809 +@@ -361,7 +361,7 @@ nomem:
3810 + }
3811 +
3812 + enum {
3813 +- Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
3814 ++ Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
3815 + };
3816 +
3817 + static const match_table_t tokens = {
3818 +@@ -370,6 +370,7 @@ static const match_table_t tokens = {
3819 + {Opt_umask, "umask=%o"},
3820 + {Opt_dmask, "dmask=%o"},
3821 + {Opt_fmask, "fmask=%o"},
3822 ++ {Opt_err, NULL},
3823 + };
3824 +
3825 + static int parse_options(char *options, struct omfs_sb_info *sbi)
3826 +diff --git a/fs/pipe.c b/fs/pipe.c
3827 +index 1667e6fe0416..abfb93525ca6 100644
3828 +--- a/fs/pipe.c
3829 ++++ b/fs/pipe.c
3830 +@@ -104,25 +104,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
3831 + }
3832 +
3833 + static int
3834 +-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
3835 +- int atomic)
3836 ++pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
3837 ++ size_t *remaining, int atomic)
3838 + {
3839 + unsigned long copy;
3840 +
3841 +- while (len > 0) {
3842 ++ while (*remaining > 0) {
3843 + while (!iov->iov_len)
3844 + iov++;
3845 +- copy = min_t(unsigned long, len, iov->iov_len);
3846 ++ copy = min_t(unsigned long, *remaining, iov->iov_len);
3847 +
3848 + if (atomic) {
3849 +- if (__copy_from_user_inatomic(to, iov->iov_base, copy))
3850 ++ if (__copy_from_user_inatomic(addr + *offset,
3851 ++ iov->iov_base, copy))
3852 + return -EFAULT;
3853 + } else {
3854 +- if (copy_from_user(to, iov->iov_base, copy))
3855 ++ if (copy_from_user(addr + *offset,
3856 ++ iov->iov_base, copy))
3857 + return -EFAULT;
3858 + }
3859 +- to += copy;
3860 +- len -= copy;
3861 ++ *offset += copy;
3862 ++ *remaining -= copy;
3863 + iov->iov_base += copy;
3864 + iov->iov_len -= copy;
3865 + }
3866 +@@ -130,25 +132,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
3867 + }
3868 +
3869 + static int
3870 +-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
3871 +- int atomic)
3872 ++pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
3873 ++ size_t *remaining, int atomic)
3874 + {
3875 + unsigned long copy;
3876 +
3877 +- while (len > 0) {
3878 ++ while (*remaining > 0) {
3879 + while (!iov->iov_len)
3880 + iov++;
3881 +- copy = min_t(unsigned long, len, iov->iov_len);
3882 ++ copy = min_t(unsigned long, *remaining, iov->iov_len);
3883 +
3884 + if (atomic) {
3885 +- if (__copy_to_user_inatomic(iov->iov_base, from, copy))
3886 ++ if (__copy_to_user_inatomic(iov->iov_base,
3887 ++ addr + *offset, copy))
3888 + return -EFAULT;
3889 + } else {
3890 +- if (copy_to_user(iov->iov_base, from, copy))
3891 ++ if (copy_to_user(iov->iov_base,
3892 ++ addr + *offset, copy))
3893 + return -EFAULT;
3894 + }
3895 +- from += copy;
3896 +- len -= copy;
3897 ++ *offset += copy;
3898 ++ *remaining -= copy;
3899 + iov->iov_base += copy;
3900 + iov->iov_len -= copy;
3901 + }
3902 +@@ -384,7 +388,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
3903 + struct pipe_buffer *buf = pipe->bufs + curbuf;
3904 + const struct pipe_buf_operations *ops = buf->ops;
3905 + void *addr;
3906 +- size_t chars = buf->len;
3907 ++ size_t chars = buf->len, remaining;
3908 + int error, atomic;
3909 +
3910 + if (chars > total_len)
3911 +@@ -398,9 +402,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
3912 + }
3913 +
3914 + atomic = !iov_fault_in_pages_write(iov, chars);
3915 ++ remaining = chars;
3916 + redo:
3917 + addr = ops->map(pipe, buf, atomic);
3918 +- error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
3919 ++ error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
3920 ++ &remaining, atomic);
3921 + ops->unmap(pipe, buf, addr);
3922 + if (unlikely(error)) {
3923 + /*
3924 +@@ -415,7 +421,6 @@ redo:
3925 + break;
3926 + }
3927 + ret += chars;
3928 +- buf->offset += chars;
3929 + buf->len -= chars;
3930 +
3931 + /* Was it a packet buffer? Clean up and exit */
3932 +@@ -522,6 +527,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
3933 + if (ops->can_merge && offset + chars <= PAGE_SIZE) {
3934 + int error, atomic = 1;
3935 + void *addr;
3936 ++ size_t remaining = chars;
3937 +
3938 + error = ops->confirm(pipe, buf);
3939 + if (error)
3940 +@@ -530,8 +536,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
3941 + iov_fault_in_pages_read(iov, chars);
3942 + redo1:
3943 + addr = ops->map(pipe, buf, atomic);
3944 +- error = pipe_iov_copy_from_user(offset + addr, iov,
3945 +- chars, atomic);
3946 ++ error = pipe_iov_copy_from_user(addr, &offset, iov,
3947 ++ &remaining, atomic);
3948 + ops->unmap(pipe, buf, addr);
3949 + ret = error;
3950 + do_wakeup = 1;
3951 +@@ -566,6 +572,8 @@ redo1:
3952 + struct page *page = pipe->tmp_page;
3953 + char *src;
3954 + int error, atomic = 1;
3955 ++ int offset = 0;
3956 ++ size_t remaining;
3957 +
3958 + if (!page) {
3959 + page = alloc_page(GFP_HIGHUSER);
3960 +@@ -586,14 +594,15 @@ redo1:
3961 + chars = total_len;
3962 +
3963 + iov_fault_in_pages_read(iov, chars);
3964 ++ remaining = chars;
3965 + redo2:
3966 + if (atomic)
3967 + src = kmap_atomic(page);
3968 + else
3969 + src = kmap(page);
3970 +
3971 +- error = pipe_iov_copy_from_user(src, iov, chars,
3972 +- atomic);
3973 ++ error = pipe_iov_copy_from_user(src, &offset, iov,
3974 ++ &remaining, atomic);
3975 + if (atomic)
3976 + kunmap_atomic(src);
3977 + else
3978 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
3979 +index 8053ee75d297..330ec8cfeb63 100644
3980 +--- a/fs/udf/inode.c
3981 ++++ b/fs/udf/inode.c
3982 +@@ -1392,6 +1392,19 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
3983 + iinfo->i_lenEAttr;
3984 + }
3985 +
3986 ++ /*
3987 ++ * Sanity check length of allocation descriptors and extended attrs to
3988 ++ * avoid integer overflows
3989 ++ */
3990 ++ if (iinfo->i_lenEAttr > inode->i_sb->s_blocksize || iinfo->i_lenAlloc > inode->i_sb->s_blocksize) {
3991 ++ make_bad_inode(inode);
3992 ++ return;
3993 ++ }
3994 ++ /* Now do exact checks */
3995 ++ if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > inode->i_sb->s_blocksize) {
3996 ++ make_bad_inode(inode);
3997 ++ return;
3998 ++ }
3999 + /* Sanity checks for files in ICB so that we don't get confused later */
4000 + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
4001 + /*
4002 +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
4003 +index e8bcc4742e0e..6d52429f80bc 100644
4004 +--- a/include/acpi/actypes.h
4005 ++++ b/include/acpi/actypes.h
4006 +@@ -198,9 +198,29 @@ typedef int INT32;
4007 + typedef s32 acpi_native_int;
4008 +
4009 + typedef u32 acpi_size;
4010 ++
4011 ++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
4012 ++
4013 ++/*
4014 ++ * OSPMs can define this to shrink the size of the structures for 32-bit
4015 ++ * none PAE environment. ASL compiler may always define this to generate
4016 ++ * 32-bit OSPM compliant tables.
4017 ++ */
4018 + typedef u32 acpi_io_address;
4019 + typedef u32 acpi_physical_address;
4020 +
4021 ++#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
4022 ++
4023 ++/*
4024 ++ * It is reported that, after some calculations, the physical addresses can
4025 ++ * wrap over the 32-bit boundary on 32-bit PAE environment.
4026 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
4027 ++ */
4028 ++typedef u64 acpi_io_address;
4029 ++typedef u64 acpi_physical_address;
4030 ++
4031 ++#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
4032 ++
4033 + #define ACPI_MAX_PTR ACPI_UINT32_MAX
4034 + #define ACPI_SIZE_MAX ACPI_UINT32_MAX
4035 +
4036 +diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
4037 +index 5af3ed52ef98..b9f921012043 100644
4038 +--- a/include/acpi/platform/acenv.h
4039 ++++ b/include/acpi/platform/acenv.h
4040 +@@ -75,6 +75,7 @@
4041 + #define ACPI_CONSTANT_EVAL_ONLY
4042 + #define ACPI_LARGE_NAMESPACE_NODE
4043 + #define ACPI_DATA_TABLE_DISASSEMBLY
4044 ++#define ACPI_32BIT_PHYSICAL_ADDRESS
4045 + #endif
4046 +
4047 + #ifdef ACPI_EXEC_APP
4048 +diff --git a/include/linux/jhash.h b/include/linux/jhash.h
4049 +index 47cb09edec1a..348c6f47e4cc 100644
4050 +--- a/include/linux/jhash.h
4051 ++++ b/include/linux/jhash.h
4052 +@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
4053 + }
4054 +
4055 +
4056 +-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
4057 +-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
4058 ++/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
4059 ++static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
4060 + {
4061 +- a += JHASH_INITVAL;
4062 +- b += JHASH_INITVAL;
4063 ++ a += initval;
4064 ++ b += initval;
4065 + c += initval;
4066 +
4067 + __jhash_final(a, b, c);
4068 +@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
4069 + return c;
4070 + }
4071 +
4072 ++static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
4073 ++{
4074 ++ return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
4075 ++}
4076 ++
4077 + static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
4078 + {
4079 +- return jhash_3words(a, b, 0, initval);
4080 ++ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
4081 + }
4082 +
4083 + static inline u32 jhash_1word(u32 a, u32 initval)
4084 + {
4085 +- return jhash_3words(a, 0, 0, initval);
4086 ++ return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
4087 + }
4088 +
4089 + #endif /* _LINUX_JHASH_H */
4090 +diff --git a/include/linux/libata.h b/include/linux/libata.h
4091 +index 764cd54dfea7..35e7f71cd8a5 100644
4092 +--- a/include/linux/libata.h
4093 ++++ b/include/linux/libata.h
4094 +@@ -182,6 +182,7 @@ enum {
4095 + ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
4096 + ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
4097 + ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
4098 ++ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
4099 +
4100 + /* struct ata_port flags */
4101 + ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
4102 +@@ -285,6 +286,12 @@ enum {
4103 + */
4104 + ATA_TMOUT_PMP_SRST_WAIT = 5000,
4105 +
4106 ++ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
4107 ++ * be a spurious PHY event, so ignore the first PHY event that
4108 ++ * occurs within 10s after the policy change.
4109 ++ */
4110 ++ ATA_TMOUT_SPURIOUS_PHY = 10000,
4111 ++
4112 + /* ATA bus states */
4113 + BUS_UNKNOWN = 0,
4114 + BUS_DMA = 1,
4115 +@@ -727,6 +734,8 @@ struct ata_link {
4116 + struct ata_eh_context eh_context;
4117 +
4118 + struct ata_device device[ATA_MAX_DEVICES];
4119 ++
4120 ++ unsigned long last_lpm_change; /* when last LPM change happened */
4121 + };
4122 + #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
4123 + #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
4124 +@@ -1065,6 +1074,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
4125 + extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
4126 + extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
4127 + extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
4128 ++extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
4129 +
4130 + extern int ata_cable_40wire(struct ata_port *ap);
4131 + extern int ata_cable_80wire(struct ata_port *ap);
4132 +diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
4133 +index 89bd4a4dcfb4..25c6891e6302 100644
4134 +--- a/include/linux/nilfs2_fs.h
4135 ++++ b/include/linux/nilfs2_fs.h
4136 +@@ -458,7 +458,7 @@ struct nilfs_btree_node {
4137 + /* level */
4138 + #define NILFS_BTREE_LEVEL_DATA 0
4139 + #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
4140 +-#define NILFS_BTREE_LEVEL_MAX 14
4141 ++#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
4142 +
4143 + /**
4144 + * struct nilfs_palloc_group_desc - block group descriptor
4145 +diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
4146 +index b4ddd3b18b4c..fc8cdff83a50 100644
4147 +--- a/include/scsi/scsi_devinfo.h
4148 ++++ b/include/scsi/scsi_devinfo.h
4149 +@@ -30,4 +30,5 @@
4150 + #define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
4151 + #define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
4152 + #define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
4153 ++#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
4154 + #endif
4155 +diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
4156 +index 4f865df42f0f..7ee55e3ff4fe 100644
4157 +--- a/include/sound/emu10k1.h
4158 ++++ b/include/sound/emu10k1.h
4159 +@@ -43,7 +43,8 @@
4160 +
4161 + #define EMUPAGESIZE 4096
4162 + #define MAXREQVOICES 8
4163 +-#define MAXPAGES 8192
4164 ++#define MAXPAGES0 4096 /* 32 bit mode */
4165 ++#define MAXPAGES1 8192 /* 31 bit mode */
4166 + #define RESERVED 0
4167 + #define NUM_MIDI 16
4168 + #define NUM_G 64 /* use all channels */
4169 +@@ -52,8 +53,7 @@
4170 +
4171 + /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
4172 + #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
4173 +-#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
4174 +- /* See ALSA bug #1276 - rlrevell */
4175 ++#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
4176 +
4177 + #define TMEMSIZE 256*1024
4178 + #define TMEMSIZEREG 4
4179 +@@ -470,8 +470,11 @@
4180 +
4181 + #define MAPB 0x0d /* Cache map B */
4182 +
4183 +-#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
4184 +-#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
4185 ++#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
4186 ++#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
4187 ++
4188 ++#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
4189 ++#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
4190 +
4191 + /* 0x0e, 0x0f: Not used */
4192 +
4193 +@@ -1708,6 +1711,7 @@ struct snd_emu10k1 {
4194 + unsigned short model; /* subsystem id */
4195 + unsigned int card_type; /* EMU10K1_CARD_* */
4196 + unsigned int ecard_ctrl; /* ecard control bits */
4197 ++ unsigned int address_mode; /* address mode */
4198 + unsigned long dma_mask; /* PCI DMA mask */
4199 + unsigned int delay_pcm_irq; /* in samples */
4200 + int max_cache_pages; /* max memory size / PAGE_SIZE */
4201 +diff --git a/include/xen/events.h b/include/xen/events.h
4202 +index 04399b28e821..f9cb6306511f 100644
4203 +--- a/include/xen/events.h
4204 ++++ b/include/xen/events.h
4205 +@@ -12,7 +12,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
4206 + irq_handler_t handler,
4207 + unsigned long irqflags, const char *devname,
4208 + void *dev_id);
4209 +-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
4210 ++int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
4211 + int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
4212 + irq_handler_t handler,
4213 + unsigned long irqflags, const char *devname,
4214 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
4215 +index a1432369be50..d9e71e311027 100644
4216 +--- a/kernel/ptrace.c
4217 ++++ b/kernel/ptrace.c
4218 +@@ -632,6 +632,8 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
4219 + static int ptrace_resume(struct task_struct *child, long request,
4220 + unsigned long data)
4221 + {
4222 ++ bool need_siglock;
4223 ++
4224 + if (!valid_signal(data))
4225 + return -EIO;
4226 +
4227 +@@ -659,8 +661,26 @@ static int ptrace_resume(struct task_struct *child, long request,
4228 + user_disable_single_step(child);
4229 + }
4230 +
4231 ++ /*
4232 ++ * Change ->exit_code and ->state under siglock to avoid the race
4233 ++ * with wait_task_stopped() in between; a non-zero ->exit_code will
4234 ++ * wrongly look like another report from tracee.
4235 ++ *
4236 ++ * Note that we need siglock even if ->exit_code == data and/or this
4237 ++ * status was not reported yet, the new status must not be cleared by
4238 ++ * wait_task_stopped() after resume.
4239 ++ *
4240 ++ * If data == 0 we do not care if wait_task_stopped() reports the old
4241 ++ * status and clears the code too; this can't race with the tracee, it
4242 ++ * takes siglock after resume.
4243 ++ */
4244 ++ need_siglock = data && !thread_group_empty(current);
4245 ++ if (need_siglock)
4246 ++ spin_lock_irq(&child->sighand->siglock);
4247 + child->exit_code = data;
4248 + wake_up_state(child, __TASK_TRACED);
4249 ++ if (need_siglock)
4250 ++ spin_unlock_irq(&child->sighand->siglock);
4251 +
4252 + return 0;
4253 + }
4254 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4255 +index 2f8363e0a1ec..15be43522c8b 100644
4256 +--- a/kernel/sched/core.c
4257 ++++ b/kernel/sched/core.c
4258 +@@ -4396,8 +4396,13 @@ recheck:
4259 +
4260 + if (running)
4261 + p->sched_class->set_curr_task(rq);
4262 +- if (on_rq)
4263 +- enqueue_task(rq, p, 0);
4264 ++ if (on_rq) {
4265 ++ /*
4266 ++ * We enqueue to tail when the priority of a task is
4267 ++ * increased (user space view).
4268 ++ */
4269 ++ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
4270 ++ }
4271 +
4272 + check_class_changed(rq, p, prev_class, oldprio);
4273 + task_rq_unlock(rq, p, &flags);
4274 +diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
4275 +index a5457d577b98..6ad2e2d320fe 100644
4276 +--- a/kernel/trace/ring_buffer_benchmark.c
4277 ++++ b/kernel/trace/ring_buffer_benchmark.c
4278 +@@ -455,7 +455,7 @@ static int __init ring_buffer_benchmark_init(void)
4279 +
4280 + if (producer_fifo >= 0) {
4281 + struct sched_param param = {
4282 +- .sched_priority = consumer_fifo
4283 ++ .sched_priority = producer_fifo
4284 + };
4285 + sched_setscheduler(producer, SCHED_FIFO, &param);
4286 + } else
4287 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
4288 +index 289197a2d334..3b04aec58700 100644
4289 +--- a/kernel/trace/trace_events_filter.c
4290 ++++ b/kernel/trace/trace_events_filter.c
4291 +@@ -1357,19 +1357,25 @@ static int check_preds(struct filter_parse_state *ps)
4292 + {
4293 + int n_normal_preds = 0, n_logical_preds = 0;
4294 + struct postfix_elt *elt;
4295 ++ int cnt = 0;
4296 +
4297 + list_for_each_entry(elt, &ps->postfix, list) {
4298 +- if (elt->op == OP_NONE)
4299 ++ if (elt->op == OP_NONE) {
4300 ++ cnt++;
4301 + continue;
4302 ++ }
4303 +
4304 + if (elt->op == OP_AND || elt->op == OP_OR) {
4305 + n_logical_preds++;
4306 ++ cnt--;
4307 + continue;
4308 + }
4309 ++ cnt--;
4310 + n_normal_preds++;
4311 ++ WARN_ON_ONCE(cnt < 0);
4312 + }
4313 +
4314 +- if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
4315 ++ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
4316 + parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
4317 + return -EINVAL;
4318 + }
4319 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4320 +index 97eec2174769..c957a370bff8 100644
4321 +--- a/mm/memory-failure.c
4322 ++++ b/mm/memory-failure.c
4323 +@@ -1095,10 +1095,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
4324 + * The check (unnecessarily) ignores LRU pages being isolated and
4325 + * walked by the page reclaim code, however that's not a big loss.
4326 + */
4327 +- if (!PageHuge(p) && !PageTransTail(p)) {
4328 +- if (!PageLRU(p))
4329 +- shake_page(p, 0);
4330 +- if (!PageLRU(p)) {
4331 ++ if (!PageHuge(p)) {
4332 ++ if (!PageLRU(hpage))
4333 ++ shake_page(hpage, 0);
4334 ++ if (!PageLRU(hpage)) {
4335 + /*
4336 + * shake_page could have turned it free.
4337 + */
4338 +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
4339 +index 5ba0c844d508..3cb93e85d221 100644
4340 +--- a/net/bridge/br_fdb.c
4341 ++++ b/net/bridge/br_fdb.c
4342 +@@ -440,7 +440,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
4343 + fdb->updated = jiffies;
4344 + }
4345 + } else {
4346 +- spin_lock(&br->hash_lock);
4347 ++ spin_lock_bh(&br->hash_lock);
4348 + if (likely(!fdb_find(head, addr))) {
4349 + fdb = fdb_create(head, source, addr);
4350 + if (fdb)
4351 +@@ -449,7 +449,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
4352 + /* else we lose race and someone else inserts
4353 + * it first, don't bother updating
4354 + */
4355 +- spin_unlock(&br->hash_lock);
4356 ++ spin_unlock_bh(&br->hash_lock);
4357 + }
4358 + }
4359 +
4360 +@@ -665,9 +665,11 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
4361 + }
4362 +
4363 + if (ndm->ndm_flags & NTF_USE) {
4364 ++ local_bh_disable();
4365 + rcu_read_lock();
4366 + br_fdb_update(p->br, p, addr);
4367 + rcu_read_unlock();
4368 ++ local_bh_enable();
4369 + } else {
4370 + spin_lock_bh(&p->br->hash_lock);
4371 + err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
4372 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
4373 +index 0b870d75a542..a41051a1bca5 100644
4374 +--- a/net/bridge/br_multicast.c
4375 ++++ b/net/bridge/br_multicast.c
4376 +@@ -972,7 +972,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
4377 + }
4378 +
4379 + err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
4380 +- if (!err)
4381 ++ if (err)
4382 + break;
4383 + }
4384 +
4385 +@@ -991,6 +991,9 @@ static void br_multicast_add_router(struct net_bridge *br,
4386 + struct net_bridge_port *p;
4387 + struct hlist_node *n, *slot = NULL;
4388 +
4389 ++ if (!hlist_unhashed(&port->rlist))
4390 ++ return;
4391 ++
4392 + hlist_for_each_entry(p, n, &br->router_list, rlist) {
4393 + if ((unsigned long) port >= (unsigned long) p)
4394 + break;
4395 +@@ -1018,12 +1021,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
4396 + if (port->multicast_router != 1)
4397 + return;
4398 +
4399 +- if (!hlist_unhashed(&port->rlist))
4400 +- goto timer;
4401 +-
4402 + br_multicast_add_router(br, port);
4403 +
4404 +-timer:
4405 + mod_timer(&port->multicast_router_timer,
4406 + now + br->multicast_querier_interval);
4407 + }
4408 +diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
4409 +index e54ef82fdad7..5ba424839939 100644
4410 +--- a/net/bridge/br_netfilter.c
4411 ++++ b/net/bridge/br_netfilter.c
4412 +@@ -818,12 +818,12 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
4413 + return NF_STOLEN;
4414 + }
4415 +
4416 +-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
4417 ++#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
4418 + static int br_nf_dev_queue_xmit(struct sk_buff *skb)
4419 + {
4420 + int ret;
4421 +
4422 +- if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
4423 ++ if (skb->protocol == htons(ETH_P_IP) &&
4424 + skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
4425 + !skb_is_gso(skb)) {
4426 + if (br_parse_ip_options(skb))
4427 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
4428 +index cb9085272dd7..9f471c32d209 100644
4429 +--- a/net/ipv4/ping.c
4430 ++++ b/net/ipv4/ping.c
4431 +@@ -138,6 +138,7 @@ static void ping_v4_unhash(struct sock *sk)
4432 + if (sk_hashed(sk)) {
4433 + write_lock_bh(&ping_table.lock);
4434 + hlist_nulls_del(&sk->sk_nulls_node);
4435 ++ sk_nulls_node_init(&sk->sk_nulls_node);
4436 + sock_put(sk);
4437 + isk->inet_num = 0;
4438 + isk->inet_sport = 0;
4439 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4440 +index 7949b5d1663f..5f8c20b67da2 100644
4441 +--- a/net/ipv4/udp.c
4442 ++++ b/net/ipv4/udp.c
4443 +@@ -1251,10 +1251,8 @@ csum_copy_err:
4444 + UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
4445 + unlock_sock_fast(sk, slow);
4446 +
4447 +- if (noblock)
4448 +- return -EAGAIN;
4449 +-
4450 +- /* starting over for a new packet */
4451 ++ /* starting over for a new packet, but check if we need to yield */
4452 ++ cond_resched();
4453 + msg->msg_flags &= ~MSG_TRUNC;
4454 + goto try_again;
4455 + }
4456 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4457 +index 4cfba3d5ad2c..23b33048ea98 100644
4458 +--- a/net/ipv6/route.c
4459 ++++ b/net/ipv6/route.c
4460 +@@ -1661,6 +1661,17 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
4461 + goto out;
4462 + }
4463 +
4464 ++#ifdef CONFIG_IPV6_MULTIPLE_TABLES
4465 ++ if (rt == net->ipv6.ip6_blk_hole_entry ||
4466 ++ rt == net->ipv6.ip6_prohibit_entry) {
4467 ++ if (net_ratelimit())
4468 ++ printk(KERN_DEBUG "rt6_redirect: source isn't a valid" \
4469 ++ " nexthop for redirect target " \
4470 ++ "(blackhole or prohibited)\n");
4471 ++ goto out;
4472 ++ }
4473 ++#endif
4474 ++
4475 + /*
4476 + * We have finally decided to accept it.
4477 + */
4478 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4479 +index ef9052f8c90b..2f99b12b717e 100644
4480 +--- a/net/ipv6/udp.c
4481 ++++ b/net/ipv6/udp.c
4482 +@@ -451,10 +451,8 @@ csum_copy_err:
4483 + }
4484 + unlock_sock_fast(sk, slow);
4485 +
4486 +- if (noblock)
4487 +- return -EAGAIN;
4488 +-
4489 +- /* starting over for a new packet */
4490 ++ /* starting over for a new packet, but check if we need to yield */
4491 ++ cond_resched();
4492 + msg->msg_flags &= ~MSG_TRUNC;
4493 + goto try_again;
4494 + }
4495 +diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
4496 +index 7aa31bbfaa3b..9464f92b1447 100644
4497 +--- a/net/mac80211/wep.c
4498 ++++ b/net/mac80211/wep.c
4499 +@@ -97,8 +97,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
4500 +
4501 + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
4502 +
4503 +- if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
4504 +- skb_headroom(skb) < WEP_IV_LEN))
4505 ++ if (WARN_ON(skb_headroom(skb) < WEP_IV_LEN))
4506 + return NULL;
4507 +
4508 + hdrlen = ieee80211_hdrlen(hdr->frame_control);
4509 +@@ -160,6 +159,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
4510 + size_t len;
4511 + u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
4512 +
4513 ++ if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN))
4514 ++ return -1;
4515 ++
4516 + iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
4517 + if (!iv)
4518 + return -1;
4519 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
4520 +index cbc5bfd8c8e4..f2ed4a996290 100644
4521 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
4522 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
4523 +@@ -3689,6 +3689,9 @@ void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
4524 + cancel_delayed_work_sync(&ipvs->defense_work);
4525 + cancel_work_sync(&ipvs->defense_work.work);
4526 + unregister_net_sysctl_table(ipvs->sysctl_hdr);
4527 ++
4528 ++ if (!net_eq(net, &init_net))
4529 ++ kfree(ipvs->sysctl_tbl);
4530 + }
4531 +
4532 + #else
4533 +diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
4534 +index bde7d69b440d..e89563655030 100644
4535 +--- a/net/rose/af_rose.c
4536 ++++ b/net/rose/af_rose.c
4537 +@@ -194,7 +194,8 @@ static void rose_kill_by_device(struct net_device *dev)
4538 +
4539 + if (rose->device == dev) {
4540 + rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
4541 +- rose->neighbour->use--;
4542 ++ if (rose->neighbour)
4543 ++ rose->neighbour->use--;
4544 + rose->device = NULL;
4545 + }
4546 + }
4547 +diff --git a/net/socket.c b/net/socket.c
4548 +index 025f7f4d2d80..f5ce151e0e3b 100644
4549 +--- a/net/socket.c
4550 ++++ b/net/socket.c
4551 +@@ -1934,14 +1934,12 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
4552 + int err, ctl_len, iov_size, total_len;
4553 +
4554 + err = -EFAULT;
4555 +- if (MSG_CMSG_COMPAT & flags) {
4556 +- if (get_compat_msghdr(msg_sys, msg_compat))
4557 +- return -EFAULT;
4558 +- } else {
4559 ++ if (MSG_CMSG_COMPAT & flags)
4560 ++ err = get_compat_msghdr(msg_sys, msg_compat);
4561 ++ else
4562 + err = copy_msghdr_from_user(msg_sys, msg);
4563 +- if (err)
4564 +- return err;
4565 +- }
4566 ++ if (err)
4567 ++ return err;
4568 +
4569 + /* do not move before msg_sys is valid */
4570 + err = -EMSGSIZE;
4571 +@@ -2149,14 +2147,12 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
4572 + struct sockaddr __user *uaddr;
4573 + int __user *uaddr_len;
4574 +
4575 +- if (MSG_CMSG_COMPAT & flags) {
4576 +- if (get_compat_msghdr(msg_sys, msg_compat))
4577 +- return -EFAULT;
4578 +- } else {
4579 ++ if (MSG_CMSG_COMPAT & flags)
4580 ++ err = get_compat_msghdr(msg_sys, msg_compat);
4581 ++ else
4582 + err = copy_msghdr_from_user(msg_sys, msg);
4583 +- if (err)
4584 +- return err;
4585 +- }
4586 ++ if (err)
4587 ++ return err;
4588 +
4589 + err = -EMSGSIZE;
4590 + if (msg_sys->msg_iovlen > UIO_MAXIOV)
4591 +diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
4592 +index 0920ea3bf599..5776921d6209 100644
4593 +--- a/security/selinux/nlmsgtab.c
4594 ++++ b/security/selinux/nlmsgtab.c
4595 +@@ -100,6 +100,12 @@ static struct nlmsg_perm nlmsg_xfrm_perms[] =
4596 + { XFRM_MSG_FLUSHPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
4597 + { XFRM_MSG_NEWAE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
4598 + { XFRM_MSG_GETAE, NETLINK_XFRM_SOCKET__NLMSG_READ },
4599 ++ { XFRM_MSG_REPORT, NETLINK_XFRM_SOCKET__NLMSG_READ },
4600 ++ { XFRM_MSG_MIGRATE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
4601 ++ { XFRM_MSG_NEWSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
4602 ++ { XFRM_MSG_GETSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
4603 ++ { XFRM_MSG_GETSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
4604 ++ { XFRM_MSG_MAPPING, NETLINK_XFRM_SOCKET__NLMSG_READ },
4605 + };
4606 +
4607 + static struct nlmsg_perm nlmsg_audit_perms[] =
4608 +diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
4609 +index 790c65d980c8..aefde0175846 100644
4610 +--- a/sound/pci/emu10k1/emu10k1.c
4611 ++++ b/sound/pci/emu10k1/emu10k1.c
4612 +@@ -181,8 +181,10 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
4613 + }
4614 + #endif
4615 +
4616 +- strcpy(card->driver, emu->card_capabilities->driver);
4617 +- strcpy(card->shortname, emu->card_capabilities->name);
4618 ++ strlcpy(card->driver, emu->card_capabilities->driver,
4619 ++ sizeof(card->driver));
4620 ++ strlcpy(card->shortname, emu->card_capabilities->name,
4621 ++ sizeof(card->shortname));
4622 + snprintf(card->longname, sizeof(card->longname),
4623 + "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
4624 + card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
4625 +diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
4626 +index f35284be7b02..829595078fa1 100644
4627 +--- a/sound/pci/emu10k1/emu10k1_callback.c
4628 ++++ b/sound/pci/emu10k1/emu10k1_callback.c
4629 +@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
4630 + snd_emu10k1_ptr_write(hw, Z2, ch, 0);
4631 +
4632 + /* invalidate maps */
4633 +- temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
4634 ++ temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
4635 + snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
4636 + snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
4637 + #if 0
4638 +@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
4639 + snd_emu10k1_ptr_write(hw, CDF, ch, sample);
4640 +
4641 + /* invalidate maps */
4642 +- temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
4643 ++ temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
4644 + snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
4645 + snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
4646 +
4647 +diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
4648 +index a78fdf466fa7..124ae93d3748 100644
4649 +--- a/sound/pci/emu10k1/emu10k1_main.c
4650 ++++ b/sound/pci/emu10k1/emu10k1_main.c
4651 +@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
4652 + snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
4653 + snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
4654 +
4655 +- silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
4656 ++ silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
4657 + for (ch = 0; ch < NUM_G; ch++) {
4658 + snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
4659 + snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
4660 +@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
4661 + outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
4662 + }
4663 +
4664 ++ if (emu->address_mode == 0) {
4665 ++ /* use 16M in 4G */
4666 ++ outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
4667 ++ }
4668 ++
4669 + return 0;
4670 + }
4671 +
4672 +@@ -1390,7 +1395,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
4673 + *
4674 + */
4675 + {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
4676 +- .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
4677 ++ .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
4678 + .id = "Audigy2",
4679 + .emu10k2_chip = 1,
4680 + .ca0108_chip = 1,
4681 +@@ -1540,7 +1545,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
4682 + .adc_1361t = 1, /* 24 bit capture instead of 16bit */
4683 + .ac97_chip = 1} ,
4684 + {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
4685 +- .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
4686 ++ .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
4687 + .id = "Audigy2",
4688 + .emu10k2_chip = 1,
4689 + .ca0102_chip = 1,
4690 +@@ -1844,8 +1849,10 @@ int __devinit snd_emu10k1_create(struct snd_card *card,
4691 +
4692 + is_audigy = emu->audigy = c->emu10k2_chip;
4693 +
4694 ++ /* set addressing mode */
4695 ++ emu->address_mode = is_audigy ? 0 : 1;
4696 + /* set the DMA transfer mask */
4697 +- emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
4698 ++ emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
4699 + if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
4700 + pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
4701 + snd_printk(KERN_ERR "architecture does not support PCI busmaster DMA with mask 0x%lx\n", emu->dma_mask);
4702 +@@ -1868,7 +1875,7 @@ int __devinit snd_emu10k1_create(struct snd_card *card,
4703 +
4704 + emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
4705 + if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
4706 +- 32 * 1024, &emu->ptb_pages) < 0) {
4707 ++ (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
4708 + err = -ENOMEM;
4709 + goto error;
4710 + }
4711 +@@ -1967,8 +1974,8 @@ int __devinit snd_emu10k1_create(struct snd_card *card,
4712 +
4713 + /* Clear silent pages and set up pointers */
4714 + memset(emu->silent_page.area, 0, PAGE_SIZE);
4715 +- silent_page = emu->silent_page.addr << 1;
4716 +- for (idx = 0; idx < MAXPAGES; idx++)
4717 ++ silent_page = emu->silent_page.addr << emu->address_mode;
4718 ++ for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
4719 + ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
4720 +
4721 + /* set up voice indices */
4722 +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
4723 +index e22b8e2bbd88..c673d2b31510 100644
4724 +--- a/sound/pci/emu10k1/emupcm.c
4725 ++++ b/sound/pci/emu10k1/emupcm.c
4726 +@@ -379,7 +379,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
4727 + snd_emu10k1_ptr_write(emu, Z1, voice, 0);
4728 + snd_emu10k1_ptr_write(emu, Z2, voice, 0);
4729 + /* invalidate maps */
4730 +- silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
4731 ++ silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
4732 + snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
4733 + snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
4734 + /* modulation envelope */
4735 +diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
4736 +index bc38dd4d071f..9c499e6bae06 100644
4737 +--- a/sound/pci/emu10k1/emuproc.c
4738 ++++ b/sound/pci/emu10k1/emuproc.c
4739 +@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
4740 + struct snd_emu10k1 *emu = entry->private_data;
4741 + u32 value;
4742 + u32 value2;
4743 +- unsigned long flags;
4744 + u32 rate;
4745 +
4746 + if (emu->card_capabilities->emu_model) {
4747 +- spin_lock_irqsave(&emu->emu_lock, flags);
4748 + snd_emu1010_fpga_read(emu, 0x38, &value);
4749 +- spin_unlock_irqrestore(&emu->emu_lock, flags);
4750 + if ((value & 0x1) == 0) {
4751 +- spin_lock_irqsave(&emu->emu_lock, flags);
4752 + snd_emu1010_fpga_read(emu, 0x2a, &value);
4753 + snd_emu1010_fpga_read(emu, 0x2b, &value2);
4754 +- spin_unlock_irqrestore(&emu->emu_lock, flags);
4755 + rate = 0x1770000 / (((value << 5) | value2)+1);
4756 + snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
4757 + } else {
4758 + snd_iprintf(buffer, "ADAT Unlocked\n");
4759 + }
4760 +- spin_lock_irqsave(&emu->emu_lock, flags);
4761 + snd_emu1010_fpga_read(emu, 0x20, &value);
4762 +- spin_unlock_irqrestore(&emu->emu_lock, flags);
4763 + if ((value & 0x4) == 0) {
4764 +- spin_lock_irqsave(&emu->emu_lock, flags);
4765 + snd_emu1010_fpga_read(emu, 0x28, &value);
4766 + snd_emu1010_fpga_read(emu, 0x29, &value2);
4767 +- spin_unlock_irqrestore(&emu->emu_lock, flags);
4768 + rate = 0x1770000 / (((value << 5) | value2)+1);
4769 + snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
4770 + } else {
4771 +@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
4772 + {
4773 + struct snd_emu10k1 *emu = entry->private_data;
4774 + u32 value;
4775 +- unsigned long flags;
4776 + int i;
4777 + snd_iprintf(buffer, "EMU1010 Registers:\n\n");
4778 +
4779 + for(i = 0; i < 0x40; i+=1) {
4780 +- spin_lock_irqsave(&emu->emu_lock, flags);
4781 + snd_emu1010_fpga_read(emu, i, &value);
4782 +- spin_unlock_irqrestore(&emu->emu_lock, flags);
4783 + snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
4784 + }
4785 + }
4786 +diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
4787 +index 4f502a2bdc3c..87b7c65fa033 100644
4788 +--- a/sound/pci/emu10k1/memory.c
4789 ++++ b/sound/pci/emu10k1/memory.c
4790 +@@ -34,10 +34,11 @@
4791 + * aligned pages in others
4792 + */
4793 + #define __set_ptb_entry(emu,page,addr) \
4794 +- (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
4795 ++ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
4796 +
4797 + #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
4798 +-#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
4799 ++#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
4800 ++#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
4801 + /* get aligned page from offset address */
4802 + #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
4803 + /* get offset address from aligned page */
4804 +@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
4805 + }
4806 + page = blk->mapped_page + blk->pages;
4807 + }
4808 +- size = MAX_ALIGN_PAGES - page;
4809 ++ size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
4810 + if (size >= max_size) {
4811 + *nextp = pos;
4812 + return page;
4813 +@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
4814 + q = get_emu10k1_memblk(p, mapped_link);
4815 + end_page = q->mapped_page;
4816 + } else
4817 +- end_page = MAX_ALIGN_PAGES;
4818 ++ end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
4819 +
4820 + /* remove links */
4821 + list_del(&blk->mapped_link);
4822 +@@ -305,7 +306,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
4823 + if (snd_BUG_ON(!emu))
4824 + return NULL;
4825 + if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
4826 +- runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
4827 ++ runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
4828 + return NULL;
4829 + hdr = emu->memhdr;
4830 + if (snd_BUG_ON(!hdr))
4831 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
4832 +index c74a044284b0..f70115e143ec 100644
4833 +--- a/sound/pci/hda/hda_codec.c
4834 ++++ b/sound/pci/hda/hda_codec.c
4835 +@@ -2093,6 +2093,16 @@ _snd_hda_find_mixer_ctl(struct hda_codec *codec,
4836 + return snd_ctl_find_id(codec->bus->card, &id);
4837 + }
4838 +
4839 ++/* meta hook to call each driver's vmaster hook */
4840 ++static void vmaster_hook(void *private_data, int enabled)
4841 ++{
4842 ++ struct hda_vmaster_mute_hook *hook = private_data;
4843 ++
4844 ++ if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
4845 ++ enabled = hook->mute_mode;
4846 ++ hook->hook(hook->codec, enabled);
4847 ++}
4848 ++
4849 + /**
4850 + * snd_hda_find_mixer_ctl - Find a mixer control element with the given name
4851 + * @codec: HD-audio codec
4852 +@@ -2520,9 +2530,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
4853 +
4854 + if (!hook->hook || !hook->sw_kctl)
4855 + return 0;
4856 +- snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
4857 + hook->codec = codec;
4858 + hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
4859 ++ snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
4860 + if (!expose_enum_ctl)
4861 + return 0;
4862 + kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
4863 +@@ -2540,14 +2550,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
4864 + {
4865 + if (!hook->hook || !hook->codec)
4866 + return;
4867 +- switch (hook->mute_mode) {
4868 +- case HDA_VMUTE_FOLLOW_MASTER:
4869 +- snd_ctl_sync_vmaster_hook(hook->sw_kctl);
4870 +- break;
4871 +- default:
4872 +- hook->hook(hook->codec, hook->mute_mode);
4873 +- break;
4874 +- }
4875 ++ snd_ctl_sync_vmaster_hook(hook->sw_kctl);
4876 + }
4877 + EXPORT_SYMBOL_HDA(snd_hda_sync_vmaster_hook);
4878 +
4879 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4880 +index 20cfc5b44710..ba80c2f7e047 100644
4881 +--- a/sound/pci/hda/patch_conexant.c
4882 ++++ b/sound/pci/hda/patch_conexant.c
4883 +@@ -4606,6 +4606,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
4884 + .patch = patch_conexant_auto },
4885 + { .id = 0x14f150b9, .name = "CX20665",
4886 + .patch = patch_conexant_auto },
4887 ++ { .id = 0x14f150f1, .name = "CX20721",
4888 ++ .patch = patch_conexant_auto },
4889 ++ { .id = 0x14f150f2, .name = "CX20722",
4890 ++ .patch = patch_conexant_auto },
4891 ++ { .id = 0x14f150f3, .name = "CX20723",
4892 ++ .patch = patch_conexant_auto },
4893 ++ { .id = 0x14f150f4, .name = "CX20724",
4894 ++ .patch = patch_conexant_auto },
4895 + { .id = 0x14f1510f, .name = "CX20751/2",
4896 + .patch = patch_conexant_auto },
4897 + { .id = 0x14f15110, .name = "CX20751/2",
4898 +@@ -4640,6 +4648,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
4899 + MODULE_ALIAS("snd-hda-codec-id:14f150ac");
4900 + MODULE_ALIAS("snd-hda-codec-id:14f150b8");
4901 + MODULE_ALIAS("snd-hda-codec-id:14f150b9");
4902 ++MODULE_ALIAS("snd-hda-codec-id:14f150f1");
4903 ++MODULE_ALIAS("snd-hda-codec-id:14f150f2");
4904 ++MODULE_ALIAS("snd-hda-codec-id:14f150f3");
4905 ++MODULE_ALIAS("snd-hda-codec-id:14f150f4");
4906 + MODULE_ALIAS("snd-hda-codec-id:14f1510f");
4907 + MODULE_ALIAS("snd-hda-codec-id:14f15110");
4908 + MODULE_ALIAS("snd-hda-codec-id:14f15111");
4909 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4910 +index b16a37f633c5..bf1f0abf415b 100644
4911 +--- a/sound/pci/hda/patch_realtek.c
4912 ++++ b/sound/pci/hda/patch_realtek.c
4913 +@@ -5412,6 +5412,7 @@ static const struct alc_fixup alc882_fixups[] = {
4914 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4915 + SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
4916 + SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
4917 ++ SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
4918 + SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
4919 + SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
4920 + SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
4921 +diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
4922 +index f9e2bdaf91f1..54059324d942 100644
4923 +--- a/sound/soc/codecs/cs4271.c
4924 ++++ b/sound/soc/codecs/cs4271.c
4925 +@@ -475,10 +475,10 @@ static int cs4271_probe(struct snd_soc_codec *codec)
4926 + if (gpio_nreset >= 0) {
4927 + /* Reset codec */
4928 + gpio_direction_output(gpio_nreset, 0);
4929 +- udelay(1);
4930 ++ mdelay(1);
4931 + gpio_set_value(gpio_nreset, 1);
4932 + /* Give the codec time to wake up */
4933 +- udelay(1);
4934 ++ mdelay(1);
4935 + }
4936 +
4937 + cs4271->gpio_nreset = gpio_nreset;
4938 +diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
4939 +index 3941f50bf187..90deecddb6ac 100644
4940 +--- a/sound/soc/codecs/wm8741.c
4941 ++++ b/sound/soc/codecs/wm8741.c
4942 +@@ -105,7 +105,7 @@ static struct {
4943 + };
4944 +
4945 + static unsigned int rates_11289[] = {
4946 +- 44100, 88235,
4947 ++ 44100, 88200,
4948 + };
4949 +
4950 + static struct snd_pcm_hw_constraint_list constraints_11289 = {
4951 +@@ -132,7 +132,7 @@ static struct snd_pcm_hw_constraint_list constraints_16384 = {
4952 + };
4953 +
4954 + static unsigned int rates_16934[] = {
4955 +- 44100, 88235,
4956 ++ 44100, 88200,
4957 + };
4958 +
4959 + static struct snd_pcm_hw_constraint_list constraints_16934 = {
4960 +@@ -150,7 +150,7 @@ static struct snd_pcm_hw_constraint_list constraints_18432 = {
4961 + };
4962 +
4963 + static unsigned int rates_22579[] = {
4964 +- 44100, 88235, 1764000
4965 ++ 44100, 88200, 176400
4966 + };
4967 +
4968 + static struct snd_pcm_hw_constraint_list constraints_22579 = {
4969 +@@ -168,7 +168,7 @@ static struct snd_pcm_hw_constraint_list constraints_24576 = {
4970 + };
4971 +
4972 + static unsigned int rates_36864[] = {
4973 +- 48000, 96000, 19200
4974 ++ 48000, 96000, 192000
4975 + };
4976 +
4977 + static struct snd_pcm_hw_constraint_list constraints_36864 = {
4978 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
4979 +index a3e4831bbe39..ed986e6d10c4 100644
4980 +--- a/sound/soc/codecs/wm8960.c
4981 ++++ b/sound/soc/codecs/wm8960.c
4982 +@@ -333,7 +333,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
4983 + { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
4984 + { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
4985 + { "Right Input Mixer", NULL, "RINPUT2" },
4986 +- { "Right Input Mixer", NULL, "LINPUT3" },
4987 ++ { "Right Input Mixer", NULL, "RINPUT3" },
4988 +
4989 + { "Left ADC", NULL, "Left Input Mixer" },
4990 + { "Right ADC", NULL, "Right Input Mixer" },
4991 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
4992 +index d9924d76f713..c93c573f967a 100644
4993 +--- a/sound/soc/codecs/wm8994.c
4994 ++++ b/sound/soc/codecs/wm8994.c
4995 +@@ -2636,7 +2636,7 @@ static struct {
4996 + };
4997 +
4998 + static int fs_ratios[] = {
4999 +- 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
5000 ++ 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
5001 + };
5002 +
5003 + static int bclk_divs[] = {
5004 +diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
5005 +index 319754cf6208..daf61abc3670 100644
5006 +--- a/sound/synth/emux/emux_oss.c
5007 ++++ b/sound/synth/emux/emux_oss.c
5008 +@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
5009 + if (snd_BUG_ON(!arg || !emu))
5010 + return -ENXIO;
5011 +
5012 +- mutex_lock(&emu->register_mutex);
5013 +-
5014 +- if (!snd_emux_inc_count(emu)) {
5015 +- mutex_unlock(&emu->register_mutex);
5016 ++ if (!snd_emux_inc_count(emu))
5017 + return -EFAULT;
5018 +- }
5019 +
5020 + memset(&callback, 0, sizeof(callback));
5021 + callback.owner = THIS_MODULE;
5022 +@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
5023 + if (p == NULL) {
5024 + snd_printk(KERN_ERR "can't create port\n");
5025 + snd_emux_dec_count(emu);
5026 +- mutex_unlock(&emu->register_mutex);
5027 + return -ENOMEM;
5028 + }
5029 +
5030 +@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
5031 + reset_port_mode(p, arg->seq_mode);
5032 +
5033 + snd_emux_reset_port(p);
5034 +-
5035 +- mutex_unlock(&emu->register_mutex);
5036 + return 0;
5037 + }
5038 +
5039 +@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
5040 + if (snd_BUG_ON(!emu))
5041 + return -ENXIO;
5042 +
5043 +- mutex_lock(&emu->register_mutex);
5044 + snd_emux_sounds_off_all(p);
5045 + snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
5046 + snd_seq_event_port_detach(p->chset.client, p->chset.port);
5047 + snd_emux_dec_count(emu);
5048 +
5049 +- mutex_unlock(&emu->register_mutex);
5050 + return 0;
5051 + }
5052 +
5053 +diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
5054 +index 7778b8e19782..a0209204ae48 100644
5055 +--- a/sound/synth/emux/emux_seq.c
5056 ++++ b/sound/synth/emux/emux_seq.c
5057 +@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
5058 + if (emu->voices)
5059 + snd_emux_terminate_all(emu);
5060 +
5061 +- mutex_lock(&emu->register_mutex);
5062 + if (emu->client >= 0) {
5063 + snd_seq_delete_kernel_client(emu->client);
5064 + emu->client = -1;
5065 + }
5066 +- mutex_unlock(&emu->register_mutex);
5067 + }
5068 +
5069 +
5070 +@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
5071 + /*
5072 + * increment usage count
5073 + */
5074 +-int
5075 +-snd_emux_inc_count(struct snd_emux *emu)
5076 ++static int
5077 ++__snd_emux_inc_count(struct snd_emux *emu)
5078 + {
5079 + emu->used++;
5080 + if (!try_module_get(emu->ops.owner))
5081 +@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
5082 + return 1;
5083 + }
5084 +
5085 ++int snd_emux_inc_count(struct snd_emux *emu)
5086 ++{
5087 ++ int ret;
5088 ++
5089 ++ mutex_lock(&emu->register_mutex);
5090 ++ ret = __snd_emux_inc_count(emu);
5091 ++ mutex_unlock(&emu->register_mutex);
5092 ++ return ret;
5093 ++}
5094 +
5095 + /*
5096 + * decrease usage count
5097 + */
5098 +-void
5099 +-snd_emux_dec_count(struct snd_emux *emu)
5100 ++static void
5101 ++__snd_emux_dec_count(struct snd_emux *emu)
5102 + {
5103 + module_put(emu->card->module);
5104 + emu->used--;
5105 +@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
5106 + module_put(emu->ops.owner);
5107 + }
5108 +
5109 ++void snd_emux_dec_count(struct snd_emux *emu)
5110 ++{
5111 ++ mutex_lock(&emu->register_mutex);
5112 ++ __snd_emux_dec_count(emu);
5113 ++ mutex_unlock(&emu->register_mutex);
5114 ++}
5115 +
5116 + /*
5117 + * Routine that is called upon a first use of a particular port
5118 +@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
5119 +
5120 + mutex_lock(&emu->register_mutex);
5121 + snd_emux_init_port(p);
5122 +- snd_emux_inc_count(emu);
5123 ++ __snd_emux_inc_count(emu);
5124 + mutex_unlock(&emu->register_mutex);
5125 + return 0;
5126 + }
5127 +@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
5128 +
5129 + mutex_lock(&emu->register_mutex);
5130 + snd_emux_sounds_off_all(p);
5131 +- snd_emux_dec_count(emu);
5132 ++ __snd_emux_dec_count(emu);
5133 + mutex_unlock(&emu->register_mutex);
5134 + return 0;
5135 + }
5136 +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
5137 +index 1e0798f6539b..851786ffa639 100644
5138 +--- a/sound/usb/mixer_maps.c
5139 ++++ b/sound/usb/mixer_maps.c
5140 +@@ -380,6 +380,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
5141 + .ignore_ctl_error = 1,
5142 + },
5143 + {
5144 ++ /* MAYA44 USB+ */
5145 ++ .id = USB_ID(0x2573, 0x0008),
5146 ++ .map = maya44_map,
5147 ++ },
5148 ++ {
5149 + /* KEF X300A */
5150 + .id = USB_ID(0x27ac, 0x1000),
5151 + .map = scms_usb3318_map,