From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.1 commit in: /
Date: Mon, 21 Sep 2015 22:16:17
Message-Id: 1442873765.8fe6c9dc74e88b375ffa56515338a480f17ef3d1.mpagano@gentoo
1 commit: 8fe6c9dc74e88b375ffa56515338a480f17ef3d1
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Mon Sep 21 22:16:05 2015 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Mon Sep 21 22:16:05 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8fe6c9dc
7
8 Linux patch 4.1.8
9
10 0000_README | 4 +
11 1007_linux-4.1.8.patch | 4367 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4371 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index ad474e3..b88684d 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -71,6 +71,10 @@ Patch: 1006_linux-4.1.7.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.1.7
21
22 +Patch: 1007_linux-4.1.8.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.1.8
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1007_linux-4.1.8.patch b/1007_linux-4.1.8.patch
31 new file mode 100644
32 index 0000000..a41c476
33 --- /dev/null
34 +++ b/1007_linux-4.1.8.patch
35 @@ -0,0 +1,4367 @@
36 +diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
37 +index 9aae5bfb9908..06beefbcf061 100644
38 +--- a/Documentation/ABI/testing/configfs-usb-gadget-loopback
39 ++++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
40 +@@ -5,4 +5,4 @@ Description:
41 + The attributes:
42 +
43 + qlen - depth of loopback queue
44 +- bulk_buflen - buffer length
45 ++ buflen - buffer length
46 +diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
47 +index 29477c319f61..bc7ff731aa0c 100644
48 +--- a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
49 ++++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
50 +@@ -9,4 +9,4 @@ Description:
51 + isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss)
52 + isoc_mult - 0..2 (hs/ss only)
53 + isoc_maxburst - 0..15 (ss only)
54 +- qlen - buffer length
55 ++ buflen - buffer length
56 +diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
57 +index f45b2bf4b41d..820664af8f6a 100644
58 +--- a/Documentation/usb/gadget-testing.txt
59 ++++ b/Documentation/usb/gadget-testing.txt
60 +@@ -237,9 +237,7 @@ Testing the LOOPBACK function
61 + -----------------------------
62 +
63 + device: run the gadget
64 +-host: test-usb
65 +-
66 +-http://www.linux-usb.org/usbtest/testusb.c
67 ++host: test-usb (tools/usb/testusb.c)
68 +
69 + 8. MASS STORAGE function
70 + ========================
71 +@@ -588,9 +586,8 @@ Testing the SOURCESINK function
72 + -------------------------------
73 +
74 + device: run the gadget
75 +-host: test-usb
76 ++host: test-usb (tools/usb/testusb.c)
77 +
78 +-http://www.linux-usb.org/usbtest/testusb.c
79 +
80 + 16. UAC1 function
81 + =================
82 +diff --git a/Makefile b/Makefile
83 +index b8591e5f79b8..dbf3baa5fabb 100644
84 +--- a/Makefile
85 ++++ b/Makefile
86 +@@ -1,6 +1,6 @@
87 + VERSION = 4
88 + PATCHLEVEL = 1
89 +-SUBLEVEL = 7
90 ++SUBLEVEL = 8
91 + EXTRAVERSION =
92 + NAME = Series 4800
93 +
94 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
95 +index 45df48ba0b12..19f4cc634b0e 100644
96 +--- a/arch/arm/Kconfig
97 ++++ b/arch/arm/Kconfig
98 +@@ -538,6 +538,7 @@ config ARCH_ORION5X
99 + select MVEBU_MBUS
100 + select PCI
101 + select PLAT_ORION_LEGACY
102 ++ select MULTI_IRQ_HANDLER
103 + help
104 + Support for the following Marvell Orion 5x series SoCs:
105 + Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
106 +diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
107 +index 0b9906880c0c..75aba40c69e1 100644
108 +--- a/arch/arm/boot/dts/exynos3250-rinato.dts
109 ++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
110 +@@ -181,7 +181,7 @@
111 +
112 + display-timings {
113 + timing-0 {
114 +- clock-frequency = <0>;
115 ++ clock-frequency = <4600000>;
116 + hactive = <320>;
117 + vactive = <320>;
118 + hfront-porch = <1>;
119 +diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
120 +index 165968d51d8f..8eca5878a877 100644
121 +--- a/arch/arm/boot/dts/rk3288.dtsi
122 ++++ b/arch/arm/boot/dts/rk3288.dtsi
123 +@@ -584,7 +584,7 @@
124 + compatible = "rockchip,rk3288-wdt", "snps,dw-wdt";
125 + reg = <0xff800000 0x100>;
126 + clocks = <&cru PCLK_WDT>;
127 +- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
128 ++ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
129 + status = "disabled";
130 + };
131 +
132 +diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
133 +index 57d5df0c1fbd..7581e036bda6 100644
134 +--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
135 ++++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
136 +@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
137 + .dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT,
138 + .wkdep_srcs = l4per2_wkup_sleep_deps,
139 + .sleepdep_srcs = l4per2_wkup_sleep_deps,
140 +- .flags = CLKDM_CAN_HWSUP_SWSUP,
141 ++ .flags = CLKDM_CAN_SWSUP,
142 + };
143 +
144 + static struct clockdomain mpu0_7xx_clkdm = {
145 +diff --git a/arch/arm/mach-orion5x/include/mach/irqs.h b/arch/arm/mach-orion5x/include/mach/irqs.h
146 +index a6fa9d8f12d8..2431d9923427 100644
147 +--- a/arch/arm/mach-orion5x/include/mach/irqs.h
148 ++++ b/arch/arm/mach-orion5x/include/mach/irqs.h
149 +@@ -16,42 +16,42 @@
150 + /*
151 + * Orion Main Interrupt Controller
152 + */
153 +-#define IRQ_ORION5X_BRIDGE 0
154 +-#define IRQ_ORION5X_DOORBELL_H2C 1
155 +-#define IRQ_ORION5X_DOORBELL_C2H 2
156 +-#define IRQ_ORION5X_UART0 3
157 +-#define IRQ_ORION5X_UART1 4
158 +-#define IRQ_ORION5X_I2C 5
159 +-#define IRQ_ORION5X_GPIO_0_7 6
160 +-#define IRQ_ORION5X_GPIO_8_15 7
161 +-#define IRQ_ORION5X_GPIO_16_23 8
162 +-#define IRQ_ORION5X_GPIO_24_31 9
163 +-#define IRQ_ORION5X_PCIE0_ERR 10
164 +-#define IRQ_ORION5X_PCIE0_INT 11
165 +-#define IRQ_ORION5X_USB1_CTRL 12
166 +-#define IRQ_ORION5X_DEV_BUS_ERR 14
167 +-#define IRQ_ORION5X_PCI_ERR 15
168 +-#define IRQ_ORION5X_USB_BR_ERR 16
169 +-#define IRQ_ORION5X_USB0_CTRL 17
170 +-#define IRQ_ORION5X_ETH_RX 18
171 +-#define IRQ_ORION5X_ETH_TX 19
172 +-#define IRQ_ORION5X_ETH_MISC 20
173 +-#define IRQ_ORION5X_ETH_SUM 21
174 +-#define IRQ_ORION5X_ETH_ERR 22
175 +-#define IRQ_ORION5X_IDMA_ERR 23
176 +-#define IRQ_ORION5X_IDMA_0 24
177 +-#define IRQ_ORION5X_IDMA_1 25
178 +-#define IRQ_ORION5X_IDMA_2 26
179 +-#define IRQ_ORION5X_IDMA_3 27
180 +-#define IRQ_ORION5X_CESA 28
181 +-#define IRQ_ORION5X_SATA 29
182 +-#define IRQ_ORION5X_XOR0 30
183 +-#define IRQ_ORION5X_XOR1 31
184 ++#define IRQ_ORION5X_BRIDGE (1 + 0)
185 ++#define IRQ_ORION5X_DOORBELL_H2C (1 + 1)
186 ++#define IRQ_ORION5X_DOORBELL_C2H (1 + 2)
187 ++#define IRQ_ORION5X_UART0 (1 + 3)
188 ++#define IRQ_ORION5X_UART1 (1 + 4)
189 ++#define IRQ_ORION5X_I2C (1 + 5)
190 ++#define IRQ_ORION5X_GPIO_0_7 (1 + 6)
191 ++#define IRQ_ORION5X_GPIO_8_15 (1 + 7)
192 ++#define IRQ_ORION5X_GPIO_16_23 (1 + 8)
193 ++#define IRQ_ORION5X_GPIO_24_31 (1 + 9)
194 ++#define IRQ_ORION5X_PCIE0_ERR (1 + 10)
195 ++#define IRQ_ORION5X_PCIE0_INT (1 + 11)
196 ++#define IRQ_ORION5X_USB1_CTRL (1 + 12)
197 ++#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14)
198 ++#define IRQ_ORION5X_PCI_ERR (1 + 15)
199 ++#define IRQ_ORION5X_USB_BR_ERR (1 + 16)
200 ++#define IRQ_ORION5X_USB0_CTRL (1 + 17)
201 ++#define IRQ_ORION5X_ETH_RX (1 + 18)
202 ++#define IRQ_ORION5X_ETH_TX (1 + 19)
203 ++#define IRQ_ORION5X_ETH_MISC (1 + 20)
204 ++#define IRQ_ORION5X_ETH_SUM (1 + 21)
205 ++#define IRQ_ORION5X_ETH_ERR (1 + 22)
206 ++#define IRQ_ORION5X_IDMA_ERR (1 + 23)
207 ++#define IRQ_ORION5X_IDMA_0 (1 + 24)
208 ++#define IRQ_ORION5X_IDMA_1 (1 + 25)
209 ++#define IRQ_ORION5X_IDMA_2 (1 + 26)
210 ++#define IRQ_ORION5X_IDMA_3 (1 + 27)
211 ++#define IRQ_ORION5X_CESA (1 + 28)
212 ++#define IRQ_ORION5X_SATA (1 + 29)
213 ++#define IRQ_ORION5X_XOR0 (1 + 30)
214 ++#define IRQ_ORION5X_XOR1 (1 + 31)
215 +
216 + /*
217 + * Orion General Purpose Pins
218 + */
219 +-#define IRQ_ORION5X_GPIO_START 32
220 ++#define IRQ_ORION5X_GPIO_START 33
221 + #define NR_GPIO_IRQS 32
222 +
223 + #define NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS)
224 +diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
225 +index cd4bac4d7e43..086ecb87d885 100644
226 +--- a/arch/arm/mach-orion5x/irq.c
227 ++++ b/arch/arm/mach-orion5x/irq.c
228 +@@ -42,7 +42,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
229 + stat = readl_relaxed(MAIN_IRQ_CAUSE);
230 + stat &= readl_relaxed(MAIN_IRQ_MASK);
231 + if (stat) {
232 +- unsigned int hwirq = __fls(stat);
233 ++ unsigned int hwirq = 1 + __fls(stat);
234 + handle_IRQ(hwirq, regs);
235 + return;
236 + }
237 +@@ -51,7 +51,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
238 +
239 + void __init orion5x_init_irq(void)
240 + {
241 +- orion_irq_init(0, MAIN_IRQ_MASK);
242 ++ orion_irq_init(1, MAIN_IRQ_MASK);
243 +
244 + #ifdef CONFIG_MULTI_IRQ_HANDLER
245 + set_handle_irq(orion5x_legacy_handle_irq);
246 +diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
247 +index 2e6ab67e2284..611a5f96d3ca 100644
248 +--- a/arch/arm/mach-rockchip/platsmp.c
249 ++++ b/arch/arm/mach-rockchip/platsmp.c
250 +@@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu)
251 + static int pmu_set_power_domain(int pd, bool on)
252 + {
253 + u32 val = (on) ? 0 : BIT(pd);
254 ++ struct reset_control *rstc = rockchip_get_core_reset(pd);
255 + int ret;
256 +
257 ++ if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
258 ++ pr_err("%s: could not get reset control for core %d\n",
259 ++ __func__, pd);
260 ++ return PTR_ERR(rstc);
261 ++ }
262 ++
263 + /*
264 + * We need to soft reset the cpu when we turn off the cpu power domain,
265 + * or else the active processors might be stalled when the individual
266 + * processor is powered down.
267 + */
268 +- if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
269 +- struct reset_control *rstc = rockchip_get_core_reset(pd);
270 +-
271 +- if (IS_ERR(rstc)) {
272 +- pr_err("%s: could not get reset control for core %d\n",
273 +- __func__, pd);
274 +- return PTR_ERR(rstc);
275 +- }
276 +-
277 +- if (on)
278 +- reset_control_deassert(rstc);
279 +- else
280 +- reset_control_assert(rstc);
281 +-
282 +- reset_control_put(rstc);
283 +- }
284 ++ if (!IS_ERR(rstc) && !on)
285 ++ reset_control_assert(rstc);
286 +
287 + ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
288 + if (ret < 0) {
289 +@@ -112,6 +105,12 @@ static int pmu_set_power_domain(int pd, bool on)
290 + }
291 + }
292 +
293 ++ if (!IS_ERR(rstc)) {
294 ++ if (on)
295 ++ reset_control_deassert(rstc);
296 ++ reset_control_put(rstc);
297 ++ }
298 ++
299 + return 0;
300 + }
301 +
302 +@@ -147,8 +146,12 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
303 + * the mailbox:
304 + * sram_base_addr + 4: 0xdeadbeaf
305 + * sram_base_addr + 8: start address for pc
306 ++ * The cpu0 need to wait the other cpus other than cpu0 entering
307 ++ * the wfe state.The wait time is affected by many aspects.
308 ++ * (e.g: cpu frequency, bootrom frequency, sram frequency, ...)
309 + * */
310 +- udelay(10);
311 ++ mdelay(1); /* ensure the cpus other than cpu0 to startup */
312 ++
313 + writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
314 + writel(0xDEADBEAF, sram_base_addr + 4);
315 + dsb_sev();
316 +diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
317 +index b027a89737b6..c6d601cc9764 100644
318 +--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
319 ++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
320 +@@ -421,14 +421,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
321 + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
322 + v = pte & ~HPTE_V_HVLOCK;
323 + if (v & HPTE_V_VALID) {
324 +- u64 pte1;
325 +-
326 +- pte1 = be64_to_cpu(hpte[1]);
327 + hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
328 +- rb = compute_tlbie_rb(v, pte1, pte_index);
329 ++ rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
330 + do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
331 +- /* Read PTE low word after tlbie to get final R/C values */
332 +- remove_revmap_chain(kvm, pte_index, rev, v, pte1);
333 ++ /*
334 ++ * The reference (R) and change (C) bits in a HPT
335 ++ * entry can be set by hardware at any time up until
336 ++ * the HPTE is invalidated and the TLB invalidation
337 ++ * sequence has completed. This means that when
338 ++ * removing a HPTE, we need to re-read the HPTE after
339 ++ * the invalidation sequence has completed in order to
340 ++ * obtain reliable values of R and C.
341 ++ */
342 ++ remove_revmap_chain(kvm, pte_index, rev, v,
343 ++ be64_to_cpu(hpte[1]));
344 + }
345 + r = rev->guest_rpte & ~HPTE_GR_RESERVED;
346 + note_hpte_modification(kvm, rev);
347 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
348 +index 4d70df26c402..3b2d2c5b6376 100644
349 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
350 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
351 +@@ -1127,6 +1127,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
352 + cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
353 + bne 3f
354 + lbz r0, HSTATE_HOST_IPI(r13)
355 ++ cmpwi r0, 0
356 + beq 4f
357 + b guest_exit_cont
358 + 3:
359 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
360 +index 7262fe438c99..1942f22e6694 100644
361 +--- a/arch/s390/kernel/setup.c
362 ++++ b/arch/s390/kernel/setup.c
363 +@@ -683,7 +683,7 @@ static void __init setup_memory(void)
364 + /*
365 + * Setup hardware capabilities.
366 + */
367 +-static void __init setup_hwcaps(void)
368 ++static int __init setup_hwcaps(void)
369 + {
370 + static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
371 + struct cpuid cpu_id;
372 +@@ -749,9 +749,11 @@ static void __init setup_hwcaps(void)
373 + elf_hwcap |= HWCAP_S390_TE;
374 +
375 + /*
376 +- * Vector extension HWCAP_S390_VXRS is bit 11.
377 ++ * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
378 ++ * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
379 ++ * instead of facility bit 129.
380 + */
381 +- if (test_facility(129))
382 ++ if (MACHINE_HAS_VX)
383 + elf_hwcap |= HWCAP_S390_VXRS;
384 + get_cpu_id(&cpu_id);
385 + add_device_randomness(&cpu_id, sizeof(cpu_id));
386 +@@ -788,7 +790,9 @@ static void __init setup_hwcaps(void)
387 + strcpy(elf_platform, "z13");
388 + break;
389 + }
390 ++ return 0;
391 + }
392 ++arch_initcall(setup_hwcaps);
393 +
394 + /*
395 + * Add system information as device randomness
396 +@@ -871,11 +875,6 @@ void __init setup_arch(char **cmdline_p)
397 + cpu_init();
398 +
399 + /*
400 +- * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
401 +- */
402 +- setup_hwcaps();
403 +-
404 +- /*
405 + * Create kernel page tables and switch to virtual addressing.
406 + */
407 + paging_init();
408 +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
409 +index 2079baf06bdd..daf8d2b9a217 100644
410 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
411 ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
412 +@@ -294,6 +294,7 @@ static struct ahash_alg ghash_async_alg = {
413 + .cra_name = "ghash",
414 + .cra_driver_name = "ghash-clmulni",
415 + .cra_priority = 400,
416 ++ .cra_ctxsize = sizeof(struct ghash_async_ctx),
417 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
418 + .cra_blocksize = GHASH_BLOCK_SIZE,
419 + .cra_type = &crypto_ahash_type,
420 +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
421 +index a0bf89fd2647..4e10d73cf018 100644
422 +--- a/arch/x86/include/asm/desc.h
423 ++++ b/arch/x86/include/asm/desc.h
424 +@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
425 + set_ldt(NULL, 0);
426 + }
427 +
428 +-/*
429 +- * load one particular LDT into the current CPU
430 +- */
431 +-static inline void load_LDT_nolock(mm_context_t *pc)
432 +-{
433 +- set_ldt(pc->ldt, pc->size);
434 +-}
435 +-
436 +-static inline void load_LDT(mm_context_t *pc)
437 +-{
438 +- preempt_disable();
439 +- load_LDT_nolock(pc);
440 +- preempt_enable();
441 +-}
442 +-
443 + static inline unsigned long get_desc_base(const struct desc_struct *desc)
444 + {
445 + return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
446 +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
447 +index 09b9620a73b4..364d27481a52 100644
448 +--- a/arch/x86/include/asm/mmu.h
449 ++++ b/arch/x86/include/asm/mmu.h
450 +@@ -9,8 +9,7 @@
451 + * we put the segment information here.
452 + */
453 + typedef struct {
454 +- void *ldt;
455 +- int size;
456 ++ struct ldt_struct *ldt;
457 +
458 + #ifdef CONFIG_X86_64
459 + /* True if mm supports a task running in 32 bit compatibility mode. */
460 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
461 +index e997f70f80c4..80d67dd80351 100644
462 +--- a/arch/x86/include/asm/mmu_context.h
463 ++++ b/arch/x86/include/asm/mmu_context.h
464 +@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
465 + #endif
466 +
467 + /*
468 ++ * ldt_structs can be allocated, used, and freed, but they are never
469 ++ * modified while live.
470 ++ */
471 ++struct ldt_struct {
472 ++ /*
473 ++ * Xen requires page-aligned LDTs with special permissions. This is
474 ++ * needed to prevent us from installing evil descriptors such as
475 ++ * call gates. On native, we could merge the ldt_struct and LDT
476 ++ * allocations, but it's not worth trying to optimize.
477 ++ */
478 ++ struct desc_struct *entries;
479 ++ int size;
480 ++};
481 ++
482 ++static inline void load_mm_ldt(struct mm_struct *mm)
483 ++{
484 ++ struct ldt_struct *ldt;
485 ++
486 ++ /* lockless_dereference synchronizes with smp_store_release */
487 ++ ldt = lockless_dereference(mm->context.ldt);
488 ++
489 ++ /*
490 ++ * Any change to mm->context.ldt is followed by an IPI to all
491 ++ * CPUs with the mm active. The LDT will not be freed until
492 ++ * after the IPI is handled by all such CPUs. This means that,
493 ++ * if the ldt_struct changes before we return, the values we see
494 ++ * will be safe, and the new values will be loaded before we run
495 ++ * any user code.
496 ++ *
497 ++ * NB: don't try to convert this to use RCU without extreme care.
498 ++ * We would still need IRQs off, because we don't want to change
499 ++ * the local LDT after an IPI loaded a newer value than the one
500 ++ * that we can see.
501 ++ */
502 ++
503 ++ if (unlikely(ldt))
504 ++ set_ldt(ldt->entries, ldt->size);
505 ++ else
506 ++ clear_LDT();
507 ++
508 ++ DEBUG_LOCKS_WARN_ON(preemptible());
509 ++}
510 ++
511 ++/*
512 + * Used for LDT copy/destruction.
513 + */
514 + int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
515 +@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
516 + * was called and then modify_ldt changed
517 + * prev->context.ldt but suppressed an IPI to this CPU.
518 + * In this case, prev->context.ldt != NULL, because we
519 +- * never free an LDT while the mm still exists. That
520 +- * means that next->context.ldt != prev->context.ldt,
521 +- * because mms never share an LDT.
522 ++ * never set context.ldt to NULL while the mm still
523 ++ * exists. That means that next->context.ldt !=
524 ++ * prev->context.ldt, because mms never share an LDT.
525 + */
526 + if (unlikely(prev->context.ldt != next->context.ldt))
527 +- load_LDT_nolock(&next->context);
528 ++ load_mm_ldt(next);
529 + }
530 + #ifdef CONFIG_SMP
531 + else {
532 +@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
533 + load_cr3(next->pgd);
534 + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
535 + load_mm_cr4(next);
536 +- load_LDT_nolock(&next->context);
537 ++ load_mm_ldt(next);
538 + }
539 + }
540 + #endif
541 +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
542 +index dbe76a14c3c9..07bea80223f6 100644
543 +--- a/arch/x86/kernel/acpi/boot.c
544 ++++ b/arch/x86/kernel/acpi/boot.c
545 +@@ -489,6 +489,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
546 + polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
547 +
548 + mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
549 ++ acpi_penalize_sci_irq(bus_irq, trigger, polarity);
550 +
551 + /*
552 + * stash over-ride to indicate we've been here
553 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
554 +index a62cf04dac8a..205e0f3df501 100644
555 +--- a/arch/x86/kernel/cpu/common.c
556 ++++ b/arch/x86/kernel/cpu/common.c
557 +@@ -1434,7 +1434,7 @@ void cpu_init(void)
558 + load_sp0(t, &current->thread);
559 + set_tss_desc(cpu, t);
560 + load_TR_desc();
561 +- load_LDT(&init_mm.context);
562 ++ load_mm_ldt(&init_mm);
563 +
564 + clear_all_debug_regs();
565 + dbg_restore_debug_regs();
566 +@@ -1483,7 +1483,7 @@ void cpu_init(void)
567 + load_sp0(t, thread);
568 + set_tss_desc(cpu, t);
569 + load_TR_desc();
570 +- load_LDT(&init_mm.context);
571 ++ load_mm_ldt(&init_mm);
572 +
573 + t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
574 +
575 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
576 +index b4a41cf030ed..e166d833cf63 100644
577 +--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
578 ++++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
579 +@@ -116,6 +116,27 @@ void mce_intel_hcpu_update(unsigned long cpu)
580 + per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
581 + }
582 +
583 ++static void cmci_toggle_interrupt_mode(bool on)
584 ++{
585 ++ unsigned long flags, *owned;
586 ++ int bank;
587 ++ u64 val;
588 ++
589 ++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
590 ++ owned = this_cpu_ptr(mce_banks_owned);
591 ++ for_each_set_bit(bank, owned, MAX_NR_BANKS) {
592 ++ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
593 ++
594 ++ if (on)
595 ++ val |= MCI_CTL2_CMCI_EN;
596 ++ else
597 ++ val &= ~MCI_CTL2_CMCI_EN;
598 ++
599 ++ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
600 ++ }
601 ++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
602 ++}
603 ++
604 + unsigned long cmci_intel_adjust_timer(unsigned long interval)
605 + {
606 + if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
607 +@@ -145,7 +166,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
608 + */
609 + if (!atomic_read(&cmci_storm_on_cpus)) {
610 + __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
611 +- cmci_reenable();
612 ++ cmci_toggle_interrupt_mode(true);
613 + cmci_recheck();
614 + }
615 + return CMCI_POLL_INTERVAL;
616 +@@ -156,22 +177,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
617 + }
618 + }
619 +
620 +-static void cmci_storm_disable_banks(void)
621 +-{
622 +- unsigned long flags, *owned;
623 +- int bank;
624 +- u64 val;
625 +-
626 +- raw_spin_lock_irqsave(&cmci_discover_lock, flags);
627 +- owned = this_cpu_ptr(mce_banks_owned);
628 +- for_each_set_bit(bank, owned, MAX_NR_BANKS) {
629 +- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
630 +- val &= ~MCI_CTL2_CMCI_EN;
631 +- wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
632 +- }
633 +- raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
634 +-}
635 +-
636 + static bool cmci_storm_detect(void)
637 + {
638 + unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
639 +@@ -193,7 +198,7 @@ static bool cmci_storm_detect(void)
640 + if (cnt <= CMCI_STORM_THRESHOLD)
641 + return false;
642 +
643 +- cmci_storm_disable_banks();
644 ++ cmci_toggle_interrupt_mode(false);
645 + __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
646 + r = atomic_add_return(1, &cmci_storm_on_cpus);
647 + mce_timer_kick(CMCI_STORM_INTERVAL);
648 +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
649 +index aa4e3a74e541..4cc98a4e8ea9 100644
650 +--- a/arch/x86/kernel/cpu/perf_event.c
651 ++++ b/arch/x86/kernel/cpu/perf_event.c
652 +@@ -2170,21 +2170,25 @@ static unsigned long get_segment_base(unsigned int segment)
653 + int idx = segment >> 3;
654 +
655 + if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
656 ++ struct ldt_struct *ldt;
657 ++
658 + if (idx > LDT_ENTRIES)
659 + return 0;
660 +
661 +- if (idx > current->active_mm->context.size)
662 ++ /* IRQs are off, so this synchronizes with smp_store_release */
663 ++ ldt = lockless_dereference(current->active_mm->context.ldt);
664 ++ if (!ldt || idx > ldt->size)
665 + return 0;
666 +
667 +- desc = current->active_mm->context.ldt;
668 ++ desc = &ldt->entries[idx];
669 + } else {
670 + if (idx > GDT_ENTRIES)
671 + return 0;
672 +
673 +- desc = raw_cpu_ptr(gdt_page.gdt);
674 ++ desc = raw_cpu_ptr(gdt_page.gdt) + idx;
675 + }
676 +
677 +- return get_desc_base(desc + idx);
678 ++ return get_desc_base(desc);
679 + }
680 +
681 + #ifdef CONFIG_COMPAT
682 +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
683 +index c37886d759cc..2bcc0525f1c1 100644
684 +--- a/arch/x86/kernel/ldt.c
685 ++++ b/arch/x86/kernel/ldt.c
686 +@@ -12,6 +12,7 @@
687 + #include <linux/string.h>
688 + #include <linux/mm.h>
689 + #include <linux/smp.h>
690 ++#include <linux/slab.h>
691 + #include <linux/vmalloc.h>
692 + #include <linux/uaccess.h>
693 +
694 +@@ -20,82 +21,82 @@
695 + #include <asm/mmu_context.h>
696 + #include <asm/syscalls.h>
697 +
698 +-#ifdef CONFIG_SMP
699 ++/* context.lock is held for us, so we don't need any locking. */
700 + static void flush_ldt(void *current_mm)
701 + {
702 +- if (current->active_mm == current_mm)
703 +- load_LDT(&current->active_mm->context);
704 ++ mm_context_t *pc;
705 ++
706 ++ if (current->active_mm != current_mm)
707 ++ return;
708 ++
709 ++ pc = &current->active_mm->context;
710 ++ set_ldt(pc->ldt->entries, pc->ldt->size);
711 + }
712 +-#endif
713 +
714 +-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
715 ++/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
716 ++static struct ldt_struct *alloc_ldt_struct(int size)
717 + {
718 +- void *oldldt, *newldt;
719 +- int oldsize;
720 +-
721 +- if (mincount <= pc->size)
722 +- return 0;
723 +- oldsize = pc->size;
724 +- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
725 +- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
726 +- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
727 +- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
728 ++ struct ldt_struct *new_ldt;
729 ++ int alloc_size;
730 ++
731 ++ if (size > LDT_ENTRIES)
732 ++ return NULL;
733 ++
734 ++ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
735 ++ if (!new_ldt)
736 ++ return NULL;
737 ++
738 ++ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
739 ++ alloc_size = size * LDT_ENTRY_SIZE;
740 ++
741 ++ /*
742 ++ * Xen is very picky: it requires a page-aligned LDT that has no
743 ++ * trailing nonzero bytes in any page that contains LDT descriptors.
744 ++ * Keep it simple: zero the whole allocation and never allocate less
745 ++ * than PAGE_SIZE.
746 ++ */
747 ++ if (alloc_size > PAGE_SIZE)
748 ++ new_ldt->entries = vzalloc(alloc_size);
749 + else
750 +- newldt = (void *)__get_free_page(GFP_KERNEL);
751 +-
752 +- if (!newldt)
753 +- return -ENOMEM;
754 ++ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
755 +
756 +- if (oldsize)
757 +- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
758 +- oldldt = pc->ldt;
759 +- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
760 +- (mincount - oldsize) * LDT_ENTRY_SIZE);
761 ++ if (!new_ldt->entries) {
762 ++ kfree(new_ldt);
763 ++ return NULL;
764 ++ }
765 +
766 +- paravirt_alloc_ldt(newldt, mincount);
767 ++ new_ldt->size = size;
768 ++ return new_ldt;
769 ++}
770 +
771 +-#ifdef CONFIG_X86_64
772 +- /* CHECKME: Do we really need this ? */
773 +- wmb();
774 +-#endif
775 +- pc->ldt = newldt;
776 +- wmb();
777 +- pc->size = mincount;
778 +- wmb();
779 +-
780 +- if (reload) {
781 +-#ifdef CONFIG_SMP
782 +- preempt_disable();
783 +- load_LDT(pc);
784 +- if (!cpumask_equal(mm_cpumask(current->mm),
785 +- cpumask_of(smp_processor_id())))
786 +- smp_call_function(flush_ldt, current->mm, 1);
787 +- preempt_enable();
788 +-#else
789 +- load_LDT(pc);
790 +-#endif
791 +- }
792 +- if (oldsize) {
793 +- paravirt_free_ldt(oldldt, oldsize);
794 +- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
795 +- vfree(oldldt);
796 +- else
797 +- put_page(virt_to_page(oldldt));
798 +- }
799 +- return 0;
800 ++/* After calling this, the LDT is immutable. */
801 ++static void finalize_ldt_struct(struct ldt_struct *ldt)
802 ++{
803 ++ paravirt_alloc_ldt(ldt->entries, ldt->size);
804 + }
805 +
806 +-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
807 ++/* context.lock is held */
808 ++static void install_ldt(struct mm_struct *current_mm,
809 ++ struct ldt_struct *ldt)
810 + {
811 +- int err = alloc_ldt(new, old->size, 0);
812 +- int i;
813 ++ /* Synchronizes with lockless_dereference in load_mm_ldt. */
814 ++ smp_store_release(&current_mm->context.ldt, ldt);
815 ++
816 ++ /* Activate the LDT for all CPUs using current_mm. */
817 ++ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
818 ++}
819 +
820 +- if (err < 0)
821 +- return err;
822 ++static void free_ldt_struct(struct ldt_struct *ldt)
823 ++{
824 ++ if (likely(!ldt))
825 ++ return;
826 +
827 +- for (i = 0; i < old->size; i++)
828 +- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
829 +- return 0;
830 ++ paravirt_free_ldt(ldt->entries, ldt->size);
831 ++ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
832 ++ vfree(ldt->entries);
833 ++ else
834 ++ kfree(ldt->entries);
835 ++ kfree(ldt);
836 + }
837 +
838 + /*
839 +@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
840 + */
841 + int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
842 + {
843 ++ struct ldt_struct *new_ldt;
844 + struct mm_struct *old_mm;
845 + int retval = 0;
846 +
847 + mutex_init(&mm->context.lock);
848 +- mm->context.size = 0;
849 + old_mm = current->mm;
850 +- if (old_mm && old_mm->context.size > 0) {
851 +- mutex_lock(&old_mm->context.lock);
852 +- retval = copy_ldt(&mm->context, &old_mm->context);
853 +- mutex_unlock(&old_mm->context.lock);
854 ++ if (!old_mm) {
855 ++ mm->context.ldt = NULL;
856 ++ return 0;
857 + }
858 ++
859 ++ mutex_lock(&old_mm->context.lock);
860 ++ if (!old_mm->context.ldt) {
861 ++ mm->context.ldt = NULL;
862 ++ goto out_unlock;
863 ++ }
864 ++
865 ++ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
866 ++ if (!new_ldt) {
867 ++ retval = -ENOMEM;
868 ++ goto out_unlock;
869 ++ }
870 ++
871 ++ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
872 ++ new_ldt->size * LDT_ENTRY_SIZE);
873 ++ finalize_ldt_struct(new_ldt);
874 ++
875 ++ mm->context.ldt = new_ldt;
876 ++
877 ++out_unlock:
878 ++ mutex_unlock(&old_mm->context.lock);
879 + return retval;
880 + }
881 +
882 +@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
883 + */
884 + void destroy_context(struct mm_struct *mm)
885 + {
886 +- if (mm->context.size) {
887 +-#ifdef CONFIG_X86_32
888 +- /* CHECKME: Can this ever happen ? */
889 +- if (mm == current->active_mm)
890 +- clear_LDT();
891 +-#endif
892 +- paravirt_free_ldt(mm->context.ldt, mm->context.size);
893 +- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
894 +- vfree(mm->context.ldt);
895 +- else
896 +- put_page(virt_to_page(mm->context.ldt));
897 +- mm->context.size = 0;
898 +- }
899 ++ free_ldt_struct(mm->context.ldt);
900 ++ mm->context.ldt = NULL;
901 + }
902 +
903 + static int read_ldt(void __user *ptr, unsigned long bytecount)
904 + {
905 +- int err;
906 ++ int retval;
907 + unsigned long size;
908 + struct mm_struct *mm = current->mm;
909 +
910 +- if (!mm->context.size)
911 +- return 0;
912 ++ mutex_lock(&mm->context.lock);
913 ++
914 ++ if (!mm->context.ldt) {
915 ++ retval = 0;
916 ++ goto out_unlock;
917 ++ }
918 ++
919 + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
920 + bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
921 +
922 +- mutex_lock(&mm->context.lock);
923 +- size = mm->context.size * LDT_ENTRY_SIZE;
924 ++ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
925 + if (size > bytecount)
926 + size = bytecount;
927 +
928 +- err = 0;
929 +- if (copy_to_user(ptr, mm->context.ldt, size))
930 +- err = -EFAULT;
931 +- mutex_unlock(&mm->context.lock);
932 +- if (err < 0)
933 +- goto error_return;
934 ++ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
935 ++ retval = -EFAULT;
936 ++ goto out_unlock;
937 ++ }
938 ++
939 + if (size != bytecount) {
940 +- /* zero-fill the rest */
941 +- if (clear_user(ptr + size, bytecount - size) != 0) {
942 +- err = -EFAULT;
943 +- goto error_return;
944 ++ /* Zero-fill the rest and pretend we read bytecount bytes. */
945 ++ if (clear_user(ptr + size, bytecount - size)) {
946 ++ retval = -EFAULT;
947 ++ goto out_unlock;
948 + }
949 + }
950 +- return bytecount;
951 +-error_return:
952 +- return err;
953 ++ retval = bytecount;
954 ++
955 ++out_unlock:
956 ++ mutex_unlock(&mm->context.lock);
957 ++ return retval;
958 + }
959 +
960 + static int read_default_ldt(void __user *ptr, unsigned long bytecount)
961 +@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
962 + struct desc_struct ldt;
963 + int error;
964 + struct user_desc ldt_info;
965 ++ int oldsize, newsize;
966 ++ struct ldt_struct *new_ldt, *old_ldt;
967 +
968 + error = -EINVAL;
969 + if (bytecount != sizeof(ldt_info))
970 +@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
971 + goto out;
972 + }
973 +
974 +- mutex_lock(&mm->context.lock);
975 +- if (ldt_info.entry_number >= mm->context.size) {
976 +- error = alloc_ldt(&current->mm->context,
977 +- ldt_info.entry_number + 1, 1);
978 +- if (error < 0)
979 +- goto out_unlock;
980 +- }
981 +-
982 +- /* Allow LDTs to be cleared by the user. */
983 +- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
984 +- if (oldmode || LDT_empty(&ldt_info)) {
985 +- memset(&ldt, 0, sizeof(ldt));
986 +- goto install;
987 ++ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
988 ++ LDT_empty(&ldt_info)) {
989 ++ /* The user wants to clear the entry. */
990 ++ memset(&ldt, 0, sizeof(ldt));
991 ++ } else {
992 ++ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
993 ++ error = -EINVAL;
994 ++ goto out;
995 + }
996 ++
997 ++ fill_ldt(&ldt, &ldt_info);
998 ++ if (oldmode)
999 ++ ldt.avl = 0;
1000 + }
1001 +
1002 +- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
1003 +- error = -EINVAL;
1004 ++ mutex_lock(&mm->context.lock);
1005 ++
1006 ++ old_ldt = mm->context.ldt;
1007 ++ oldsize = old_ldt ? old_ldt->size : 0;
1008 ++ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
1009 ++
1010 ++ error = -ENOMEM;
1011 ++ new_ldt = alloc_ldt_struct(newsize);
1012 ++ if (!new_ldt)
1013 + goto out_unlock;
1014 +- }
1015 +
1016 +- fill_ldt(&ldt, &ldt_info);
1017 +- if (oldmode)
1018 +- ldt.avl = 0;
1019 ++ if (old_ldt)
1020 ++ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
1021 ++ new_ldt->entries[ldt_info.entry_number] = ldt;
1022 ++ finalize_ldt_struct(new_ldt);
1023 +
1024 +- /* Install the new entry ... */
1025 +-install:
1026 +- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
1027 ++ install_ldt(mm, new_ldt);
1028 ++ free_ldt_struct(old_ldt);
1029 + error = 0;
1030 +
1031 + out_unlock:
1032 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1033 +index ddfdbf74f174..5e0bf57d9944 100644
1034 +--- a/arch/x86/kernel/process_64.c
1035 ++++ b/arch/x86/kernel/process_64.c
1036 +@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
1037 + void release_thread(struct task_struct *dead_task)
1038 + {
1039 + if (dead_task->mm) {
1040 +- if (dead_task->mm->context.size) {
1041 ++ if (dead_task->mm->context.ldt) {
1042 + pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
1043 + dead_task->comm,
1044 + dead_task->mm->context.ldt,
1045 +- dead_task->mm->context.size);
1046 ++ dead_task->mm->context.ldt->size);
1047 + BUG();
1048 + }
1049 + }
1050 +diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
1051 +index 9b4d51d0c0d0..0ccb53a9fcd9 100644
1052 +--- a/arch/x86/kernel/step.c
1053 ++++ b/arch/x86/kernel/step.c
1054 +@@ -5,6 +5,7 @@
1055 + #include <linux/mm.h>
1056 + #include <linux/ptrace.h>
1057 + #include <asm/desc.h>
1058 ++#include <asm/mmu_context.h>
1059 +
1060 + unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
1061 + {
1062 +@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
1063 + struct desc_struct *desc;
1064 + unsigned long base;
1065 +
1066 +- seg &= ~7UL;
1067 ++ seg >>= 3;
1068 +
1069 + mutex_lock(&child->mm->context.lock);
1070 +- if (unlikely((seg >> 3) >= child->mm->context.size))
1071 ++ if (unlikely(!child->mm->context.ldt ||
1072 ++ seg >= child->mm->context.ldt->size))
1073 + addr = -1L; /* bogus selector, access would fault */
1074 + else {
1075 +- desc = child->mm->context.ldt + seg;
1076 ++ desc = &child->mm->context.ldt->entries[seg];
1077 + base = get_desc_base(desc);
1078 +
1079 + /* 16-bit code segment? */
1080 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1081 +index b73337634214..554e877e0bc4 100644
1082 +--- a/arch/x86/kvm/mmu.c
1083 ++++ b/arch/x86/kvm/mmu.c
1084 +@@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep)
1085 + {
1086 + return ACCESS_ONCE(*sptep);
1087 + }
1088 +-
1089 +-static bool __check_direct_spte_mmio_pf(u64 spte)
1090 +-{
1091 +- /* It is valid if the spte is zapped. */
1092 +- return spte == 0ull;
1093 +-}
1094 + #else
1095 + union split_spte {
1096 + struct {
1097 +@@ -478,23 +472,6 @@ retry:
1098 +
1099 + return spte.spte;
1100 + }
1101 +-
1102 +-static bool __check_direct_spte_mmio_pf(u64 spte)
1103 +-{
1104 +- union split_spte sspte = (union split_spte)spte;
1105 +- u32 high_mmio_mask = shadow_mmio_mask >> 32;
1106 +-
1107 +- /* It is valid if the spte is zapped. */
1108 +- if (spte == 0ull)
1109 +- return true;
1110 +-
1111 +- /* It is valid if the spte is being zapped. */
1112 +- if (sspte.spte_low == 0ull &&
1113 +- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
1114 +- return true;
1115 +-
1116 +- return false;
1117 +-}
1118 + #endif
1119 +
1120 + static bool spte_is_locklessly_modifiable(u64 spte)
1121 +@@ -3343,21 +3320,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
1122 + return vcpu_match_mmio_gva(vcpu, addr);
1123 + }
1124 +
1125 +-
1126 +-/*
1127 +- * On direct hosts, the last spte is only allows two states
1128 +- * for mmio page fault:
1129 +- * - It is the mmio spte
1130 +- * - It is zapped or it is being zapped.
1131 +- *
1132 +- * This function completely checks the spte when the last spte
1133 +- * is not the mmio spte.
1134 +- */
1135 +-static bool check_direct_spte_mmio_pf(u64 spte)
1136 +-{
1137 +- return __check_direct_spte_mmio_pf(spte);
1138 +-}
1139 +-
1140 + static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
1141 + {
1142 + struct kvm_shadow_walk_iterator iterator;
1143 +@@ -3400,13 +3362,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
1144 + }
1145 +
1146 + /*
1147 +- * It's ok if the gva is remapped by other cpus on shadow guest,
1148 +- * it's a BUG if the gfn is not a mmio page.
1149 +- */
1150 +- if (direct && !check_direct_spte_mmio_pf(spte))
1151 +- return RET_MMIO_PF_BUG;
1152 +-
1153 +- /*
1154 + * If the page table is zapped by other cpus, let CPU fault again on
1155 + * the address.
1156 + */
1157 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1158 +index ea306adbbc13..47a32f743a91 100644
1159 +--- a/arch/x86/kvm/x86.c
1160 ++++ b/arch/x86/kvm/x86.c
1161 +@@ -2192,7 +2192,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1162 + if (guest_cpuid_has_tsc_adjust(vcpu)) {
1163 + if (!msr_info->host_initiated) {
1164 + s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
1165 +- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
1166 ++ adjust_tsc_offset_guest(vcpu, adj);
1167 + }
1168 + vcpu->arch.ia32_tsc_adjust_msr = data;
1169 + }
1170 +diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
1171 +index 9b868124128d..274a52b1183e 100644
1172 +--- a/arch/x86/math-emu/fpu_entry.c
1173 ++++ b/arch/x86/math-emu/fpu_entry.c
1174 +@@ -29,7 +29,6 @@
1175 +
1176 + #include <asm/uaccess.h>
1177 + #include <asm/traps.h>
1178 +-#include <asm/desc.h>
1179 + #include <asm/user.h>
1180 + #include <asm/i387.h>
1181 +
1182 +@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info)
1183 + math_abort(FPU_info, SIGILL);
1184 + }
1185 +
1186 +- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
1187 ++ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
1188 + if (SEG_D_SIZE(code_descriptor)) {
1189 + /* The above test may be wrong, the book is not clear */
1190 + /* Segmented 32 bit protected mode */
1191 +diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
1192 +index 2c614410a5f3..d342fce49447 100644
1193 +--- a/arch/x86/math-emu/fpu_system.h
1194 ++++ b/arch/x86/math-emu/fpu_system.h
1195 +@@ -16,9 +16,24 @@
1196 + #include <linux/kernel.h>
1197 + #include <linux/mm.h>
1198 +
1199 +-/* s is always from a cpu register, and the cpu does bounds checking
1200 +- * during register load --> no further bounds checks needed */
1201 +-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
1202 ++#include <asm/desc.h>
1203 ++#include <asm/mmu_context.h>
1204 ++
1205 ++static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
1206 ++{
1207 ++ static struct desc_struct zero_desc;
1208 ++ struct desc_struct ret = zero_desc;
1209 ++
1210 ++#ifdef CONFIG_MODIFY_LDT_SYSCALL
1211 ++ seg >>= 3;
1212 ++ mutex_lock(&current->mm->context.lock);
1213 ++ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
1214 ++ ret = current->mm->context.ldt->entries[seg];
1215 ++ mutex_unlock(&current->mm->context.lock);
1216 ++#endif
1217 ++ return ret;
1218 ++}
1219 ++
1220 + #define SEG_D_SIZE(x) ((x).b & (3 << 21))
1221 + #define SEG_G_BIT(x) ((x).b & (1 << 23))
1222 + #define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
1223 +diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
1224 +index 6ef5e99380f9..8300db71c2a6 100644
1225 +--- a/arch/x86/math-emu/get_address.c
1226 ++++ b/arch/x86/math-emu/get_address.c
1227 +@@ -20,7 +20,6 @@
1228 + #include <linux/stddef.h>
1229 +
1230 + #include <asm/uaccess.h>
1231 +-#include <asm/desc.h>
1232 +
1233 + #include "fpu_system.h"
1234 + #include "exception.h"
1235 +@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
1236 + addr->selector = PM_REG_(segment);
1237 + }
1238 +
1239 +- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
1240 ++ descriptor = FPU_get_ldt_descriptor(addr->selector);
1241 + base_address = SEG_BASE_ADDR(descriptor);
1242 + address = base_address + offset;
1243 + limit = base_address
1244 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
1245 +index 757678fb26e1..bf9384488399 100644
1246 +--- a/arch/x86/power/cpu.c
1247 ++++ b/arch/x86/power/cpu.c
1248 +@@ -23,6 +23,7 @@
1249 + #include <asm/debugreg.h>
1250 + #include <asm/fpu-internal.h> /* pcntxt_mask */
1251 + #include <asm/cpu.h>
1252 ++#include <asm/mmu_context.h>
1253 +
1254 + #ifdef CONFIG_X86_32
1255 + __visible unsigned long saved_context_ebx;
1256 +@@ -154,7 +155,7 @@ static void fix_processor_context(void)
1257 + syscall_init(); /* This sets MSR_*STAR and related */
1258 + #endif
1259 + load_TR_desc(); /* This does ltr */
1260 +- load_LDT(&current->active_mm->context); /* This does lldt */
1261 ++ load_mm_ldt(current->active_mm); /* This does lldt */
1262 + }
1263 +
1264 + /**
1265 +diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
1266 +index 677bfcf4ee5d..28f33a8b7f5f 100644
1267 +--- a/arch/xtensa/include/asm/traps.h
1268 ++++ b/arch/xtensa/include/asm/traps.h
1269 +@@ -25,30 +25,39 @@ static inline void spill_registers(void)
1270 + {
1271 + #if XCHAL_NUM_AREGS > 16
1272 + __asm__ __volatile__ (
1273 +- " call12 1f\n"
1274 ++ " call8 1f\n"
1275 + " _j 2f\n"
1276 + " retw\n"
1277 + " .align 4\n"
1278 + "1:\n"
1279 ++#if XCHAL_NUM_AREGS == 32
1280 ++ " _entry a1, 32\n"
1281 ++ " addi a8, a0, 3\n"
1282 ++ " _entry a1, 16\n"
1283 ++ " mov a12, a12\n"
1284 ++ " retw\n"
1285 ++#else
1286 + " _entry a1, 48\n"
1287 +- " addi a12, a0, 3\n"
1288 +-#if XCHAL_NUM_AREGS > 32
1289 +- " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
1290 ++ " call12 1f\n"
1291 ++ " retw\n"
1292 ++ " .align 4\n"
1293 ++ "1:\n"
1294 ++ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
1295 + " _entry a1, 48\n"
1296 + " mov a12, a0\n"
1297 + " .endr\n"
1298 +-#endif
1299 +- " _entry a1, 48\n"
1300 ++ " _entry a1, 16\n"
1301 + #if XCHAL_NUM_AREGS % 12 == 0
1302 +- " mov a8, a8\n"
1303 +-#elif XCHAL_NUM_AREGS % 12 == 4
1304 + " mov a12, a12\n"
1305 +-#elif XCHAL_NUM_AREGS % 12 == 8
1306 ++#elif XCHAL_NUM_AREGS % 12 == 4
1307 + " mov a4, a4\n"
1308 ++#elif XCHAL_NUM_AREGS % 12 == 8
1309 ++ " mov a8, a8\n"
1310 + #endif
1311 + " retw\n"
1312 ++#endif
1313 + "2:\n"
1314 +- : : : "a12", "a13", "memory");
1315 ++ : : : "a8", "a9", "memory");
1316 + #else
1317 + __asm__ __volatile__ (
1318 + " mov a12, a12\n"
1319 +diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
1320 +index 82bbfa5a05b3..a2a902140c4e 100644
1321 +--- a/arch/xtensa/kernel/entry.S
1322 ++++ b/arch/xtensa/kernel/entry.S
1323 +@@ -568,12 +568,13 @@ user_exception_exit:
1324 + * (if we have restored WSBITS-1 frames).
1325 + */
1326 +
1327 ++2:
1328 + #if XCHAL_HAVE_THREADPTR
1329 + l32i a3, a1, PT_THREADPTR
1330 + wur a3, threadptr
1331 + #endif
1332 +
1333 +-2: j common_exception_exit
1334 ++ j common_exception_exit
1335 +
1336 + /* This is the kernel exception exit.
1337 + * We avoided to do a MOVSP when we entered the exception, but we
1338 +@@ -1820,7 +1821,7 @@ ENDPROC(system_call)
1339 + mov a12, a0
1340 + .endr
1341 + #endif
1342 +- _entry a1, 48
1343 ++ _entry a1, 16
1344 + #if XCHAL_NUM_AREGS % 12 == 0
1345 + mov a8, a8
1346 + #elif XCHAL_NUM_AREGS % 12 == 4
1347 +@@ -1844,7 +1845,7 @@ ENDPROC(system_call)
1348 +
1349 + ENTRY(_switch_to)
1350 +
1351 +- entry a1, 16
1352 ++ entry a1, 48
1353 +
1354 + mov a11, a3 # and 'next' (a3)
1355 +
1356 +diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
1357 +index ff6d8adc9cda..fb765524cc3d 100644
1358 +--- a/drivers/acpi/acpi_pnp.c
1359 ++++ b/drivers/acpi/acpi_pnp.c
1360 +@@ -153,6 +153,7 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
1361 + {"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
1362 + {"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */
1363 + {"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */
1364 ++ {"ALI5123"}, /* ALi Fast Infrared Controller */
1365 + {"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */
1366 + {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
1367 + {"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
1368 +diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
1369 +index cfd7581cc19f..b09ad554430a 100644
1370 +--- a/drivers/acpi/pci_link.c
1371 ++++ b/drivers/acpi/pci_link.c
1372 +@@ -826,6 +826,22 @@ void acpi_penalize_isa_irq(int irq, int active)
1373 + }
1374 +
1375 + /*
1376 ++ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
1377 ++ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
1378 ++ * PCI IRQs.
1379 ++ */
1380 ++void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
1381 ++{
1382 ++ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
1383 ++ if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
1384 ++ polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
1385 ++ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
1386 ++ else
1387 ++ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
1388 ++ }
1389 ++}
1390 ++
1391 ++/*
1392 + * Over-ride default table to reserve additional IRQs for use by ISA
1393 + * e.g. acpi_irq_isa=5
1394 + * Useful for telling ACPI how not to interfere with your ISA sound card.
1395 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1396 +index 65ee94454bbd..e6ea912aee31 100644
1397 +--- a/drivers/ata/ahci.c
1398 ++++ b/drivers/ata/ahci.c
1399 +@@ -349,6 +349,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1400 + /* JMicron 362B and 362C have an AHCI function with IDE class code */
1401 + { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
1402 + { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
1403 ++ /* May need to update quirk_jmicron_async_suspend() for additions */
1404 +
1405 + /* ATI */
1406 + { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
1407 +@@ -1377,18 +1378,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1408 + else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
1409 + ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
1410 +
1411 +- /*
1412 +- * The JMicron chip 361/363 contains one SATA controller and one
1413 +- * PATA controller,for powering on these both controllers, we must
1414 +- * follow the sequence one by one, otherwise one of them can not be
1415 +- * powered on successfully, so here we disable the async suspend
1416 +- * method for these chips.
1417 +- */
1418 +- if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
1419 +- (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
1420 +- pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
1421 +- device_disable_async_suspend(&pdev->dev);
1422 +-
1423 + /* acquire resources */
1424 + rc = pcim_enable_device(pdev);
1425 + if (rc)
1426 +diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
1427 +index 47e418b8c8ba..4d1a5d2c4287 100644
1428 +--- a/drivers/ata/pata_jmicron.c
1429 ++++ b/drivers/ata/pata_jmicron.c
1430 +@@ -143,18 +143,6 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
1431 + };
1432 + const struct ata_port_info *ppi[] = { &info, NULL };
1433 +
1434 +- /*
1435 +- * The JMicron chip 361/363 contains one SATA controller and one
1436 +- * PATA controller,for powering on these both controllers, we must
1437 +- * follow the sequence one by one, otherwise one of them can not be
1438 +- * powered on successfully, so here we disable the async suspend
1439 +- * method for these chips.
1440 +- */
1441 +- if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
1442 +- (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
1443 +- pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
1444 +- device_disable_async_suspend(&pdev->dev);
1445 +-
1446 + return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
1447 + }
1448 +
1449 +diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
1450 +index 5b93852392b8..0d752851a1ee 100644
1451 +--- a/drivers/auxdisplay/ks0108.c
1452 ++++ b/drivers/auxdisplay/ks0108.c
1453 +@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
1454 +
1455 + ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
1456 + NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
1457 ++ parport_put_port(ks0108_parport);
1458 + if (ks0108_pardevice == NULL) {
1459 + printk(KERN_ERR KS0108_NAME ": ERROR: "
1460 + "parport didn't register new device\n");
1461 +diff --git a/drivers/base/devres.c b/drivers/base/devres.c
1462 +index c8a53d1e019f..875464690117 100644
1463 +--- a/drivers/base/devres.c
1464 ++++ b/drivers/base/devres.c
1465 +@@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res,
1466 + if (!dr) {
1467 + add_dr(dev, &new_dr->node);
1468 + dr = new_dr;
1469 +- new_dr = NULL;
1470 ++ new_res = NULL;
1471 + }
1472 + spin_unlock_irqrestore(&dev->devres_lock, flags);
1473 +- devres_free(new_dr);
1474 ++ devres_free(new_res);
1475 +
1476 + return dr->data;
1477 + }
1478 +diff --git a/drivers/base/platform.c b/drivers/base/platform.c
1479 +index ebf034b97278..7403de94832c 100644
1480 +--- a/drivers/base/platform.c
1481 ++++ b/drivers/base/platform.c
1482 +@@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev)
1483 +
1484 + while (--i >= 0) {
1485 + struct resource *r = &pdev->resource[i];
1486 +- unsigned long type = resource_type(r);
1487 +-
1488 +- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
1489 ++ if (r->parent)
1490 + release_resource(r);
1491 + }
1492 +
1493 +@@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev)
1494 +
1495 + for (i = 0; i < pdev->num_resources; i++) {
1496 + struct resource *r = &pdev->resource[i];
1497 +- unsigned long type = resource_type(r);
1498 +-
1499 +- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
1500 ++ if (r->parent)
1501 + release_resource(r);
1502 + }
1503 + }
1504 +diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
1505 +index c7b0fcebf168..ac3c07db92f1 100644
1506 +--- a/drivers/base/power/clock_ops.c
1507 ++++ b/drivers/base/power/clock_ops.c
1508 +@@ -37,7 +37,7 @@ struct pm_clock_entry {
1509 + * @dev: The device for the given clock
1510 + * @ce: PM clock entry corresponding to the clock.
1511 + */
1512 +-static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
1513 ++static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
1514 + {
1515 + int ret;
1516 +
1517 +@@ -49,8 +49,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
1518 + dev_err(dev, "%s: failed to enable clk %p, error %d\n",
1519 + __func__, ce->clk, ret);
1520 + }
1521 +-
1522 +- return ret;
1523 + }
1524 +
1525 + /**
1526 +diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
1527 +index 8c0fe8828f99..c4ceb5eaf46c 100644
1528 +--- a/drivers/clk/pistachio/clk-pistachio.c
1529 ++++ b/drivers/clk/pistachio/clk-pistachio.c
1530 +@@ -159,9 +159,15 @@ PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
1531 + "wifi_pll_mux", "bt_pll_mux" };
1532 + static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
1533 +
1534 +-static unsigned int pistachio_critical_clks[] __initdata = {
1535 +- CLK_MIPS,
1536 +- CLK_PERIPH_SYS,
1537 ++static unsigned int pistachio_critical_clks_core[] __initdata = {
1538 ++ CLK_MIPS
1539 ++};
1540 ++
1541 ++static unsigned int pistachio_critical_clks_sys[] __initdata = {
1542 ++ PERIPH_CLK_SYS,
1543 ++ PERIPH_CLK_SYS_BUS,
1544 ++ PERIPH_CLK_DDR,
1545 ++ PERIPH_CLK_ROM,
1546 + };
1547 +
1548 + static void __init pistachio_clk_init(struct device_node *np)
1549 +@@ -193,8 +199,8 @@ static void __init pistachio_clk_init(struct device_node *np)
1550 +
1551 + pistachio_clk_register_provider(p);
1552 +
1553 +- pistachio_clk_force_enable(p, pistachio_critical_clks,
1554 +- ARRAY_SIZE(pistachio_critical_clks));
1555 ++ pistachio_clk_force_enable(p, pistachio_critical_clks_core,
1556 ++ ARRAY_SIZE(pistachio_critical_clks_core));
1557 + }
1558 + CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
1559 +
1560 +@@ -261,6 +267,9 @@ static void __init pistachio_clk_periph_init(struct device_node *np)
1561 + ARRAY_SIZE(pistachio_periph_gates));
1562 +
1563 + pistachio_clk_register_provider(p);
1564 ++
1565 ++ pistachio_clk_force_enable(p, pistachio_critical_clks_sys,
1566 ++ ARRAY_SIZE(pistachio_critical_clks_sys));
1567 + }
1568 + CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
1569 + pistachio_clk_periph_init);
1570 +diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
1571 +index de537560bf70..ebd0d2a3b5da 100644
1572 +--- a/drivers/clk/pistachio/clk-pll.c
1573 ++++ b/drivers/clk/pistachio/clk-pll.c
1574 +@@ -115,8 +115,7 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw)
1575 + u32 val;
1576 +
1577 + val = pll_readl(pll, PLL_CTRL3);
1578 +- val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
1579 +- PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
1580 ++ val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
1581 + PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
1582 + pll_writel(pll, val, PLL_CTRL3);
1583 +
1584 +@@ -233,7 +232,7 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw)
1585 + u32 val;
1586 +
1587 + val = pll_readl(pll, PLL_CTRL1);
1588 +- val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
1589 ++ val &= ~(PLL_INT_CTRL1_PD |
1590 + PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
1591 + pll_writel(pll, val, PLL_CTRL1);
1592 +
1593 +diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
1594 +index 6cd88d963a7f..542e45ef5087 100644
1595 +--- a/drivers/clk/pxa/clk-pxa25x.c
1596 ++++ b/drivers/clk/pxa/clk-pxa25x.c
1597 +@@ -79,7 +79,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
1598 + clks[3] / 1000000, (clks[3] % 1000000) / 10000);
1599 + }
1600 +
1601 +- return (unsigned int)clks[0];
1602 ++ return (unsigned int)clks[0] / KHz;
1603 + }
1604 +
1605 + static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
1606 +diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
1607 +index 5f9b54b024b9..267511df1e59 100644
1608 +--- a/drivers/clk/pxa/clk-pxa27x.c
1609 ++++ b/drivers/clk/pxa/clk-pxa27x.c
1610 +@@ -80,7 +80,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
1611 + pr_info("System bus clock: %ld.%02ldMHz\n",
1612 + clks[4] / 1000000, (clks[4] % 1000000) / 10000);
1613 + }
1614 +- return (unsigned int)clks[0];
1615 ++ return (unsigned int)clks[0] / KHz;
1616 + }
1617 +
1618 + bool pxa27x_is_ppll_disabled(void)
1619 +diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
1620 +index ac03ba49e9d1..4af4eed5f89f 100644
1621 +--- a/drivers/clk/pxa/clk-pxa3xx.c
1622 ++++ b/drivers/clk/pxa/clk-pxa3xx.c
1623 +@@ -78,7 +78,7 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
1624 + pr_info("System bus clock: %ld.%02ldMHz\n",
1625 + clks[4] / 1000000, (clks[4] % 1000000) / 10000);
1626 + }
1627 +- return (unsigned int)clks[0];
1628 ++ return (unsigned int)clks[0] / KHz;
1629 + }
1630 +
1631 + static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
1632 +diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
1633 +index 54a756b90a37..457c540585f9 100644
1634 +--- a/drivers/clk/qcom/gcc-apq8084.c
1635 ++++ b/drivers/clk/qcom/gcc-apq8084.c
1636 +@@ -2105,6 +2105,7 @@ static struct clk_branch gcc_ce1_clk = {
1637 + "ce1_clk_src",
1638 + },
1639 + .num_parents = 1,
1640 ++ .flags = CLK_SET_RATE_PARENT,
1641 + .ops = &clk_branch2_ops,
1642 + },
1643 + },
1644 +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
1645 +index c66f7bc2ae87..5d75bffab141 100644
1646 +--- a/drivers/clk/qcom/gcc-msm8916.c
1647 ++++ b/drivers/clk/qcom/gcc-msm8916.c
1648 +@@ -2278,7 +2278,7 @@ static struct clk_branch gcc_prng_ahb_clk = {
1649 + .halt_check = BRANCH_HALT_VOTED,
1650 + .clkr = {
1651 + .enable_reg = 0x45004,
1652 +- .enable_mask = BIT(0),
1653 ++ .enable_mask = BIT(8),
1654 + .hw.init = &(struct clk_init_data){
1655 + .name = "gcc_prng_ahb_clk",
1656 + .parent_names = (const char *[]){
1657 +diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
1658 +index c39d09874e74..f06a082e3e87 100644
1659 +--- a/drivers/clk/qcom/gcc-msm8974.c
1660 ++++ b/drivers/clk/qcom/gcc-msm8974.c
1661 +@@ -1783,6 +1783,7 @@ static struct clk_branch gcc_ce1_clk = {
1662 + "ce1_clk_src",
1663 + },
1664 + .num_parents = 1,
1665 ++ .flags = CLK_SET_RATE_PARENT,
1666 + .ops = &clk_branch2_ops,
1667 + },
1668 + },
1669 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
1670 +index d17eb4528a28..37f96117fd3d 100644
1671 +--- a/drivers/clk/rockchip/clk-rk3288.c
1672 ++++ b/drivers/clk/rockchip/clk-rk3288.c
1673 +@@ -578,7 +578,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1674 + COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
1675 + RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
1676 + RK3288_CLKGATE_CON(2), 5, GFLAGS),
1677 +- MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
1678 ++ MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
1679 + RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
1680 + GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
1681 + RK3288_CLKGATE_CON(5), 3, GFLAGS),
1682 +diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
1683 +index 714d6ba782c8..f7890bf652e6 100644
1684 +--- a/drivers/clk/samsung/clk-exynos4.c
1685 ++++ b/drivers/clk/samsung/clk-exynos4.c
1686 +@@ -85,6 +85,7 @@
1687 + #define DIV_PERIL4 0xc560
1688 + #define DIV_PERIL5 0xc564
1689 + #define E4X12_DIV_CAM1 0xc568
1690 ++#define E4X12_GATE_BUS_FSYS1 0xc744
1691 + #define GATE_SCLK_CAM 0xc820
1692 + #define GATE_IP_CAM 0xc920
1693 + #define GATE_IP_TV 0xc924
1694 +@@ -1095,6 +1096,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
1695 + 0),
1696 + GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
1697 + 0),
1698 ++ GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
1699 + GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
1700 + GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
1701 + GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
1702 +diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
1703 +index e668e479a697..bdd284249cc3 100644
1704 +--- a/drivers/clk/samsung/clk-s5pv210.c
1705 ++++ b/drivers/clk/samsung/clk-s5pv210.c
1706 +@@ -828,6 +828,8 @@ static void __init __s5pv210_clk_init(struct device_node *np,
1707 +
1708 + s5pv210_clk_sleep_init();
1709 +
1710 ++ samsung_clk_of_add_provider(np, ctx);
1711 ++
1712 + pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
1713 + "\tmout_epll = %ld, mout_vpll = %ld\n",
1714 + is_s5p6442 ? "S5P6442" : "S5PV210",
1715 +diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
1716 +index c6e86a9a2aa3..5122ef25f595 100644
1717 +--- a/drivers/clk/versatile/clk-sp810.c
1718 ++++ b/drivers/clk/versatile/clk-sp810.c
1719 +@@ -128,8 +128,8 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
1720 + {
1721 + struct clk_sp810 *sp810 = data;
1722 +
1723 +- if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
1724 +- ARRAY_SIZE(sp810->timerclken)))
1725 ++ if (WARN_ON(clkspec->args_count != 1 ||
1726 ++ clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken)))
1727 + return NULL;
1728 +
1729 + return sp810->timerclken[clkspec->args[0]].clk;
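The clk-sp810 hunk above is a one-character off-by-one fix; a trivial stand-alone check (illustration only, plain C, not driver code) shows why '>' lets an index equal to the array size slip through while '>=' rejects it:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int timerclken[4] = { 10, 11, 12, 13 };
	unsigned int idx = 4;	/* one past the last valid index */

	/* Old check: idx > ARRAY_SIZE() -> 4 > 4 is false, so the
	 * out-of-range index slips through and reads past the array. */
	printf("old check rejects idx=4: %s\n",
	       idx > ARRAY_SIZE(timerclken) ? "yes" : "no");	/* no  */
	/* Fixed check: idx >= ARRAY_SIZE() catches it. */
	printf("new check rejects idx=4: %s\n",
	       idx >= ARRAY_SIZE(timerclken) ? "yes" : "no");	/* yes */
	return 0;
}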
1730 +diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
1731 +index 0a6f899839dd..d8429cb71f02 100644
1732 +--- a/drivers/crypto/vmx/ghashp8-ppc.pl
1733 ++++ b/drivers/crypto/vmx/ghashp8-ppc.pl
1734 +@@ -61,6 +61,12 @@ $code=<<___;
1735 + mtspr 256,r0
1736 + li r10,0x30
1737 + lvx_u $H,0,r4 # load H
1738 ++ le?xor r7,r7,r7
1739 ++ le?addi r7,r7,0x8 # need a vperm start with 08
1740 ++ le?lvsr 5,0,r7
1741 ++ le?vspltisb 6,0x0f
1742 ++ le?vxor 5,5,6 # set a b-endian mask
1743 ++ le?vperm $H,$H,$H,5
1744 +
1745 + vspltisb $xC2,-16 # 0xf0
1746 + vspltisb $t0,1 # one
1747 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1748 +index a19d2c71e205..fb91df1631d9 100644
1749 +--- a/drivers/gpu/drm/i915/i915_drv.c
1750 ++++ b/drivers/gpu/drm/i915/i915_drv.c
1751 +@@ -647,15 +647,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
1752 +
1753 + pci_disable_device(drm_dev->pdev);
1754 + /*
1755 +- * During hibernation on some GEN4 platforms the BIOS may try to access
1756 ++ * During hibernation on some platforms the BIOS may try to access
1757 + * the device even though it's already in D3 and hang the machine. So
1758 + * leave the device in D0 on those platforms and hope the BIOS will
1759 +- * power down the device properly. Platforms where this was seen:
1760 +- * Lenovo Thinkpad X301, X61s
1761 ++ * power down the device properly. The issue was seen on multiple old
1762 ++ * GENs with different BIOS vendors, so having an explicit blacklist
1763 ++ * is impractical; apply the workaround on everything pre GEN6. The
1764 ++ * platforms where the issue was seen:
1765 ++ * Lenovo Thinkpad X301, X61s, X60, T60, X41
1766 ++ * Fujitsu FSC S7110
1767 ++ * Acer Aspire 1830T
1768 + */
1769 +- if (!(hibernation &&
1770 +- drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
1771 +- INTEL_INFO(dev_priv)->gen == 4))
1772 ++ if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
1773 + pci_set_power_state(drm_dev->pdev, PCI_D3hot);
1774 +
1775 + return 0;
1776 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1777 +index 683a9b004c11..7d53d7e15455 100644
1778 +--- a/drivers/gpu/drm/i915/i915_drv.h
1779 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1780 +@@ -3190,13 +3190,13 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
1781 + #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
1782 +
1783 + #define I915_READ64_2x32(lower_reg, upper_reg) ({ \
1784 +- u32 upper, lower, tmp; \
1785 +- tmp = I915_READ(upper_reg); \
1786 ++ u32 upper, lower, old_upper, loop = 0; \
1787 ++ upper = I915_READ(upper_reg); \
1788 + do { \
1789 +- upper = tmp; \
1790 ++ old_upper = upper; \
1791 + lower = I915_READ(lower_reg); \
1792 +- tmp = I915_READ(upper_reg); \
1793 +- } while (upper != tmp); \
1794 ++ upper = I915_READ(upper_reg); \
1795 ++ } while (upper != old_upper && loop++ < 2); \
1796 + (u64)upper << 32 | lower; })
1797 +
1798 + #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
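The I915_READ64_2x32() rework above is a self-contained pattern worth spelling out. Purely as an illustration (a user-space sketch; the read helpers are hypothetical stand-ins for I915_READ()), the upper/lower/upper read with a bounded retry looks like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for I915_READ(): a counter split into two 32-bit
 * halves, bumped on every access to mimic hardware racing with the reader. */
static uint64_t hw_counter = 0x00000000fffffff0ULL;

static uint32_t read_lower(void) { hw_counter += 7; return (uint32_t)hw_counter; }
static uint32_t read_upper(void) { hw_counter += 7; return (uint32_t)(hw_counter >> 32); }

/* The pattern from the fixed macro: re-read the upper half until it is
 * stable (bounded by a small retry cap) so a carry from the lower into the
 * upper word between the two reads cannot produce a torn 64-bit value. */
static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, old_upper;
	int loop = 0;

	upper = read_upper();
	do {
		old_upper = upper;
		lower = read_lower();
		upper = read_upper();
	} while (upper != old_upper && loop++ < 2);

	return (uint64_t)upper << 32 | lower;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)read64_2x32());
	return 0;
}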
1799 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1800 +index a3190e793ed4..479024a4caad 100644
1801 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1802 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1803 +@@ -1025,6 +1025,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1804 + u32 old_read = obj->base.read_domains;
1805 + u32 old_write = obj->base.write_domain;
1806 +
1807 ++ obj->dirty = 1; /* be paranoid */
1808 + obj->base.write_domain = obj->base.pending_write_domain;
1809 + if (obj->base.write_domain == 0)
1810 + obj->base.pending_read_domains |= obj->base.read_domains;
1811 +@@ -1032,7 +1033,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1812 +
1813 + i915_vma_move_to_active(vma, ring);
1814 + if (obj->base.write_domain) {
1815 +- obj->dirty = 1;
1816 + i915_gem_request_assign(&obj->last_write_req, req);
1817 +
1818 + intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
1819 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1820 +index 57c887843dc3..f208bbc6d58e 100644
1821 +--- a/drivers/gpu/drm/i915/intel_display.c
1822 ++++ b/drivers/gpu/drm/i915/intel_display.c
1823 +@@ -13781,6 +13781,24 @@ void intel_modeset_init(struct drm_device *dev)
1824 + if (INTEL_INFO(dev)->num_pipes == 0)
1825 + return;
1826 +
1827 ++ /*
1828 ++ * There may be no VBT; and if the BIOS enabled SSC we can
1829 ++ * just keep using it to avoid unnecessary flicker. Whereas if the
1830 ++ * BIOS isn't using it, don't assume it will work even if the VBT
1831 ++ * indicates as much.
1832 ++ */
1833 ++ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
1834 ++ bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
1835 ++ DREF_SSC1_ENABLE);
1836 ++
1837 ++ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
1838 ++ DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
1839 ++ bios_lvds_use_ssc ? "en" : "dis",
1840 ++ dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
1841 ++ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
1842 ++ }
1843 ++ }
1844 ++
1845 + intel_init_display(dev);
1846 + intel_init_audio(dev);
1847 +
1848 +@@ -14266,7 +14284,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
1849 +
1850 + void intel_modeset_gem_init(struct drm_device *dev)
1851 + {
1852 +- struct drm_i915_private *dev_priv = dev->dev_private;
1853 + struct drm_crtc *c;
1854 + struct drm_i915_gem_object *obj;
1855 + int ret;
1856 +@@ -14275,16 +14292,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
1857 + intel_init_gt_powersave(dev);
1858 + mutex_unlock(&dev->struct_mutex);
1859 +
1860 +- /*
1861 +- * There may be no VBT; and if the BIOS enabled SSC we can
1862 +- * just keep using it to avoid unnecessary flicker. Whereas if the
1863 +- * BIOS isn't using it, don't assume it will work even if the VBT
1864 +- * indicates as much.
1865 +- */
1866 +- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
1867 +- dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
1868 +- DREF_SSC1_ENABLE);
1869 +-
1870 + intel_modeset_init_hw(dev);
1871 +
1872 + intel_setup_overlay(dev);
1873 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1874 +index b1fe32b119ef..fb2983f77141 100644
1875 +--- a/drivers/gpu/drm/i915/intel_dp.c
1876 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1877 +@@ -4691,9 +4691,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1878 +
1879 + intel_dp_probe_oui(intel_dp);
1880 +
1881 +- if (!intel_dp_probe_mst(intel_dp))
1882 ++ if (!intel_dp_probe_mst(intel_dp)) {
1883 ++ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1884 ++ intel_dp_check_link_status(intel_dp);
1885 ++ drm_modeset_unlock(&dev->mode_config.connection_mutex);
1886 + goto mst_fail;
1887 +-
1888 ++ }
1889 + } else {
1890 + if (intel_dp->is_mst) {
1891 + if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
1892 +@@ -4701,10 +4704,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1893 + }
1894 +
1895 + if (!intel_dp->is_mst) {
1896 +- /*
1897 +- * we'll check the link status via the normal hot plug path later -
1898 +- * but for short hpds we should check it now
1899 +- */
1900 + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1901 + intel_dp_check_link_status(intel_dp);
1902 + drm_modeset_unlock(&dev->mode_config.connection_mutex);
1903 +diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
1904 +index 51966426addf..c7a0b8d8fac9 100644
1905 +--- a/drivers/gpu/drm/i915/intel_dsi.c
1906 ++++ b/drivers/gpu/drm/i915/intel_dsi.c
1907 +@@ -1036,11 +1036,7 @@ void intel_dsi_init(struct drm_device *dev)
1908 + intel_connector->unregister = intel_connector_unregister;
1909 +
1910 + /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
1911 +- if (dev_priv->vbt.dsi.config->dual_link) {
1912 +- /* XXX: does dual link work on either pipe? */
1913 +- intel_encoder->crtc_mask = (1 << PIPE_A);
1914 +- intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
1915 +- } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
1916 ++ if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
1917 + intel_encoder->crtc_mask = (1 << PIPE_A);
1918 + intel_dsi->ports = (1 << PORT_A);
1919 + } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
1920 +@@ -1048,6 +1044,9 @@ void intel_dsi_init(struct drm_device *dev)
1921 + intel_dsi->ports = (1 << PORT_C);
1922 + }
1923 +
1924 ++ if (dev_priv->vbt.dsi.config->dual_link)
1925 ++ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
1926 ++
1927 + /* Create a DSI host (and a device) for each port. */
1928 + for_each_dsi_port(port, intel_dsi->ports) {
1929 + struct intel_dsi_host *host;
1930 +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
1931 +index 4a0a8b29b0a1..32248791bc4b 100644
1932 +--- a/drivers/gpu/drm/qxl/qxl_display.c
1933 ++++ b/drivers/gpu/drm/qxl/qxl_display.c
1934 +@@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
1935 + *pwidth = head->width;
1936 + *pheight = head->height;
1937 + drm_mode_probed_add(connector, mode);
1938 ++ /* remember the last custom size for mode validation */
1939 ++ qdev->monitors_config_width = mode->hdisplay;
1940 ++ qdev->monitors_config_height = mode->vdisplay;
1941 + return 1;
1942 + }
1943 +
1944 ++static struct mode_size {
1945 ++ int w;
1946 ++ int h;
1947 ++} common_modes[] = {
1948 ++ { 640, 480},
1949 ++ { 720, 480},
1950 ++ { 800, 600},
1951 ++ { 848, 480},
1952 ++ {1024, 768},
1953 ++ {1152, 768},
1954 ++ {1280, 720},
1955 ++ {1280, 800},
1956 ++ {1280, 854},
1957 ++ {1280, 960},
1958 ++ {1280, 1024},
1959 ++ {1440, 900},
1960 ++ {1400, 1050},
1961 ++ {1680, 1050},
1962 ++ {1600, 1200},
1963 ++ {1920, 1080},
1964 ++ {1920, 1200}
1965 ++};
1966 ++
1967 + static int qxl_add_common_modes(struct drm_connector *connector,
1968 + unsigned pwidth,
1969 + unsigned pheight)
1970 +@@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
1971 + struct drm_device *dev = connector->dev;
1972 + struct drm_display_mode *mode = NULL;
1973 + int i;
1974 +- struct mode_size {
1975 +- int w;
1976 +- int h;
1977 +- } common_modes[] = {
1978 +- { 640, 480},
1979 +- { 720, 480},
1980 +- { 800, 600},
1981 +- { 848, 480},
1982 +- {1024, 768},
1983 +- {1152, 768},
1984 +- {1280, 720},
1985 +- {1280, 800},
1986 +- {1280, 854},
1987 +- {1280, 960},
1988 +- {1280, 1024},
1989 +- {1440, 900},
1990 +- {1400, 1050},
1991 +- {1680, 1050},
1992 +- {1600, 1200},
1993 +- {1920, 1080},
1994 +- {1920, 1200}
1995 +- };
1996 +-
1997 + for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
1998 + mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
1999 + 60, false, false, false);
2000 +@@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
2001 + static int qxl_conn_mode_valid(struct drm_connector *connector,
2002 + struct drm_display_mode *mode)
2003 + {
2004 ++ struct drm_device *ddev = connector->dev;
2005 ++ struct qxl_device *qdev = ddev->dev_private;
2006 ++ int i;
2007 ++
2008 + /* TODO: is this called for user defined modes? (xrandr --add-mode)
2009 + * TODO: check that the mode fits in the framebuffer */
2010 +- DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
2011 +- mode->vdisplay, mode->status);
2012 +- return MODE_OK;
2013 ++
2014 ++ if(qdev->monitors_config_width == mode->hdisplay &&
2015 ++ qdev->monitors_config_height == mode->vdisplay)
2016 ++ return MODE_OK;
2017 ++
2018 ++ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
2019 ++ if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
2020 ++ return MODE_OK;
2021 ++ }
2022 ++ return MODE_BAD;
2023 + }
2024 +
2025 + static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
2026 +diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
2027 +index 7c6cafe21f5f..e66143cc1a7a 100644
2028 +--- a/drivers/gpu/drm/qxl/qxl_drv.h
2029 ++++ b/drivers/gpu/drm/qxl/qxl_drv.h
2030 +@@ -325,6 +325,8 @@ struct qxl_device {
2031 + struct work_struct fb_work;
2032 +
2033 + struct drm_property *hotplug_mode_update_property;
2034 ++ int monitors_config_width;
2035 ++ int monitors_config_height;
2036 + };
2037 +
2038 + /* forward declaration for QXL_INFO_IO */
2039 +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
2040 +index b435c859dcbc..447dbfa6c793 100644
2041 +--- a/drivers/gpu/drm/radeon/atombios_dp.c
2042 ++++ b/drivers/gpu/drm/radeon/atombios_dp.c
2043 +@@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
2044 + return -E2BIG;
2045 +
2046 + tx_buf[0] = msg->address & 0xff;
2047 +- tx_buf[1] = msg->address >> 8;
2048 +- tx_buf[2] = msg->request << 4;
2049 ++ tx_buf[1] = (msg->address >> 8) & 0xff;
2050 ++ tx_buf[2] = (msg->request << 4) |
2051 ++ ((msg->address >> 16) & 0xf);
2052 + tx_buf[3] = msg->size ? (msg->size - 1) : 0;
2053 +
2054 + switch (msg->request & ~DP_AUX_I2C_MOT) {
2055 +diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
2056 +index 59b3d3221294..d77dd1430d58 100644
2057 +--- a/drivers/gpu/drm/radeon/radeon_audio.c
2058 ++++ b/drivers/gpu/drm/radeon/radeon_audio.c
2059 +@@ -522,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
2060 + return err;
2061 + }
2062 +
2063 +- if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
2064 +- if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
2065 +- frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
2066 +- else
2067 +- frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
2068 +- } else {
2069 +- frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
2070 ++ if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
2071 ++ if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
2072 ++ if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
2073 ++ frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
2074 ++ else
2075 ++ frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
2076 ++ } else {
2077 ++ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
2078 ++ }
2079 + }
2080 +
2081 + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
2082 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2083 +index 94b21ae70ef7..5a2cafb4f1bc 100644
2084 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
2085 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2086 +@@ -95,6 +95,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
2087 + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
2088 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2089 + } else if (radeon_dp_needs_link_train(radeon_connector)) {
2090 ++ /* Don't try to start link training before we
2091 ++ * have the dpcd */
2092 ++ if (!radeon_dp_getdpcd(radeon_connector))
2093 ++ return;
2094 ++
2095 + /* set it to OFF so that drm_helper_connector_dpms()
2096 + * won't return immediately since the current state
2097 + * is ON at this point.
2098 +diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
2099 +index fcbd60bb0349..3b0c229d7dcd 100644
2100 +--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
2101 ++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
2102 +@@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
2103 + AUX_SW_WR_BYTES(bytes));
2104 +
2105 + /* write the data header into the registers */
2106 +- /* request, addres, msg size */
2107 +- byte = (msg->request << 4);
2108 ++ /* request, address, msg size */
2109 ++ byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
2110 + WREG32(AUX_SW_DATA + aux_offset[instance],
2111 + AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
2112 +
2113 +diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
2114 +index a2dbbbe0d8d7..39bf74793b8b 100644
2115 +--- a/drivers/hid/hid-cp2112.c
2116 ++++ b/drivers/hid/hid-cp2112.c
2117 +@@ -537,7 +537,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
2118 + struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
2119 + struct hid_device *hdev = dev->hdev;
2120 + u8 buf[64];
2121 +- __be16 word;
2122 ++ __le16 word;
2123 + ssize_t count;
2124 + size_t read_length = 0;
2125 + unsigned int retries;
2126 +@@ -554,7 +554,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
2127 + if (I2C_SMBUS_READ == read_write)
2128 + count = cp2112_read_req(buf, addr, read_length);
2129 + else
2130 +- count = cp2112_write_req(buf, addr, data->byte, NULL,
2131 ++ count = cp2112_write_req(buf, addr, command, NULL,
2132 + 0);
2133 + break;
2134 + case I2C_SMBUS_BYTE_DATA:
2135 +@@ -569,7 +569,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
2136 + break;
2137 + case I2C_SMBUS_WORD_DATA:
2138 + read_length = 2;
2139 +- word = cpu_to_be16(data->word);
2140 ++ word = cpu_to_le16(data->word);
2141 +
2142 + if (I2C_SMBUS_READ == read_write)
2143 + count = cp2112_write_read_req(buf, addr, read_length,
2144 +@@ -582,7 +582,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
2145 + size = I2C_SMBUS_WORD_DATA;
2146 + read_write = I2C_SMBUS_READ;
2147 + read_length = 2;
2148 +- word = cpu_to_be16(data->word);
2149 ++ word = cpu_to_le16(data->word);
2150 +
2151 + count = cp2112_write_read_req(buf, addr, read_length, command,
2152 + (u8 *)&word, 2);
2153 +@@ -675,7 +675,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
2154 + data->byte = buf[0];
2155 + break;
2156 + case I2C_SMBUS_WORD_DATA:
2157 +- data->word = be16_to_cpup((__be16 *)buf);
2158 ++ data->word = le16_to_cpup((__le16 *)buf);
2159 + break;
2160 + case I2C_SMBUS_BLOCK_DATA:
2161 + if (read_length > I2C_SMBUS_BLOCK_MAX) {
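For context on the __be16 -> __le16 switch above (a user-space sketch, not driver code): SMBus sends word data low byte first, so the word must be placed into the transfer buffer in little-endian order and decoded the same way on reads; htole16()/le16toh() stand in here for cpu_to_le16()/le16_to_cpup():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t word = 0x1234;		/* host-order SMBus word value       */
	uint16_t wire, back;
	uint8_t buf[2];

	wire = htole16(word);		/* low byte first, as SMBus sends it */
	memcpy(buf, &wire, sizeof(buf));
	printf("on the wire: %02x %02x\n", buf[0], buf[1]);	/* 34 12 */

	memcpy(&back, buf, sizeof(back));
	printf("decoded:     0x%04x\n", le16toh(back));		/* 0x1234 */
	return 0;
}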
2162 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
2163 +index bfbe1bedda7f..eab5bd6a2442 100644
2164 +--- a/drivers/hid/usbhid/hid-core.c
2165 ++++ b/drivers/hid/usbhid/hid-core.c
2166 +@@ -164,7 +164,7 @@ static void hid_io_error(struct hid_device *hid)
2167 + if (time_after(jiffies, usbhid->stop_retry)) {
2168 +
2169 + /* Retries failed, so do a port reset unless we lack bandwidth*/
2170 +- if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
2171 ++ if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
2172 + && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
2173 +
2174 + schedule_work(&usbhid->reset_work);
2175 +diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
2176 +index b3d0e94f72eb..8d2439345673 100644
2177 +--- a/drivers/iio/gyro/Kconfig
2178 ++++ b/drivers/iio/gyro/Kconfig
2179 +@@ -53,7 +53,8 @@ config ADXRS450
2180 + config BMG160
2181 + tristate "BOSCH BMG160 Gyro Sensor"
2182 + depends on I2C
2183 +- select IIO_TRIGGERED_BUFFER if IIO_BUFFER
2184 ++ select IIO_BUFFER
2185 ++ select IIO_TRIGGERED_BUFFER
2186 + help
2187 + Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor
2188 + driver. This driver also supports BMI055 gyroscope.
2189 +diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
2190 +index 2fd68f2219a7..d42e4fe2c7ed 100644
2191 +--- a/drivers/iio/imu/adis16400_core.c
2192 ++++ b/drivers/iio/imu/adis16400_core.c
2193 +@@ -780,7 +780,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
2194 + .flags = ADIS16400_HAS_PROD_ID |
2195 + ADIS16400_HAS_SERIAL_NUMBER |
2196 + ADIS16400_BURST_DIAG_STAT,
2197 +- .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
2198 ++ .gyro_scale_micro = IIO_DEGREE_TO_RAD(40000), /* 0.04 deg/s */
2199 + .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
2200 + .temp_scale_nano = 73860000, /* 0.07386 C */
2201 + .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
2202 +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
2203 +index 989605dd6f78..b94bfd3f595b 100644
2204 +--- a/drivers/iio/imu/adis16480.c
2205 ++++ b/drivers/iio/imu/adis16480.c
2206 +@@ -110,6 +110,10 @@
2207 + struct adis16480_chip_info {
2208 + unsigned int num_channels;
2209 + const struct iio_chan_spec *channels;
2210 ++ unsigned int gyro_max_val;
2211 ++ unsigned int gyro_max_scale;
2212 ++ unsigned int accel_max_val;
2213 ++ unsigned int accel_max_scale;
2214 + };
2215 +
2216 + struct adis16480 {
2217 +@@ -497,19 +501,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
2218 + static int adis16480_read_raw(struct iio_dev *indio_dev,
2219 + const struct iio_chan_spec *chan, int *val, int *val2, long info)
2220 + {
2221 ++ struct adis16480 *st = iio_priv(indio_dev);
2222 ++
2223 + switch (info) {
2224 + case IIO_CHAN_INFO_RAW:
2225 + return adis_single_conversion(indio_dev, chan, 0, val);
2226 + case IIO_CHAN_INFO_SCALE:
2227 + switch (chan->type) {
2228 + case IIO_ANGL_VEL:
2229 +- *val = 0;
2230 +- *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
2231 +- return IIO_VAL_INT_PLUS_MICRO;
2232 ++ *val = st->chip_info->gyro_max_scale;
2233 ++ *val2 = st->chip_info->gyro_max_val;
2234 ++ return IIO_VAL_FRACTIONAL;
2235 + case IIO_ACCEL:
2236 +- *val = 0;
2237 +- *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
2238 +- return IIO_VAL_INT_PLUS_MICRO;
2239 ++ *val = st->chip_info->accel_max_scale;
2240 ++ *val2 = st->chip_info->accel_max_val;
2241 ++ return IIO_VAL_FRACTIONAL;
2242 + case IIO_MAGN:
2243 + *val = 0;
2244 + *val2 = 100; /* 0.0001 gauss */
2245 +@@ -674,18 +680,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
2246 + [ADIS16375] = {
2247 + .channels = adis16485_channels,
2248 + .num_channels = ARRAY_SIZE(adis16485_channels),
2249 ++ /*
2250 ++ * storing the value in rad/degree and the scale in degree
2251 ++ * gives us the result in rad and better precision than
2252 ++ * storing the scale directly in rad.
2253 ++ */
2254 ++ .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
2255 ++ .gyro_max_scale = 300,
2256 ++ .accel_max_val = IIO_M_S_2_TO_G(21973),
2257 ++ .accel_max_scale = 18,
2258 + },
2259 + [ADIS16480] = {
2260 + .channels = adis16480_channels,
2261 + .num_channels = ARRAY_SIZE(adis16480_channels),
2262 ++ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
2263 ++ .gyro_max_scale = 450,
2264 ++ .accel_max_val = IIO_M_S_2_TO_G(12500),
2265 ++ .accel_max_scale = 5,
2266 + },
2267 + [ADIS16485] = {
2268 + .channels = adis16485_channels,
2269 + .num_channels = ARRAY_SIZE(adis16485_channels),
2270 ++ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
2271 ++ .gyro_max_scale = 450,
2272 ++ .accel_max_val = IIO_M_S_2_TO_G(20000),
2273 ++ .accel_max_scale = 5,
2274 + },
2275 + [ADIS16488] = {
2276 + .channels = adis16480_channels,
2277 + .num_channels = ARRAY_SIZE(adis16480_channels),
2278 ++ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
2279 ++ .gyro_max_scale = 450,
2280 ++ .accel_max_val = IIO_M_S_2_TO_G(22500),
2281 ++ .accel_max_scale = 18,
2282 + },
2283 + };
2284 +
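To see what the new per-chip fields buy (illustration only, using the ADIS16480 numbers above): IIO_VAL_FRACTIONAL reports gyro_max_scale / gyro_max_val, and since the raw full-scale count is stored converted to degrees, the division comes out in rad/s — about 0.000349 rad/s per LSB, the same 0.02 deg/s the old hard-coded value encoded, while the other variants now get their own factors:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* ADIS16480 gyro numbers from the table above (illustrative only). */
	double gyro_max_scale = 450.0;			/* full scale, deg/s     */
	double gyro_max_val   = 22500.0 * 180.0 / M_PI;	/* raw count, in degrees */

	/* What IIO_VAL_FRACTIONAL ends up reporting as the channel scale. */
	printf("gyro scale: %.9f rad/s per LSB\n",
	       gyro_max_scale / gyro_max_val);		/* ~0.000349066          */
	return 0;
}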
2285 +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
2286 +index df919f44d513..7fa280b28ecb 100644
2287 +--- a/drivers/iio/industrialio-buffer.c
2288 ++++ b/drivers/iio/industrialio-buffer.c
2289 +@@ -151,7 +151,7 @@ unsigned int iio_buffer_poll(struct file *filp,
2290 + struct iio_buffer *rb = indio_dev->buffer;
2291 +
2292 + if (!indio_dev->info)
2293 +- return -ENODEV;
2294 ++ return 0;
2295 +
2296 + poll_wait(filp, &rb->pollq, wait);
2297 + if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
2298 +diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
2299 +index a99692ba91bc..69b8c338fa89 100644
2300 +--- a/drivers/iio/industrialio-event.c
2301 ++++ b/drivers/iio/industrialio-event.c
2302 +@@ -84,7 +84,7 @@ static unsigned int iio_event_poll(struct file *filep,
2303 + unsigned int events = 0;
2304 +
2305 + if (!indio_dev->info)
2306 +- return -ENODEV;
2307 ++ return events;
2308 +
2309 + poll_wait(filep, &ev_int->wait, wait);
2310 +
2311 +diff --git a/drivers/of/address.c b/drivers/of/address.c
2312 +index 8bfda6ade2c0..384574c3987c 100644
2313 +--- a/drivers/of/address.c
2314 ++++ b/drivers/of/address.c
2315 +@@ -845,10 +845,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from,
2316 + struct resource res;
2317 +
2318 + while (dn) {
2319 +- if (of_address_to_resource(dn, 0, &res))
2320 +- continue;
2321 +- if (res.start == base_address)
2322 ++ if (!of_address_to_resource(dn, 0, &res) &&
2323 ++ res.start == base_address)
2324 + return dn;
2325 ++
2326 + dn = of_find_matching_node(dn, matches);
2327 + }
2328 +
2329 +diff --git a/drivers/pci/access.c b/drivers/pci/access.c
2330 +index d9b64a175990..b965c12168b7 100644
2331 +--- a/drivers/pci/access.c
2332 ++++ b/drivers/pci/access.c
2333 +@@ -439,6 +439,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
2334 + .release = pci_vpd_pci22_release,
2335 + };
2336 +
2337 ++static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
2338 ++ void *arg)
2339 ++{
2340 ++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
2341 ++ ssize_t ret;
2342 ++
2343 ++ if (!tdev)
2344 ++ return -ENODEV;
2345 ++
2346 ++ ret = pci_read_vpd(tdev, pos, count, arg);
2347 ++ pci_dev_put(tdev);
2348 ++ return ret;
2349 ++}
2350 ++
2351 ++static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
2352 ++ const void *arg)
2353 ++{
2354 ++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
2355 ++ ssize_t ret;
2356 ++
2357 ++ if (!tdev)
2358 ++ return -ENODEV;
2359 ++
2360 ++ ret = pci_write_vpd(tdev, pos, count, arg);
2361 ++ pci_dev_put(tdev);
2362 ++ return ret;
2363 ++}
2364 ++
2365 ++static const struct pci_vpd_ops pci_vpd_f0_ops = {
2366 ++ .read = pci_vpd_f0_read,
2367 ++ .write = pci_vpd_f0_write,
2368 ++ .release = pci_vpd_pci22_release,
2369 ++};
2370 ++
2371 ++static int pci_vpd_f0_dev_check(struct pci_dev *dev)
2372 ++{
2373 ++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
2374 ++ int ret = 0;
2375 ++
2376 ++ if (!tdev)
2377 ++ return -ENODEV;
2378 ++ if (!tdev->vpd || !tdev->multifunction ||
2379 ++ dev->class != tdev->class || dev->vendor != tdev->vendor ||
2380 ++ dev->device != tdev->device)
2381 ++ ret = -ENODEV;
2382 ++
2383 ++ pci_dev_put(tdev);
2384 ++ return ret;
2385 ++}
2386 ++
2387 + int pci_vpd_pci22_init(struct pci_dev *dev)
2388 + {
2389 + struct pci_vpd_pci22 *vpd;
2390 +@@ -447,12 +497,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
2391 + cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
2392 + if (!cap)
2393 + return -ENODEV;
2394 ++ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
2395 ++ int ret = pci_vpd_f0_dev_check(dev);
2396 ++
2397 ++ if (ret)
2398 ++ return ret;
2399 ++ }
2400 + vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
2401 + if (!vpd)
2402 + return -ENOMEM;
2403 +
2404 + vpd->base.len = PCI_VPD_PCI22_SIZE;
2405 +- vpd->base.ops = &pci_vpd_pci22_ops;
2406 ++ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
2407 ++ vpd->base.ops = &pci_vpd_f0_ops;
2408 ++ else
2409 ++ vpd->base.ops = &pci_vpd_pci22_ops;
2410 + mutex_init(&vpd->lock);
2411 + vpd->cap = cap;
2412 + vpd->busy = false;
2413 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2414 +index c6dc1dfd25d5..804cd3b02c66 100644
2415 +--- a/drivers/pci/quirks.c
2416 ++++ b/drivers/pci/quirks.c
2417 +@@ -1576,6 +1576,18 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB3
2418 +
2419 + #endif
2420 +
2421 ++static void quirk_jmicron_async_suspend(struct pci_dev *dev)
2422 ++{
2423 ++ if (dev->multifunction) {
2424 ++ device_disable_async_suspend(&dev->dev);
2425 ++ dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
2426 ++ }
2427 ++}
2428 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
2429 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
2430 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
2431 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
2432 ++
2433 + #ifdef CONFIG_X86_IO_APIC
2434 + static void quirk_alder_ioapic(struct pci_dev *pdev)
2435 + {
2436 +@@ -1903,6 +1915,15 @@ static void quirk_netmos(struct pci_dev *dev)
2437 + DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
2438 + PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2439 +
2440 ++static void quirk_f0_vpd_link(struct pci_dev *dev)
2441 ++{
2442 ++ if (!dev->multifunction || !PCI_FUNC(dev->devfn))
2443 ++ return;
2444 ++ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
2445 ++}
2446 ++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
2447 ++ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
2448 ++
2449 + static void quirk_e100_interrupt(struct pci_dev *dev)
2450 + {
2451 + u16 command, pmcsr;
2452 +@@ -2838,12 +2859,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
2453 +
2454 + static void fixup_ti816x_class(struct pci_dev *dev)
2455 + {
2456 ++ u32 class = dev->class;
2457 ++
2458 + /* TI 816x devices do not have class code set when in PCIe boot mode */
2459 +- dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
2460 +- dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
2461 ++ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
2462 ++ dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
2463 ++ class, dev->class);
2464 + }
2465 + DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
2466 +- PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
2467 ++ PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
2468 +
2469 + /* Some PCIe devices do not work reliably with the claimed maximum
2470 + * payload size supported.
2471 +diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
2472 +index bd2b75c0d1d1..4fa7bcaf454e 100644
2473 +--- a/drivers/regulator/pbias-regulator.c
2474 ++++ b/drivers/regulator/pbias-regulator.c
2475 +@@ -30,6 +30,7 @@
2476 + struct pbias_reg_info {
2477 + u32 enable;
2478 + u32 enable_mask;
2479 ++ u32 disable_val;
2480 + u32 vmode;
2481 + unsigned int enable_time;
2482 + char *name;
2483 +@@ -62,6 +63,7 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
2484 + .enable = BIT(1),
2485 + .enable_mask = BIT(1),
2486 + .vmode = BIT(0),
2487 ++ .disable_val = 0,
2488 + .enable_time = 100,
2489 + .name = "pbias_mmc_omap2430"
2490 + };
2491 +@@ -77,6 +79,7 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
2492 + static const struct pbias_reg_info pbias_mmc_omap4 = {
2493 + .enable = BIT(26) | BIT(22),
2494 + .enable_mask = BIT(26) | BIT(25) | BIT(22),
2495 ++ .disable_val = BIT(25),
2496 + .vmode = BIT(21),
2497 + .enable_time = 100,
2498 + .name = "pbias_mmc_omap4"
2499 +@@ -85,6 +88,7 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
2500 + static const struct pbias_reg_info pbias_mmc_omap5 = {
2501 + .enable = BIT(27) | BIT(26),
2502 + .enable_mask = BIT(27) | BIT(25) | BIT(26),
2503 ++ .disable_val = BIT(25),
2504 + .vmode = BIT(21),
2505 + .enable_time = 100,
2506 + .name = "pbias_mmc_omap5"
2507 +@@ -159,6 +163,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
2508 + drvdata[data_idx].desc.enable_reg = res->start;
2509 + drvdata[data_idx].desc.enable_mask = info->enable_mask;
2510 + drvdata[data_idx].desc.enable_val = info->enable;
2511 ++ drvdata[data_idx].desc.disable_val = info->disable_val;
2512 +
2513 + cfg.init_data = pbias_matches[idx].init_data;
2514 + cfg.driver_data = &drvdata[data_idx];
2515 +diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
2516 +index 1efa4fdb7fe2..f45cd0cb1b32 100644
2517 +--- a/drivers/s390/char/sclp_early.c
2518 ++++ b/drivers/s390/char/sclp_early.c
2519 +@@ -7,6 +7,7 @@
2520 + #define KMSG_COMPONENT "sclp_early"
2521 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2522 +
2523 ++#include <linux/errno.h>
2524 + #include <asm/ctl_reg.h>
2525 + #include <asm/sclp.h>
2526 + #include <asm/ipl.h>
2527 +diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
2528 +index c956395cf46f..c89bada875f8 100644
2529 +--- a/drivers/soc/tegra/pmc.c
2530 ++++ b/drivers/soc/tegra/pmc.c
2531 +@@ -732,12 +732,12 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
2532 + u32 value, checksum;
2533 +
2534 + if (!pmc->soc->has_tsense_reset)
2535 +- goto out;
2536 ++ return;
2537 +
2538 + np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
2539 + if (!np) {
2540 + dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
2541 +- goto out;
2542 ++ return;
2543 + }
2544 +
2545 + if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) {
2546 +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
2547 +index 37875cf942f7..a5067739ee93 100644
2548 +--- a/drivers/spi/spi-bcm2835.c
2549 ++++ b/drivers/spi/spi-bcm2835.c
2550 +@@ -257,13 +257,11 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
2551 + spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
2552 + bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
2553 +
2554 +- /* handle all the modes */
2555 ++ /* handle the 3-wire mode */
2556 + if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
2557 + cs |= BCM2835_SPI_CS_REN;
2558 +- if (spi->mode & SPI_CPOL)
2559 +- cs |= BCM2835_SPI_CS_CPOL;
2560 +- if (spi->mode & SPI_CPHA)
2561 +- cs |= BCM2835_SPI_CS_CPHA;
2562 ++ else
2563 ++ cs &= ~BCM2835_SPI_CS_REN;
2564 +
2565 + /* for gpio_cs set dummy CS so that no HW-CS get changed
2566 + * we can not run this in bcm2835_spi_set_cs, as it does
2567 +@@ -291,6 +289,25 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
2568 + return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
2569 + }
2570 +
2571 ++static int bcm2835_spi_prepare_message(struct spi_master *master,
2572 ++ struct spi_message *msg)
2573 ++{
2574 ++ struct spi_device *spi = msg->spi;
2575 ++ struct bcm2835_spi *bs = spi_master_get_devdata(master);
2576 ++ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
2577 ++
2578 ++ cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
2579 ++
2580 ++ if (spi->mode & SPI_CPOL)
2581 ++ cs |= BCM2835_SPI_CS_CPOL;
2582 ++ if (spi->mode & SPI_CPHA)
2583 ++ cs |= BCM2835_SPI_CS_CPHA;
2584 ++
2585 ++ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
2586 ++
2587 ++ return 0;
2588 ++}
2589 ++
2590 + static void bcm2835_spi_handle_err(struct spi_master *master,
2591 + struct spi_message *msg)
2592 + {
2593 +@@ -429,6 +446,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
2594 + master->set_cs = bcm2835_spi_set_cs;
2595 + master->transfer_one = bcm2835_spi_transfer_one;
2596 + master->handle_err = bcm2835_spi_handle_err;
2597 ++ master->prepare_message = bcm2835_spi_prepare_message;
2598 + master->dev.of_node = pdev->dev.of_node;
2599 +
2600 + bs = spi_master_get_devdata(master);
2601 +diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
2602 +index 06b34e5bcfa3..47bb9b898dfd 100644
2603 +--- a/drivers/spi/spi-bitbang-txrx.h
2604 ++++ b/drivers/spi/spi-bitbang-txrx.h
2605 +@@ -49,7 +49,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
2606 + {
2607 + /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
2608 +
2609 +- bool oldbit = !(word & 1);
2610 ++ u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
2611 + /* clock starts at inactive polarity */
2612 + for (word <<= (32 - bits); likely(bits); bits--) {
2613 +
2614 +@@ -81,7 +81,7 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
2615 + {
2616 + /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
2617 +
2618 +- bool oldbit = !(word & (1 << 31));
2619 ++ u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
2620 + /* clock starts at inactive polarity */
2621 + for (word <<= (32 - bits); likely(bits); bits--) {
2622 +
2623 +diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
2624 +index eb03e1215195..7edede6e024b 100644
2625 +--- a/drivers/spi/spi-dw-mmio.c
2626 ++++ b/drivers/spi/spi-dw-mmio.c
2627 +@@ -74,6 +74,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
2628 +
2629 + dws->max_freq = clk_get_rate(dwsmmio->clk);
2630 +
2631 ++ of_property_read_u32(pdev->dev.of_node, "reg-io-width",
2632 ++ &dws->reg_io_width);
2633 ++
2634 + num_cs = 4;
2635 +
2636 + if (pdev->dev.of_node)
2637 +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
2638 +index 8d67d03c71eb..4fbfcdc5cb24 100644
2639 +--- a/drivers/spi/spi-dw.c
2640 ++++ b/drivers/spi/spi-dw.c
2641 +@@ -194,7 +194,7 @@ static void dw_writer(struct dw_spi *dws)
2642 + else
2643 + txw = *(u16 *)(dws->tx);
2644 + }
2645 +- dw_writel(dws, DW_SPI_DR, txw);
2646 ++ dw_write_io_reg(dws, DW_SPI_DR, txw);
2647 + dws->tx += dws->n_bytes;
2648 + }
2649 + }
2650 +@@ -205,7 +205,7 @@ static void dw_reader(struct dw_spi *dws)
2651 + u16 rxw;
2652 +
2653 + while (max--) {
2654 +- rxw = dw_readl(dws, DW_SPI_DR);
2655 ++ rxw = dw_read_io_reg(dws, DW_SPI_DR);
2656 + /* Care rx only if the transfer's original "rx" is not null */
2657 + if (dws->rx_end - dws->len) {
2658 + if (dws->n_bytes == 1)
2659 +diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
2660 +index 6c91391c1a4f..b75ed327d5a2 100644
2661 +--- a/drivers/spi/spi-dw.h
2662 ++++ b/drivers/spi/spi-dw.h
2663 +@@ -109,6 +109,7 @@ struct dw_spi {
2664 + u32 fifo_len; /* depth of the FIFO buffer */
2665 + u32 max_freq; /* max bus freq supported */
2666 +
2667 ++ u32 reg_io_width; /* DR I/O width in bytes */
2668 + u16 bus_num;
2669 + u16 num_cs; /* supported slave numbers */
2670 +
2671 +@@ -145,11 +146,45 @@ static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
2672 + return __raw_readl(dws->regs + offset);
2673 + }
2674 +
2675 ++static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
2676 ++{
2677 ++ return __raw_readw(dws->regs + offset);
2678 ++}
2679 ++
2680 + static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
2681 + {
2682 + __raw_writel(val, dws->regs + offset);
2683 + }
2684 +
2685 ++static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
2686 ++{
2687 ++ __raw_writew(val, dws->regs + offset);
2688 ++}
2689 ++
2690 ++static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
2691 ++{
2692 ++ switch (dws->reg_io_width) {
2693 ++ case 2:
2694 ++ return dw_readw(dws, offset);
2695 ++ case 4:
2696 ++ default:
2697 ++ return dw_readl(dws, offset);
2698 ++ }
2699 ++}
2700 ++
2701 ++static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
2702 ++{
2703 ++ switch (dws->reg_io_width) {
2704 ++ case 2:
2705 ++ dw_writew(dws, offset, val);
2706 ++ break;
2707 ++ case 4:
2708 ++ default:
2709 ++ dw_writel(dws, offset, val);
2710 ++ break;
2711 ++ }
2712 ++}
2713 ++
2714 + static inline void spi_enable_chip(struct dw_spi *dws, int enable)
2715 + {
2716 + dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
2717 +diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
2718 +index acce90ac7371..bb916c8d40db 100644
2719 +--- a/drivers/spi/spi-img-spfi.c
2720 ++++ b/drivers/spi/spi-img-spfi.c
2721 +@@ -105,6 +105,10 @@ struct img_spfi {
2722 + bool rx_dma_busy;
2723 + };
2724 +
2725 ++struct img_spfi_device_data {
2726 ++ bool gpio_requested;
2727 ++};
2728 ++
2729 + static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
2730 + {
2731 + return readl(spfi->regs + reg);
2732 +@@ -267,15 +271,15 @@ static int img_spfi_start_pio(struct spi_master *master,
2733 + cpu_relax();
2734 + }
2735 +
2736 +- ret = spfi_wait_all_done(spfi);
2737 +- if (ret < 0)
2738 +- return ret;
2739 +-
2740 + if (rx_bytes > 0 || tx_bytes > 0) {
2741 + dev_err(spfi->dev, "PIO transfer timed out\n");
2742 + return -ETIMEDOUT;
2743 + }
2744 +
2745 ++ ret = spfi_wait_all_done(spfi);
2746 ++ if (ret < 0)
2747 ++ return ret;
2748 ++
2749 + return 0;
2750 + }
2751 +
2752 +@@ -440,21 +444,50 @@ static int img_spfi_unprepare(struct spi_master *master,
2753 +
2754 + static int img_spfi_setup(struct spi_device *spi)
2755 + {
2756 +- int ret;
2757 +-
2758 +- ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
2759 +- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
2760 +- dev_name(&spi->dev));
2761 +- if (ret)
2762 +- dev_err(&spi->dev, "can't request chipselect gpio %d\n",
2763 ++ int ret = -EINVAL;
2764 ++ struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
2765 ++
2766 ++ if (!spfi_data) {
2767 ++ spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
2768 ++ if (!spfi_data)
2769 ++ return -ENOMEM;
2770 ++ spfi_data->gpio_requested = false;
2771 ++ spi_set_ctldata(spi, spfi_data);
2772 ++ }
2773 ++ if (!spfi_data->gpio_requested) {
2774 ++ ret = gpio_request_one(spi->cs_gpio,
2775 ++ (spi->mode & SPI_CS_HIGH) ?
2776 ++ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
2777 ++ dev_name(&spi->dev));
2778 ++ if (ret)
2779 ++ dev_err(&spi->dev, "can't request chipselect gpio %d\n",
2780 + spi->cs_gpio);
2781 +-
2782 ++ else
2783 ++ spfi_data->gpio_requested = true;
2784 ++ } else {
2785 ++ if (gpio_is_valid(spi->cs_gpio)) {
2786 ++ int mode = ((spi->mode & SPI_CS_HIGH) ?
2787 ++ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
2788 ++
2789 ++ ret = gpio_direction_output(spi->cs_gpio, mode);
2790 ++ if (ret)
2791 ++ dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
2792 ++ spi->cs_gpio, ret);
2793 ++ }
2794 ++ }
2795 + return ret;
2796 + }
2797 +
2798 + static void img_spfi_cleanup(struct spi_device *spi)
2799 + {
2800 +- gpio_free(spi->cs_gpio);
2801 ++ struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
2802 ++
2803 ++ if (spfi_data) {
2804 ++ if (spfi_data->gpio_requested)
2805 ++ gpio_free(spi->cs_gpio);
2806 ++ kfree(spfi_data);
2807 ++ spi_set_ctldata(spi, NULL);
2808 ++ }
2809 + }
2810 +
2811 + static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
2812 +diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
2813 +index bcc7c635d8e7..7872f3c78b51 100644
2814 +--- a/drivers/spi/spi-sh-msiof.c
2815 ++++ b/drivers/spi/spi-sh-msiof.c
2816 +@@ -48,8 +48,8 @@ struct sh_msiof_spi_priv {
2817 + const struct sh_msiof_chipdata *chipdata;
2818 + struct sh_msiof_spi_info *info;
2819 + struct completion done;
2820 +- int tx_fifo_size;
2821 +- int rx_fifo_size;
2822 ++ unsigned int tx_fifo_size;
2823 ++ unsigned int rx_fifo_size;
2824 + void *tx_dma_page;
2825 + void *rx_dma_page;
2826 + dma_addr_t tx_dma_addr;
2827 +@@ -95,8 +95,6 @@ struct sh_msiof_spi_priv {
2828 + #define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
2829 + #define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
2830 +
2831 +-#define MAX_WDLEN 256U
2832 +-
2833 + /* TSCR and RSCR */
2834 + #define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
2835 + #define SCR_BRPS(i) (((i) - 1) << 8)
2836 +@@ -850,7 +848,12 @@ static int sh_msiof_transfer_one(struct spi_master *master,
2837 + * DMA supports 32-bit words only, hence pack 8-bit and 16-bit
2838 + * words, with byte resp. word swapping.
2839 + */
2840 +- unsigned int l = min(len, MAX_WDLEN * 4);
2841 ++ unsigned int l = 0;
2842 ++
2843 ++ if (tx_buf)
2844 ++ l = min(len, p->tx_fifo_size * 4);
2845 ++ if (rx_buf)
2846 ++ l = min(len, p->rx_fifo_size * 4);
2847 +
2848 + if (bits <= 8) {
2849 + if (l & 3)
2850 +@@ -963,7 +966,7 @@ static const struct sh_msiof_chipdata sh_data = {
2851 +
2852 + static const struct sh_msiof_chipdata r8a779x_data = {
2853 + .tx_fifo_size = 64,
2854 +- .rx_fifo_size = 256,
2855 ++ .rx_fifo_size = 64,
2856 + .master_flags = SPI_MASTER_MUST_TX,
2857 + };
2858 +
2859 +diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
2860 +index 133f53a9c1d4..a339c1e9997a 100644
2861 +--- a/drivers/spi/spi-xilinx.c
2862 ++++ b/drivers/spi/spi-xilinx.c
2863 +@@ -249,19 +249,23 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
2864 + xspi->tx_ptr = t->tx_buf;
2865 + xspi->rx_ptr = t->rx_buf;
2866 + remaining_words = t->len / xspi->bytes_per_word;
2867 +- reinit_completion(&xspi->done);
2868 +
2869 + if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
2870 ++ u32 isr;
2871 + use_irq = true;
2872 +- xspi->write_fn(XSPI_INTR_TX_EMPTY,
2873 +- xspi->regs + XIPIF_V123B_IISR_OFFSET);
2874 +- /* Enable the global IPIF interrupt */
2875 +- xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
2876 +- xspi->regs + XIPIF_V123B_DGIER_OFFSET);
2877 + /* Inhibit irq to avoid spurious irqs on tx_empty*/
2878 + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
2879 + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
2880 + xspi->regs + XSPI_CR_OFFSET);
2881 ++ /* ACK old irqs (if any) */
2882 ++ isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
2883 ++ if (isr)
2884 ++ xspi->write_fn(isr,
2885 ++ xspi->regs + XIPIF_V123B_IISR_OFFSET);
2886 ++ /* Enable the global IPIF interrupt */
2887 ++ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
2888 ++ xspi->regs + XIPIF_V123B_DGIER_OFFSET);
2889 ++ reinit_completion(&xspi->done);
2890 + }
2891 +
2892 + while (remaining_words) {
2893 +@@ -302,8 +306,10 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
2894 + remaining_words -= n_words;
2895 + }
2896 +
2897 +- if (use_irq)
2898 ++ if (use_irq) {
2899 + xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
2900 ++ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
2901 ++ }
2902 +
2903 + return t->len;
2904 + }
2905 +diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
2906 +index 934af3ff7897..b0fc027cf485 100644
2907 +--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
2908 ++++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
2909 +@@ -120,8 +120,20 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
2910 + {
2911 + unsigned long reg = (unsigned long)s->private;
2912 +
2913 +- if (comedi_dio_update_state(s, data))
2914 +- outl(s->state, dev->iobase + reg);
2915 ++ if (comedi_dio_update_state(s, data)) {
2916 ++ unsigned int val = s->state;
2917 ++
2918 ++ if (s->n_chan == 16) {
2919 ++ /*
2920 ++ * It seems the PCI-7230 needs the 16-bit DO state
2921 ++ * to be shifted left by 16 bits before being written
2922 ++ * to the 32-bit register. Set the value in both
2923 ++ * halves of the register to be sure.
2924 ++ */
2925 ++ val |= val << 16;
2926 ++ }
2927 ++ outl(val, dev->iobase + reg);
2928 ++ }
2929 +
2930 + data[1] = s->state;
2931 +
2932 +diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
2933 +index eaa9add491df..dc0b25a54088 100644
2934 +--- a/drivers/staging/comedi/drivers/usbduxsigma.c
2935 ++++ b/drivers/staging/comedi/drivers/usbduxsigma.c
2936 +@@ -550,27 +550,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
2937 + if (err)
2938 + return 3;
2939 +
2940 +- /* Step 4: fix up any arguments */
2941 +-
2942 +- if (high_speed) {
2943 +- /*
2944 +- * every 2 channels get a time window of 125us. Thus, if we
2945 +- * sample all 16 channels we need 1ms. If we sample only one
2946 +- * channel we need only 125us
2947 +- */
2948 +- devpriv->ai_interval = interval;
2949 +- devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
2950 +- } else {
2951 +- /* interval always 1ms */
2952 +- devpriv->ai_interval = 1;
2953 +- devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
2954 +- }
2955 +- if (devpriv->ai_timer < 1)
2956 +- err |= -EINVAL;
2957 +-
2958 +- if (err)
2959 +- return 4;
2960 +-
2961 + return 0;
2962 + }
2963 +
2964 +@@ -668,6 +647,22 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
2965 +
2966 + down(&devpriv->sem);
2967 +
2968 ++ if (devpriv->high_speed) {
2969 ++ /*
2970 ++ * every 2 channels get a time window of 125us. Thus, if we
2971 ++ * sample all 16 channels we need 1ms. If we sample only one
2972 ++ * channel we need only 125us
2973 ++ */
2974 ++ unsigned int interval = usbduxsigma_chans_to_interval(len);
2975 ++
2976 ++ devpriv->ai_interval = interval;
2977 ++ devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
2978 ++ } else {
2979 ++ /* interval always 1ms */
2980 ++ devpriv->ai_interval = 1;
2981 ++ devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
2982 ++ }
2983 ++
2984 + for (i = 0; i < len; i++) {
2985 + unsigned int chan = CR_CHAN(cmd->chanlist[i]);
2986 +
2987 +@@ -917,25 +912,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
2988 + if (err)
2989 + return 3;
2990 +
2991 +- /* Step 4: fix up any arguments */
2992 +-
2993 +- /* we count in timer steps */
2994 +- if (high_speed) {
2995 +- /* timing of the conversion itself: every 125 us */
2996 +- devpriv->ao_timer = cmd->convert_arg / 125000;
2997 +- } else {
2998 +- /*
2999 +- * timing of the scan: every 1ms
3000 +- * we get all channels at once
3001 +- */
3002 +- devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
3003 +- }
3004 +- if (devpriv->ao_timer < 1)
3005 +- err |= -EINVAL;
3006 +-
3007 +- if (err)
3008 +- return 4;
3009 +-
3010 + return 0;
3011 + }
3012 +
3013 +@@ -948,6 +924,20 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev,
3014 +
3015 + down(&devpriv->sem);
3016 +
3017 ++ if (cmd->convert_src == TRIG_TIMER) {
3018 ++ /*
3019 ++ * timing of the conversion itself: every 125 us
3020 ++ * at high speed (not used yet)
3021 ++ */
3022 ++ devpriv->ao_timer = cmd->convert_arg / 125000;
3023 ++ } else {
3024 ++ /*
3025 ++ * timing of the scan: every 1ms
3026 ++ * we get all channels at once
3027 ++ */
3028 ++ devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
3029 ++ }
3030 ++
3031 + devpriv->ao_counter = devpriv->ao_timer;
3032 +
3033 + if (cmd->start_src == TRIG_NOW) {
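The timing setup that moved from the cmdtest into the command handler is easy to sanity-check by hand (illustration only; the values follow the comment above, and scan_begin_arg is in nanoseconds): all 16 channels at high speed need 8 slots of 125 us, i.e. a 1 ms scan, which yields ai_timer = 1:

#include <stdio.h>

int main(void)
{
	/* Illustrative arithmetic mirroring usbduxsigma_ai_cmd() above:
	 * every 2 channels share one 125 us window, so 16 channels need
	 * 8 windows = 1 ms per scan. */
	unsigned int interval = 8;		/* 125 us slots for 16 channels */
	unsigned int scan_begin_arg = 1000000;	/* requested scan period, ns    */
	unsigned int ai_timer = scan_begin_arg / (125000 * interval);

	printf("ai_interval=%u ai_timer=%u\n", interval, ai_timer); /* 8, 1 */
	return 0;
}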
3034 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
3035 +index 46bcebba54b2..9373cca121d3 100644
3036 +--- a/drivers/tty/serial/8250/8250_pci.c
3037 ++++ b/drivers/tty/serial/8250/8250_pci.c
3038 +@@ -2000,6 +2000,12 @@ pci_wch_ch38x_setup(struct serial_private *priv,
3039 +
3040 + #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
3041 +
3042 ++#define PCI_VENDOR_ID_PERICOM 0x12D8
3043 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
3044 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
3045 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
3046 ++#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
3047 ++
3048 + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
3049 + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
3050 + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
3051 +@@ -2314,27 +2320,12 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
3052 + * Pericom
3053 + */
3054 + {
3055 +- .vendor = 0x12d8,
3056 +- .device = 0x7952,
3057 +- .subvendor = PCI_ANY_ID,
3058 +- .subdevice = PCI_ANY_ID,
3059 +- .setup = pci_pericom_setup,
3060 +- },
3061 +- {
3062 +- .vendor = 0x12d8,
3063 +- .device = 0x7954,
3064 +- .subvendor = PCI_ANY_ID,
3065 +- .subdevice = PCI_ANY_ID,
3066 +- .setup = pci_pericom_setup,
3067 +- },
3068 +- {
3069 +- .vendor = 0x12d8,
3070 +- .device = 0x7958,
3071 +- .subvendor = PCI_ANY_ID,
3072 +- .subdevice = PCI_ANY_ID,
3073 +- .setup = pci_pericom_setup,
3074 ++ .vendor = PCI_VENDOR_ID_PERICOM,
3075 ++ .device = PCI_ANY_ID,
3076 ++ .subvendor = PCI_ANY_ID,
3077 ++ .subdevice = PCI_ANY_ID,
3078 ++ .setup = pci_pericom_setup,
3079 + },
3080 +-
3081 + /*
3082 + * PLX
3083 + */
3084 +@@ -3031,6 +3022,10 @@ enum pci_board_num_t {
3085 + pbn_fintek_8,
3086 + pbn_fintek_12,
3087 + pbn_wch384_4,
3088 ++ pbn_pericom_PI7C9X7951,
3089 ++ pbn_pericom_PI7C9X7952,
3090 ++ pbn_pericom_PI7C9X7954,
3091 ++ pbn_pericom_PI7C9X7958,
3092 + };
3093 +
3094 + /*
3095 +@@ -3848,7 +3843,6 @@ static struct pciserial_board pci_boards[] = {
3096 + .base_baud = 115200,
3097 + .first_offset = 0x40,
3098 + },
3099 +-
3100 + [pbn_wch384_4] = {
3101 + .flags = FL_BASE0,
3102 + .num_ports = 4,
3103 +@@ -3856,6 +3850,33 @@ static struct pciserial_board pci_boards[] = {
3104 + .uart_offset = 8,
3105 + .first_offset = 0xC0,
3106 + },
3107 ++ /*
3108 ++ * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
3109 ++ */
3110 ++ [pbn_pericom_PI7C9X7951] = {
3111 ++ .flags = FL_BASE0,
3112 ++ .num_ports = 1,
3113 ++ .base_baud = 921600,
3114 ++ .uart_offset = 0x8,
3115 ++ },
3116 ++ [pbn_pericom_PI7C9X7952] = {
3117 ++ .flags = FL_BASE0,
3118 ++ .num_ports = 2,
3119 ++ .base_baud = 921600,
3120 ++ .uart_offset = 0x8,
3121 ++ },
3122 ++ [pbn_pericom_PI7C9X7954] = {
3123 ++ .flags = FL_BASE0,
3124 ++ .num_ports = 4,
3125 ++ .base_baud = 921600,
3126 ++ .uart_offset = 0x8,
3127 ++ },
3128 ++ [pbn_pericom_PI7C9X7958] = {
3129 ++ .flags = FL_BASE0,
3130 ++ .num_ports = 8,
3131 ++ .base_baud = 921600,
3132 ++ .uart_offset = 0x8,
3133 ++ },
3134 + };
3135 +
3136 + static const struct pci_device_id blacklist[] = {
3137 +@@ -5117,6 +5138,25 @@ static struct pci_device_id serial_pci_tbl[] = {
3138 + 0,
3139 + 0, pbn_exar_XR17V8358 },
3140 + /*
3141 ++ * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
3142 ++ */
3143 ++ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951,
3144 ++ PCI_ANY_ID, PCI_ANY_ID,
3145 ++ 0,
3146 ++ 0, pbn_pericom_PI7C9X7951 },
3147 ++ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952,
3148 ++ PCI_ANY_ID, PCI_ANY_ID,
3149 ++ 0,
3150 ++ 0, pbn_pericom_PI7C9X7952 },
3151 ++ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954,
3152 ++ PCI_ANY_ID, PCI_ANY_ID,
3153 ++ 0,
3154 ++ 0, pbn_pericom_PI7C9X7954 },
3155 ++ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958,
3156 ++ PCI_ANY_ID, PCI_ANY_ID,
3157 ++ 0,
3158 ++ 0, pbn_pericom_PI7C9X7958 },
3159 ++ /*
3160 + * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
3161 + */
3162 + { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
3163 +diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
3164 +index 50a09cd76d50..658b392d1170 100644
3165 +--- a/drivers/tty/serial/8250/8250_pnp.c
3166 ++++ b/drivers/tty/serial/8250/8250_pnp.c
3167 +@@ -41,6 +41,12 @@ static const struct pnp_device_id pnp_dev_table[] = {
3168 + { "AEI1240", 0 },
3169 + /* Rockwell 56K ACF II Fax+Data+Voice Modem */
3170 + { "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ },
3171 ++ /*
3172 ++ * ALi Fast Infrared Controller
3173 ++ * The native driver (ali-ircc) is broken, so at least
3174 ++ * this way the port can be used with irtty-sir.
3175 ++ */
3176 ++ { "ALI5123", 0 },
3177 + /* AZT3005 PnP SOUND DEVICE */
3178 + { "AZT4001", 0 },
3179 + /* Best Data Products Inc. Smart One 336F PnP Modem */
3180 +@@ -364,6 +370,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
3181 + /* Winbond CIR port, should not be probed. We should keep track
3182 + of it to prevent the legacy serial driver from probing it */
3183 + { "WEC1022", CIR_PORT },
3184 ++ /*
3185 ++ * SMSC IrCC SIR/FIR port, which should likewise not be probed by the
3186 ++ * serial driver so that its own driver can bind to it.
3187 ++ */
3188 ++ { "SMCF010", CIR_PORT },
3189 + { "", 0 }
3190 + };
3191 +
3192 +diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
3193 +index 35c55505b3eb..5a41b8fbb10a 100644
3194 +--- a/drivers/tty/serial/men_z135_uart.c
3195 ++++ b/drivers/tty/serial/men_z135_uart.c
3196 +@@ -392,7 +392,6 @@ static irqreturn_t men_z135_intr(int irq, void *data)
3197 + struct men_z135_port *uart = (struct men_z135_port *)data;
3198 + struct uart_port *port = &uart->port;
3199 + bool handled = false;
3200 +- unsigned long flags;
3201 + int irq_id;
3202 +
3203 + uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
3204 +@@ -401,7 +400,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
3205 + if (!irq_id)
3206 + goto out;
3207 +
3208 +- spin_lock_irqsave(&port->lock, flags);
3209 ++ spin_lock(&port->lock);
3210 + /* It's safe to write to IIR[7:6] RXC[9:8] */
3211 + iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
3212 +
3213 +@@ -427,7 +426,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
3214 + handled = true;
3215 + }
3216 +
3217 +- spin_unlock_irqrestore(&port->lock, flags);
3218 ++ spin_unlock(&port->lock);
3219 + out:
3220 + return IRQ_RETVAL(handled);
3221 + }
3222 +@@ -717,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port,
3223 +
3224 + baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
3225 +
3226 +- spin_lock(&port->lock);
3227 ++ spin_lock_irq(&port->lock);
3228 + if (tty_termios_baud_rate(termios))
3229 + tty_termios_encode_baud_rate(termios, baud, baud);
3230 +
3231 +@@ -725,7 +724,7 @@ static void men_z135_set_termios(struct uart_port *port,
3232 + iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
3233 +
3234 + uart_update_timeout(port, termios->c_cflag, baud);
3235 +- spin_unlock(&port->lock);
3236 ++ spin_unlock_irq(&port->lock);
3237 + }
3238 +
3239 + static const char *men_z135_type(struct uart_port *port)
3240 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
3241 +index a0ae942d9562..1e0d9b8c48c9 100644
3242 +--- a/drivers/tty/serial/samsung.c
3243 ++++ b/drivers/tty/serial/samsung.c
3244 +@@ -295,15 +295,6 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
3245 + if (ourport->tx_mode != S3C24XX_TX_DMA)
3246 + enable_tx_dma(ourport);
3247 +
3248 +- while (xmit->tail & (dma_get_cache_alignment() - 1)) {
3249 +- if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
3250 +- return 0;
3251 +- wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
3252 +- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
3253 +- port->icount.tx++;
3254 +- count--;
3255 +- }
3256 +-
3257 + dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
3258 + dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
3259 +
3260 +@@ -342,7 +333,9 @@ static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
3261 + return;
3262 + }
3263 +
3264 +- if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize)
3265 ++ if (!ourport->dma || !ourport->dma->tx_chan ||
3266 ++ count < ourport->min_dma_size ||
3267 ++ xmit->tail & (dma_get_cache_alignment() - 1))
3268 + s3c24xx_serial_start_tx_pio(ourport);
3269 + else
3270 + s3c24xx_serial_start_tx_dma(ourport, count);
3271 +@@ -736,15 +729,20 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
3272 + struct uart_port *port = &ourport->port;
3273 + struct circ_buf *xmit = &port->state->xmit;
3274 + unsigned long flags;
3275 +- int count;
3276 ++ int count, dma_count = 0;
3277 +
3278 + spin_lock_irqsave(&port->lock, flags);
3279 +
3280 + count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
3281 +
3282 +- if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) {
3283 +- s3c24xx_serial_start_tx_dma(ourport, count);
3284 +- goto out;
3285 ++ if (ourport->dma && ourport->dma->tx_chan &&
3286 ++ count >= ourport->min_dma_size) {
3287 ++ int align = dma_get_cache_alignment() -
3288 ++ (xmit->tail & (dma_get_cache_alignment() - 1));
3289 ++ if (count-align >= ourport->min_dma_size) {
3290 ++ dma_count = count-align;
3291 ++ count = align;
3292 ++ }
3293 + }
3294 +
3295 + if (port->x_char) {
3296 +@@ -765,14 +763,24 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
3297 +
3298 + /* try and drain the buffer... */
3299 +
3300 +- count = port->fifosize;
3301 +- while (!uart_circ_empty(xmit) && count-- > 0) {
3302 ++ if (count > port->fifosize) {
3303 ++ count = port->fifosize;
3304 ++ dma_count = 0;
3305 ++ }
3306 ++
3307 ++ while (!uart_circ_empty(xmit) && count > 0) {
3308 + if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
3309 + break;
3310 +
3311 + wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
3312 + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
3313 + port->icount.tx++;
3314 ++ count--;
3315 ++ }
3316 ++
3317 ++ if (!count && dma_count) {
3318 ++ s3c24xx_serial_start_tx_dma(ourport, dma_count);
3319 ++ goto out;
3320 + }
3321 +
3322 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
3323 +@@ -1838,6 +1846,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
3324 + else if (ourport->info->fifosize)
3325 + ourport->port.fifosize = ourport->info->fifosize;
3326 +
3327 ++ /*
3328 ++ * DMA transfers must be aligned at least to cache line size,
3329 ++ * so find minimal transfer size suitable for DMA mode
3330 ++ */
3331 ++ ourport->min_dma_size = max_t(int, ourport->port.fifosize,
3332 ++ dma_get_cache_alignment());
3333 ++
3334 + probe_index++;
3335 +
3336 + dbg("%s: initialising port %p...\n", __func__, ourport);
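
The new min_dma_size together with the alignment test above splits each transmit burst into a short PIO head, which advances the circular-buffer tail to a cache-line boundary, and a DMA remainder that is only used when it is still at least min_dma_size bytes long. A small sketch of that split, with 64 bytes standing in for dma_get_cache_alignment() (an assumed example value):

#include <stdio.h>

#define CACHE_ALIGN 64  /* assumed cache-line size */

static void split_tx(unsigned int count, unsigned int tail,
                     unsigned int min_dma_size)
{
        unsigned int pio = count, dma = 0;

        if (count >= min_dma_size) {
                /* bytes needed to reach the next cache-line boundary */
                unsigned int align = CACHE_ALIGN - (tail & (CACHE_ALIGN - 1));

                if (count - align >= min_dma_size) {
                        dma = count - align;
                        pio = align;
                }
        }
        printf("tail=%u count=%u -> pio=%u dma=%u\n", tail, count, pio, dma);
}

int main(void)
{
        split_tx(300, 10, 64); /* unaligned tail: 54 bytes by PIO, 246 by DMA */
        split_tx(40, 0, 64);   /* below min_dma_size: everything by PIO */
        return 0;
}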
3337 +diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
3338 +index d275032aa68d..fc5deaa4f382 100644
3339 +--- a/drivers/tty/serial/samsung.h
3340 ++++ b/drivers/tty/serial/samsung.h
3341 +@@ -82,6 +82,7 @@ struct s3c24xx_uart_port {
3342 + unsigned char tx_claimed;
3343 + unsigned int pm_level;
3344 + unsigned long baudclk_rate;
3345 ++ unsigned int min_dma_size;
3346 +
3347 + unsigned int rx_irq;
3348 + unsigned int tx_irq;
3349 +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
3350 +index 69e769c35cf5..06ecd1e6871c 100644
3351 +--- a/drivers/usb/dwc3/ep0.c
3352 ++++ b/drivers/usb/dwc3/ep0.c
3353 +@@ -820,6 +820,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
3354 + unsigned maxp = ep0->endpoint.maxpacket;
3355 +
3356 + transfer_size += (maxp - (transfer_size % maxp));
3357 ++
3358 ++ /* At most DWC3_EP0_BOUNCE_SIZE bytes can be received */
3359 ++ if (transfer_size > DWC3_EP0_BOUNCE_SIZE)
3360 ++ transfer_size = DWC3_EP0_BOUNCE_SIZE;
3361 ++
3362 + transferred = min_t(u32, ur->length,
3363 + transfer_size - length);
3364 + memcpy(ur->buf, dwc->ep0_bounce, transferred);
3365 +@@ -941,11 +946,14 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
3366 + return;
3367 + }
3368 +
3369 +- WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
3370 +-
3371 + maxpacket = dep->endpoint.maxpacket;
3372 + transfer_size = roundup(req->request.length, maxpacket);
3373 +
3374 ++ if (transfer_size > DWC3_EP0_BOUNCE_SIZE) {
3375 ++ dev_WARN(dwc->dev, "bounce buf can't handle req len\n");
3376 ++ transfer_size = DWC3_EP0_BOUNCE_SIZE;
3377 ++ }
3378 ++
3379 + dwc->ep0_bounced = true;
3380 +
3381 + /*
3382 +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
3383 +index 531861547253..96d935b00504 100644
3384 +--- a/drivers/usb/gadget/function/f_uac2.c
3385 ++++ b/drivers/usb/gadget/function/f_uac2.c
3386 +@@ -975,6 +975,29 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
3387 + "%s:%d Error!\n", __func__, __LINE__);
3388 + }
3389 +
3390 ++static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
3391 ++ struct usb_endpoint_descriptor *ep_desc,
3392 ++ unsigned int factor, bool is_playback)
3393 ++{
3394 ++ int chmask, srate, ssize;
3395 ++ u16 max_packet_size;
3396 ++
3397 ++ if (is_playback) {
3398 ++ chmask = uac2_opts->p_chmask;
3399 ++ srate = uac2_opts->p_srate;
3400 ++ ssize = uac2_opts->p_ssize;
3401 ++ } else {
3402 ++ chmask = uac2_opts->c_chmask;
3403 ++ srate = uac2_opts->c_srate;
3404 ++ ssize = uac2_opts->c_ssize;
3405 ++ }
3406 ++
3407 ++ max_packet_size = num_channels(chmask) * ssize *
3408 ++ DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
3409 ++ ep_desc->wMaxPacketSize = cpu_to_le16(min(max_packet_size,
3410 ++ le16_to_cpu(ep_desc->wMaxPacketSize)));
3411 ++}
3412 ++
3413 + static int
3414 + afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
3415 + {
3416 +@@ -1070,10 +1093,14 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
3417 + uac2->p_prm.uac2 = uac2;
3418 + uac2->c_prm.uac2 = uac2;
3419 +
3420 ++ /* Calculate wMaxPacketSize according to audio bandwidth */
3421 ++ set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
3422 ++ set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
3423 ++ set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
3424 ++ set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
3425 ++
3426 + hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
3427 +- hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize;
3428 + hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
3429 +- hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize;
3430 +
3431 + ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
3432 + if (ret)
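
The wMaxPacketSize computed by set_ep_max_packet_size() above is simply channels x sample size x samples per (micro)frame, where factor is 1000 for full speed and 8000 for high speed and bInterval scales the service interval. A user-space sketch of the same formula; the chmask/srate/ssize/bInterval values in main() are assumed example settings, not the gadget's defaults.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* count the bits set in the channel mask (stand-in for the driver's num_channels()) */
static int num_channels(unsigned int chmask)
{
        int n = 0;

        while (chmask) {
                n += chmask & 1;
                chmask >>= 1;
        }
        return n;
}

static unsigned int max_packet_size(unsigned int chmask, unsigned int srate,
                                    unsigned int ssize, unsigned int factor,
                                    unsigned int bInterval)
{
        return num_channels(chmask) * ssize *
               DIV_ROUND_UP(srate, factor / (1 << (bInterval - 1)));
}

int main(void)
{
        /* stereo, 48 kHz, 16-bit samples, high speed, bInterval 4 */
        printf("%u bytes\n", max_packet_size(0x3, 48000, 2, 8000, 4));
        return 0;
}

This prints 192 bytes; the min() in the driver then keeps the descriptor's original value as an upper bound.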
3433 +diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
3434 +index 309706fe4bf0..9704053dfe05 100644
3435 +--- a/drivers/usb/gadget/udc/m66592-udc.c
3436 ++++ b/drivers/usb/gadget/udc/m66592-udc.c
3437 +@@ -1052,7 +1052,7 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
3438 + tmp = m66592_read(m66592, M66592_INTSTS0) &
3439 + M66592_CTSQ;
3440 + udelay(1);
3441 +- } while (tmp != M66592_CS_IDST || timeout-- > 0);
3442 ++ } while (tmp != M66592_CS_IDST && timeout-- > 0);
3443 +
3444 + if (tmp == M66592_CS_IDST)
3445 + m66592_bset(m66592,
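
The one-character fix above ('||' becomes '&&') turns what was effectively an unbounded busy loop into a bounded one: polling should continue only while the state has not been reached and the timeout has not expired. A generic, runnable sketch of that pattern follows; the helper names are made up for illustration.

#include <stdio.h>

static int attempts;

/* pretend the hardware reaches the idle state on the third read */
static int state_reached(void)
{
        return ++attempts >= 3;
}

static int poll_until(int (*done)(void), int timeout)
{
        int ok;

        do {
                ok = done();
        } while (!ok && timeout-- > 0);   /* '&&', not '||' */

        return ok;
}

int main(void)
{
        int ok = poll_until(state_reached, 10);

        printf("done=%d after %d read(s)\n", ok, attempts);
        return 0;
}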
3446 +diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
3447 +index 5e44407aa099..5216f2b09d63 100644
3448 +--- a/drivers/usb/host/ehci-sysfs.c
3449 ++++ b/drivers/usb/host/ehci-sysfs.c
3450 +@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
3451 + int count = PAGE_SIZE;
3452 + char *ptr = buf;
3453 +
3454 +- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
3455 ++ ehci = hcd_to_ehci(dev_get_drvdata(dev));
3456 + nports = HCS_N_PORTS(ehci->hcs_params);
3457 +
3458 + for (index = 0; index < nports; ++index) {
3459 +@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
3460 + struct ehci_hcd *ehci;
3461 + int portnum, new_owner;
3462 +
3463 +- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
3464 ++ ehci = hcd_to_ehci(dev_get_drvdata(dev));
3465 + new_owner = PORT_OWNER; /* Owned by companion */
3466 + if (sscanf(buf, "%d", &portnum) != 1)
3467 + return -EINVAL;
3468 +@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
3469 + struct ehci_hcd *ehci;
3470 + int n;
3471 +
3472 +- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
3473 ++ ehci = hcd_to_ehci(dev_get_drvdata(dev));
3474 + n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
3475 + return n;
3476 + }
3477 +@@ -101,7 +101,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
3478 + unsigned long flags;
3479 + ssize_t ret;
3480 +
3481 +- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
3482 ++ ehci = hcd_to_ehci(dev_get_drvdata(dev));
3483 + if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
3484 + return -EINVAL;
3485 +
3486 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3487 +index 4c8b3b82103d..a5a0376bbd48 100644
3488 +--- a/drivers/usb/serial/ftdi_sio.c
3489 ++++ b/drivers/usb/serial/ftdi_sio.c
3490 +@@ -605,6 +605,10 @@ static const struct usb_device_id id_table_combined[] = {
3491 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
3492 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
3493 + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
3494 ++ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
3495 ++ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
3496 ++ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
3497 ++ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
3498 + /*
3499 + * ELV devices:
3500 + */
3501 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3502 +index 792e054126de..2943b97b2a83 100644
3503 +--- a/drivers/usb/serial/ftdi_sio_ids.h
3504 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
3505 +@@ -568,6 +568,14 @@
3506 + */
3507 + #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
3508 +
3509 ++/*
3510 ++ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
3511 ++ */
3512 ++#define FTDI_CUSTOMWARE_MINIPLEX_PID 0xfd48 /* MiniPlex first generation NMEA Multiplexer */
3513 ++#define FTDI_CUSTOMWARE_MINIPLEX2_PID 0xfd49 /* MiniPlex-USB and MiniPlex-2 series */
3514 ++#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID 0xfd4a /* MiniPlex-2Wi */
3515 ++#define FTDI_CUSTOMWARE_MINIPLEX3_PID 0xfd4b /* MiniPlex-3 series */
3516 ++
3517 +
3518 + /********************************/
3519 + /** third-party VID/PID combos **/
3520 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3521 +index f5257af33ecf..ae682e4eeaef 100644
3522 +--- a/drivers/usb/serial/pl2303.c
3523 ++++ b/drivers/usb/serial/pl2303.c
3524 +@@ -362,21 +362,38 @@ static speed_t pl2303_encode_baud_rate_direct(unsigned char buf[4],
3525 + static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4],
3526 + speed_t baud)
3527 + {
3528 +- unsigned int tmp;
3529 ++ unsigned int baseline, mantissa, exponent;
3530 +
3531 + /*
3532 + * Apparently the formula is:
3533 +- * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
3534 ++ * baudrate = 12M * 32 / (mantissa * 4^exponent)
3535 ++ * where
3536 ++ * mantissa = buf[8:0]
3537 ++ * exponent = buf[11:9]
3538 + */
3539 +- tmp = 12000000 * 32 / baud;
3540 ++ baseline = 12000000 * 32;
3541 ++ mantissa = baseline / baud;
3542 ++ if (mantissa == 0)
3543 ++ mantissa = 1; /* Avoid dividing by zero if baud > 32*12M. */
3544 ++ exponent = 0;
3545 ++ while (mantissa >= 512) {
3546 ++ if (exponent < 7) {
3547 ++ mantissa >>= 2; /* divide by 4 */
3548 ++ exponent++;
3549 ++ } else {
3550 ++ /* Exponent is maxed. Trim mantissa and leave. */
3551 ++ mantissa = 511;
3552 ++ break;
3553 ++ }
3554 ++ }
3555 ++
3556 + buf[3] = 0x80;
3557 + buf[2] = 0;
3558 +- buf[1] = (tmp >= 256);
3559 +- while (tmp >= 256) {
3560 +- tmp >>= 2;
3561 +- buf[1] <<= 1;
3562 +- }
3563 +- buf[0] = tmp;
3564 ++ buf[1] = exponent << 1 | mantissa >> 8;
3565 ++ buf[0] = mantissa & 0xff;
3566 ++
3567 ++ /* Calculate and return the exact baud rate. */
3568 ++ baud = (baseline / mantissa) >> (exponent << 1);
3569 +
3570 + return baud;
3571 + }
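
The rewritten divisor encoding above packs a 9-bit mantissa and a 3-bit exponent into buf[1] and buf[0] and returns the baud rate the hardware will actually produce, rather than the one requested. The same arithmetic as a stand-alone user-space sketch, useful for checking how far a requested rate gets rounded:

#include <stdio.h>

/* baudrate = 12M * 32 / (mantissa * 4^exponent), mantissa < 512, exponent < 8 */
static unsigned int encode_divisor(unsigned char buf[4], unsigned int baud)
{
        unsigned int baseline = 12000000 * 32;
        unsigned int mantissa = baseline / baud;
        unsigned int exponent = 0;

        if (mantissa == 0)
                mantissa = 1;           /* requested baud > 32 * 12M */
        while (mantissa >= 512) {
                if (exponent < 7) {
                        mantissa >>= 2; /* divide by 4 */
                        exponent++;
                } else {
                        mantissa = 511; /* exponent maxed, clamp */
                        break;
                }
        }
        buf[3] = 0x80;
        buf[2] = 0;
        buf[1] = exponent << 1 | mantissa >> 8;
        buf[0] = mantissa & 0xff;

        return (baseline / mantissa) >> (exponent << 1);
}

int main(void)
{
        unsigned char buf[4];

        printf("115200 -> %u\n", encode_divisor(buf, 115200)); /* prints 115384 */
        return 0;
}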
3572 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
3573 +index d156545728c2..ebcec8cda858 100644
3574 +--- a/drivers/usb/serial/qcserial.c
3575 ++++ b/drivers/usb/serial/qcserial.c
3576 +@@ -139,6 +139,7 @@ static const struct usb_device_id id_table[] = {
3577 + {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
3578 +
3579 + /* non-Gobi Sierra Wireless devices */
3580 ++ {DEVICE_SWI(0x03f0, 0x4e1d)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
3581 + {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
3582 + {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
3583 + {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
3584 +diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
3585 +index 8fceec7298e0..6ed804450a5a 100644
3586 +--- a/drivers/usb/serial/symbolserial.c
3587 ++++ b/drivers/usb/serial/symbolserial.c
3588 +@@ -94,7 +94,7 @@ exit:
3589 +
3590 + static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
3591 + {
3592 +- struct symbol_private *priv = usb_get_serial_data(port->serial);
3593 ++ struct symbol_private *priv = usb_get_serial_port_data(port);
3594 + unsigned long flags;
3595 + int result = 0;
3596 +
3597 +@@ -120,7 +120,7 @@ static void symbol_close(struct usb_serial_port *port)
3598 + static void symbol_throttle(struct tty_struct *tty)
3599 + {
3600 + struct usb_serial_port *port = tty->driver_data;
3601 +- struct symbol_private *priv = usb_get_serial_data(port->serial);
3602 ++ struct symbol_private *priv = usb_get_serial_port_data(port);
3603 +
3604 + spin_lock_irq(&priv->lock);
3605 + priv->throttled = true;
3606 +@@ -130,7 +130,7 @@ static void symbol_throttle(struct tty_struct *tty)
3607 + static void symbol_unthrottle(struct tty_struct *tty)
3608 + {
3609 + struct usb_serial_port *port = tty->driver_data;
3610 +- struct symbol_private *priv = usb_get_serial_data(port->serial);
3611 ++ struct symbol_private *priv = usb_get_serial_port_data(port);
3612 + int result;
3613 + bool was_throttled;
3614 +
3615 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
3616 +index 4e9905374078..0d47422e3548 100644
3617 +--- a/fs/ceph/super.c
3618 ++++ b/fs/ceph/super.c
3619 +@@ -466,7 +466,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
3620 + if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
3621 + seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
3622 + if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
3623 +- seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
3624 ++ seq_show_option(m, "snapdirname", fsopt->snapdir_name);
3625 +
3626 + return 0;
3627 + }
3628 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3629 +index 0a9fb6b53126..6a1119e87fbb 100644
3630 +--- a/fs/cifs/cifsfs.c
3631 ++++ b/fs/cifs/cifsfs.c
3632 +@@ -394,17 +394,17 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
3633 + struct sockaddr *srcaddr;
3634 + srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
3635 +
3636 +- seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
3637 ++ seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
3638 + cifs_show_security(s, tcon->ses);
3639 + cifs_show_cache_flavor(s, cifs_sb);
3640 +
3641 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
3642 + seq_puts(s, ",multiuser");
3643 + else if (tcon->ses->user_name)
3644 +- seq_printf(s, ",username=%s", tcon->ses->user_name);
3645 ++ seq_show_option(s, "username", tcon->ses->user_name);
3646 +
3647 + if (tcon->ses->domainName)
3648 +- seq_printf(s, ",domain=%s", tcon->ses->domainName);
3649 ++ seq_show_option(s, "domain", tcon->ses->domainName);
3650 +
3651 + if (srcaddr->sa_family != AF_UNSPEC) {
3652 + struct sockaddr_in *saddr4;
3653 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3654 +index ca12affdba96..6b4eb94b04a5 100644
3655 +--- a/fs/ext4/super.c
3656 ++++ b/fs/ext4/super.c
3657 +@@ -1738,10 +1738,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
3658 + }
3659 +
3660 + if (sbi->s_qf_names[USRQUOTA])
3661 +- seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
3662 ++ seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
3663 +
3664 + if (sbi->s_qf_names[GRPQUOTA])
3665 +- seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
3666 ++ seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
3667 + #endif
3668 + }
3669 +
3670 +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
3671 +index 859c6edbf81a..c18b49dc5d4f 100644
3672 +--- a/fs/gfs2/super.c
3673 ++++ b/fs/gfs2/super.c
3674 +@@ -1334,11 +1334,11 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
3675 + if (is_ancestor(root, sdp->sd_master_dir))
3676 + seq_puts(s, ",meta");
3677 + if (args->ar_lockproto[0])
3678 +- seq_printf(s, ",lockproto=%s", args->ar_lockproto);
3679 ++ seq_show_option(s, "lockproto", args->ar_lockproto);
3680 + if (args->ar_locktable[0])
3681 +- seq_printf(s, ",locktable=%s", args->ar_locktable);
3682 ++ seq_show_option(s, "locktable", args->ar_locktable);
3683 + if (args->ar_hostdata[0])
3684 +- seq_printf(s, ",hostdata=%s", args->ar_hostdata);
3685 ++ seq_show_option(s, "hostdata", args->ar_hostdata);
3686 + if (args->ar_spectator)
3687 + seq_puts(s, ",spectator");
3688 + if (args->ar_localflocks)
3689 +diff --git a/fs/hfs/super.c b/fs/hfs/super.c
3690 +index eee7206c38d1..410b65eea683 100644
3691 +--- a/fs/hfs/super.c
3692 ++++ b/fs/hfs/super.c
3693 +@@ -135,9 +135,9 @@ static int hfs_show_options(struct seq_file *seq, struct dentry *root)
3694 + struct hfs_sb_info *sbi = HFS_SB(root->d_sb);
3695 +
3696 + if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
3697 +- seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
3698 ++ seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4);
3699 + if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
3700 +- seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
3701 ++ seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4);
3702 + seq_printf(seq, ",uid=%u,gid=%u",
3703 + from_kuid_munged(&init_user_ns, sbi->s_uid),
3704 + from_kgid_munged(&init_user_ns, sbi->s_gid));
3705 +diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
3706 +index c90b72ee676d..bb806e58c977 100644
3707 +--- a/fs/hfsplus/options.c
3708 ++++ b/fs/hfsplus/options.c
3709 +@@ -218,9 +218,9 @@ int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
3710 + struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
3711 +
3712 + if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
3713 +- seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
3714 ++ seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4);
3715 + if (sbi->type != HFSPLUS_DEF_CR_TYPE)
3716 +- seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
3717 ++ seq_show_option_n(seq, "type", (char *)&sbi->type, 4);
3718 + seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
3719 + from_kuid_munged(&init_user_ns, sbi->uid),
3720 + from_kgid_munged(&init_user_ns, sbi->gid));
3721 +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
3722 +index 07d8d8f52faf..de2d6245e9fa 100644
3723 +--- a/fs/hostfs/hostfs_kern.c
3724 ++++ b/fs/hostfs/hostfs_kern.c
3725 +@@ -260,7 +260,7 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
3726 + size_t offset = strlen(root_ino) + 1;
3727 +
3728 + if (strlen(root_path) > offset)
3729 +- seq_printf(seq, ",%s", root_path + offset);
3730 ++ seq_show_option(seq, root_path + offset, NULL);
3731 +
3732 + if (append)
3733 + seq_puts(seq, ",append");
3734 +diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
3735 +index a0872f239f04..9e92c9c2d319 100644
3736 +--- a/fs/hpfs/namei.c
3737 ++++ b/fs/hpfs/namei.c
3738 +@@ -8,6 +8,17 @@
3739 + #include <linux/sched.h>
3740 + #include "hpfs_fn.h"
3741 +
3742 ++static void hpfs_update_directory_times(struct inode *dir)
3743 ++{
3744 ++ time_t t = get_seconds();
3745 ++ if (t == dir->i_mtime.tv_sec &&
3746 ++ t == dir->i_ctime.tv_sec)
3747 ++ return;
3748 ++ dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
3749 ++ dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
3750 ++ hpfs_write_inode_nolock(dir);
3751 ++}
3752 ++
3753 + static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3754 + {
3755 + const unsigned char *name = dentry->d_name.name;
3756 +@@ -99,6 +110,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3757 + result->i_mode = mode | S_IFDIR;
3758 + hpfs_write_inode_nolock(result);
3759 + }
3760 ++ hpfs_update_directory_times(dir);
3761 + d_instantiate(dentry, result);
3762 + hpfs_unlock(dir->i_sb);
3763 + return 0;
3764 +@@ -187,6 +199,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b
3765 + result->i_mode = mode | S_IFREG;
3766 + hpfs_write_inode_nolock(result);
3767 + }
3768 ++ hpfs_update_directory_times(dir);
3769 + d_instantiate(dentry, result);
3770 + hpfs_unlock(dir->i_sb);
3771 + return 0;
3772 +@@ -262,6 +275,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de
3773 + insert_inode_hash(result);
3774 +
3775 + hpfs_write_inode_nolock(result);
3776 ++ hpfs_update_directory_times(dir);
3777 + d_instantiate(dentry, result);
3778 + brelse(bh);
3779 + hpfs_unlock(dir->i_sb);
3780 +@@ -340,6 +354,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
3781 + insert_inode_hash(result);
3782 +
3783 + hpfs_write_inode_nolock(result);
3784 ++ hpfs_update_directory_times(dir);
3785 + d_instantiate(dentry, result);
3786 + hpfs_unlock(dir->i_sb);
3787 + return 0;
3788 +@@ -423,6 +438,8 @@ again:
3789 + out1:
3790 + hpfs_brelse4(&qbh);
3791 + out:
3792 ++ if (!err)
3793 ++ hpfs_update_directory_times(dir);
3794 + hpfs_unlock(dir->i_sb);
3795 + return err;
3796 + }
3797 +@@ -477,6 +494,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
3798 + out1:
3799 + hpfs_brelse4(&qbh);
3800 + out:
3801 ++ if (!err)
3802 ++ hpfs_update_directory_times(dir);
3803 + hpfs_unlock(dir->i_sb);
3804 + return err;
3805 + }
3806 +@@ -595,7 +614,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
3807 + goto end1;
3808 + }
3809 +
3810 +- end:
3811 ++end:
3812 + hpfs_i(i)->i_parent_dir = new_dir->i_ino;
3813 + if (S_ISDIR(i->i_mode)) {
3814 + inc_nlink(new_dir);
3815 +@@ -610,6 +629,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
3816 + brelse(bh);
3817 + }
3818 + end1:
3819 ++ if (!err) {
3820 ++ hpfs_update_directory_times(old_dir);
3821 ++ hpfs_update_directory_times(new_dir);
3822 ++ }
3823 + hpfs_unlock(i->i_sb);
3824 + return err;
3825 + }
3826 +diff --git a/fs/libfs.c b/fs/libfs.c
3827 +index 02813592e121..f4641fd27bda 100644
3828 +--- a/fs/libfs.c
3829 ++++ b/fs/libfs.c
3830 +@@ -1176,7 +1176,7 @@ void make_empty_dir_inode(struct inode *inode)
3831 + inode->i_uid = GLOBAL_ROOT_UID;
3832 + inode->i_gid = GLOBAL_ROOT_GID;
3833 + inode->i_rdev = 0;
3834 +- inode->i_size = 2;
3835 ++ inode->i_size = 0;
3836 + inode->i_blkbits = PAGE_SHIFT;
3837 + inode->i_blocks = 0;
3838 +
3839 +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
3840 +index 403c5660b306..a482e312c7b2 100644
3841 +--- a/fs/ocfs2/super.c
3842 ++++ b/fs/ocfs2/super.c
3843 +@@ -1550,8 +1550,8 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
3844 + seq_printf(s, ",localflocks,");
3845 +
3846 + if (osb->osb_cluster_stack[0])
3847 +- seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN,
3848 +- osb->osb_cluster_stack);
3849 ++ seq_show_option_n(s, "cluster_stack", osb->osb_cluster_stack,
3850 ++ OCFS2_STACK_LABEL_LEN);
3851 + if (opts & OCFS2_MOUNT_USRQUOTA)
3852 + seq_printf(s, ",usrquota");
3853 + if (opts & OCFS2_MOUNT_GRPQUOTA)
3854 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
3855 +index bf8537c7f455..155989455a72 100644
3856 +--- a/fs/overlayfs/super.c
3857 ++++ b/fs/overlayfs/super.c
3858 +@@ -517,10 +517,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
3859 + struct super_block *sb = dentry->d_sb;
3860 + struct ovl_fs *ufs = sb->s_fs_info;
3861 +
3862 +- seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
3863 ++ seq_show_option(m, "lowerdir", ufs->config.lowerdir);
3864 + if (ufs->config.upperdir) {
3865 +- seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
3866 +- seq_printf(m, ",workdir=%s", ufs->config.workdir);
3867 ++ seq_show_option(m, "upperdir", ufs->config.upperdir);
3868 ++ seq_show_option(m, "workdir", ufs->config.workdir);
3869 + }
3870 + return 0;
3871 + }
3872 +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
3873 +index 0111ad0466ed..cf6fa25f884b 100644
3874 +--- a/fs/reiserfs/super.c
3875 ++++ b/fs/reiserfs/super.c
3876 +@@ -714,18 +714,20 @@ static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
3877 + seq_puts(seq, ",acl");
3878 +
3879 + if (REISERFS_SB(s)->s_jdev)
3880 +- seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);
3881 ++ seq_show_option(seq, "jdev", REISERFS_SB(s)->s_jdev);
3882 +
3883 + if (journal->j_max_commit_age != journal->j_default_max_commit_age)
3884 + seq_printf(seq, ",commit=%d", journal->j_max_commit_age);
3885 +
3886 + #ifdef CONFIG_QUOTA
3887 + if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
3888 +- seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
3889 ++ seq_show_option(seq, "usrjquota",
3890 ++ REISERFS_SB(s)->s_qf_names[USRQUOTA]);
3891 + else if (opts & (1 << REISERFS_USRQUOTA))
3892 + seq_puts(seq, ",usrquota");
3893 + if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
3894 +- seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
3895 ++ seq_show_option(seq, "grpjquota",
3896 ++ REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
3897 + else if (opts & (1 << REISERFS_GRPQUOTA))
3898 + seq_puts(seq, ",grpquota");
3899 + if (REISERFS_SB(s)->s_jquota_fmt) {
3900 +diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
3901 +index 74bcbabfa523..b14bbd6bb05f 100644
3902 +--- a/fs/xfs/libxfs/xfs_da_format.h
3903 ++++ b/fs/xfs/libxfs/xfs_da_format.h
3904 +@@ -680,8 +680,15 @@ typedef struct xfs_attr_leaf_name_remote {
3905 + typedef struct xfs_attr_leafblock {
3906 + xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
3907 + xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
3908 +- xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
3909 +- xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
3910 ++ /*
3911 ++ * The rest of the block contains the following structures after the
3912 ++ * leaf entries, growing from the bottom up. The variables are never
3913 ++ * referenced and defining them can actually make gcc optimize away
3914 ++ * accesses to the 'entries' array above index 0, so don't do that.
3915 ++ *
3916 ++ * xfs_attr_leaf_name_local_t namelist;
3917 ++ * xfs_attr_leaf_name_remote_t valuelist;
3918 ++ */
3919 + } xfs_attr_leafblock_t;
3920 +
3921 + /*
3922 +diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
3923 +index de1ea16f5748..534bbf283d6b 100644
3924 +--- a/fs/xfs/libxfs/xfs_dir2_data.c
3925 ++++ b/fs/xfs/libxfs/xfs_dir2_data.c
3926 +@@ -252,7 +252,8 @@ xfs_dir3_data_reada_verify(
3927 + return;
3928 + case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
3929 + case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
3930 +- xfs_dir3_data_verify(bp);
3931 ++ bp->b_ops = &xfs_dir3_data_buf_ops;
3932 ++ bp->b_ops->verify_read(bp);
3933 + return;
3934 + default:
3935 + xfs_buf_ioerror(bp, -EFSCORRUPTED);
3936 +diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
3937 +index 41b80d3d3877..06bb4218b362 100644
3938 +--- a/fs/xfs/libxfs/xfs_dir2_node.c
3939 ++++ b/fs/xfs/libxfs/xfs_dir2_node.c
3940 +@@ -2132,6 +2132,7 @@ xfs_dir2_node_replace(
3941 + int error; /* error return value */
3942 + int i; /* btree level */
3943 + xfs_ino_t inum; /* new inode number */
3944 ++ int ftype; /* new file type */
3945 + xfs_dir2_leaf_t *leaf; /* leaf structure */
3946 + xfs_dir2_leaf_entry_t *lep; /* leaf entry being changed */
3947 + int rval; /* internal return value */
3948 +@@ -2145,7 +2146,14 @@ xfs_dir2_node_replace(
3949 + state = xfs_da_state_alloc();
3950 + state->args = args;
3951 + state->mp = args->dp->i_mount;
3952 ++
3953 ++ /*
3954 ++ * We have to save the new inode number and ftype since
3955 ++ * xfs_da3_node_lookup_int() is going to overwrite them
3956 ++ */
3957 + inum = args->inumber;
3958 ++ ftype = args->filetype;
3959 ++
3960 + /*
3961 + * Lookup the entry to change in the btree.
3962 + */
3963 +@@ -2183,7 +2191,7 @@ xfs_dir2_node_replace(
3964 + * Fill in the new inode number and log the entry.
3965 + */
3966 + dep->inumber = cpu_to_be64(inum);
3967 +- args->dp->d_ops->data_put_ftype(dep, args->filetype);
3968 ++ args->dp->d_ops->data_put_ftype(dep, ftype);
3969 + xfs_dir2_data_log_entry(args, state->extrablk.bp, dep);
3970 + rval = 0;
3971 + }
3972 +diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
3973 +index 858e1e62bbaa..65a45372fb1f 100644
3974 +--- a/fs/xfs/xfs_super.c
3975 ++++ b/fs/xfs/xfs_super.c
3976 +@@ -504,9 +504,9 @@ xfs_showargs(
3977 + seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
3978 +
3979 + if (mp->m_logname)
3980 +- seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
3981 ++ seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname);
3982 + if (mp->m_rtname)
3983 +- seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
3984 ++ seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname);
3985 +
3986 + if (mp->m_dalign > 0)
3987 + seq_printf(m, "," MNTOPT_SUNIT "=%d",
3988 +diff --git a/include/linux/acpi.h b/include/linux/acpi.h
3989 +index 4550be3bb63b..808c43afa8ac 100644
3990 +--- a/include/linux/acpi.h
3991 ++++ b/include/linux/acpi.h
3992 +@@ -198,7 +198,7 @@ struct pci_dev;
3993 +
3994 + int acpi_pci_irq_enable (struct pci_dev *dev);
3995 + void acpi_penalize_isa_irq(int irq, int active);
3996 +-
3997 ++void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
3998 + void acpi_pci_irq_disable (struct pci_dev *dev);
3999 +
4000 + extern int ec_read(u8 addr, u8 *val);
4001 +diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
4002 +index d86b753e9b30..5ed7771ad386 100644
4003 +--- a/include/linux/iio/iio.h
4004 ++++ b/include/linux/iio/iio.h
4005 +@@ -642,6 +642,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
4006 + #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
4007 +
4008 + /**
4009 ++ * IIO_RAD_TO_DEGREE() - Convert rad to degree
4010 ++ * @rad: A value in rad
4011 ++ *
4012 ++ * Returns the given value converted from rad to degree
4013 ++ */
4014 ++#define IIO_RAD_TO_DEGREE(rad) \
4015 ++ (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
4016 ++
4017 ++/**
4018 + * IIO_G_TO_M_S_2() - Convert g to meter / second**2
4019 + * @g: A value in g
4020 + *
4021 +@@ -649,4 +658,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
4022 + */
4023 + #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
4024 +
4025 ++/**
4026 ++ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
4027 ++ * @ms2: A value in meter / second**2
4028 ++ *
4029 ++ * Returns the given value converted from meter / second**2 to g
4030 ++ */
4031 ++#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
4032 ++
4033 + #endif /* _INDUSTRIAL_IO_H_ */
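
The two macros added here are the inverses of the existing IIO_DEGREE_TO_RAD() and IIO_G_TO_M_S_2() fixed-point conversions, and the extra '+ divisor / 2' term makes the integer division round to nearest instead of truncating. A quick user-space check of the arithmetic; the input scaling in main() is an arbitrary example.

#include <stdio.h>

/* same fixed-point factors as above: 314159/100000 ~ pi, 980665/100000 ~ g */
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
#define IIO_RAD_TO_DEGREE(rad) \
        (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)

int main(void)
{
        unsigned long long deg = 90000000ULL;   /* 90 degrees in micro-degrees */
        unsigned long long g = 1000000ULL;      /* 1 g in micro-g */

        printf("deg -> rad -> deg: %llu -> %llu -> %llu\n",
               deg, IIO_DEGREE_TO_RAD(deg),
               IIO_RAD_TO_DEGREE(IIO_DEGREE_TO_RAD(deg)));
        printf("g -> m/s^2 -> g:   %llu -> %llu -> %llu\n",
               g, IIO_G_TO_M_S_2(g), IIO_M_S_2_TO_G(IIO_G_TO_M_S_2(g)));
        return 0;
}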
4034 +diff --git a/include/linux/pci.h b/include/linux/pci.h
4035 +index 3ef3a52068df..6e935e5eab56 100644
4036 +--- a/include/linux/pci.h
4037 ++++ b/include/linux/pci.h
4038 +@@ -180,6 +180,8 @@ enum pci_dev_flags {
4039 + PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
4040 + /* Do not use PM reset even if device advertises NoSoftRst- */
4041 + PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
4042 ++ /* Get VPD from function 0 VPD */
4043 ++ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
4044 + };
4045 +
4046 + enum pci_irq_reroute_variant {
4047 +diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
4048 +index afbb1fd77c77..7848473a5bc8 100644
4049 +--- a/include/linux/seq_file.h
4050 ++++ b/include/linux/seq_file.h
4051 +@@ -148,6 +148,41 @@ static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
4052 + #endif
4053 + }
4054 +
4055 ++/**
4056 ++ * seq_show_options - display mount options with appropriate escapes.
4057 ++ * @m: the seq_file handle
4058 ++ * @name: the mount option name
4059 ++ * @value: the mount option name's value, can be NULL
4060 ++ */
4061 ++static inline void seq_show_option(struct seq_file *m, const char *name,
4062 ++ const char *value)
4063 ++{
4064 ++ seq_putc(m, ',');
4065 ++ seq_escape(m, name, ",= \t\n\\");
4066 ++ if (value) {
4067 ++ seq_putc(m, '=');
4068 ++ seq_escape(m, value, ", \t\n\\");
4069 ++ }
4070 ++}
4071 ++
4072 ++/**
4073 ++ * seq_show_option_n - display mount options with appropriate escapes
4074 ++ * where @value must be a specific length.
4075 ++ * @m: the seq_file handle
4076 ++ * @name: the mount option name
4077 ++ * @value: the mount option name's value, cannot be NULL
4078 ++ * @length: the length of @value to display
4079 ++ *
4080 ++ * This is a macro since it uses "length" to define the size of the
4081 ++ * stack buffer.
4082 ++ */
4083 ++#define seq_show_option_n(m, name, value, length) { \
4084 ++ char val_buf[length + 1]; \
4085 ++ strncpy(val_buf, value, length); \
4086 ++ val_buf[length] = '\0'; \
4087 ++ seq_show_option(m, name, val_buf); \
4088 ++}
4089 ++
4090 + #define SEQ_START_TOKEN ((void *)1)
4091 + /*
4092 + * Helpers for iteration over list_head-s in seq_files
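
seq_show_option() and seq_show_option_n() above exist so that show_options output (as seen in /proc/mounts) can no longer be corrupted by option values that themselves contain a comma, space, '=', or backslash; the character sets handed to seq_escape() are exactly those separators. A simplified user-space stand-in for the escaping (seq_escape() itself likewise emits \ooo octal escapes):

#include <stdio.h>
#include <string.h>

/* print s, replacing any character found in esc with a \ooo octal escape */
static void show_escaped(const char *s, const char *esc)
{
        for (; *s; s++) {
                if (strchr(esc, *s))
                        printf("\\%03o", (unsigned char)*s);
                else
                        putchar(*s);
        }
}

static void show_option(const char *name, const char *value)
{
        putchar(',');
        show_escaped(name, ",= \t\n\\");
        if (value) {
                putchar('=');
                show_escaped(value, ", \t\n\\");
        }
}

int main(void)
{
        /* an embedded ",noexec" can no longer masquerade as a separate option */
        show_option("name", "evil,noexec");
        putchar('\n');
        return 0;
}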
4093 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
4094 +index e8a5491be756..4d65b66ae60d 100644
4095 +--- a/kernel/cgroup.c
4096 ++++ b/kernel/cgroup.c
4097 +@@ -1319,7 +1319,7 @@ static int cgroup_show_options(struct seq_file *seq,
4098 +
4099 + for_each_subsys(ss, ssid)
4100 + if (root->subsys_mask & (1 << ssid))
4101 +- seq_printf(seq, ",%s", ss->name);
4102 ++ seq_show_option(seq, ss->name, NULL);
4103 + if (root->flags & CGRP_ROOT_NOPREFIX)
4104 + seq_puts(seq, ",noprefix");
4105 + if (root->flags & CGRP_ROOT_XATTR)
4106 +@@ -1327,13 +1327,14 @@ static int cgroup_show_options(struct seq_file *seq,
4107 +
4108 + spin_lock(&release_agent_path_lock);
4109 + if (strlen(root->release_agent_path))
4110 +- seq_printf(seq, ",release_agent=%s", root->release_agent_path);
4111 ++ seq_show_option(seq, "release_agent",
4112 ++ root->release_agent_path);
4113 + spin_unlock(&release_agent_path_lock);
4114 +
4115 + if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
4116 + seq_puts(seq, ",clone_children");
4117 + if (strlen(root->name))
4118 +- seq_printf(seq, ",name=%s", root->name);
4119 ++ seq_show_option(seq, "name", root->name);
4120 + return 0;
4121 + }
4122 +
4123 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4124 +index 123673291ffb..e6910526c84b 100644
4125 +--- a/kernel/sched/core.c
4126 ++++ b/kernel/sched/core.c
4127 +@@ -5328,6 +5328,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
4128 + case CPU_STARTING:
4129 + set_cpu_rq_start_time();
4130 + return NOTIFY_OK;
4131 ++ case CPU_ONLINE:
4132 ++ /*
4133 ++ * At this point a starting CPU has marked itself as online via
4134 ++ * set_cpu_online(). But it might not yet have marked itself
4135 ++ * as active, which is essential from here on.
4136 ++ *
4137 ++ * Thus, fall-through and help the starting CPU along.
4138 ++ */
4139 + case CPU_DOWN_FAILED:
4140 + set_cpu_active((long)hcpu, true);
4141 + return NOTIFY_OK;
4142 +diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
4143 +index 79e8f71aef5b..3f76eb84b395 100644
4144 +--- a/net/ceph/ceph_common.c
4145 ++++ b/net/ceph/ceph_common.c
4146 +@@ -495,8 +495,11 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
4147 + struct ceph_options *opt = client->options;
4148 + size_t pos = m->count;
4149 +
4150 +- if (opt->name)
4151 +- seq_printf(m, "name=%s,", opt->name);
4152 ++ if (opt->name) {
4153 ++ seq_puts(m, "name=");
4154 ++ seq_escape(m, opt->name, ", \t\n\\");
4155 ++ seq_putc(m, ',');
4156 ++ }
4157 + if (opt->key)
4158 + seq_puts(m, "secret=<hidden>,");
4159 +
4160 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4161 +index 7f8d7f19e044..280235cc3a98 100644
4162 +--- a/security/selinux/hooks.c
4163 ++++ b/security/selinux/hooks.c
4164 +@@ -1095,7 +1095,7 @@ static void selinux_write_opts(struct seq_file *m,
4165 + seq_puts(m, prefix);
4166 + if (has_comma)
4167 + seq_putc(m, '\"');
4168 +- seq_puts(m, opts->mnt_opts[i]);
4169 ++ seq_escape(m, opts->mnt_opts[i], "\"\n\\");
4170 + if (has_comma)
4171 + seq_putc(m, '\"');
4172 + }
4173 +diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
4174 +index 4373ada95648..3a91a00fb973 100644
4175 +--- a/sound/soc/codecs/adav80x.c
4176 ++++ b/sound/soc/codecs/adav80x.c
4177 +@@ -864,7 +864,6 @@ const struct regmap_config adav80x_regmap_config = {
4178 + .val_bits = 8,
4179 + .pad_bits = 1,
4180 + .reg_bits = 7,
4181 +- .read_flag_mask = 0x01,
4182 +
4183 + .max_register = ADAV80X_PLL_OUTE,
4184 +
4185 +diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
4186 +index eff4b4d512b7..ee91edcf3cb0 100644
4187 +--- a/sound/soc/codecs/arizona.c
4188 ++++ b/sound/soc/codecs/arizona.c
4189 +@@ -1610,17 +1610,6 @@ int arizona_init_dai(struct arizona_priv *priv, int id)
4190 + }
4191 + EXPORT_SYMBOL_GPL(arizona_init_dai);
4192 +
4193 +-static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
4194 +-{
4195 +- struct arizona_fll *fll = data;
4196 +-
4197 +- arizona_fll_dbg(fll, "clock OK\n");
4198 +-
4199 +- complete(&fll->ok);
4200 +-
4201 +- return IRQ_HANDLED;
4202 +-}
4203 +-
4204 + static struct {
4205 + unsigned int min;
4206 + unsigned int max;
4207 +@@ -1902,17 +1891,18 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll)
4208 + static int arizona_enable_fll(struct arizona_fll *fll)
4209 + {
4210 + struct arizona *arizona = fll->arizona;
4211 +- unsigned long time_left;
4212 + bool use_sync = false;
4213 + int already_enabled = arizona_is_enabled_fll(fll);
4214 + struct arizona_fll_cfg cfg;
4215 ++ int i;
4216 ++ unsigned int val;
4217 +
4218 + if (already_enabled < 0)
4219 + return already_enabled;
4220 +
4221 + if (already_enabled) {
4222 + /* Facilitate smooth refclk across the transition */
4223 +- regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7,
4224 ++ regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
4225 + ARIZONA_FLL1_GAIN_MASK, 0);
4226 + regmap_update_bits_async(fll->arizona->regmap, fll->base + 1,
4227 + ARIZONA_FLL1_FREERUN,
4228 +@@ -1964,9 +1954,6 @@ static int arizona_enable_fll(struct arizona_fll *fll)
4229 + if (!already_enabled)
4230 + pm_runtime_get(arizona->dev);
4231 +
4232 +- /* Clear any pending completions */
4233 +- try_wait_for_completion(&fll->ok);
4234 +-
4235 + regmap_update_bits_async(arizona->regmap, fll->base + 1,
4236 + ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
4237 + if (use_sync)
4238 +@@ -1978,10 +1965,24 @@ static int arizona_enable_fll(struct arizona_fll *fll)
4239 + regmap_update_bits_async(arizona->regmap, fll->base + 1,
4240 + ARIZONA_FLL1_FREERUN, 0);
4241 +
4242 +- time_left = wait_for_completion_timeout(&fll->ok,
4243 +- msecs_to_jiffies(250));
4244 +- if (time_left == 0)
4245 ++ arizona_fll_dbg(fll, "Waiting for FLL lock...\n");
4246 ++ val = 0;
4247 ++ for (i = 0; i < 15; i++) {
4248 ++ if (i < 5)
4249 ++ usleep_range(200, 400);
4250 ++ else
4251 ++ msleep(20);
4252 ++
4253 ++ regmap_read(arizona->regmap,
4254 ++ ARIZONA_INTERRUPT_RAW_STATUS_5,
4255 ++ &val);
4256 ++ if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1)))
4257 ++ break;
4258 ++ }
4259 ++ if (i == 15)
4260 + arizona_fll_warn(fll, "Timed out waiting for lock\n");
4261 ++ else
4262 ++ arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i);
4263 +
4264 + return 0;
4265 + }
4266 +@@ -2066,11 +2067,8 @@ EXPORT_SYMBOL_GPL(arizona_set_fll);
4267 + int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
4268 + int ok_irq, struct arizona_fll *fll)
4269 + {
4270 +- int ret;
4271 + unsigned int val;
4272 +
4273 +- init_completion(&fll->ok);
4274 +-
4275 + fll->id = id;
4276 + fll->base = base;
4277 + fll->arizona = arizona;
4278 +@@ -2092,13 +2090,6 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
4279 + snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
4280 + "FLL%d clock OK", id);
4281 +
4282 +- ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
4283 +- arizona_fll_clock_ok, fll);
4284 +- if (ret != 0) {
4285 +- dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
4286 +- id, ret);
4287 +- }
4288 +-
4289 + regmap_update_bits(arizona->regmap, fll->base + 1,
4290 + ARIZONA_FLL1_FREERUN, 0);
4291 +
4292 +diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
4293 +index 11ff899b0272..14e8485b5585 100644
4294 +--- a/sound/soc/codecs/arizona.h
4295 ++++ b/sound/soc/codecs/arizona.h
4296 +@@ -233,7 +233,6 @@ struct arizona_fll {
4297 + int id;
4298 + unsigned int base;
4299 + unsigned int vco_mult;
4300 +- struct completion ok;
4301 +
4302 + unsigned int fout;
4303 + int sync_src;
4304 +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
4305 +index 178e55d4d481..06317f7d945f 100644
4306 +--- a/sound/soc/codecs/rt5640.c
4307 ++++ b/sound/soc/codecs/rt5640.c
4308 +@@ -985,6 +985,35 @@ static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
4309 + return 0;
4310 + }
4311 +
4312 ++static int rt5640_lout_event(struct snd_soc_dapm_widget *w,
4313 ++ struct snd_kcontrol *kcontrol, int event)
4314 ++{
4315 ++ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
4316 ++
4317 ++ switch (event) {
4318 ++ case SND_SOC_DAPM_POST_PMU:
4319 ++ hp_amp_power_on(codec);
4320 ++ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
4321 ++ RT5640_PWR_LM, RT5640_PWR_LM);
4322 ++ snd_soc_update_bits(codec, RT5640_OUTPUT,
4323 ++ RT5640_L_MUTE | RT5640_R_MUTE, 0);
4324 ++ break;
4325 ++
4326 ++ case SND_SOC_DAPM_PRE_PMD:
4327 ++ snd_soc_update_bits(codec, RT5640_OUTPUT,
4328 ++ RT5640_L_MUTE | RT5640_R_MUTE,
4329 ++ RT5640_L_MUTE | RT5640_R_MUTE);
4330 ++ snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
4331 ++ RT5640_PWR_LM, 0);
4332 ++ break;
4333 ++
4334 ++ default:
4335 ++ return 0;
4336 ++ }
4337 ++
4338 ++ return 0;
4339 ++}
4340 ++
4341 + static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
4342 + struct snd_kcontrol *kcontrol, int event)
4343 + {
4344 +@@ -1180,13 +1209,16 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
4345 + 0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
4346 + SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
4347 + 0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
4348 +- SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0,
4349 ++ SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
4350 + rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
4351 + SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
4352 + 0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
4353 + SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
4354 + rt5640_hp_event,
4355 + SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
4356 ++ SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
4357 ++ rt5640_lout_event,
4358 ++ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
4359 + SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
4360 + RT5640_PWR_HP_L_BIT, 0, NULL, 0),
4361 + SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
4362 +@@ -1501,8 +1533,10 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
4363 + {"HP R Playback", "Switch", "HP Amp"},
4364 + {"HPOL", NULL, "HP L Playback"},
4365 + {"HPOR", NULL, "HP R Playback"},
4366 +- {"LOUTL", NULL, "LOUT MIX"},
4367 +- {"LOUTR", NULL, "LOUT MIX"},
4368 ++
4369 ++ {"LOUT amp", NULL, "LOUT MIX"},
4370 ++ {"LOUTL", NULL, "LOUT amp"},
4371 ++ {"LOUTR", NULL, "LOUT amp"},
4372 + };
4373 +
4374 + static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = {
4375 +diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
4376 +index 8bf2e2c4bafb..9e371eb3e4fa 100644
4377 +--- a/sound/soc/samsung/arndale_rt5631.c
4378 ++++ b/sound/soc/samsung/arndale_rt5631.c
4379 +@@ -116,15 +116,6 @@ static int arndale_audio_probe(struct platform_device *pdev)
4380 + return ret;
4381 + }
4382 +
4383 +-static int arndale_audio_remove(struct platform_device *pdev)
4384 +-{
4385 +- struct snd_soc_card *card = platform_get_drvdata(pdev);
4386 +-
4387 +- snd_soc_unregister_card(card);
4388 +-
4389 +- return 0;
4390 +-}
4391 +-
4392 + static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
4393 + { .compatible = "samsung,arndale-rt5631", },
4394 + { .compatible = "samsung,arndale-alc5631", },
4395 +@@ -139,7 +130,6 @@ static struct platform_driver arndale_audio_driver = {
4396 + .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
4397 + },
4398 + .probe = arndale_audio_probe,
4399 +- .remove = arndale_audio_remove,
4400 + };
4401 +
4402 + module_platform_driver(arndale_audio_driver);