
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 22 Feb 2018 23:22:41
Message-Id: 1519341742.9dbf6a359ab6dd6e5492fc527a68f849e35d3f18.mpagano@gentoo
1 commit: 9dbf6a359ab6dd6e5492fc527a68f849e35d3f18
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Feb 22 23:22:22 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Feb 22 23:22:22 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9dbf6a35
7
8 Linux patch 4.9.83
9
10 0000_README | 4 +
11 1082_linux-4.9.83.patch | 3291 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3295 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 363e368..faf1391 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -371,6 +371,10 @@ Patch: 1081_linux-4.9.82.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.82
21
22 +Patch: 1082_linux-4.9.83.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.83
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1082_linux-4.9.83.patch b/1082_linux-4.9.83.patch
31 new file mode 100644
32 index 0000000..9a6aed8
33 --- /dev/null
34 +++ b/1082_linux-4.9.83.patch
35 @@ -0,0 +1,3291 @@
36 +diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
37 +index 0f5583293c9c..633481e2a4ec 100644
38 +--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
39 ++++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
40 +@@ -63,6 +63,6 @@ Example:
41 + interrupts = <0 35 0x4>;
42 + status = "disabled";
43 + dmas = <&dmahost 12 0 1>,
44 +- <&dmahost 13 0 1 0>;
45 ++ <&dmahost 13 1 0>;
46 + dma-names = "rx", "rx";
47 + };
48 +diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
49 +index 6c0108eb0137..2139ea253142 100644
50 +--- a/Documentation/filesystems/ext4.txt
51 ++++ b/Documentation/filesystems/ext4.txt
52 +@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
53 + data_err=abort Abort the journal if an error occurs in a file
54 + data buffer in ordered mode.
55 +
56 +-grpid Give objects the same group ID as their creator.
57 ++grpid New objects have the group ID of their parent.
58 + bsdgroups
59 +
60 + nogrpid (*) New objects have the group ID of their creator.
61 +diff --git a/Makefile b/Makefile
62 +index d338530540e0..cfae9b823d2b 100644
63 +--- a/Makefile
64 ++++ b/Makefile
65 +@@ -1,6 +1,6 @@
66 + VERSION = 4
67 + PATCHLEVEL = 9
68 +-SUBLEVEL = 82
69 ++SUBLEVEL = 83
70 + EXTRAVERSION =
71 + NAME = Roaring Lionus
72 +
73 +diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
74 +index 7b8d90b7aeea..29b636fce23f 100644
75 +--- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
76 ++++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
77 +@@ -150,11 +150,6 @@
78 + interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
79 + };
80 +
81 +-&charlcd {
82 +- interrupt-parent = <&intc>;
83 +- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
84 +-};
85 +-
86 + &serial0 {
87 + interrupt-parent = <&intc>;
88 + interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
89 +diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
90 +index 137f48464f8b..bb59fee072c0 100644
91 +--- a/arch/arm/boot/dts/exynos5410.dtsi
92 ++++ b/arch/arm/boot/dts/exynos5410.dtsi
93 +@@ -274,7 +274,6 @@
94 + &rtc {
95 + clocks = <&clock CLK_RTC>;
96 + clock-names = "rtc";
97 +- interrupt-parent = <&pmu_system_controller>;
98 + status = "disabled";
99 + };
100 +
101 +diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
102 +index 52b3ed10283a..e2bc731079be 100644
103 +--- a/arch/arm/boot/dts/lpc3250-ea3250.dts
104 ++++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
105 +@@ -156,8 +156,8 @@
106 + uda1380: uda1380@18 {
107 + compatible = "nxp,uda1380";
108 + reg = <0x18>;
109 +- power-gpio = <&gpio 0x59 0>;
110 +- reset-gpio = <&gpio 0x51 0>;
111 ++ power-gpio = <&gpio 3 10 0>;
112 ++ reset-gpio = <&gpio 3 2 0>;
113 + dac-clk = "wspll";
114 + };
115 +
116 +diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
117 +index fd95e2b10357..b7bd3a110a8d 100644
118 +--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
119 ++++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
120 +@@ -81,8 +81,8 @@
121 + uda1380: uda1380@18 {
122 + compatible = "nxp,uda1380";
123 + reg = <0x18>;
124 +- power-gpio = <&gpio 0x59 0>;
125 +- reset-gpio = <&gpio 0x51 0>;
126 ++ power-gpio = <&gpio 3 10 0>;
127 ++ reset-gpio = <&gpio 3 2 0>;
128 + dac-clk = "wspll";
129 + };
130 +
131 +diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
132 +index 77c6b931dc24..23fe0497f708 100644
133 +--- a/arch/arm/boot/dts/mt2701.dtsi
134 ++++ b/arch/arm/boot/dts/mt2701.dtsi
135 +@@ -197,12 +197,14 @@
136 + compatible = "mediatek,mt2701-hifsys", "syscon";
137 + reg = <0 0x1a000000 0 0x1000>;
138 + #clock-cells = <1>;
139 ++ #reset-cells = <1>;
140 + };
141 +
142 + ethsys: syscon@1b000000 {
143 + compatible = "mediatek,mt2701-ethsys", "syscon";
144 + reg = <0 0x1b000000 0 0x1000>;
145 + #clock-cells = <1>;
146 ++ #reset-cells = <1>;
147 + };
148 +
149 + bdpsys: syscon@1c000000 {
150 +diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
151 +index a853918be43f..0c10ba517cd0 100644
152 +--- a/arch/arm/boot/dts/s5pv210.dtsi
153 ++++ b/arch/arm/boot/dts/s5pv210.dtsi
154 +@@ -463,6 +463,7 @@
155 + compatible = "samsung,exynos4210-ohci";
156 + reg = <0xec300000 0x100>;
157 + interrupts = <23>;
158 ++ interrupt-parent = <&vic1>;
159 + clocks = <&clocks CLK_USB_HOST>;
160 + clock-names = "usbhost";
161 + #address-cells = <1>;
162 +diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
163 +index 84101e4eebbf..0f5f379323a8 100644
164 +--- a/arch/arm/boot/dts/spear1310-evb.dts
165 ++++ b/arch/arm/boot/dts/spear1310-evb.dts
166 +@@ -349,7 +349,7 @@
167 + spi0: spi@e0100000 {
168 + status = "okay";
169 + num-cs = <3>;
170 +- cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
171 ++ cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
172 +
173 + stmpe610@0 {
174 + compatible = "st,stmpe610";
175 +diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
176 +index df2232d767ed..6361cbfcbe5e 100644
177 +--- a/arch/arm/boot/dts/spear1340.dtsi
178 ++++ b/arch/arm/boot/dts/spear1340.dtsi
179 +@@ -141,8 +141,8 @@
180 + reg = <0xb4100000 0x1000>;
181 + interrupts = <0 105 0x4>;
182 + status = "disabled";
183 +- dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
184 +- <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
185 ++ dmas = <&dwdma0 12 0 1>,
186 ++ <&dwdma0 13 1 0>;
187 + dma-names = "tx", "rx";
188 + };
189 +
190 +diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
191 +index 449acf0d8272..9564337c1815 100644
192 +--- a/arch/arm/boot/dts/spear13xx.dtsi
193 ++++ b/arch/arm/boot/dts/spear13xx.dtsi
194 +@@ -100,7 +100,7 @@
195 + reg = <0xb2800000 0x1000>;
196 + interrupts = <0 29 0x4>;
197 + status = "disabled";
198 +- dmas = <&dwdma0 0 0 0 0>;
199 ++ dmas = <&dwdma0 0 0 0>;
200 + dma-names = "data";
201 + };
202 +
203 +@@ -288,8 +288,8 @@
204 + #size-cells = <0>;
205 + interrupts = <0 31 0x4>;
206 + status = "disabled";
207 +- dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
208 +- <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
209 ++ dmas = <&dwdma0 4 0 0>,
210 ++ <&dwdma0 5 0 0>;
211 + dma-names = "tx", "rx";
212 + };
213 +
214 +diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
215 +index 9f60a7b6a42b..bd379034993c 100644
216 +--- a/arch/arm/boot/dts/spear600.dtsi
217 ++++ b/arch/arm/boot/dts/spear600.dtsi
218 +@@ -194,6 +194,7 @@
219 + rtc@fc900000 {
220 + compatible = "st,spear600-rtc";
221 + reg = <0xfc900000 0x1000>;
222 ++ interrupt-parent = <&vic0>;
223 + interrupts = <10>;
224 + status = "disabled";
225 + };
226 +diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
227 +index adb1c0998b81..1077ceebb2d6 100644
228 +--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
229 ++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
230 +@@ -749,6 +749,7 @@
231 + reg = <0x10120000 0x1000>;
232 + interrupt-names = "combined";
233 + interrupts = <14>;
234 ++ interrupt-parent = <&vica>;
235 + clocks = <&clcdclk>, <&hclkclcd>;
236 + clock-names = "clcdclk", "apb_pclk";
237 + status = "disabled";
238 +diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
239 +index 291ffacbd2e0..fe043d313ccd 100644
240 +--- a/arch/arm/boot/dts/stih407.dtsi
241 ++++ b/arch/arm/boot/dts/stih407.dtsi
242 +@@ -8,6 +8,7 @@
243 + */
244 + #include "stih407-clock.dtsi"
245 + #include "stih407-family.dtsi"
246 ++#include <dt-bindings/gpio/gpio.h>
247 + / {
248 + soc {
249 + sti-display-subsystem {
250 +@@ -122,7 +123,7 @@
251 + <&clk_s_d2_quadfs 0>,
252 + <&clk_s_d2_quadfs 1>;
253 +
254 +- hdmi,hpd-gpio = <&pio5 3>;
255 ++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
256 + reset-names = "hdmi";
257 + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
258 + ddc = <&hdmiddc>;
259 +diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
260 +index 4d329b2908be..3c118fc2bf61 100644
261 +--- a/arch/arm/boot/dts/stih410.dtsi
262 ++++ b/arch/arm/boot/dts/stih410.dtsi
263 +@@ -9,6 +9,7 @@
264 + #include "stih410-clock.dtsi"
265 + #include "stih407-family.dtsi"
266 + #include "stih410-pinctrl.dtsi"
267 ++#include <dt-bindings/gpio/gpio.h>
268 + / {
269 + aliases {
270 + bdisp0 = &bdisp0;
271 +@@ -213,7 +214,7 @@
272 + <&clk_s_d2_quadfs 0>,
273 + <&clk_s_d2_quadfs 1>;
274 +
275 +- hdmi,hpd-gpio = <&pio5 3>;
276 ++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
277 + reset-names = "hdmi";
278 + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
279 + ddc = <&hdmiddc>;
280 +diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
281 +index 107f37210fb9..83606087edc7 100644
282 +--- a/arch/arm/mach-pxa/tosa-bt.c
283 ++++ b/arch/arm/mach-pxa/tosa-bt.c
284 +@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
285 + },
286 + };
287 + module_platform_driver(tosa_bt_driver);
288 ++
289 ++MODULE_LICENSE("GPL");
290 ++MODULE_AUTHOR("Dmitry Baryshkov");
291 ++MODULE_DESCRIPTION("Bluetooth built-in chip control");
292 +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
293 +index 466ca5705c99..08b88f6791be 100644
294 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
295 ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
296 +@@ -796,6 +796,7 @@
297 + "dsi_phy_regulator";
298 +
299 + #clock-cells = <1>;
300 ++ #phy-cells = <0>;
301 +
302 + clocks = <&gcc GCC_MDSS_AHB_CLK>;
303 + clock-names = "iface_clk";
304 +@@ -906,8 +907,8 @@
305 + #address-cells = <1>;
306 + #size-cells = <0>;
307 +
308 +- qcom,ipc-1 = <&apcs 0 13>;
309 +- qcom,ipc-6 = <&apcs 0 19>;
310 ++ qcom,ipc-1 = <&apcs 8 13>;
311 ++ qcom,ipc-3 = <&apcs 8 19>;
312 +
313 + apps_smsm: apps@0 {
314 + reg = <0>;
315 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
316 +index 5e844f68e847..2d2fd79ced9d 100644
317 +--- a/arch/mips/Kconfig
318 ++++ b/arch/mips/Kconfig
319 +@@ -112,12 +112,12 @@ config MIPS_GENERIC
320 + select SYS_SUPPORTS_MULTITHREADING
321 + select SYS_SUPPORTS_RELOCATABLE
322 + select SYS_SUPPORTS_SMARTMIPS
323 +- select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
324 +- select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
325 +- select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
326 +- select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
327 +- select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
328 +- select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
329 ++ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
330 ++ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
331 ++ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
332 ++ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
333 ++ select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
334 ++ select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
335 + select USE_OF
336 + help
337 + Select this to build a kernel which aims to support multiple boards,
338 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
339 +index c33b69d10919..9121b9a35c8a 100644
340 +--- a/arch/powerpc/kernel/entry_64.S
341 ++++ b/arch/powerpc/kernel/entry_64.S
342 +@@ -39,6 +39,11 @@
343 + #include <asm/tm.h>
344 + #include <asm/ppc-opcode.h>
345 + #include <asm/export.h>
346 ++#ifdef CONFIG_PPC_BOOK3S
347 ++#include <asm/exception-64s.h>
348 ++#else
349 ++#include <asm/exception-64e.h>
350 ++#endif
351 +
352 + /*
353 + * System calls.
354 +diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
355 +index f06a9a0063f1..d0724a924184 100644
356 +--- a/arch/s390/kernel/compat_linux.c
357 ++++ b/arch/s390/kernel/compat_linux.c
358 +@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
359 +
360 + COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
361 + {
362 +- return sys_setgid((gid_t)gid);
363 ++ return sys_setgid(low2highgid(gid));
364 + }
365 +
366 + COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
367 +@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
368 +
369 + COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
370 + {
371 +- return sys_setuid((uid_t)uid);
372 ++ return sys_setuid(low2highuid(uid));
373 + }
374 +
375 + COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
376 +@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
377 +
378 + COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
379 + {
380 +- return sys_setfsuid((uid_t)uid);
381 ++ return sys_setfsuid(low2highuid(uid));
382 + }
383 +
384 + COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
385 + {
386 +- return sys_setfsgid((gid_t)gid);
387 ++ return sys_setfsgid(low2highgid(gid));
388 + }
389 +
390 + static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
391 +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
392 +index d76a97653980..92c55738d543 100644
393 +--- a/arch/x86/entry/entry_64_compat.S
394 ++++ b/arch/x86/entry/entry_64_compat.S
395 +@@ -83,15 +83,25 @@ ENTRY(entry_SYSENTER_compat)
396 + pushq %rcx /* pt_regs->cx */
397 + pushq $-ENOSYS /* pt_regs->ax */
398 + pushq $0 /* pt_regs->r8 = 0 */
399 ++ xorq %r8, %r8 /* nospec r8 */
400 + pushq $0 /* pt_regs->r9 = 0 */
401 ++ xorq %r9, %r9 /* nospec r9 */
402 + pushq $0 /* pt_regs->r10 = 0 */
403 ++ xorq %r10, %r10 /* nospec r10 */
404 + pushq $0 /* pt_regs->r11 = 0 */
405 ++ xorq %r11, %r11 /* nospec r11 */
406 + pushq %rbx /* pt_regs->rbx */
407 ++ xorl %ebx, %ebx /* nospec rbx */
408 + pushq %rbp /* pt_regs->rbp (will be overwritten) */
409 ++ xorl %ebp, %ebp /* nospec rbp */
410 + pushq $0 /* pt_regs->r12 = 0 */
411 ++ xorq %r12, %r12 /* nospec r12 */
412 + pushq $0 /* pt_regs->r13 = 0 */
413 ++ xorq %r13, %r13 /* nospec r13 */
414 + pushq $0 /* pt_regs->r14 = 0 */
415 ++ xorq %r14, %r14 /* nospec r14 */
416 + pushq $0 /* pt_regs->r15 = 0 */
417 ++ xorq %r15, %r15 /* nospec r15 */
418 + cld
419 +
420 + /*
421 +@@ -209,15 +219,25 @@ ENTRY(entry_SYSCALL_compat)
422 + pushq %rbp /* pt_regs->cx (stashed in bp) */
423 + pushq $-ENOSYS /* pt_regs->ax */
424 + pushq $0 /* pt_regs->r8 = 0 */
425 ++ xorq %r8, %r8 /* nospec r8 */
426 + pushq $0 /* pt_regs->r9 = 0 */
427 ++ xorq %r9, %r9 /* nospec r9 */
428 + pushq $0 /* pt_regs->r10 = 0 */
429 ++ xorq %r10, %r10 /* nospec r10 */
430 + pushq $0 /* pt_regs->r11 = 0 */
431 ++ xorq %r11, %r11 /* nospec r11 */
432 + pushq %rbx /* pt_regs->rbx */
433 ++ xorl %ebx, %ebx /* nospec rbx */
434 + pushq %rbp /* pt_regs->rbp (will be overwritten) */
435 ++ xorl %ebp, %ebp /* nospec rbp */
436 + pushq $0 /* pt_regs->r12 = 0 */
437 ++ xorq %r12, %r12 /* nospec r12 */
438 + pushq $0 /* pt_regs->r13 = 0 */
439 ++ xorq %r13, %r13 /* nospec r13 */
440 + pushq $0 /* pt_regs->r14 = 0 */
441 ++ xorq %r14, %r14 /* nospec r14 */
442 + pushq $0 /* pt_regs->r15 = 0 */
443 ++ xorq %r15, %r15 /* nospec r15 */
444 +
445 + /*
446 + * User mode is traced as though IRQs are on, and SYSENTER
447 +@@ -320,15 +340,25 @@ ENTRY(entry_INT80_compat)
448 + pushq %rcx /* pt_regs->cx */
449 + pushq $-ENOSYS /* pt_regs->ax */
450 + pushq $0 /* pt_regs->r8 = 0 */
451 ++ xorq %r8, %r8 /* nospec r8 */
452 + pushq $0 /* pt_regs->r9 = 0 */
453 ++ xorq %r9, %r9 /* nospec r9 */
454 + pushq $0 /* pt_regs->r10 = 0 */
455 ++ xorq %r10, %r10 /* nospec r10 */
456 + pushq $0 /* pt_regs->r11 = 0 */
457 ++ xorq %r11, %r11 /* nospec r11 */
458 + pushq %rbx /* pt_regs->rbx */
459 ++ xorl %ebx, %ebx /* nospec rbx */
460 + pushq %rbp /* pt_regs->rbp */
461 ++ xorl %ebp, %ebp /* nospec rbp */
462 + pushq %r12 /* pt_regs->r12 */
463 ++ xorq %r12, %r12 /* nospec r12 */
464 + pushq %r13 /* pt_regs->r13 */
465 ++ xorq %r13, %r13 /* nospec r13 */
466 + pushq %r14 /* pt_regs->r14 */
467 ++ xorq %r14, %r14 /* nospec r14 */
468 + pushq %r15 /* pt_regs->r15 */
469 ++ xorq %r15, %r15 /* nospec r15 */
470 + cld
471 +
472 + /*
473 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
474 +index f0f197f459b5..0bd0c1cc3228 100644
475 +--- a/arch/x86/events/intel/core.c
476 ++++ b/arch/x86/events/intel/core.c
477 +@@ -3363,7 +3363,7 @@ static int intel_snb_pebs_broken(int cpu)
478 + break;
479 +
480 + case INTEL_FAM6_SANDYBRIDGE_X:
481 +- switch (cpu_data(cpu).x86_mask) {
482 ++ switch (cpu_data(cpu).x86_stepping) {
483 + case 6: rev = 0x618; break;
484 + case 7: rev = 0x70c; break;
485 + }
486 +diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
487 +index f924629836a8..5d103a87e984 100644
488 +--- a/arch/x86/events/intel/lbr.c
489 ++++ b/arch/x86/events/intel/lbr.c
490 +@@ -1131,7 +1131,7 @@ void __init intel_pmu_lbr_init_atom(void)
491 + * on PMU interrupt
492 + */
493 + if (boot_cpu_data.x86_model == 28
494 +- && boot_cpu_data.x86_mask < 10) {
495 ++ && boot_cpu_data.x86_stepping < 10) {
496 + pr_cont("LBR disabled due to erratum");
497 + return;
498 + }
499 +diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
500 +index 1f5c47ab4c65..c5e441baccc7 100644
501 +--- a/arch/x86/events/intel/p6.c
502 ++++ b/arch/x86/events/intel/p6.c
503 +@@ -233,7 +233,7 @@ static __initconst const struct x86_pmu p6_pmu = {
504 +
505 + static __init void p6_pmu_rdpmc_quirk(void)
506 + {
507 +- if (boot_cpu_data.x86_mask < 9) {
508 ++ if (boot_cpu_data.x86_stepping < 9) {
509 + /*
510 + * PPro erratum 26; fixed in stepping 9 and above.
511 + */
512 +diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
513 +index 5391b0ae7cc3..d32bab65de70 100644
514 +--- a/arch/x86/include/asm/acpi.h
515 ++++ b/arch/x86/include/asm/acpi.h
516 +@@ -92,7 +92,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
517 + if (boot_cpu_data.x86 == 0x0F &&
518 + boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
519 + boot_cpu_data.x86_model <= 0x05 &&
520 +- boot_cpu_data.x86_mask < 0x0A)
521 ++ boot_cpu_data.x86_stepping < 0x0A)
522 + return 1;
523 + else if (amd_e400_c1e_detected)
524 + return 1;
525 +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
526 +index 857590390397..78d1c6a3d221 100644
527 +--- a/arch/x86/include/asm/barrier.h
528 ++++ b/arch/x86/include/asm/barrier.h
529 +@@ -39,7 +39,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
530 +
531 + asm ("cmp %1,%2; sbb %0,%0;"
532 + :"=r" (mask)
533 +- :"r"(size),"r" (index)
534 ++ :"g"(size),"r" (index)
535 + :"cc");
536 + return mask;
537 + }
538 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
539 +index 300cc159b4a0..76b058533e47 100644
540 +--- a/arch/x86/include/asm/nospec-branch.h
541 ++++ b/arch/x86/include/asm/nospec-branch.h
542 +@@ -6,6 +6,7 @@
543 + #include <asm/alternative.h>
544 + #include <asm/alternative-asm.h>
545 + #include <asm/cpufeatures.h>
546 ++#include <asm/msr-index.h>
547 +
548 + #ifdef __ASSEMBLY__
549 +
550 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
551 +index cb866ae1bc5d..ec15ca2b32d0 100644
552 +--- a/arch/x86/include/asm/processor.h
553 ++++ b/arch/x86/include/asm/processor.h
554 +@@ -88,7 +88,7 @@ struct cpuinfo_x86 {
555 + __u8 x86; /* CPU family */
556 + __u8 x86_vendor; /* CPU vendor */
557 + __u8 x86_model;
558 +- __u8 x86_mask;
559 ++ __u8 x86_stepping;
560 + #ifdef CONFIG_X86_32
561 + char wp_works_ok; /* It doesn't on 386's */
562 +
563 +@@ -113,7 +113,7 @@ struct cpuinfo_x86 {
564 + char x86_vendor_id[16];
565 + char x86_model_id[64];
566 + /* in KB - valid for CPUS which support this call: */
567 +- int x86_cache_size;
568 ++ unsigned int x86_cache_size;
569 + int x86_cache_alignment; /* In bytes */
570 + /* Cache QoS architectural values: */
571 + int x86_cache_max_rmid; /* max index */
572 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
573 +index 4fdf6230d93c..8462e2d4ed94 100644
574 +--- a/arch/x86/kernel/amd_nb.c
575 ++++ b/arch/x86/kernel/amd_nb.c
576 +@@ -105,7 +105,7 @@ int amd_cache_northbridges(void)
577 + if (boot_cpu_data.x86 == 0x10 &&
578 + boot_cpu_data.x86_model >= 0x8 &&
579 + (boot_cpu_data.x86_model > 0x9 ||
580 +- boot_cpu_data.x86_mask >= 0x1))
581 ++ boot_cpu_data.x86_stepping >= 0x1))
582 + amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
583 +
584 + if (boot_cpu_data.x86 == 0x15)
585 +diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
586 +index 880aa093268d..36ebb6de1a03 100644
587 +--- a/arch/x86/kernel/asm-offsets_32.c
588 ++++ b/arch/x86/kernel/asm-offsets_32.c
589 +@@ -20,7 +20,7 @@ void foo(void)
590 + OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
591 + OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
592 + OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
593 +- OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
594 ++ OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
595 + OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
596 + OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
597 + OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
598 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
599 +index 1b89f0c4251e..c375bc672f82 100644
600 +--- a/arch/x86/kernel/cpu/amd.c
601 ++++ b/arch/x86/kernel/cpu/amd.c
602 +@@ -118,7 +118,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
603 + return;
604 + }
605 +
606 +- if (c->x86_model == 6 && c->x86_mask == 1) {
607 ++ if (c->x86_model == 6 && c->x86_stepping == 1) {
608 + const int K6_BUG_LOOP = 1000000;
609 + int n;
610 + void (*f_vide)(void);
611 +@@ -147,7 +147,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
612 +
613 + /* K6 with old style WHCR */
614 + if (c->x86_model < 8 ||
615 +- (c->x86_model == 8 && c->x86_mask < 8)) {
616 ++ (c->x86_model == 8 && c->x86_stepping < 8)) {
617 + /* We can only write allocate on the low 508Mb */
618 + if (mbytes > 508)
619 + mbytes = 508;
620 +@@ -166,7 +166,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
621 + return;
622 + }
623 +
624 +- if ((c->x86_model == 8 && c->x86_mask > 7) ||
625 ++ if ((c->x86_model == 8 && c->x86_stepping > 7) ||
626 + c->x86_model == 9 || c->x86_model == 13) {
627 + /* The more serious chips .. */
628 +
629 +@@ -219,7 +219,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
630 + * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
631 + * As per AMD technical note 27212 0.2
632 + */
633 +- if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
634 ++ if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
635 + rdmsr(MSR_K7_CLK_CTL, l, h);
636 + if ((l & 0xfff00000) != 0x20000000) {
637 + pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
638 +@@ -239,12 +239,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
639 + * but they are not certified as MP capable.
640 + */
641 + /* Athlon 660/661 is valid. */
642 +- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
643 +- (c->x86_mask == 1)))
644 ++ if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
645 ++ (c->x86_stepping == 1)))
646 + return;
647 +
648 + /* Duron 670 is valid */
649 +- if ((c->x86_model == 7) && (c->x86_mask == 0))
650 ++ if ((c->x86_model == 7) && (c->x86_stepping == 0))
651 + return;
652 +
653 + /*
654 +@@ -254,8 +254,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
655 + * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
656 + * more.
657 + */
658 +- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
659 +- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
660 ++ if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
661 ++ ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
662 + (c->x86_model > 7))
663 + if (cpu_has(c, X86_FEATURE_MP))
664 + return;
665 +@@ -569,7 +569,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
666 + /* Set MTRR capability flag if appropriate */
667 + if (c->x86 == 5)
668 + if (c->x86_model == 13 || c->x86_model == 9 ||
669 +- (c->x86_model == 8 && c->x86_mask >= 8))
670 ++ (c->x86_model == 8 && c->x86_stepping >= 8))
671 + set_cpu_cap(c, X86_FEATURE_K6_MTRR);
672 + #endif
673 + #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
674 +@@ -834,11 +834,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
675 + /* AMD errata T13 (order #21922) */
676 + if ((c->x86 == 6)) {
677 + /* Duron Rev A0 */
678 +- if (c->x86_model == 3 && c->x86_mask == 0)
679 ++ if (c->x86_model == 3 && c->x86_stepping == 0)
680 + size = 64;
681 + /* Tbird rev A1/A2 */
682 + if (c->x86_model == 4 &&
683 +- (c->x86_mask == 0 || c->x86_mask == 1))
684 ++ (c->x86_stepping == 0 || c->x86_stepping == 1))
685 + size = 256;
686 + }
687 + return size;
688 +@@ -975,7 +975,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
689 + }
690 +
691 + /* OSVW unavailable or ID unknown, match family-model-stepping range */
692 +- ms = (cpu->x86_model << 4) | cpu->x86_mask;
693 ++ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
694 + while ((range = *erratum++))
695 + if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
696 + (ms >= AMD_MODEL_RANGE_START(range)) &&
697 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
698 +index 957ad443b786..baddc9ed3454 100644
699 +--- a/arch/x86/kernel/cpu/bugs.c
700 ++++ b/arch/x86/kernel/cpu/bugs.c
701 +@@ -161,8 +161,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
702 + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
703 + return SPECTRE_V2_CMD_NONE;
704 + else {
705 +- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
706 +- sizeof(arg));
707 ++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
708 + if (ret < 0)
709 + return SPECTRE_V2_CMD_AUTO;
710 +
711 +@@ -174,8 +173,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
712 + }
713 +
714 + if (i >= ARRAY_SIZE(mitigation_options)) {
715 +- pr_err("unknown option (%s). Switching to AUTO select\n",
716 +- mitigation_options[i].option);
717 ++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
718 + return SPECTRE_V2_CMD_AUTO;
719 + }
720 + }
721 +@@ -184,8 +182,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
722 + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
723 + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
724 + !IS_ENABLED(CONFIG_RETPOLINE)) {
725 +- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
726 +- mitigation_options[i].option);
727 ++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
728 + return SPECTRE_V2_CMD_AUTO;
729 + }
730 +
731 +@@ -255,14 +252,14 @@ static void __init spectre_v2_select_mitigation(void)
732 + goto retpoline_auto;
733 + break;
734 + }
735 +- pr_err("kernel not compiled with retpoline; no mitigation available!");
736 ++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
737 + return;
738 +
739 + retpoline_auto:
740 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
741 + retpoline_amd:
742 + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
743 +- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
744 ++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
745 + goto retpoline_generic;
746 + }
747 + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
748 +@@ -280,7 +277,7 @@ static void __init spectre_v2_select_mitigation(void)
749 + pr_info("%s\n", spectre_v2_strings[mode]);
750 +
751 + /*
752 +- * If neither SMEP or KPTI are available, there is a risk of
753 ++ * If neither SMEP nor PTI are available, there is a risk of
754 + * hitting userspace addresses in the RSB after a context switch
755 + * from a shallow call stack to a deeper one. To prevent this fill
756 + * the entire RSB, even when using IBRS.
757 +@@ -294,21 +291,20 @@ static void __init spectre_v2_select_mitigation(void)
758 + if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
759 + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
760 + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
761 +- pr_info("Filling RSB on context switch\n");
762 ++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
763 + }
764 +
765 + /* Initialize Indirect Branch Prediction Barrier if supported */
766 + if (boot_cpu_has(X86_FEATURE_IBPB)) {
767 + setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
768 +- pr_info("Enabling Indirect Branch Prediction Barrier\n");
769 ++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
770 + }
771 + }
772 +
773 + #undef pr_fmt
774 +
775 + #ifdef CONFIG_SYSFS
776 +-ssize_t cpu_show_meltdown(struct device *dev,
777 +- struct device_attribute *attr, char *buf)
778 ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
779 + {
780 + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
781 + return sprintf(buf, "Not affected\n");
782 +@@ -317,16 +313,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
783 + return sprintf(buf, "Vulnerable\n");
784 + }
785 +
786 +-ssize_t cpu_show_spectre_v1(struct device *dev,
787 +- struct device_attribute *attr, char *buf)
788 ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
789 + {
790 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
791 + return sprintf(buf, "Not affected\n");
792 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
793 + }
794 +
795 +-ssize_t cpu_show_spectre_v2(struct device *dev,
796 +- struct device_attribute *attr, char *buf)
797 ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
798 + {
799 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
800 + return sprintf(buf, "Not affected\n");
801 +diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
802 +index 1661d8ec9280..4d2f61f92fed 100644
803 +--- a/arch/x86/kernel/cpu/centaur.c
804 ++++ b/arch/x86/kernel/cpu/centaur.c
805 +@@ -134,7 +134,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
806 + clear_cpu_cap(c, X86_FEATURE_TSC);
807 + break;
808 + case 8:
809 +- switch (c->x86_mask) {
810 ++ switch (c->x86_stepping) {
811 + default:
812 + name = "2";
813 + break;
814 +@@ -209,7 +209,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
815 + * - Note, it seems this may only be in engineering samples.
816 + */
817 + if ((c->x86 == 6) && (c->x86_model == 9) &&
818 +- (c->x86_mask == 1) && (size == 65))
819 ++ (c->x86_stepping == 1) && (size == 65))
820 + size -= 1;
821 + return size;
822 + }
823 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
824 +index 08e89ed6aa87..301bbd1f2373 100644
825 +--- a/arch/x86/kernel/cpu/common.c
826 ++++ b/arch/x86/kernel/cpu/common.c
827 +@@ -699,7 +699,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
828 + cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
829 + c->x86 = x86_family(tfms);
830 + c->x86_model = x86_model(tfms);
831 +- c->x86_mask = x86_stepping(tfms);
832 ++ c->x86_stepping = x86_stepping(tfms);
833 +
834 + if (cap0 & (1<<19)) {
835 + c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
836 +@@ -1144,9 +1144,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
837 + int i;
838 +
839 + c->loops_per_jiffy = loops_per_jiffy;
840 +- c->x86_cache_size = -1;
841 ++ c->x86_cache_size = 0;
842 + c->x86_vendor = X86_VENDOR_UNKNOWN;
843 +- c->x86_model = c->x86_mask = 0; /* So far unknown... */
844 ++ c->x86_model = c->x86_stepping = 0; /* So far unknown... */
845 + c->x86_vendor_id[0] = '\0'; /* Unset */
846 + c->x86_model_id[0] = '\0'; /* Unset */
847 + c->x86_max_cores = 1;
848 +@@ -1391,8 +1391,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
849 +
850 + pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
851 +
852 +- if (c->x86_mask || c->cpuid_level >= 0)
853 +- pr_cont(", stepping: 0x%x)\n", c->x86_mask);
854 ++ if (c->x86_stepping || c->cpuid_level >= 0)
855 ++ pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
856 + else
857 + pr_cont(")\n");
858 +
859 +diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
860 +index bd9dcd6b712d..455d8ada9b9a 100644
861 +--- a/arch/x86/kernel/cpu/cyrix.c
862 ++++ b/arch/x86/kernel/cpu/cyrix.c
863 +@@ -212,7 +212,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
864 +
865 + /* common case step number/rev -- exceptions handled below */
866 + c->x86_model = (dir1 >> 4) + 1;
867 +- c->x86_mask = dir1 & 0xf;
868 ++ c->x86_stepping = dir1 & 0xf;
869 +
870 + /* Now cook; the original recipe is by Channing Corn, from Cyrix.
871 + * We do the same thing for each generation: we work out
872 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
873 +index 4097b43cba2d..6ed206bd9071 100644
874 +--- a/arch/x86/kernel/cpu/intel.c
875 ++++ b/arch/x86/kernel/cpu/intel.c
876 +@@ -75,14 +75,13 @@ struct sku_microcode {
877 + u32 microcode;
878 + };
879 + static const struct sku_microcode spectre_bad_microcodes[] = {
880 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
881 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
882 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
883 +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
884 +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
885 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
886 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
887 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
888 ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
889 ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
890 + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
891 + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
892 +- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
893 + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
894 + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
895 + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
896 +@@ -95,8 +94,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
897 + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
898 + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
899 + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
900 +- /* Updated in the 20180108 release; blacklist until we know otherwise */
901 +- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
902 + /* Observed in the wild */
903 + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
904 + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
905 +@@ -108,7 +105,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
906 +
907 + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
908 + if (c->x86_model == spectre_bad_microcodes[i].model &&
909 +- c->x86_mask == spectre_bad_microcodes[i].stepping)
910 ++ c->x86_stepping == spectre_bad_microcodes[i].stepping)
911 + return (c->microcode <= spectre_bad_microcodes[i].microcode);
912 + }
913 + return false;
914 +@@ -161,7 +158,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
915 + * need the microcode to have already been loaded... so if it is
916 + * not, recommend a BIOS update and disable large pages.
917 + */
918 +- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
919 ++ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
920 + c->microcode < 0x20e) {
921 + pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
922 + clear_cpu_cap(c, X86_FEATURE_PSE);
923 +@@ -177,7 +174,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
924 +
925 + /* CPUID workaround for 0F33/0F34 CPU */
926 + if (c->x86 == 0xF && c->x86_model == 0x3
927 +- && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
928 ++ && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
929 + c->x86_phys_bits = 36;
930 +
931 + /*
932 +@@ -292,7 +289,7 @@ int ppro_with_ram_bug(void)
933 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
934 + boot_cpu_data.x86 == 6 &&
935 + boot_cpu_data.x86_model == 1 &&
936 +- boot_cpu_data.x86_mask < 8) {
937 ++ boot_cpu_data.x86_stepping < 8) {
938 + pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
939 + return 1;
940 + }
941 +@@ -309,7 +306,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
942 + * Mask B, Pentium, but not Pentium MMX
943 + */
944 + if (c->x86 == 5 &&
945 +- c->x86_mask >= 1 && c->x86_mask <= 4 &&
946 ++ c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
947 + c->x86_model <= 3) {
948 + /*
949 + * Remember we have B step Pentia with bugs
950 +@@ -352,7 +349,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
951 + * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
952 + * model 3 mask 3
953 + */
954 +- if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
955 ++ if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
956 + clear_cpu_cap(c, X86_FEATURE_SEP);
957 +
958 + /*
959 +@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
960 + * P4 Xeon erratum 037 workaround.
961 + * Hardware prefetcher may cause stale data to be loaded into the cache.
962 + */
963 +- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
964 ++ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
965 + if (msr_set_bit(MSR_IA32_MISC_ENABLE,
966 + MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
967 + pr_info("CPU: C0 stepping P4 Xeon detected.\n");
968 +@@ -385,7 +382,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
969 + * Specification Update").
970 + */
971 + if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
972 +- (c->x86_mask < 0x6 || c->x86_mask == 0xb))
973 ++ (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
974 + set_cpu_bug(c, X86_BUG_11AP);
975 +
976 +
977 +@@ -604,7 +601,7 @@ static void init_intel(struct cpuinfo_x86 *c)
978 + case 6:
979 + if (l2 == 128)
980 + p = "Celeron (Mendocino)";
981 +- else if (c->x86_mask == 0 || c->x86_mask == 5)
982 ++ else if (c->x86_stepping == 0 || c->x86_stepping == 5)
983 + p = "Celeron-A";
984 + break;
985 +
986 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
987 +index f90f17610f62..4bcd30c87531 100644
988 +--- a/arch/x86/kernel/cpu/microcode/intel.c
989 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
990 +@@ -1062,7 +1062,7 @@ static bool is_blacklisted(unsigned int cpu)
991 + */
992 + if (c->x86 == 6 &&
993 + c->x86_model == INTEL_FAM6_BROADWELL_X &&
994 +- c->x86_mask == 0x01 &&
995 ++ c->x86_stepping == 0x01 &&
996 + llc_size_per_core > 2621440 &&
997 + c->microcode < 0x0b000021) {
998 + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
999 +@@ -1085,7 +1085,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
1000 + return UCODE_NFOUND;
1001 +
1002 + sprintf(name, "intel-ucode/%02x-%02x-%02x",
1003 +- c->x86, c->x86_model, c->x86_mask);
1004 ++ c->x86, c->x86_model, c->x86_stepping);
1005 +
1006 + if (request_firmware_direct(&firmware, name, device)) {
1007 + pr_debug("data file %s load failed\n", name);
1008 +@@ -1132,7 +1132,7 @@ static struct microcode_ops microcode_intel_ops = {
1009 +
1010 + static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
1011 + {
1012 +- u64 llc_size = c->x86_cache_size * 1024;
1013 ++ u64 llc_size = c->x86_cache_size * 1024ULL;
1014 +
1015 + do_div(llc_size, c->x86_max_cores);
1016 +
1017 +diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
1018 +index fdc55215d44d..e12ee86906c6 100644
1019 +--- a/arch/x86/kernel/cpu/mtrr/generic.c
1020 ++++ b/arch/x86/kernel/cpu/mtrr/generic.c
1021 +@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
1022 + */
1023 + if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
1024 + boot_cpu_data.x86_model == 1 &&
1025 +- boot_cpu_data.x86_mask <= 7) {
1026 ++ boot_cpu_data.x86_stepping <= 7) {
1027 + if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
1028 + pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
1029 + return -EINVAL;
1030 +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
1031 +index 24e87e74990d..fae740c22657 100644
1032 +--- a/arch/x86/kernel/cpu/mtrr/main.c
1033 ++++ b/arch/x86/kernel/cpu/mtrr/main.c
1034 +@@ -699,8 +699,8 @@ void __init mtrr_bp_init(void)
1035 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
1036 + boot_cpu_data.x86 == 0xF &&
1037 + boot_cpu_data.x86_model == 0x3 &&
1038 +- (boot_cpu_data.x86_mask == 0x3 ||
1039 +- boot_cpu_data.x86_mask == 0x4))
1040 ++ (boot_cpu_data.x86_stepping == 0x3 ||
1041 ++ boot_cpu_data.x86_stepping == 0x4))
1042 + phys_addr = 36;
1043 +
1044 + size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
1045 +diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
1046 +index 18ca99f2798b..c4f772d3f35c 100644
1047 +--- a/arch/x86/kernel/cpu/proc.c
1048 ++++ b/arch/x86/kernel/cpu/proc.c
1049 +@@ -70,8 +70,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1050 + c->x86_model,
1051 + c->x86_model_id[0] ? c->x86_model_id : "unknown");
1052 +
1053 +- if (c->x86_mask || c->cpuid_level >= 0)
1054 +- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1055 ++ if (c->x86_stepping || c->cpuid_level >= 0)
1056 ++ seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
1057 + else
1058 + seq_puts(m, "stepping\t: unknown\n");
1059 + if (c->microcode)
1060 +@@ -87,8 +87,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1061 + }
1062 +
1063 + /* Cache size */
1064 +- if (c->x86_cache_size >= 0)
1065 +- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1066 ++ if (c->x86_cache_size)
1067 ++ seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
1068 +
1069 + show_cpuinfo_core(m, c, cpu);
1070 + show_cpuinfo_misc(m, c);
1071 +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
1072 +index 2dabea46f039..82155d0cc310 100644
1073 +--- a/arch/x86/kernel/head_32.S
1074 ++++ b/arch/x86/kernel/head_32.S
1075 +@@ -35,7 +35,7 @@
1076 + #define X86 new_cpu_data+CPUINFO_x86
1077 + #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
1078 + #define X86_MODEL new_cpu_data+CPUINFO_x86_model
1079 +-#define X86_MASK new_cpu_data+CPUINFO_x86_mask
1080 ++#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
1081 + #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
1082 + #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
1083 + #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
1084 +@@ -441,7 +441,7 @@ enable_paging:
1085 + shrb $4,%al
1086 + movb %al,X86_MODEL
1087 + andb $0x0f,%cl # mask mask revision
1088 +- movb %cl,X86_MASK
1089 ++ movb %cl,X86_STEPPING
1090 + movl %edx,X86_CAPABILITY
1091 +
1092 + is486:
1093 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
1094 +index 0f8d20497383..d0fb941330c6 100644
1095 +--- a/arch/x86/kernel/mpparse.c
1096 ++++ b/arch/x86/kernel/mpparse.c
1097 +@@ -406,7 +406,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
1098 + processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
1099 + processor.cpuflag = CPU_ENABLED;
1100 + processor.cpufeature = (boot_cpu_data.x86 << 8) |
1101 +- (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
1102 ++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
1103 + processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
1104 + processor.reserved[0] = 0;
1105 + processor.reserved[1] = 0;
1106 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1107 +index 0a324e120942..a16c06604a56 100644
1108 +--- a/arch/x86/kvm/mmu.c
1109 ++++ b/arch/x86/kvm/mmu.c
1110 +@@ -4640,7 +4640,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
1111 + typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
1112 +
1113 + /* The caller should hold mmu-lock before calling this function. */
1114 +-static bool
1115 ++static __always_inline bool
1116 + slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
1117 + slot_level_handler fn, int start_level, int end_level,
1118 + gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
1119 +@@ -4670,7 +4670,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
1120 + return flush;
1121 + }
1122 +
1123 +-static bool
1124 ++static __always_inline bool
1125 + slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
1126 + slot_level_handler fn, int start_level, int end_level,
1127 + bool lock_flush_tlb)
1128 +@@ -4681,7 +4681,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
1129 + lock_flush_tlb);
1130 + }
1131 +
1132 +-static bool
1133 ++static __always_inline bool
1134 + slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
1135 + slot_level_handler fn, bool lock_flush_tlb)
1136 + {
1137 +@@ -4689,7 +4689,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
1138 + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
1139 + }
1140 +
1141 +-static bool
1142 ++static __always_inline bool
1143 + slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
1144 + slot_level_handler fn, bool lock_flush_tlb)
1145 + {
1146 +@@ -4697,7 +4697,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
1147 + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
1148 + }
1149 +
1150 +-static bool
1151 ++static __always_inline bool
1152 + slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
1153 + slot_level_handler fn, bool lock_flush_tlb)
1154 + {
1155 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1156 +index d66224e695cf..1e16821c1378 100644
1157 +--- a/arch/x86/kvm/vmx.c
1158 ++++ b/arch/x86/kvm/vmx.c
1159 +@@ -9606,8 +9606,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
1160 + * updated to reflect this when L1 (or its L2s) actually write to
1161 + * the MSR.
1162 + */
1163 +- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
1164 +- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
1165 ++ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
1166 ++ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
1167 +
1168 + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
1169 + !pred_cmd && !spec_ctrl)
1170 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
1171 +index d6f848d1211d..2dd1fe13a37b 100644
1172 +--- a/arch/x86/lib/cpu.c
1173 ++++ b/arch/x86/lib/cpu.c
1174 +@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
1175 + {
1176 + unsigned int fam, model;
1177 +
1178 +- fam = x86_family(sig);
1179 ++ fam = x86_family(sig);
1180 +
1181 + model = (sig >> 4) & 0xf;
1182 +
1183 +diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
1184 +index 44ce80606944..e278125ddf41 100644
1185 +--- a/drivers/char/hw_random/via-rng.c
1186 ++++ b/drivers/char/hw_random/via-rng.c
1187 +@@ -166,7 +166,7 @@ static int via_rng_init(struct hwrng *rng)
1188 + /* Enable secondary noise source on CPUs where it is present. */
1189 +
1190 + /* Nehemiah stepping 8 and higher */
1191 +- if ((c->x86_model == 9) && (c->x86_mask > 7))
1192 ++ if ((c->x86_model == 9) && (c->x86_stepping > 7))
1193 + lo |= VIA_NOISESRC2;
1194 +
1195 + /* Esther */
1196 +diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
1197 +index 297e9128fe9f..1ee3674a99bb 100644
1198 +--- a/drivers/cpufreq/acpi-cpufreq.c
1199 ++++ b/drivers/cpufreq/acpi-cpufreq.c
1200 +@@ -648,7 +648,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
1201 + if (c->x86_vendor == X86_VENDOR_INTEL) {
1202 + if ((c->x86 == 15) &&
1203 + (c->x86_model == 6) &&
1204 +- (c->x86_mask == 8)) {
1205 ++ (c->x86_stepping == 8)) {
1206 + pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
1207 + return -ENODEV;
1208 + }
1209 +diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
1210 +index c46a12df40dd..d5e27bc7585a 100644
1211 +--- a/drivers/cpufreq/longhaul.c
1212 ++++ b/drivers/cpufreq/longhaul.c
1213 +@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
1214 + break;
1215 +
1216 + case 7:
1217 +- switch (c->x86_mask) {
1218 ++ switch (c->x86_stepping) {
1219 + case 0:
1220 + longhaul_version = TYPE_LONGHAUL_V1;
1221 + cpu_model = CPU_SAMUEL2;
1222 +@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
1223 + break;
1224 + case 1 ... 15:
1225 + longhaul_version = TYPE_LONGHAUL_V2;
1226 +- if (c->x86_mask < 8) {
1227 ++ if (c->x86_stepping < 8) {
1228 + cpu_model = CPU_SAMUEL2;
1229 + cpuname = "C3 'Samuel 2' [C5B]";
1230 + } else {
1231 +@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
1232 + numscales = 32;
1233 + memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
1234 + memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
1235 +- switch (c->x86_mask) {
1236 ++ switch (c->x86_stepping) {
1237 + case 0 ... 1:
1238 + cpu_model = CPU_NEHEMIAH;
1239 + cpuname = "C3 'Nehemiah A' [C5XLOE]";
1240 +diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
1241 +index fd77812313f3..a25741b1281b 100644
1242 +--- a/drivers/cpufreq/p4-clockmod.c
1243 ++++ b/drivers/cpufreq/p4-clockmod.c
1244 +@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
1245 + #endif
1246 +
1247 + /* Errata workaround */
1248 +- cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
1249 ++ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
1250 + switch (cpuid) {
1251 + case 0x0f07:
1252 + case 0x0f0a:
1253 +diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
1254 +index 9f013ed42977..ef276f6a8c46 100644
1255 +--- a/drivers/cpufreq/powernow-k7.c
1256 ++++ b/drivers/cpufreq/powernow-k7.c
1257 +@@ -131,7 +131,7 @@ static int check_powernow(void)
1258 + return 0;
1259 + }
1260 +
1261 +- if ((c->x86_model == 6) && (c->x86_mask == 0)) {
1262 ++ if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
1263 + pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
1264 + have_a0 = 1;
1265 + }
1266 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
1267 +index a84724eabfb8..6fb3cd24c1b6 100644
1268 +--- a/drivers/cpufreq/powernv-cpufreq.c
1269 ++++ b/drivers/cpufreq/powernv-cpufreq.c
1270 +@@ -260,9 +260,9 @@ static int init_powernv_pstates(void)
1271 +
1272 + if (id == pstate_max)
1273 + powernv_pstate_info.max = i;
1274 +- else if (id == pstate_nominal)
1275 ++ if (id == pstate_nominal)
1276 + powernv_pstate_info.nominal = i;
1277 +- else if (id == pstate_min)
1278 ++ if (id == pstate_min)
1279 + powernv_pstate_info.min = i;
1280 + }
1281 +
1282 +diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
1283 +index 41bc5397f4bb..4fa5adf16c70 100644
1284 +--- a/drivers/cpufreq/speedstep-centrino.c
1285 ++++ b/drivers/cpufreq/speedstep-centrino.c
1286 +@@ -37,7 +37,7 @@ struct cpu_id
1287 + {
1288 + __u8 x86; /* CPU family */
1289 + __u8 x86_model; /* model */
1290 +- __u8 x86_mask; /* stepping */
1291 ++ __u8 x86_stepping; /* stepping */
1292 + };
1293 +
1294 + enum {
1295 +@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
1296 + {
1297 + if ((c->x86 == x->x86) &&
1298 + (c->x86_model == x->x86_model) &&
1299 +- (c->x86_mask == x->x86_mask))
1300 ++ (c->x86_stepping == x->x86_stepping))
1301 + return 1;
1302 + return 0;
1303 + }
1304 +diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
1305 +index 1b8062182c81..ade98a219cc1 100644
1306 +--- a/drivers/cpufreq/speedstep-lib.c
1307 ++++ b/drivers/cpufreq/speedstep-lib.c
1308 +@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
1309 + ebx = cpuid_ebx(0x00000001);
1310 + ebx &= 0x000000FF;
1311 +
1312 +- pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
1313 ++ pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
1314 +
1315 +- switch (c->x86_mask) {
1316 ++ switch (c->x86_stepping) {
1317 + case 4:
1318 + /*
1319 + * B-stepping [M-P4-M]
1320 +@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
1321 + msr_lo, msr_hi);
1322 + if ((msr_hi & (1<<18)) &&
1323 + (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
1324 +- if (c->x86_mask == 0x01) {
1325 ++ if (c->x86_stepping == 0x01) {
1326 + pr_debug("early PIII version\n");
1327 + return SPEEDSTEP_CPU_PIII_C_EARLY;
1328 + } else
1329 +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
1330 +index 441e86b23571..9126627cbf4d 100644
1331 +--- a/drivers/crypto/padlock-aes.c
1332 ++++ b/drivers/crypto/padlock-aes.c
1333 +@@ -531,7 +531,7 @@ static int __init padlock_init(void)
1334 +
1335 + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
1336 +
1337 +- if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
1338 ++ if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
1339 + ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
1340 + cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
1341 + printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
1342 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
1343 +index a2449d77af07..9e5674c5a07b 100644
1344 +--- a/drivers/devfreq/devfreq.c
1345 ++++ b/drivers/devfreq/devfreq.c
1346 +@@ -684,7 +684,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
1347 + devfreq = devfreq_add_device(dev, profile, governor_name, data);
1348 + if (IS_ERR(devfreq)) {
1349 + devres_free(ptr);
1350 +- return ERR_PTR(-ENOMEM);
1351 ++ return devfreq;
1352 + }
1353 +
1354 + *ptr = devfreq;
1355 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1356 +index 6e197c1c213d..1c5f23224b3c 100644
1357 +--- a/drivers/edac/amd64_edac.c
1358 ++++ b/drivers/edac/amd64_edac.c
1359 +@@ -2719,7 +2719,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
1360 + struct amd64_family_type *fam_type = NULL;
1361 +
1362 + pvt->ext_model = boot_cpu_data.x86_model >> 4;
1363 +- pvt->stepping = boot_cpu_data.x86_mask;
1364 ++ pvt->stepping = boot_cpu_data.x86_stepping;
1365 + pvt->model = boot_cpu_data.x86_model;
1366 + pvt->fam = boot_cpu_data.x86;
1367 +
1368 +diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
1369 +index 7db692ed3dea..ac0c6c83b6d6 100644
1370 +--- a/drivers/edac/mce_amd.c
1371 ++++ b/drivers/edac/mce_amd.c
1372 +@@ -948,7 +948,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
1373 +
1374 + pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
1375 + m->extcpu,
1376 +- c->x86, c->x86_model, c->x86_mask,
1377 ++ c->x86, c->x86_model, c->x86_stepping,
1378 + m->bank,
1379 + ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
1380 + ((m->status & MCI_STATUS_UC) ? "UE" :
1381 +diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1382 +index 0cd0e7bdee55..16239b07ce45 100644
1383 +--- a/drivers/gpu/drm/radeon/radeon_uvd.c
1384 ++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1385 +@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1386 + /* calc dclk divider with current vco freq */
1387 + dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
1388 + pd_min, pd_even);
1389 +- if (vclk_div > pd_max)
1390 ++ if (dclk_div > pd_max)
1391 + break; /* vco is too big, it has to stop */
1392 +
1393 + /* calc score with current vco freq */
1394 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1395 +index 8bd9e6c371d1..574ab0016a57 100644
1396 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1397 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1398 +@@ -3029,6 +3029,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1399 + max_sclk = 75000;
1400 + max_mclk = 80000;
1401 + }
1402 ++ if ((rdev->pdev->revision == 0xC3) ||
1403 ++ (rdev->pdev->device == 0x6665)) {
1404 ++ max_sclk = 60000;
1405 ++ max_mclk = 80000;
1406 ++ }
1407 + } else if (rdev->family == CHIP_OLAND) {
1408 + if ((rdev->pdev->revision == 0xC7) ||
1409 + (rdev->pdev->revision == 0x80) ||
1410 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
1411 +index 6a27eb2fed17..be1e380fa1c3 100644
1412 +--- a/drivers/hwmon/coretemp.c
1413 ++++ b/drivers/hwmon/coretemp.c
1414 +@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
1415 + for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
1416 + const struct tjmax_model *tm = &tjmax_model_table[i];
1417 + if (c->x86_model == tm->model &&
1418 +- (tm->mask == ANY || c->x86_mask == tm->mask))
1419 ++ (tm->mask == ANY || c->x86_stepping == tm->mask))
1420 + return tm->tjmax;
1421 + }
1422 +
1423 + /* Early chips have no MSR for TjMax */
1424 +
1425 +- if (c->x86_model == 0xf && c->x86_mask < 4)
1426 ++ if (c->x86_model == 0xf && c->x86_stepping < 4)
1427 + usemsr_ee = 0;
1428 +
1429 + if (c->x86_model > 0xe && usemsr_ee) {
1430 +@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
1431 + * Readings might stop update when processor visited too deep sleep,
1432 + * fixed for stepping D0 (6EC).
1433 + */
1434 +- if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
1435 ++ if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
1436 + pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
1437 + return -ENODEV;
1438 + }
1439 +diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
1440 +index ef91b8a67549..84e91286fc4f 100644
1441 +--- a/drivers/hwmon/hwmon-vid.c
1442 ++++ b/drivers/hwmon/hwmon-vid.c
1443 +@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
1444 + if (c->x86 < 6) /* Any CPU with family lower than 6 */
1445 + return 0; /* doesn't have VID */
1446 +
1447 +- vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
1448 ++ vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
1449 + if (vrm_ret == 134)
1450 + vrm_ret = get_via_model_d_vrm();
1451 + if (vrm_ret == 0)
1452 +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
1453 +index 9cdfde6515ad..0124584a6a6d 100644
1454 +--- a/drivers/hwmon/k10temp.c
1455 ++++ b/drivers/hwmon/k10temp.c
1456 +@@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
1457 + * and AM3 formats, but that's the best we can do.
1458 + */
1459 + return boot_cpu_data.x86_model < 4 ||
1460 +- (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
1461 ++ (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
1462 + }
1463 +
1464 + static int k10temp_probe(struct pci_dev *pdev,
1465 +diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
1466 +index 734d55d48cc8..486502798fc5 100644
1467 +--- a/drivers/hwmon/k8temp.c
1468 ++++ b/drivers/hwmon/k8temp.c
1469 +@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
1470 + return -ENOMEM;
1471 +
1472 + model = boot_cpu_data.x86_model;
1473 +- stepping = boot_cpu_data.x86_mask;
1474 ++ stepping = boot_cpu_data.x86_stepping;
1475 +
1476 + /* feature available since SH-C0, exclude older revisions */
1477 + if ((model == 4 && stepping == 0) ||
1478 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1479 +index 8059b7eaf3a8..c41c8d0a4ac0 100644
1480 +--- a/drivers/infiniband/hw/mlx4/main.c
1481 ++++ b/drivers/infiniband/hw/mlx4/main.c
1482 +@@ -2928,9 +2928,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1483 + kfree(ibdev->ib_uc_qpns_bitmap);
1484 +
1485 + err_steer_qp_release:
1486 +- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
1487 +- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
1488 +- ibdev->steer_qpn_count);
1489 ++ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
1490 ++ ibdev->steer_qpn_count);
1491 + err_counter:
1492 + for (i = 0; i < ibdev->num_ports; ++i)
1493 + mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
1494 +@@ -3035,11 +3034,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
1495 + ibdev->iboe.nb.notifier_call = NULL;
1496 + }
1497 +
1498 +- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1499 +- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
1500 +- ibdev->steer_qpn_count);
1501 +- kfree(ibdev->ib_uc_qpns_bitmap);
1502 +- }
1503 ++ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
1504 ++ ibdev->steer_qpn_count);
1505 ++ kfree(ibdev->ib_uc_qpns_bitmap);
1506 +
1507 + iounmap(ibdev->uar_map);
1508 + for (p = 0; p < ibdev->num_ports; ++p)
1509 +diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
1510 +index c1523f9a3c12..e4d4f5c44afe 100644
1511 +--- a/drivers/infiniband/hw/qib/qib_rc.c
1512 ++++ b/drivers/infiniband/hw/qib/qib_rc.c
1513 +@@ -443,13 +443,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
1514 + qp->s_state = OP(COMPARE_SWAP);
1515 + put_ib_ateth_swap(wqe->atomic_wr.swap,
1516 + &ohdr->u.atomic_eth);
1517 +- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
1518 +- &ohdr->u.atomic_eth);
1519 ++ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
1520 ++ &ohdr->u.atomic_eth);
1521 + } else {
1522 + qp->s_state = OP(FETCH_ADD);
1523 + put_ib_ateth_swap(wqe->atomic_wr.compare_add,
1524 + &ohdr->u.atomic_eth);
1525 +- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
1526 ++ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
1527 + }
1528 + put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
1529 + &ohdr->u.atomic_eth);
1530 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
1531 +index 19841c863daf..59f37f412a7f 100644
1532 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
1533 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
1534 +@@ -848,6 +848,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
1535 + (queue_count(qp->sq.queue) > 1);
1536 +
1537 + rxe_run_task(&qp->req.task, must_sched);
1538 ++ if (unlikely(qp->req.state == QP_STATE_ERROR))
1539 ++ rxe_run_task(&qp->comp.task, 1);
1540 +
1541 + return err;
1542 + }
1543 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1544 +index c5522551122f..2ffe7db75acb 100644
1545 +--- a/drivers/md/dm.c
1546 ++++ b/drivers/md/dm.c
1547 +@@ -809,7 +809,8 @@ static void dec_pending(struct dm_io *io, int error)
1548 + } else {
1549 + /* done with normal IO or empty flush */
1550 + trace_block_bio_complete(md->queue, bio, io_error);
1551 +- bio->bi_error = io_error;
1552 ++ if (io_error)
1553 ++ bio->bi_error = io_error;
1554 + bio_endio(bio);
1555 + }
1556 + }
1557 +diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
1558 +index 08dca40356d2..006dac6e8940 100644
1559 +--- a/drivers/media/tuners/r820t.c
1560 ++++ b/drivers/media/tuners/r820t.c
1561 +@@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
1562 + return 0;
1563 + }
1564 +
1565 +-static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
1566 ++static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
1567 + {
1568 +- return r820t_write(priv, reg, &val, 1);
1569 ++ u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
1570 ++
1571 ++ return r820t_write(priv, reg, &tmp, 1);
1572 + }
1573 +
1574 + static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
1575 +@@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
1576 + return -EINVAL;
1577 + }
1578 +
1579 +-static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
1580 ++static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
1581 + u8 bit_mask)
1582 + {
1583 ++ u8 tmp = val;
1584 + int rc = r820t_read_cache_reg(priv, reg);
1585 +
1586 + if (rc < 0)
1587 + return rc;
1588 +
1589 +- val = (rc & ~bit_mask) | (val & bit_mask);
1590 ++ tmp = (rc & ~bit_mask) | (tmp & bit_mask);
1591 +
1592 +- return r820t_write(priv, reg, &val, 1);
1593 ++ return r820t_write(priv, reg, &tmp, 1);
1594 + }
1595 +
1596 + static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
1597 +diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
1598 +index 3ad514c44dcb..ddc629e3f63a 100644
1599 +--- a/drivers/mtd/nand/vf610_nfc.c
1600 ++++ b/drivers/mtd/nand/vf610_nfc.c
1601 +@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
1602 + if (mtd->oobsize > 64)
1603 + mtd->oobsize = 64;
1604 +
1605 +- /*
1606 +- * mtd->ecclayout is not specified here because we're using the
1607 +- * default large page ECC layout defined in NAND core.
1608 +- */
1609 ++ /* Use default large page ECC layout defined in NAND core */
1610 ++ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
1611 + if (chip->ecc.strength == 32) {
1612 + nfc->ecc_mode = ECC_60_BYTE;
1613 + chip->ecc.bytes = 60;
1614 +diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
1615 +index ed6fae964ec5..7e2ebfc565ee 100644
1616 +--- a/drivers/net/ethernet/marvell/mvpp2.c
1617 ++++ b/drivers/net/ethernet/marvell/mvpp2.c
1618 +@@ -5657,6 +5657,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
1619 + int id = port->id;
1620 + bool allmulti = dev->flags & IFF_ALLMULTI;
1621 +
1622 ++retry:
1623 + mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
1624 + mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
1625 + mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
1626 +@@ -5664,9 +5665,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
1627 + /* Remove all port->id's mcast enries */
1628 + mvpp2_prs_mcast_del_all(priv, id);
1629 +
1630 +- if (allmulti && !netdev_mc_empty(dev)) {
1631 +- netdev_for_each_mc_addr(ha, dev)
1632 +- mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
1633 ++ if (!allmulti) {
1634 ++ netdev_for_each_mc_addr(ha, dev) {
1635 ++ if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
1636 ++ allmulti = true;
1637 ++ goto retry;
1638 ++ }
1639 ++ }
1640 + }
1641 + }
1642 +
1643 +diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
1644 +index d1cd9c32a9ae..6143113a7fef 100644
1645 +--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
1646 ++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
1647 +@@ -286,6 +286,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
1648 + u64 in_param = 0;
1649 + int err;
1650 +
1651 ++ if (!cnt)
1652 ++ return;
1653 ++
1654 + if (mlx4_is_mfunc(dev)) {
1655 + set_param_l(&in_param, base_qpn);
1656 + set_param_h(&in_param, cnt);
1657 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
1658 +index 82d53895ce4d..0c3fe177fd14 100644
1659 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
1660 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
1661 +@@ -1128,7 +1128,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
1662 + }
1663 + if (0 == tmp) {
1664 + read_addr = REG_DBI_RDATA + addr % 4;
1665 +- ret = rtl_read_word(rtlpriv, read_addr);
1666 ++ ret = rtl_read_byte(rtlpriv, read_addr);
1667 + }
1668 + return ret;
1669 + }
1670 +@@ -1170,7 +1170,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
1671 + }
1672 +
1673 + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
1674 +- _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
1675 ++ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
1676 ++ ASPM_L1_LATENCY << 3);
1677 +
1678 + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
1679 + _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
1680 +diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
1681 +index dafe486f8448..340e7b324ef8 100644
1682 +--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
1683 ++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
1684 +@@ -99,6 +99,7 @@
1685 + #define RTL_USB_MAX_RX_COUNT 100
1686 + #define QBSS_LOAD_SIZE 5
1687 + #define MAX_WMMELE_LENGTH 64
1688 ++#define ASPM_L1_LATENCY 7
1689 +
1690 + #define TOTAL_CAM_ENTRY 32
1691 +
1692 +diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
1693 +index 043c19a05da1..eac0a1238e9d 100644
1694 +--- a/drivers/pci/host/pci-keystone.c
1695 ++++ b/drivers/pci/host/pci-keystone.c
1696 +@@ -181,7 +181,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
1697 + }
1698 +
1699 + /* interrupt controller is in a child node */
1700 +- *np_temp = of_find_node_by_name(np_pcie, controller);
1701 ++ *np_temp = of_get_child_by_name(np_pcie, controller);
1702 + if (!(*np_temp)) {
1703 + dev_err(dev, "Node for %s is absent\n", controller);
1704 + return -EINVAL;
1705 +@@ -190,6 +190,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
1706 + temp = of_irq_count(*np_temp);
1707 + if (!temp) {
1708 + dev_err(dev, "No IRQ entries in %s\n", controller);
1709 ++ of_node_put(*np_temp);
1710 + return -EINVAL;
1711 + }
1712 +
1713 +@@ -207,6 +208,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
1714 + break;
1715 + }
1716 +
1717 ++ of_node_put(*np_temp);
1718 ++
1719 + if (temp) {
1720 + *num_irqs = temp;
1721 + return 0;
1722 +diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
1723 +index ea20f627dabe..e4324dcf9508 100644
1724 +--- a/drivers/rtc/rtc-opal.c
1725 ++++ b/drivers/rtc/rtc-opal.c
1726 +@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
1727 + static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
1728 + {
1729 + long rc = OPAL_BUSY;
1730 ++ int retries = 10;
1731 + u32 y_m_d;
1732 + u64 h_m_s_ms;
1733 + __be32 __y_m_d;
1734 +@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
1735 + rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
1736 + if (rc == OPAL_BUSY_EVENT)
1737 + opal_poll_events(NULL);
1738 +- else
1739 ++ else if (retries-- && (rc == OPAL_HARDWARE
1740 ++ || rc == OPAL_INTERNAL_ERROR))
1741 + msleep(10);
1742 ++ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
1743 ++ break;
1744 + }
1745 +
1746 + if (rc != OPAL_SUCCESS)
1747 +@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
1748 + static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
1749 + {
1750 + long rc = OPAL_BUSY;
1751 ++ int retries = 10;
1752 + u32 y_m_d = 0;
1753 + u64 h_m_s_ms = 0;
1754 +
1755 +@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
1756 + rc = opal_rtc_write(y_m_d, h_m_s_ms);
1757 + if (rc == OPAL_BUSY_EVENT)
1758 + opal_poll_events(NULL);
1759 +- else
1760 ++ else if (retries-- && (rc == OPAL_HARDWARE
1761 ++ || rc == OPAL_INTERNAL_ERROR))
1762 + msleep(10);
1763 ++ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
1764 ++ break;
1765 + }
1766 +
1767 + return rc == OPAL_SUCCESS ? 0 : -EIO;
1768 +diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
1769 +index 0f42a225a664..e6b779930230 100644
1770 +--- a/drivers/scsi/smartpqi/Makefile
1771 ++++ b/drivers/scsi/smartpqi/Makefile
1772 +@@ -1,3 +1,3 @@
1773 + ccflags-y += -I.
1774 +-obj-m += smartpqi.o
1775 ++obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
1776 + smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
1777 +diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
1778 +index e116f0e845c0..98f75e5811c8 100644
1779 +--- a/drivers/target/iscsi/iscsi_target_auth.c
1780 ++++ b/drivers/target/iscsi/iscsi_target_auth.c
1781 +@@ -413,7 +413,8 @@ static int chap_server_compute_md5(
1782 + auth_ret = 0;
1783 + out:
1784 + kzfree(desc);
1785 +- crypto_free_shash(tfm);
1786 ++ if (tfm)
1787 ++ crypto_free_shash(tfm);
1788 + kfree(challenge);
1789 + kfree(challenge_binhex);
1790 + return auth_ret;
1791 +diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
1792 +index 644e978cbd3e..0103f777b97a 100644
1793 +--- a/drivers/usb/Kconfig
1794 ++++ b/drivers/usb/Kconfig
1795 +@@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO
1796 + config USB_EHCI_BIG_ENDIAN_DESC
1797 + bool
1798 +
1799 ++config USB_UHCI_BIG_ENDIAN_MMIO
1800 ++ bool
1801 ++ default y if SPARC_LEON
1802 ++
1803 ++config USB_UHCI_BIG_ENDIAN_DESC
1804 ++ bool
1805 ++ default y if SPARC_LEON
1806 ++
1807 + menuconfig USB_SUPPORT
1808 + bool "USB support"
1809 + depends on HAS_IOMEM
1810 +diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
1811 +index eb121b2a55d4..0e7cc71b34a9 100644
1812 +--- a/drivers/usb/host/Kconfig
1813 ++++ b/drivers/usb/host/Kconfig
1814 +@@ -628,14 +628,6 @@ config USB_UHCI_PLATFORM
1815 + bool
1816 + default y if ARCH_VT8500
1817 +
1818 +-config USB_UHCI_BIG_ENDIAN_MMIO
1819 +- bool
1820 +- default y if SPARC_LEON
1821 +-
1822 +-config USB_UHCI_BIG_ENDIAN_DESC
1823 +- bool
1824 +- default y if SPARC_LEON
1825 +-
1826 + config USB_FHCI_HCD
1827 + tristate "Freescale QE USB Host Controller support"
1828 + depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
1829 +diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
1830 +index 9269d5685239..b90ef96e43d6 100644
1831 +--- a/drivers/video/console/dummycon.c
1832 ++++ b/drivers/video/console/dummycon.c
1833 +@@ -67,7 +67,6 @@ const struct consw dummy_con = {
1834 + .con_switch = DUMMY,
1835 + .con_blank = DUMMY,
1836 + .con_font_set = DUMMY,
1837 +- .con_font_get = DUMMY,
1838 + .con_font_default = DUMMY,
1839 + .con_font_copy = DUMMY,
1840 + };
1841 +diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
1842 +index 669ecc755fa9..8f439fd58db6 100644
1843 +--- a/drivers/video/fbdev/atmel_lcdfb.c
1844 ++++ b/drivers/video/fbdev/atmel_lcdfb.c
1845 +@@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
1846 + goto put_display_node;
1847 + }
1848 +
1849 +- timings_np = of_find_node_by_name(display_np, "display-timings");
1850 ++ timings_np = of_get_child_by_name(display_np, "display-timings");
1851 + if (!timings_np) {
1852 + dev_err(dev, "failed to find display-timings node\n");
1853 + ret = -ENODEV;
1854 +@@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
1855 + fb_add_videomode(&fb_vm, &info->modelist);
1856 + }
1857 +
1858 ++ /*
1859 ++ * FIXME: Make sure we are not referencing any fields in display_np
1860 ++ * and timings_np and drop our references to them before returning to
1861 ++ * avoid leaking the nodes on probe deferral and driver unbind.
1862 ++ */
1863 ++
1864 + return 0;
1865 +
1866 + put_timings_node:
1867 +diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
1868 +index 6082f653c68a..67773e8bbb95 100644
1869 +--- a/drivers/video/fbdev/geode/video_gx.c
1870 ++++ b/drivers/video/fbdev/geode/video_gx.c
1871 +@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
1872 + int timeout = 1000;
1873 +
1874 + /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
1875 +- if (cpu_data(0).x86_mask == 1) {
1876 ++ if (cpu_data(0).x86_stepping == 1) {
1877 + pll_table = gx_pll_table_14MHz;
1878 + pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
1879 + } else {
1880 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1881 +index a8a1fb40e258..d196ce4be31c 100644
1882 +--- a/fs/btrfs/inode.c
1883 ++++ b/fs/btrfs/inode.c
1884 +@@ -1320,8 +1320,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1885 + leaf = path->nodes[0];
1886 + if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1887 + ret = btrfs_next_leaf(root, path);
1888 +- if (ret < 0)
1889 ++ if (ret < 0) {
1890 ++ if (cow_start != (u64)-1)
1891 ++ cur_offset = cow_start;
1892 + goto error;
1893 ++ }
1894 + if (ret > 0)
1895 + break;
1896 + leaf = path->nodes[0];
1897 +@@ -5226,7 +5229,7 @@ void btrfs_evict_inode(struct inode *inode)
1898 + trace_btrfs_inode_evict(inode);
1899 +
1900 + if (!root) {
1901 +- kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
1902 ++ clear_inode(inode);
1903 + return;
1904 + }
1905 +
1906 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1907 +index 309313b71617..5539f0b95efa 100644
1908 +--- a/fs/btrfs/tree-log.c
1909 ++++ b/fs/btrfs/tree-log.c
1910 +@@ -28,6 +28,7 @@
1911 + #include "hash.h"
1912 + #include "compression.h"
1913 + #include "qgroup.h"
1914 ++#include "inode-map.h"
1915 +
1916 + /* magic values for the inode_only field in btrfs_log_inode:
1917 + *
1918 +@@ -2463,6 +2464,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1919 + next);
1920 + btrfs_wait_tree_block_writeback(next);
1921 + btrfs_tree_unlock(next);
1922 ++ } else {
1923 ++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
1924 ++ clear_extent_buffer_dirty(next);
1925 + }
1926 +
1927 + WARN_ON(root_owner !=
1928 +@@ -2542,6 +2546,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1929 + next);
1930 + btrfs_wait_tree_block_writeback(next);
1931 + btrfs_tree_unlock(next);
1932 ++ } else {
1933 ++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
1934 ++ clear_extent_buffer_dirty(next);
1935 + }
1936 +
1937 + WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
1938 +@@ -2618,6 +2625,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1939 + clean_tree_block(trans, log->fs_info, next);
1940 + btrfs_wait_tree_block_writeback(next);
1941 + btrfs_tree_unlock(next);
1942 ++ } else {
1943 ++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
1944 ++ clear_extent_buffer_dirty(next);
1945 + }
1946 +
1947 + WARN_ON(log->root_key.objectid !=
1948 +@@ -3004,13 +3014,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
1949 +
1950 + while (1) {
1951 + ret = find_first_extent_bit(&log->dirty_log_pages,
1952 +- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
1953 ++ 0, &start, &end,
1954 ++ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
1955 + NULL);
1956 + if (ret)
1957 + break;
1958 +
1959 + clear_extent_bits(&log->dirty_log_pages, start, end,
1960 +- EXTENT_DIRTY | EXTENT_NEW);
1961 ++ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
1962 + }
1963 +
1964 + /*
1965 +@@ -5651,6 +5662,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
1966 + path);
1967 + }
1968 +
1969 ++ if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
1970 ++ struct btrfs_root *root = wc.replay_dest;
1971 ++
1972 ++ btrfs_release_path(path);
1973 ++
1974 ++ /*
1975 ++ * We have just replayed everything, and the highest
1976 ++ * objectid of fs roots probably has changed in case
1977 ++ * some inode_item's got replayed.
1978 ++ *
1979 ++ * root->objectid_mutex is not acquired as log replay
1980 ++ * could only happen during mount.
1981 ++ */
1982 ++ ret = btrfs_find_highest_objectid(root,
1983 ++ &root->highest_objectid);
1984 ++ }
1985 ++
1986 + key.offset = found_key.offset - 1;
1987 + wc.replay_dest->log_root = NULL;
1988 + free_extent_buffer(log->node);
1989 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1990 +index ec28e8ebb984..5cccec68a0a5 100644
1991 +--- a/fs/ext4/inode.c
1992 ++++ b/fs/ext4/inode.c
1993 +@@ -3526,10 +3526,18 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
1994 + /* Credits for sb + inode write */
1995 + handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
1996 + if (IS_ERR(handle)) {
1997 +- /* This is really bad luck. We've written the data
1998 +- * but cannot extend i_size. Bail out and pretend
1999 +- * the write failed... */
2000 +- ret = PTR_ERR(handle);
2001 ++ /*
2002 ++ * We wrote the data but cannot extend
2003 ++ * i_size. Bail out. In async io case, we do
2004 ++ * not return error here because we have
2005 ++ * already submmitted the corresponding
2006 ++ * bio. Returning error here makes the caller
2007 ++ * think that this IO is done and failed
2008 ++ * resulting in race with bio's completion
2009 ++ * handler.
2010 ++ */
2011 ++ if (!ret)
2012 ++ ret = PTR_ERR(handle);
2013 + if (inode->i_nlink)
2014 + ext4_orphan_del(NULL, inode);
2015 +
2016 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2017 +index 1f581791b39d..1ec4b6e34747 100644
2018 +--- a/fs/ext4/super.c
2019 ++++ b/fs/ext4/super.c
2020 +@@ -720,6 +720,7 @@ __acquires(bitlock)
2021 + }
2022 +
2023 + ext4_unlock_group(sb, grp);
2024 ++ ext4_commit_super(sb, 1);
2025 + ext4_handle_error(sb);
2026 + /*
2027 + * We only get here in the ERRORS_RO case; relocking the group
2028 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
2029 +index 5e659ee08d6a..4e5c6103b76c 100644
2030 +--- a/fs/jbd2/transaction.c
2031 ++++ b/fs/jbd2/transaction.c
2032 +@@ -488,8 +488,10 @@ void jbd2_journal_free_reserved(handle_t *handle)
2033 + EXPORT_SYMBOL(jbd2_journal_free_reserved);
2034 +
2035 + /**
2036 +- * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
2037 ++ * int jbd2_journal_start_reserved() - start reserved handle
2038 + * @handle: handle to start
2039 ++ * @type: for handle statistics
2040 ++ * @line_no: for handle statistics
2041 + *
2042 + * Start handle that has been previously reserved with jbd2_journal_reserve().
2043 + * This attaches @handle to the running transaction (or creates one if there's
2044 +@@ -619,6 +621,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
2045 + * int jbd2_journal_restart() - restart a handle .
2046 + * @handle: handle to restart
2047 + * @nblocks: nr credits requested
2048 ++ * @gfp_mask: memory allocation flags (for start_this_handle)
2049 + *
2050 + * Restart a handle for a multi-transaction filesystem
2051 + * operation.
2052 +diff --git a/fs/mbcache.c b/fs/mbcache.c
2053 +index c5bd19ffa326..27e6bf6f09c6 100644
2054 +--- a/fs/mbcache.c
2055 ++++ b/fs/mbcache.c
2056 +@@ -93,6 +93,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
2057 + entry->e_key = key;
2058 + entry->e_block = block;
2059 + entry->e_reusable = reusable;
2060 ++ entry->e_referenced = 0;
2061 + head = mb_cache_entry_head(cache, key);
2062 + hlist_bl_lock(head);
2063 + hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
2064 +diff --git a/fs/namei.c b/fs/namei.c
2065 +index e7d125c23aa6..6cfb45f262aa 100644
2066 +--- a/fs/namei.c
2067 ++++ b/fs/namei.c
2068 +@@ -2138,6 +2138,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
2069 + int retval = 0;
2070 + const char *s = nd->name->name;
2071 +
2072 ++ if (!*s)
2073 ++ flags &= ~LOOKUP_RCU;
2074 ++
2075 + nd->last_type = LAST_ROOT; /* if there are only slashes... */
2076 + nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
2077 + nd->depth = 0;
2078 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
2079 +index 8dce4099a6ca..785fcc29d85d 100644
2080 +--- a/fs/ocfs2/dlmglue.c
2081 ++++ b/fs/ocfs2/dlmglue.c
2082 +@@ -2485,6 +2485,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
2083 + ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2084 + if (ret == -EAGAIN) {
2085 + unlock_page(page);
2086 ++ /*
2087 ++ * If we can't get inode lock immediately, we should not return
2088 ++ * directly here, since this will lead to a softlockup problem.
2089 ++ * The method is to get a blocking lock and immediately unlock
2090 ++ * before returning, this can avoid CPU resource waste due to
2091 ++ * lots of retries, and benefits fairness in getting lock.
2092 ++ */
2093 ++ if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2094 ++ ocfs2_inode_unlock(inode, ex);
2095 + ret = AOP_TRUNCATED_PAGE;
2096 + }
2097 +
2098 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
2099 +index 928e5ca0caee..eb0ed31193a3 100644
2100 +--- a/include/linux/compiler-gcc.h
2101 ++++ b/include/linux/compiler-gcc.h
2102 +@@ -187,6 +187,10 @@
2103 + #endif /* __CHECKER__ */
2104 + #endif /* GCC_VERSION >= 40300 */
2105 +
2106 ++#if GCC_VERSION >= 40400
2107 ++#define __optimize(level) __attribute__((__optimize__(level)))
2108 ++#endif /* GCC_VERSION >= 40400 */
2109 ++
2110 + #if GCC_VERSION >= 40500
2111 +
2112 + #ifndef __CHECKER__
2113 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
2114 +index cf0fa5d86059..5ce911db7d88 100644
2115 +--- a/include/linux/compiler.h
2116 ++++ b/include/linux/compiler.h
2117 +@@ -469,6 +469,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
2118 + # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
2119 + #endif
2120 +
2121 ++#ifndef __optimize
2122 ++# define __optimize(level)
2123 ++#endif
2124 ++
2125 + /* Compile time object size, -1 for unknown */
2126 + #ifndef __compiletime_object_size
2127 + # define __compiletime_object_size(obj) -1
2128 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
2129 +index dfaa1f4dcb0c..d073470cb342 100644
2130 +--- a/include/linux/jbd2.h
2131 ++++ b/include/linux/jbd2.h
2132 +@@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
2133 + #define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
2134 +
2135 + /**
2136 +- * struct jbd_inode is the structure linking inodes in ordered mode
2137 +- * present in a transaction so that we can sync them during commit.
2138 ++ * struct jbd_inode - The jbd_inode type is the structure linking inodes in
2139 ++ * ordered mode present in a transaction so that we can sync them during commit.
2140 + */
2141 + struct jbd2_inode {
2142 +- /* Which transaction does this inode belong to? Either the running
2143 +- * transaction or the committing one. [j_list_lock] */
2144 ++ /**
2145 ++ * @i_transaction:
2146 ++ *
2147 ++ * Which transaction does this inode belong to? Either the running
2148 ++ * transaction or the committing one. [j_list_lock]
2149 ++ */
2150 + transaction_t *i_transaction;
2151 +
2152 +- /* Pointer to the running transaction modifying inode's data in case
2153 +- * there is already a committing transaction touching it. [j_list_lock] */
2154 ++ /**
2155 ++ * @i_next_transaction:
2156 ++ *
2157 ++ * Pointer to the running transaction modifying inode's data in case
2158 ++ * there is already a committing transaction touching it. [j_list_lock]
2159 ++ */
2160 + transaction_t *i_next_transaction;
2161 +
2162 +- /* List of inodes in the i_transaction [j_list_lock] */
2163 ++ /**
2164 ++ * @i_list: List of inodes in the i_transaction [j_list_lock]
2165 ++ */
2166 + struct list_head i_list;
2167 +
2168 +- /* VFS inode this inode belongs to [constant during the lifetime
2169 +- * of the structure] */
2170 ++ /**
2171 ++ * @i_vfs_inode:
2172 ++ *
2173 ++ * VFS inode this inode belongs to [constant for lifetime of structure]
2174 ++ */
2175 + struct inode *i_vfs_inode;
2176 +
2177 +- /* Flags of inode [j_list_lock] */
2178 ++ /**
2179 ++ * @i_flags: Flags of inode [j_list_lock]
2180 ++ */
2181 + unsigned long i_flags;
2182 + };
2183 +
2184 +@@ -447,12 +462,20 @@ struct jbd2_revoke_table_s;
2185 + * struct handle_s - The handle_s type is the concrete type associated with
2186 + * handle_t.
2187 + * @h_transaction: Which compound transaction is this update a part of?
2188 ++ * @h_journal: Which journal handle belongs to - used iff h_reserved set.
2189 ++ * @h_rsv_handle: Handle reserved for finishing the logical operation.
2190 + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
2191 +- * @h_ref: Reference count on this handle
2192 +- * @h_err: Field for caller's use to track errors through large fs operations
2193 +- * @h_sync: flag for sync-on-close
2194 +- * @h_jdata: flag to force data journaling
2195 +- * @h_aborted: flag indicating fatal error on handle
2196 ++ * @h_ref: Reference count on this handle.
2197 ++ * @h_err: Field for caller's use to track errors through large fs operations.
2198 ++ * @h_sync: Flag for sync-on-close.
2199 ++ * @h_jdata: Flag to force data journaling.
2200 ++ * @h_reserved: Flag for handle for reserved credits.
2201 ++ * @h_aborted: Flag indicating fatal error on handle.
2202 ++ * @h_type: For handle statistics.
2203 ++ * @h_line_no: For handle statistics.
2204 ++ * @h_start_jiffies: Handle Start time.
2205 ++ * @h_requested_credits: Holds @h_buffer_credits after handle is started.
2206 ++ * @saved_alloc_context: Saved context while transaction is open.
2207 + **/
2208 +
2209 + /* Docbook can't yet cope with the bit fields, but will leave the documentation
2210 +@@ -462,32 +485,23 @@ struct jbd2_revoke_table_s;
2211 + struct jbd2_journal_handle
2212 + {
2213 + union {
2214 +- /* Which compound transaction is this update a part of? */
2215 + transaction_t *h_transaction;
2216 + /* Which journal handle belongs to - used iff h_reserved set */
2217 + journal_t *h_journal;
2218 + };
2219 +
2220 +- /* Handle reserved for finishing the logical operation */
2221 + handle_t *h_rsv_handle;
2222 +-
2223 +- /* Number of remaining buffers we are allowed to dirty: */
2224 + int h_buffer_credits;
2225 +-
2226 +- /* Reference count on this handle */
2227 + int h_ref;
2228 +-
2229 +- /* Field for caller's use to track errors through large fs */
2230 +- /* operations */
2231 + int h_err;
2232 +
2233 + /* Flags [no locking] */
2234 +- unsigned int h_sync: 1; /* sync-on-close */
2235 +- unsigned int h_jdata: 1; /* force data journaling */
2236 +- unsigned int h_reserved: 1; /* handle with reserved credits */
2237 +- unsigned int h_aborted: 1; /* fatal error on handle */
2238 +- unsigned int h_type: 8; /* for handle statistics */
2239 +- unsigned int h_line_no: 16; /* for handle statistics */
2240 ++ unsigned int h_sync: 1;
2241 ++ unsigned int h_jdata: 1;
2242 ++ unsigned int h_reserved: 1;
2243 ++ unsigned int h_aborted: 1;
2244 ++ unsigned int h_type: 8;
2245 ++ unsigned int h_line_no: 16;
2246 +
2247 + unsigned long h_start_jiffies;
2248 + unsigned int h_requested_credits;
2249 +@@ -727,228 +741,253 @@ jbd2_time_diff(unsigned long start, unsigned long end)
2250 + /**
2251 + * struct journal_s - The journal_s type is the concrete type associated with
2252 + * journal_t.
2253 +- * @j_flags: General journaling state flags
2254 +- * @j_errno: Is there an outstanding uncleared error on the journal (from a
2255 +- * prior abort)?
2256 +- * @j_sb_buffer: First part of superblock buffer
2257 +- * @j_superblock: Second part of superblock buffer
2258 +- * @j_format_version: Version of the superblock format
2259 +- * @j_state_lock: Protect the various scalars in the journal
2260 +- * @j_barrier_count: Number of processes waiting to create a barrier lock
2261 +- * @j_barrier: The barrier lock itself
2262 +- * @j_running_transaction: The current running transaction..
2263 +- * @j_committing_transaction: the transaction we are pushing to disk
2264 +- * @j_checkpoint_transactions: a linked circular list of all transactions
2265 +- * waiting for checkpointing
2266 +- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
2267 +- * to start committing, or for a barrier lock to be released
2268 +- * @j_wait_done_commit: Wait queue for waiting for commit to complete
2269 +- * @j_wait_commit: Wait queue to trigger commit
2270 +- * @j_wait_updates: Wait queue to wait for updates to complete
2271 +- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
2272 +- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
2273 +- * @j_head: Journal head - identifies the first unused block in the journal
2274 +- * @j_tail: Journal tail - identifies the oldest still-used block in the
2275 +- * journal.
2276 +- * @j_free: Journal free - how many free blocks are there in the journal?
2277 +- * @j_first: The block number of the first usable block
2278 +- * @j_last: The block number one beyond the last usable block
2279 +- * @j_dev: Device where we store the journal
2280 +- * @j_blocksize: blocksize for the location where we store the journal.
2281 +- * @j_blk_offset: starting block offset for into the device where we store the
2282 +- * journal
2283 +- * @j_fs_dev: Device which holds the client fs. For internal journal this will
2284 +- * be equal to j_dev
2285 +- * @j_reserved_credits: Number of buffers reserved from the running transaction
2286 +- * @j_maxlen: Total maximum capacity of the journal region on disk.
2287 +- * @j_list_lock: Protects the buffer lists and internal buffer state.
2288 +- * @j_inode: Optional inode where we store the journal. If present, all journal
2289 +- * block numbers are mapped into this inode via bmap().
2290 +- * @j_tail_sequence: Sequence number of the oldest transaction in the log
2291 +- * @j_transaction_sequence: Sequence number of the next transaction to grant
2292 +- * @j_commit_sequence: Sequence number of the most recently committed
2293 +- * transaction
2294 +- * @j_commit_request: Sequence number of the most recent transaction wanting
2295 +- * commit
2296 +- * @j_uuid: Uuid of client object.
2297 +- * @j_task: Pointer to the current commit thread for this journal
2298 +- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
2299 +- * single compound commit transaction
2300 +- * @j_commit_interval: What is the maximum transaction lifetime before we begin
2301 +- * a commit?
2302 +- * @j_commit_timer: The timer used to wakeup the commit thread
2303 +- * @j_revoke_lock: Protect the revoke table
2304 +- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
2305 +- * current transaction.
2306 +- * @j_revoke_table: alternate revoke tables for j_revoke
2307 +- * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
2308 +- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
2309 +- * number that will fit in j_blocksize
2310 +- * @j_last_sync_writer: most recent pid which did a synchronous write
2311 +- * @j_history_lock: Protect the transactions statistics history
2312 +- * @j_proc_entry: procfs entry for the jbd statistics directory
2313 +- * @j_stats: Overall statistics
2314 +- * @j_private: An opaque pointer to fs-private information.
2315 +- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
2316 + */
2317 +-
2318 + struct journal_s
2319 + {
2320 +- /* General journaling state flags [j_state_lock] */
2321 ++ /**
2322 ++ * @j_flags: General journaling state flags [j_state_lock]
2323 ++ */
2324 + unsigned long j_flags;
2325 +
2326 +- /*
2327 ++ /**
2328 ++ * @j_errno:
2329 ++ *
2330 + * Is there an outstanding uncleared error on the journal (from a prior
2331 + * abort)? [j_state_lock]
2332 + */
2333 + int j_errno;
2334 +
2335 +- /* The superblock buffer */
2336 ++ /**
2337 ++ * @j_sb_buffer: The first part of the superblock buffer.
2338 ++ */
2339 + struct buffer_head *j_sb_buffer;
2340 ++
2341 ++ /**
2342 ++ * @j_superblock: The second part of the superblock buffer.
2343 ++ */
2344 + journal_superblock_t *j_superblock;
2345 +
2346 +- /* Version of the superblock format */
2347 ++ /**
2348 ++ * @j_format_version: Version of the superblock format.
2349 ++ */
2350 + int j_format_version;
2351 +
2352 +- /*
2353 +- * Protect the various scalars in the journal
2354 ++ /**
2355 ++ * @j_state_lock: Protect the various scalars in the journal.
2356 + */
2357 + rwlock_t j_state_lock;
2358 +
2359 +- /*
2360 ++ /**
2361 ++ * @j_barrier_count:
2362 ++ *
2363 + * Number of processes waiting to create a barrier lock [j_state_lock]
2364 + */
2365 + int j_barrier_count;
2366 +
2367 +- /* The barrier lock itself */
2368 ++ /**
2369 ++ * @j_barrier: The barrier lock itself.
2370 ++ */
2371 + struct mutex j_barrier;
2372 +
2373 +- /*
2374 ++ /**
2375 ++ * @j_running_transaction:
2376 ++ *
2377 + * Transactions: The current running transaction...
2378 + * [j_state_lock] [caller holding open handle]
2379 + */
2380 + transaction_t *j_running_transaction;
2381 +
2382 +- /*
2383 ++ /**
2384 ++ * @j_committing_transaction:
2385 ++ *
2386 + * the transaction we are pushing to disk
2387 + * [j_state_lock] [caller holding open handle]
2388 + */
2389 + transaction_t *j_committing_transaction;
2390 +
2391 +- /*
2392 ++ /**
2393 ++ * @j_checkpoint_transactions:
2394 ++ *
2395 + * ... and a linked circular list of all transactions waiting for
2396 + * checkpointing. [j_list_lock]
2397 + */
2398 + transaction_t *j_checkpoint_transactions;
2399 +
2400 +- /*
2401 ++ /**
2402 ++ * @j_wait_transaction_locked:
2403 ++ *
2404 + * Wait queue for waiting for a locked transaction to start committing,
2405 +- * or for a barrier lock to be released
2406 ++ * or for a barrier lock to be released.
2407 + */
2408 + wait_queue_head_t j_wait_transaction_locked;
2409 +
2410 +- /* Wait queue for waiting for commit to complete */
2411 ++ /**
2412 ++ * @j_wait_done_commit: Wait queue for waiting for commit to complete.
2413 ++ */
2414 + wait_queue_head_t j_wait_done_commit;
2415 +
2416 +- /* Wait queue to trigger commit */
2417 ++ /**
2418 ++ * @j_wait_commit: Wait queue to trigger commit.
2419 ++ */
2420 + wait_queue_head_t j_wait_commit;
2421 +
2422 +- /* Wait queue to wait for updates to complete */
2423 ++ /**
2424 ++ * @j_wait_updates: Wait queue to wait for updates to complete.
2425 ++ */
2426 + wait_queue_head_t j_wait_updates;
2427 +
2428 +- /* Wait queue to wait for reserved buffer credits to drop */
2429 ++ /**
2430 ++ * @j_wait_reserved:
2431 ++ *
2432 ++ * Wait queue to wait for reserved buffer credits to drop.
2433 ++ */
2434 + wait_queue_head_t j_wait_reserved;
2435 +
2436 +- /* Semaphore for locking against concurrent checkpoints */
2437 ++ /**
2438 ++ * @j_checkpoint_mutex:
2439 ++ *
2440 ++ * Semaphore for locking against concurrent checkpoints.
2441 ++ */
2442 + struct mutex j_checkpoint_mutex;
2443 +
2444 +- /*
2445 ++ /**
2446 ++ * @j_chkpt_bhs:
2447 ++ *
2448 + * List of buffer heads used by the checkpoint routine. This
2449 + * was moved from jbd2_log_do_checkpoint() to reduce stack
2450 + * usage. Access to this array is controlled by the
2451 +- * j_checkpoint_mutex. [j_checkpoint_mutex]
2452 ++ * @j_checkpoint_mutex. [j_checkpoint_mutex]
2453 + */
2454 + struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
2455 +-
2456 +- /*
2457 ++
2458 ++ /**
2459 ++ * @j_head:
2460 ++ *
2461 + * Journal head: identifies the first unused block in the journal.
2462 + * [j_state_lock]
2463 + */
2464 + unsigned long j_head;
2465 +
2466 +- /*
2467 ++ /**
2468 ++ * @j_tail:
2469 ++ *
2470 + * Journal tail: identifies the oldest still-used block in the journal.
2471 + * [j_state_lock]
2472 + */
2473 + unsigned long j_tail;
2474 +
2475 +- /*
2476 ++ /**
2477 ++ * @j_free:
2478 ++ *
2479 + * Journal free: how many free blocks are there in the journal?
2480 + * [j_state_lock]
2481 + */
2482 + unsigned long j_free;
2483 +
2484 +- /*
2485 +- * Journal start and end: the block numbers of the first usable block
2486 +- * and one beyond the last usable block in the journal. [j_state_lock]
2487 ++ /**
2488 ++ * @j_first:
2489 ++ *
2490 ++ * The block number of the first usable block in the journal
2491 ++ * [j_state_lock].
2492 + */
2493 + unsigned long j_first;
2494 ++
2495 ++ /**
2496 ++ * @j_last:
2497 ++ *
2498 ++ * The block number one beyond the last usable block in the journal
2499 ++ * [j_state_lock].
2500 ++ */
2501 + unsigned long j_last;
2502 +
2503 +- /*
2504 +- * Device, blocksize and starting block offset for the location where we
2505 +- * store the journal.
2506 ++ /**
2507 ++ * @j_dev: Device where we store the journal.
2508 + */
2509 + struct block_device *j_dev;
2510 ++
2511 ++ /**
2512 ++ * @j_blocksize: Block size for the location where we store the journal.
2513 ++ */
2514 + int j_blocksize;
2515 ++
2516 ++ /**
2517 ++ * @j_blk_offset:
2518 ++ *
2519 ++ * Starting block offset into the device where we store the journal.
2520 ++ */
2521 + unsigned long long j_blk_offset;
2522 ++
2523 ++ /**
2524 ++ * @j_devname: Journal device name.
2525 ++ */
2526 + char j_devname[BDEVNAME_SIZE+24];
2527 +
2528 +- /*
2529 ++ /**
2530 ++ * @j_fs_dev:
2531 ++ *
2532 + * Device which holds the client fs. For internal journal this will be
2533 + * equal to j_dev.
2534 + */
2535 + struct block_device *j_fs_dev;
2536 +
2537 +- /* Total maximum capacity of the journal region on disk. */
2538 ++ /**
2539 ++ * @j_maxlen: Total maximum capacity of the journal region on disk.
2540 ++ */
2541 + unsigned int j_maxlen;
2542 +
2543 +- /* Number of buffers reserved from the running transaction */
2544 ++ /**
2545 ++ * @j_reserved_credits:
2546 ++ *
2547 ++ * Number of buffers reserved from the running transaction.
2548 ++ */
2549 + atomic_t j_reserved_credits;
2550 +
2551 +- /*
2552 +- * Protects the buffer lists and internal buffer state.
2553 ++ /**
2554 ++ * @j_list_lock: Protects the buffer lists and internal buffer state.
2555 + */
2556 + spinlock_t j_list_lock;
2557 +
2558 +- /* Optional inode where we store the journal. If present, all */
2559 +- /* journal block numbers are mapped into this inode via */
2560 +- /* bmap(). */
2561 ++ /**
2562 ++ * @j_inode:
2563 ++ *
2564 ++ * Optional inode where we store the journal. If present, all
2565 ++ * journal block numbers are mapped into this inode via bmap().
2566 ++ */
2567 + struct inode *j_inode;
2568 +
2569 +- /*
2570 ++ /**
2571 ++ * @j_tail_sequence:
2572 ++ *
2573 + * Sequence number of the oldest transaction in the log [j_state_lock]
2574 + */
2575 + tid_t j_tail_sequence;
2576 +
2577 +- /*
2578 ++ /**
2579 ++ * @j_transaction_sequence:
2580 ++ *
2581 + * Sequence number of the next transaction to grant [j_state_lock]
2582 + */
2583 + tid_t j_transaction_sequence;
2584 +
2585 +- /*
2586 ++ /**
2587 ++ * @j_commit_sequence:
2588 ++ *
2589 + * Sequence number of the most recently committed transaction
2590 + * [j_state_lock].
2591 + */
2592 + tid_t j_commit_sequence;
2593 +
2594 +- /*
2595 ++ /**
2596 ++ * @j_commit_request:
2597 ++ *
2598 + * Sequence number of the most recent transaction wanting commit
2599 + * [j_state_lock]
2600 + */
2601 + tid_t j_commit_request;
2602 +
2603 +- /*
2604 ++ /**
2605 ++ * @j_uuid:
2606 ++ *
2607 + * Journal uuid: identifies the object (filesystem, LVM volume etc)
2608 + * backed by this journal. This will eventually be replaced by an array
2609 + * of uuids, allowing us to index multiple devices within a single
2610 +@@ -956,85 +995,151 @@ struct journal_s
2611 + */
2612 + __u8 j_uuid[16];
2613 +
2614 +- /* Pointer to the current commit thread for this journal */
2615 ++ /**
2616 ++ * @j_task: Pointer to the current commit thread for this journal.
2617 ++ */
2618 + struct task_struct *j_task;
2619 +
2620 +- /*
2621 ++ /**
2622 ++ * @j_max_transaction_buffers:
2623 ++ *
2624 + * Maximum number of metadata buffers to allow in a single compound
2625 +- * commit transaction
2626 ++ * commit transaction.
2627 + */
2628 + int j_max_transaction_buffers;
2629 +
2630 +- /*
2631 ++ /**
2632 ++ * @j_commit_interval:
2633 ++ *
2634 + * What is the maximum transaction lifetime before we begin a commit?
2635 + */
2636 + unsigned long j_commit_interval;
2637 +
2638 +- /* The timer used to wakeup the commit thread: */
2639 ++ /**
2640 ++ * @j_commit_timer: The timer used to wakeup the commit thread.
2641 ++ */
2642 + struct timer_list j_commit_timer;
2643 +
2644 +- /*
2645 +- * The revoke table: maintains the list of revoked blocks in the
2646 +- * current transaction. [j_revoke_lock]
2647 ++ /**
2648 ++ * @j_revoke_lock: Protect the revoke table.
2649 + */
2650 + spinlock_t j_revoke_lock;
2651 ++
2652 ++ /**
2653 ++ * @j_revoke:
2654 ++ *
2655 ++ * The revoke table - maintains the list of revoked blocks in the
2656 ++ * current transaction.
2657 ++ */
2658 + struct jbd2_revoke_table_s *j_revoke;
2659 ++
2660 ++ /**
2661 ++ * @j_revoke_table: Alternate revoke tables for j_revoke.
2662 ++ */
2663 + struct jbd2_revoke_table_s *j_revoke_table[2];
2664 +
2665 +- /*
2666 +- * array of bhs for jbd2_journal_commit_transaction
2667 ++ /**
2668 ++ * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
2669 + */
2670 + struct buffer_head **j_wbuf;
2671 ++
2672 ++ /**
2673 ++ * @j_wbufsize:
2674 ++ *
2675 ++ * Size of @j_wbuf array.
2676 ++ */
2677 + int j_wbufsize;
2678 +
2679 +- /*
2680 +- * this is the pid of hte last person to run a synchronous operation
2681 +- * through the journal
2682 ++ /**
2683 ++ * @j_last_sync_writer:
2684 ++ *
2685 ++ * The pid of the last person to run a synchronous operation
2686 ++ * through the journal.
2687 + */
2688 + pid_t j_last_sync_writer;
2689 +
2690 +- /*
2691 +- * the average amount of time in nanoseconds it takes to commit a
2692 ++ /**
2693 ++ * @j_average_commit_time:
2694 ++ *
2695 ++ * The average amount of time in nanoseconds it takes to commit a
2696 + * transaction to disk. [j_state_lock]
2697 + */
2698 + u64 j_average_commit_time;
2699 +
2700 +- /*
2701 +- * minimum and maximum times that we should wait for
2702 +- * additional filesystem operations to get batched into a
2703 +- * synchronous handle in microseconds
2704 ++ /**
2705 ++ * @j_min_batch_time:
2706 ++ *
2707 ++ * Minimum time that we should wait for additional filesystem operations
2708 ++ * to get batched into a synchronous handle in microseconds.
2709 + */
2710 + u32 j_min_batch_time;
2711 ++
2712 ++ /**
2713 ++ * @j_max_batch_time:
2714 ++ *
2715 ++ * Maximum time that we should wait for additional filesystem operations
2716 ++ * to get batched into a synchronous handle in microseconds.
2717 ++ */
2718 + u32 j_max_batch_time;
2719 +
2720 +- /* This function is called when a transaction is closed */
2721 ++ /**
2722 ++ * @j_commit_callback:
2723 ++ *
2724 ++ * This function is called when a transaction is closed.
2725 ++ */
2726 + void (*j_commit_callback)(journal_t *,
2727 + transaction_t *);
2728 +
2729 + /*
2730 + * Journal statistics
2731 + */
2732 ++
2733 ++ /**
2734 ++ * @j_history_lock: Protect the transactions statistics history.
2735 ++ */
2736 + spinlock_t j_history_lock;
2737 ++
2738 ++ /**
2739 ++ * @j_proc_entry: procfs entry for the jbd statistics directory.
2740 ++ */
2741 + struct proc_dir_entry *j_proc_entry;
2742 ++
2743 ++ /**
2744 ++ * @j_stats: Overall statistics.
2745 ++ */
2746 + struct transaction_stats_s j_stats;
2747 +
2748 +- /* Failed journal commit ID */
2749 ++ /**
2750 ++ * @j_failed_commit: Failed journal commit ID.
2751 ++ */
2752 + unsigned int j_failed_commit;
2753 +
2754 +- /*
2755 ++ /**
2756 ++ * @j_private:
2757 ++ *
2758 + * An opaque pointer to fs-private information. ext3 puts its
2759 +- * superblock pointer here
2760 ++ * superblock pointer here.
2761 + */
2762 + void *j_private;
2763 +
2764 +- /* Reference to checksum algorithm driver via cryptoapi */
2765 ++ /**
2766 ++ * @j_chksum_driver:
2767 ++ *
2768 ++ * Reference to checksum algorithm driver via cryptoapi.
2769 ++ */
2770 + struct crypto_shash *j_chksum_driver;
2771 +
2772 +- /* Precomputed journal UUID checksum for seeding other checksums */
2773 ++ /**
2774 ++ * @j_csum_seed:
2775 ++ *
2776 ++ * Precomputed journal UUID checksum for seeding other checksums.
2777 ++ */
2778 + __u32 j_csum_seed;
2779 +
2780 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
2781 +- /*
2782 ++ /**
2783 ++ * @j_trans_commit_map:
2784 ++ *
2785 + * Lockdep entity to track transaction commit dependencies. Handles
2786 + * hold this "lock" for read, when we wait for commit, we acquire the
2787 + * "lock" for writing. This matches the properties of jbd2 journalling
2788 +diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
2789 +index 58c55b1589d0..b56c19010480 100644
2790 +--- a/include/linux/kaiser.h
2791 ++++ b/include/linux/kaiser.h
2792 +@@ -32,7 +32,7 @@ static inline void kaiser_init(void)
2793 + {
2794 + }
2795 + static inline int kaiser_add_mapping(unsigned long addr,
2796 +- unsigned long size, unsigned long flags)
2797 ++ unsigned long size, u64 flags)
2798 + {
2799 + return 0;
2800 + }
2801 +diff --git a/include/linux/nospec.h b/include/linux/nospec.h
2802 +index b99bced39ac2..fbc98e2c8228 100644
2803 +--- a/include/linux/nospec.h
2804 ++++ b/include/linux/nospec.h
2805 +@@ -19,20 +19,6 @@
2806 + static inline unsigned long array_index_mask_nospec(unsigned long index,
2807 + unsigned long size)
2808 + {
2809 +- /*
2810 +- * Warn developers about inappropriate array_index_nospec() usage.
2811 +- *
2812 +- * Even if the CPU speculates past the WARN_ONCE branch, the
2813 +- * sign bit of @index is taken into account when generating the
2814 +- * mask.
2815 +- *
2816 +- * This warning is compiled out when the compiler can infer that
2817 +- * @index and @size are less than LONG_MAX.
2818 +- */
2819 +- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
2820 +- "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
2821 +- return 0;
2822 +-
2823 + /*
2824 + * Always calculate and emit the mask even if the compiler
2825 + * thinks the mask is not needed. The compiler does not take
2826 +@@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
2827 + }
2828 + #endif
2829 +
2830 ++/*
2831 ++ * Warn developers about inappropriate array_index_nospec() usage.
2832 ++ *
2833 ++ * Even if the CPU speculates past the WARN_ONCE branch, the
2834 ++ * sign bit of @index is taken into account when generating the
2835 ++ * mask.
2836 ++ *
2837 ++ * This warning is compiled out when the compiler can infer that
2838 ++ * @index and @size are less than LONG_MAX.
2839 ++ */
2840 ++#define array_index_mask_nospec_check(index, size) \
2841 ++({ \
2842 ++ if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
2843 ++ "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
2844 ++ _mask = 0; \
2845 ++ else \
2846 ++ _mask = array_index_mask_nospec(index, size); \
2847 ++ _mask; \
2848 ++})
2849 ++
2850 + /*
2851 + * array_index_nospec - sanitize an array index after a bounds check
2852 + *
2853 +@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
2854 + ({ \
2855 + typeof(index) _i = (index); \
2856 + typeof(size) _s = (size); \
2857 +- unsigned long _mask = array_index_mask_nospec(_i, _s); \
2858 ++ unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
2859 + \
2860 + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
2861 + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
2862 +diff --git a/kernel/memremap.c b/kernel/memremap.c
2863 +index 06123234f118..426547a21a0c 100644
2864 +--- a/kernel/memremap.c
2865 ++++ b/kernel/memremap.c
2866 +@@ -245,7 +245,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
2867 +
2868 + /* pages are dead and unused, undo the arch mapping */
2869 + align_start = res->start & ~(SECTION_SIZE - 1);
2870 +- align_size = ALIGN(resource_size(res), SECTION_SIZE);
2871 ++ align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
2872 ++ - align_start;
2873 +
2874 + lock_device_hotplug();
2875 + mem_hotplug_begin();
2876 +diff --git a/mm/memory.c b/mm/memory.c
2877 +index 1aa63e7dd790..e2e68767a373 100644
2878 +--- a/mm/memory.c
2879 ++++ b/mm/memory.c
2880 +@@ -75,7 +75,7 @@
2881 +
2882 + #include "internal.h"
2883 +
2884 +-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
2885 ++#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
2886 + #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
2887 + #endif
2888 +
2889 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
2890 +index f3a4efcf1456..3aa5a93ad107 100644
2891 +--- a/net/9p/trans_virtio.c
2892 ++++ b/net/9p/trans_virtio.c
2893 +@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
2894 + spin_unlock_irqrestore(&chan->lock, flags);
2895 + /* Wakeup if anyone waiting for VirtIO ring space. */
2896 + wake_up(chan->vc_wq);
2897 +- p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
2898 ++ if (len)
2899 ++ p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
2900 + }
2901 + }
2902 +
2903 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2904 +index 16580a82e1c8..0b408617b2c9 100644
2905 +--- a/sound/core/seq/seq_clientmgr.c
2906 ++++ b/sound/core/seq/seq_clientmgr.c
2907 +@@ -999,7 +999,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
2908 + {
2909 + struct snd_seq_client *client = file->private_data;
2910 + int written = 0, len;
2911 +- int err = -EINVAL;
2912 ++ int err;
2913 + struct snd_seq_event event;
2914 +
2915 + if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
2916 +@@ -1014,11 +1014,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
2917 +
2918 + /* allocate the pool now if the pool is not allocated yet */
2919 + if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
2920 +- if (snd_seq_pool_init(client->pool) < 0)
2921 ++ mutex_lock(&client->ioctl_mutex);
2922 ++ err = snd_seq_pool_init(client->pool);
2923 ++ mutex_unlock(&client->ioctl_mutex);
2924 ++ if (err < 0)
2925 + return -ENOMEM;
2926 + }
2927 +
2928 + /* only process whole events */
2929 ++ err = -EINVAL;
2930 + while (count >= sizeof(struct snd_seq_event)) {
2931 + /* Read in the event header from the user */
2932 + len = sizeof(event);
2933 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2934 +index 71a058fcf884..89c166b97e81 100644
2935 +--- a/sound/pci/hda/patch_realtek.c
2936 ++++ b/sound/pci/hda/patch_realtek.c
2937 +@@ -3130,6 +3130,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
2938 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2939 + }
2940 +
2941 ++static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
2942 ++ const struct hda_fixup *fix,
2943 ++ int action)
2944 ++{
2945 ++ unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
2946 ++ unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
2947 ++
2948 ++ if (cfg_headphone && cfg_headset_mic == 0x411111f0)
2949 ++ snd_hda_codec_set_pincfg(codec, 0x19,
2950 ++ (cfg_headphone & ~AC_DEFCFG_DEVICE) |
2951 ++ (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
2952 ++}
2953 ++
2954 + static void alc269_fixup_hweq(struct hda_codec *codec,
2955 + const struct hda_fixup *fix, int action)
2956 + {
2957 +@@ -4455,6 +4468,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
2958 + }
2959 + }
2960 +
2961 ++static void alc_fixup_tpt470_dock(struct hda_codec *codec,
2962 ++ const struct hda_fixup *fix, int action)
2963 ++{
2964 ++ static const struct hda_pintbl pincfgs[] = {
2965 ++ { 0x17, 0x21211010 }, /* dock headphone */
2966 ++ { 0x19, 0x21a11010 }, /* dock mic */
2967 ++ { }
2968 ++ };
2969 ++ struct alc_spec *spec = codec->spec;
2970 ++
2971 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2972 ++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2973 ++ /* Enable DOCK device */
2974 ++ snd_hda_codec_write(codec, 0x17, 0,
2975 ++ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
2976 ++ /* Enable DOCK device */
2977 ++ snd_hda_codec_write(codec, 0x19, 0,
2978 ++ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
2979 ++ snd_hda_apply_pincfgs(codec, pincfgs);
2980 ++ }
2981 ++}
2982 ++
2983 + static void alc_shutup_dell_xps13(struct hda_codec *codec)
2984 + {
2985 + struct alc_spec *spec = codec->spec;
2986 +@@ -4797,6 +4832,7 @@ enum {
2987 + ALC269_FIXUP_LIFEBOOK_EXTMIC,
2988 + ALC269_FIXUP_LIFEBOOK_HP_PIN,
2989 + ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
2990 ++ ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
2991 + ALC269_FIXUP_AMIC,
2992 + ALC269_FIXUP_DMIC,
2993 + ALC269VB_FIXUP_AMIC,
2994 +@@ -4877,6 +4913,7 @@ enum {
2995 + ALC292_FIXUP_TPT460,
2996 + ALC298_FIXUP_SPK_VOLUME,
2997 + ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
2998 ++ ALC298_FIXUP_TPT470_DOCK,
2999 + };
3000 +
3001 + static const struct hda_fixup alc269_fixups[] = {
3002 +@@ -4987,6 +5024,10 @@ static const struct hda_fixup alc269_fixups[] = {
3003 + .type = HDA_FIXUP_FUNC,
3004 + .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
3005 + },
3006 ++ [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
3007 ++ .type = HDA_FIXUP_FUNC,
3008 ++ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
3009 ++ },
3010 + [ALC269_FIXUP_AMIC] = {
3011 + .type = HDA_FIXUP_PINS,
3012 + .v.pins = (const struct hda_pintbl[]) {
3013 +@@ -5568,6 +5609,12 @@ static const struct hda_fixup alc269_fixups[] = {
3014 + .chained = true,
3015 + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
3016 + },
3017 ++ [ALC298_FIXUP_TPT470_DOCK] = {
3018 ++ .type = HDA_FIXUP_FUNC,
3019 ++ .v.func = alc_fixup_tpt470_dock,
3020 ++ .chained = true,
3021 ++ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
3022 ++ },
3023 + };
3024 +
3025 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3026 +@@ -5704,6 +5751,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3027 + SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
3028 + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
3029 + SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
3030 ++ SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
3031 + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
3032 + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
3033 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
3034 +@@ -5729,8 +5777,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3035 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
3036 + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
3037 + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
3038 ++ SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3039 ++ SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3040 + SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
3041 + SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
3042 ++ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
3043 ++ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3044 ++ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3045 ++ SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3046 ++ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3047 ++ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3048 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3049 + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3050 + SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3051 +@@ -5749,7 +5805,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3052 + SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
3053 + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
3054 + SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
3055 ++ SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3056 ++ SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3057 ++ SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3058 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3059 ++ SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3060 ++ SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3061 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
3062 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
3063 + SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
3064 +@@ -5993,6 +6054,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3065 + {0x12, 0xb7a60130},
3066 + {0x14, 0x90170110},
3067 + {0x21, 0x02211020}),
3068 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
3069 ++ {0x12, 0x90a60130},
3070 ++ {0x14, 0x90170110},
3071 ++ {0x14, 0x01011020},
3072 ++ {0x21, 0x0221101f}),
3073 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
3074 + ALC256_STANDARD_PINS),
3075 + SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
3076 +@@ -6049,6 +6115,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3077 + {0x12, 0x90a60120},
3078 + {0x14, 0x90170110},
3079 + {0x21, 0x0321101f}),
3080 ++ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
3081 ++ {0x12, 0xb7a60130},
3082 ++ {0x14, 0x90170110},
3083 ++ {0x21, 0x04211020}),
3084 + SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
3085 + ALC290_STANDARD_PINS,
3086 + {0x15, 0x04211040},
3087 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3088 +index 08015c139116..dedf8eb4570e 100644
3089 +--- a/sound/usb/mixer.c
3090 ++++ b/sound/usb/mixer.c
3091 +@@ -344,17 +344,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
3092 + int validx, int *value_ret)
3093 + {
3094 + struct snd_usb_audio *chip = cval->head.mixer->chip;
3095 +- unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
3096 ++ /* enough space for one range */
3097 ++ unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
3098 + unsigned char *val;
3099 +- int idx = 0, ret, size;
3100 ++ int idx = 0, ret, val_size, size;
3101 + __u8 bRequest;
3102 +
3103 ++ val_size = uac2_ctl_value_size(cval->val_type);
3104 ++
3105 + if (request == UAC_GET_CUR) {
3106 + bRequest = UAC2_CS_CUR;
3107 +- size = uac2_ctl_value_size(cval->val_type);
3108 ++ size = val_size;
3109 + } else {
3110 + bRequest = UAC2_CS_RANGE;
3111 +- size = sizeof(buf);
3112 ++ size = sizeof(__u16) + 3 * val_size;
3113 + }
3114 +
3115 + memset(buf, 0, sizeof(buf));
3116 +@@ -387,16 +390,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
3117 + val = buf + sizeof(__u16);
3118 + break;
3119 + case UAC_GET_MAX:
3120 +- val = buf + sizeof(__u16) * 2;
3121 ++ val = buf + sizeof(__u16) + val_size;
3122 + break;
3123 + case UAC_GET_RES:
3124 +- val = buf + sizeof(__u16) * 3;
3125 ++ val = buf + sizeof(__u16) + val_size * 2;
3126 + break;
3127 + default:
3128 + return -EINVAL;
3129 + }
3130 +
3131 +- *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
3132 ++ *value_ret = convert_signed_value(cval,
3133 ++ snd_usb_combine_bytes(val, val_size));
3134 +
3135 + return 0;
3136 + }
3137 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
3138 +index cf8459a6fad8..c5dfe82beb24 100644
3139 +--- a/sound/usb/pcm.c
3140 ++++ b/sound/usb/pcm.c
3141 +@@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
3142 + ep = 0x86;
3143 + iface = usb_ifnum_to_if(dev, 2);
3144 +
3145 ++ if (!iface || iface->num_altsetting == 0)
3146 ++ return -EINVAL;
3147 ++
3148 ++ alts = &iface->altsetting[1];
3149 ++ goto add_sync_ep;
3150 ++ case USB_ID(0x1397, 0x0002):
3151 ++ ep = 0x81;
3152 ++ iface = usb_ifnum_to_if(dev, 1);
3153 ++
3154 + if (!iface || iface->num_altsetting == 0)
3155 + return -EINVAL;
3156 +
3157 +diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
3158 +index 6d1437f895b8..298f69e2834c 100644
3159 +--- a/tools/testing/selftests/vm/compaction_test.c
3160 ++++ b/tools/testing/selftests/vm/compaction_test.c
3161 +@@ -136,6 +136,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
3162 + printf("No of huge pages allocated = %d\n",
3163 + (atoi(nr_hugepages)));
3164 +
3165 ++ lseek(fd, 0, SEEK_SET);
3166 ++
3167 + if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
3168 + != strlen(initial_nr_hugepages)) {
3169 + perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
3170 +diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
3171 +index 4af37bfe4aea..6eb50152baf0 100644
3172 +--- a/tools/testing/selftests/x86/Makefile
3173 ++++ b/tools/testing/selftests/x86/Makefile
3174 +@@ -26,11 +26,13 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
3175 + ifeq ($(CAN_BUILD_I386),1)
3176 + all: all_32
3177 + TEST_PROGS += $(BINARIES_32)
3178 ++EXTRA_CFLAGS += -DCAN_BUILD_32
3179 + endif
3180 +
3181 + ifeq ($(CAN_BUILD_X86_64),1)
3182 + all: all_64
3183 + TEST_PROGS += $(BINARIES_64)
3184 ++EXTRA_CFLAGS += -DCAN_BUILD_64
3185 + endif
3186 +
3187 + all_32: $(BINARIES_32)
3188 +diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
3189 +index 616ee9673339..79e1d13d1cda 100644
3190 +--- a/tools/testing/selftests/x86/mpx-mini-test.c
3191 ++++ b/tools/testing/selftests/x86/mpx-mini-test.c
3192 +@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
3193 + return si->si_upper;
3194 + }
3195 + #else
3196 ++
3197 ++/*
3198 ++ * This deals with old version of _sigfault in some distros:
3199 ++ *
3200 ++
3201 ++old _sigfault:
3202 ++ struct {
3203 ++ void *si_addr;
3204 ++ } _sigfault;
3205 ++
3206 ++new _sigfault:
3207 ++ struct {
3208 ++ void __user *_addr;
3209 ++ int _trapno;
3210 ++ short _addr_lsb;
3211 ++ union {
3212 ++ struct {
3213 ++ void __user *_lower;
3214 ++ void __user *_upper;
3215 ++ } _addr_bnd;
3216 ++ __u32 _pkey;
3217 ++ };
3218 ++ } _sigfault;
3219 ++ *
3220 ++ */
3221 ++
3222 + static inline void **__si_bounds_hack(siginfo_t *si)
3223 + {
3224 + void *sigfault = &si->_sifields._sigfault;
3225 + void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
3226 +- void **__si_lower = end_sigfault;
3227 ++ int *trapno = (int*)end_sigfault;
3228 ++ /* skip _trapno and _addr_lsb */
3229 ++ void **__si_lower = (void**)(trapno + 2);
3230 +
3231 + return __si_lower;
3232 + }
3233 +@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
3234 +
3235 + static inline void *__si_bounds_upper(siginfo_t *si)
3236 + {
3237 +- return (*__si_bounds_hack(si)) + sizeof(void *);
3238 ++ return *(__si_bounds_hack(si) + 1);
3239 + }
3240 + #endif
3241 +
3242 +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
3243 +index bdd58c78902e..2842a5fa22b3 100644
3244 +--- a/tools/testing/selftests/x86/protection_keys.c
3245 ++++ b/tools/testing/selftests/x86/protection_keys.c
3246 +@@ -381,34 +381,6 @@ pid_t fork_lazy_child(void)
3247 + return forkret;
3248 + }
3249 +
3250 +-void davecmp(void *_a, void *_b, int len)
3251 +-{
3252 +- int i;
3253 +- unsigned long *a = _a;
3254 +- unsigned long *b = _b;
3255 +-
3256 +- for (i = 0; i < len / sizeof(*a); i++) {
3257 +- if (a[i] == b[i])
3258 +- continue;
3259 +-
3260 +- dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
3261 +- }
3262 +-}
3263 +-
3264 +-void dumpit(char *f)
3265 +-{
3266 +- int fd = open(f, O_RDONLY);
3267 +- char buf[100];
3268 +- int nr_read;
3269 +-
3270 +- dprintf2("maps fd: %d\n", fd);
3271 +- do {
3272 +- nr_read = read(fd, &buf[0], sizeof(buf));
3273 +- write(1, buf, nr_read);
3274 +- } while (nr_read > 0);
3275 +- close(fd);
3276 +-}
3277 +-
3278 + #define PKEY_DISABLE_ACCESS 0x1
3279 + #define PKEY_DISABLE_WRITE 0x2
3280 +
3281 +diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
3282 +index a48da95c18fd..ddfdd635de16 100644
3283 +--- a/tools/testing/selftests/x86/single_step_syscall.c
3284 ++++ b/tools/testing/selftests/x86/single_step_syscall.c
3285 +@@ -119,7 +119,9 @@ static void check_result(void)
3286 +
3287 + int main()
3288 + {
3289 ++#ifdef CAN_BUILD_32
3290 + int tmp;
3291 ++#endif
3292 +
3293 + sethandler(SIGTRAP, sigtrap, 0);
3294 +
3295 +@@ -139,12 +141,13 @@ int main()
3296 + : : "c" (post_nop) : "r11");
3297 + check_result();
3298 + #endif
3299 +-
3300 ++#ifdef CAN_BUILD_32
3301 + printf("[RUN]\tSet TF and check int80\n");
3302 + set_eflags(get_eflags() | X86_EFLAGS_TF);
3303 + asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
3304 + : INT80_CLOBBERS);
3305 + check_result();
3306 ++#endif
3307 +
3308 + /*
3309 + * This test is particularly interesting if fast syscalls use
3310 +diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
3311 +index bf0d687c7db7..64f11c8d9b76 100644
3312 +--- a/tools/testing/selftests/x86/test_mremap_vdso.c
3313 ++++ b/tools/testing/selftests/x86/test_mremap_vdso.c
3314 +@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
3315 + vdso_size += PAGE_SIZE;
3316 + }
3317 +
3318 ++#ifdef __i386__
3319 + /* Glibc is likely to explode now - exit with raw syscall */
3320 + asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
3321 ++#else /* __x86_64__ */
3322 ++ syscall(SYS_exit, ret);
3323 ++#endif
3324 + } else {
3325 + int status;
3326 +