Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Thu, 22 Feb 2018 23:24:44
Message-Id: 1519341871.0efeda7197763f237f87dab43a52676839e87f2d.mpagano@gentoo
1 commit: 0efeda7197763f237f87dab43a52676839e87f2d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Feb 22 23:24:31 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Feb 22 23:24:31 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0efeda71
7
8 Linux patch 4.15.5
9
10 0000_README | 4 +
11 1004_linux-4.15.5.patch | 6693 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 6697 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index ffe8729..f22a6fe 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -59,6 +59,10 @@ Patch: 1003_linux-4.15.4.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.15.4
21
22 +Patch: 1004_linux-4.15.5.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.15.5
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1004_linux-4.15.5.patch b/1004_linux-4.15.5.patch
31 new file mode 100644
32 index 0000000..5340f07
33 --- /dev/null
34 +++ b/1004_linux-4.15.5.patch
35 @@ -0,0 +1,6693 @@
36 +diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
37 +index a122723907ac..99acc712f83a 100644
38 +--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
39 ++++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
40 +@@ -64,6 +64,6 @@ Example:
41 + reg = <0xe0000000 0x1000>;
42 + interrupts = <0 35 0x4>;
43 + dmas = <&dmahost 12 0 1>,
44 +- <&dmahost 13 0 1 0>;
45 ++ <&dmahost 13 1 0>;
46 + dma-names = "rx", "rx";
47 + };
48 +diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
49 +index 75236c0c2ac2..d081ce0482cc 100644
50 +--- a/Documentation/filesystems/ext4.txt
51 ++++ b/Documentation/filesystems/ext4.txt
52 +@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
53 + data_err=abort Abort the journal if an error occurs in a file
54 + data buffer in ordered mode.
55 +
56 +-grpid Give objects the same group ID as their creator.
57 ++grpid New objects have the group ID of their parent.
58 + bsdgroups
59 +
60 + nogrpid (*) New objects have the group ID of their creator.
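A quick aside on the semantics this doc fix pins down: with grpid/bsdgroups, a new file takes the group ID of its parent directory (the BSD rule); without it, it takes the creator's fsgid unless the directory is setgid. A minimal userspace sketch, assuming hypothetical device and mountpoint paths:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* "grpid" travels in the fs-specific data string; needs CAP_SYS_ADMIN */
        if (mount("/dev/sdb1", "/mnt/data", "ext4", 0, "grpid") != 0) {
            perror("mount");
            return 1;
        }
        return 0;
    }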
61 +diff --git a/Makefile b/Makefile
62 +index 8495e1ca052e..28c537fbe328 100644
63 +--- a/Makefile
64 ++++ b/Makefile
65 +@@ -1,7 +1,7 @@
66 + # SPDX-License-Identifier: GPL-2.0
67 + VERSION = 4
68 + PATCHLEVEL = 15
69 +-SUBLEVEL = 4
70 ++SUBLEVEL = 5
71 + EXTRAVERSION =
72 + NAME = Fearless Coyote
73 +
74 +diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
75 +index 7b8d90b7aeea..29b636fce23f 100644
76 +--- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
77 ++++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
78 +@@ -150,11 +150,6 @@
79 + interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
80 + };
81 +
82 +-&charlcd {
83 +- interrupt-parent = <&intc>;
84 +- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
85 +-};
86 +-
87 + &serial0 {
88 + interrupt-parent = <&intc>;
89 + interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
90 +diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
91 +index 06713ec86f0d..d2174727df9a 100644
92 +--- a/arch/arm/boot/dts/exynos5410.dtsi
93 ++++ b/arch/arm/boot/dts/exynos5410.dtsi
94 +@@ -333,7 +333,6 @@
95 + &rtc {
96 + clocks = <&clock CLK_RTC>;
97 + clock-names = "rtc";
98 +- interrupt-parent = <&pmu_system_controller>;
99 + status = "disabled";
100 + };
101 +
102 +diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
103 +index c43adb7b4d7c..58ea0a4e7afa 100644
104 +--- a/arch/arm/boot/dts/lpc3250-ea3250.dts
105 ++++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
106 +@@ -156,8 +156,8 @@
107 + uda1380: uda1380@18 {
108 + compatible = "nxp,uda1380";
109 + reg = <0x18>;
110 +- power-gpio = <&gpio 0x59 0>;
111 +- reset-gpio = <&gpio 0x51 0>;
112 ++ power-gpio = <&gpio 3 10 0>;
113 ++ reset-gpio = <&gpio 3 2 0>;
114 + dac-clk = "wspll";
115 + };
116 +
117 +diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
118 +index c72eb9845603..1e1c2f517a82 100644
119 +--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
120 ++++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
121 +@@ -81,8 +81,8 @@
122 + uda1380: uda1380@18 {
123 + compatible = "nxp,uda1380";
124 + reg = <0x18>;
125 +- power-gpio = <&gpio 0x59 0>;
126 +- reset-gpio = <&gpio 0x51 0>;
127 ++ power-gpio = <&gpio 3 10 0>;
128 ++ reset-gpio = <&gpio 3 2 0>;
129 + dac-clk = "wspll";
130 + };
131 +
132 +diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
133 +index 965ddfbc9953..05557fce0f1d 100644
134 +--- a/arch/arm/boot/dts/mt2701.dtsi
135 ++++ b/arch/arm/boot/dts/mt2701.dtsi
136 +@@ -604,6 +604,7 @@
137 + compatible = "mediatek,mt2701-hifsys", "syscon";
138 + reg = <0 0x1a000000 0 0x1000>;
139 + #clock-cells = <1>;
140 ++ #reset-cells = <1>;
141 + };
142 +
143 + usb0: usb@1a1c0000 {
144 +@@ -688,6 +689,7 @@
145 + compatible = "mediatek,mt2701-ethsys", "syscon";
146 + reg = <0 0x1b000000 0 0x1000>;
147 + #clock-cells = <1>;
148 ++ #reset-cells = <1>;
149 + };
150 +
151 + eth: ethernet@1b100000 {
152 +diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
153 +index 0640fb75bf59..3a442a16ea06 100644
154 +--- a/arch/arm/boot/dts/mt7623.dtsi
155 ++++ b/arch/arm/boot/dts/mt7623.dtsi
156 +@@ -758,6 +758,7 @@
157 + "syscon";
158 + reg = <0 0x1b000000 0 0x1000>;
159 + #clock-cells = <1>;
160 ++ #reset-cells = <1>;
161 + };
162 +
163 + eth: ethernet@1b100000 {
164 +diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
165 +index 688a86378cee..7bf5aa2237c9 100644
166 +--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
167 ++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
168 +@@ -204,7 +204,7 @@
169 + bus-width = <4>;
170 + max-frequency = <50000000>;
171 + cap-sd-highspeed;
172 +- cd-gpios = <&pio 261 0>;
173 ++ cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>;
174 + vmmc-supply = <&mt6323_vmch_reg>;
175 + vqmmc-supply = <&mt6323_vio18_reg>;
176 + };
177 +diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
178 +index 726c5d0dbd5b..b290a5abb901 100644
179 +--- a/arch/arm/boot/dts/s5pv210.dtsi
180 ++++ b/arch/arm/boot/dts/s5pv210.dtsi
181 +@@ -463,6 +463,7 @@
182 + compatible = "samsung,exynos4210-ohci";
183 + reg = <0xec300000 0x100>;
184 + interrupts = <23>;
185 ++ interrupt-parent = <&vic1>;
186 + clocks = <&clocks CLK_USB_HOST>;
187 + clock-names = "usbhost";
188 + #address-cells = <1>;
189 +diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
190 +index 84101e4eebbf..0f5f379323a8 100644
191 +--- a/arch/arm/boot/dts/spear1310-evb.dts
192 ++++ b/arch/arm/boot/dts/spear1310-evb.dts
193 +@@ -349,7 +349,7 @@
194 + spi0: spi@e0100000 {
195 + status = "okay";
196 + num-cs = <3>;
197 +- cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
198 ++ cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
199 +
200 + stmpe610@0 {
201 + compatible = "st,stmpe610";
202 +diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
203 +index 5f347054527d..d4dbc4098653 100644
204 +--- a/arch/arm/boot/dts/spear1340.dtsi
205 ++++ b/arch/arm/boot/dts/spear1340.dtsi
206 +@@ -142,8 +142,8 @@
207 + reg = <0xb4100000 0x1000>;
208 + interrupts = <0 105 0x4>;
209 + status = "disabled";
210 +- dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
211 +- <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
212 ++ dmas = <&dwdma0 12 0 1>,
213 ++ <&dwdma0 13 1 0>;
214 + dma-names = "tx", "rx";
215 + };
216 +
217 +diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
218 +index 17ea0abcdbd7..086b4b333249 100644
219 +--- a/arch/arm/boot/dts/spear13xx.dtsi
220 ++++ b/arch/arm/boot/dts/spear13xx.dtsi
221 +@@ -100,7 +100,7 @@
222 + reg = <0xb2800000 0x1000>;
223 + interrupts = <0 29 0x4>;
224 + status = "disabled";
225 +- dmas = <&dwdma0 0 0 0 0>;
226 ++ dmas = <&dwdma0 0 0 0>;
227 + dma-names = "data";
228 + };
229 +
230 +@@ -290,8 +290,8 @@
231 + #size-cells = <0>;
232 + interrupts = <0 31 0x4>;
233 + status = "disabled";
234 +- dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
235 +- <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
236 ++ dmas = <&dwdma0 4 0 0>,
237 ++ <&dwdma0 5 0 0>;
238 + dma-names = "tx", "rx";
239 + };
240 +
241 +diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
242 +index 6b32d20acc9f..00166eb9be86 100644
243 +--- a/arch/arm/boot/dts/spear600.dtsi
244 ++++ b/arch/arm/boot/dts/spear600.dtsi
245 +@@ -194,6 +194,7 @@
246 + rtc: rtc@fc900000 {
247 + compatible = "st,spear600-rtc";
248 + reg = <0xfc900000 0x1000>;
249 ++ interrupt-parent = <&vic0>;
250 + interrupts = <10>;
251 + status = "disabled";
252 + };
253 +diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
254 +index 68aab50a73ab..733678b75b88 100644
255 +--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
256 ++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
257 +@@ -750,6 +750,7 @@
258 + reg = <0x10120000 0x1000>;
259 + interrupt-names = "combined";
260 + interrupts = <14>;
261 ++ interrupt-parent = <&vica>;
262 + clocks = <&clcdclk>, <&hclkclcd>;
263 + clock-names = "clcdclk", "apb_pclk";
264 + status = "disabled";
265 +diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
266 +index fa149837df14..11fdecd9312e 100644
267 +--- a/arch/arm/boot/dts/stih407.dtsi
268 ++++ b/arch/arm/boot/dts/stih407.dtsi
269 +@@ -8,6 +8,7 @@
270 + */
271 + #include "stih407-clock.dtsi"
272 + #include "stih407-family.dtsi"
273 ++#include <dt-bindings/gpio/gpio.h>
274 + / {
275 + soc {
276 + sti-display-subsystem {
277 +@@ -122,7 +123,7 @@
278 + <&clk_s_d2_quadfs 0>,
279 + <&clk_s_d2_quadfs 1>;
280 +
281 +- hdmi,hpd-gpio = <&pio5 3>;
282 ++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
283 + reset-names = "hdmi";
284 + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
285 + ddc = <&hdmiddc>;
286 +diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
287 +index cffa50db5d72..68b5ff91d6a7 100644
288 +--- a/arch/arm/boot/dts/stih410.dtsi
289 ++++ b/arch/arm/boot/dts/stih410.dtsi
290 +@@ -9,6 +9,7 @@
291 + #include "stih410-clock.dtsi"
292 + #include "stih407-family.dtsi"
293 + #include "stih410-pinctrl.dtsi"
294 ++#include <dt-bindings/gpio/gpio.h>
295 + / {
296 + aliases {
297 + bdisp0 = &bdisp0;
298 +@@ -213,7 +214,7 @@
299 + <&clk_s_d2_quadfs 0>,
300 + <&clk_s_d2_quadfs 1>;
301 +
302 +- hdmi,hpd-gpio = <&pio5 3>;
303 ++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
304 + reset-names = "hdmi";
305 + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
306 + ddc = <&hdmiddc>;
307 +diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
308 +index 107f37210fb9..83606087edc7 100644
309 +--- a/arch/arm/mach-pxa/tosa-bt.c
310 ++++ b/arch/arm/mach-pxa/tosa-bt.c
311 +@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
312 + },
313 + };
314 + module_platform_driver(tosa_bt_driver);
315 ++
316 ++MODULE_LICENSE("GPL");
317 ++MODULE_AUTHOR("Dmitry Baryshkov");
318 ++MODULE_DESCRIPTION("Bluetooth built-in chip control");
319 +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
320 +index 6b2127a6ced1..b84c0ca4f84a 100644
321 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
322 ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
323 +@@ -906,6 +906,7 @@
324 + "dsi_phy_regulator";
325 +
326 + #clock-cells = <1>;
327 ++ #phy-cells = <0>;
328 +
329 + clocks = <&gcc GCC_MDSS_AHB_CLK>;
330 + clock-names = "iface_clk";
331 +@@ -1435,8 +1436,8 @@
332 + #address-cells = <1>;
333 + #size-cells = <0>;
334 +
335 +- qcom,ipc-1 = <&apcs 0 13>;
336 +- qcom,ipc-6 = <&apcs 0 19>;
337 ++ qcom,ipc-1 = <&apcs 8 13>;
338 ++ qcom,ipc-3 = <&apcs 8 19>;
339 +
340 + apps_smsm: apps@0 {
341 + reg = <0>;
342 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
343 +index 07823595b7f0..52f15cd896e1 100644
344 +--- a/arch/arm64/kernel/cpu_errata.c
345 ++++ b/arch/arm64/kernel/cpu_errata.c
346 +@@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
347 + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
348 + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
349 + },
350 ++ {
351 ++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
352 ++ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
353 ++ .enable = qcom_enable_link_stack_sanitization,
354 ++ },
355 ++ {
356 ++ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
357 ++ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
358 ++ },
359 + {
360 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
361 + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
362 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
363 +index 0b5ab4d8b57d..30b5495b82b5 100644
364 +--- a/arch/arm64/kvm/hyp/switch.c
365 ++++ b/arch/arm64/kvm/hyp/switch.c
366 +@@ -400,8 +400,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
367 + u32 midr = read_cpuid_id();
368 +
369 + /* Apply BTAC predictors mitigation to all Falkor chips */
370 +- if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
371 ++ if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
372 ++ ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
373 + __qcom_hyp_sanitize_btac_predictors();
374 ++ }
375 + }
376 +
377 + fp_enabled = __fpsimd_enabled();
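For orientation, the Falkor checks above compare MIDR_EL1 with the variant and revision fields masked off, so both Falkor v1 and later revisions hit the same mitigation. A hedged C sketch of that model test (the mask value follows the MIDR_EL1 field layout; it is illustrative, not the kernel's macro):

    #include <stdbool.h>
    #include <stdint.h>

    /* implementer[31:24] | architecture[19:16] | partnum[15:4] */
    #define DEMO_MODEL_MASK 0xff0ffff0u

    static bool midr_is_model(uint32_t midr, uint32_t model)
    {
        /* variant[23:20] and revision[3:0] are ignored on purpose */
        return (midr & DEMO_MODEL_MASK) == model;
    }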
378 +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
379 +index 08572f95bd8a..248f2e7b24ab 100644
380 +--- a/arch/arm64/mm/proc.S
381 ++++ b/arch/arm64/mm/proc.S
382 +@@ -189,7 +189,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
383 + dc cvac, cur_\()\type\()p // Ensure any existing dirty
384 + dmb sy // lines are written back before
385 + ldr \type, [cur_\()\type\()p] // loading the entry
386 +- tbz \type, #0, next_\()\type // Skip invalid entries
387 ++ tbz \type, #0, skip_\()\type // Skip invalid and
388 ++ tbnz \type, #11, skip_\()\type // non-global entries
389 + .endm
390 +
391 + .macro __idmap_kpti_put_pgtable_ent_ng, type
392 +@@ -249,8 +250,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
393 + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
394 + do_pgd: __idmap_kpti_get_pgtable_ent pgd
395 + tbnz pgd, #1, walk_puds
396 +- __idmap_kpti_put_pgtable_ent_ng pgd
397 + next_pgd:
398 ++ __idmap_kpti_put_pgtable_ent_ng pgd
399 ++skip_pgd:
400 + add cur_pgdp, cur_pgdp, #8
401 + cmp cur_pgdp, end_pgdp
402 + b.ne do_pgd
403 +@@ -278,8 +280,9 @@ walk_puds:
404 + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
405 + do_pud: __idmap_kpti_get_pgtable_ent pud
406 + tbnz pud, #1, walk_pmds
407 +- __idmap_kpti_put_pgtable_ent_ng pud
408 + next_pud:
409 ++ __idmap_kpti_put_pgtable_ent_ng pud
410 ++skip_pud:
411 + add cur_pudp, cur_pudp, 8
412 + cmp cur_pudp, end_pudp
413 + b.ne do_pud
414 +@@ -298,8 +301,9 @@ walk_pmds:
415 + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
416 + do_pmd: __idmap_kpti_get_pgtable_ent pmd
417 + tbnz pmd, #1, walk_ptes
418 +- __idmap_kpti_put_pgtable_ent_ng pmd
419 + next_pmd:
420 ++ __idmap_kpti_put_pgtable_ent_ng pmd
421 ++skip_pmd:
422 + add cur_pmdp, cur_pmdp, #8
423 + cmp cur_pmdp, end_pmdp
424 + b.ne do_pmd
425 +@@ -317,7 +321,7 @@ walk_ptes:
426 + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
427 + do_pte: __idmap_kpti_get_pgtable_ent pte
428 + __idmap_kpti_put_pgtable_ent_ng pte
429 +-next_pte:
430 ++skip_pte:
431 + add cur_ptep, cur_ptep, #8
432 + cmp cur_ptep, end_ptep
433 + b.ne do_pte
434 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
435 +index 8e0b3702f1c0..efaa3b130f4d 100644
436 +--- a/arch/mips/Kconfig
437 ++++ b/arch/mips/Kconfig
438 +@@ -119,12 +119,12 @@ config MIPS_GENERIC
439 + select SYS_SUPPORTS_MULTITHREADING
440 + select SYS_SUPPORTS_RELOCATABLE
441 + select SYS_SUPPORTS_SMARTMIPS
442 +- select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
443 +- select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
444 +- select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
445 +- select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
446 +- select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
447 +- select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
448 ++ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
449 ++ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
450 ++ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
451 ++ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
452 ++ select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
453 ++ select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
454 + select USE_OF
455 + help
456 + Select this to build a kernel which aims to support multiple boards,
457 +diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
458 +index e68e6e04063a..1025f937ab0e 100644
459 +--- a/arch/mips/kernel/cps-vec.S
460 ++++ b/arch/mips/kernel/cps-vec.S
461 +@@ -388,15 +388,16 @@ LEAF(mips_cps_boot_vpes)
462 +
463 + #elif defined(CONFIG_MIPS_MT)
464 +
465 +- .set push
466 +- .set MIPS_ISA_LEVEL_RAW
467 +- .set mt
468 +-
469 + /* If the core doesn't support MT then return */
470 + has_mt t0, 5f
471 +
472 + /* Enter VPE configuration state */
473 ++ .set push
474 ++ .set MIPS_ISA_LEVEL_RAW
475 ++ .set mt
476 + dvpe
477 ++ .set pop
478 ++
479 + PTR_LA t1, 1f
480 + jr.hb t1
481 + nop
482 +@@ -422,6 +423,10 @@ LEAF(mips_cps_boot_vpes)
483 + mtc0 t0, CP0_VPECONTROL
484 + ehb
485 +
486 ++ .set push
487 ++ .set MIPS_ISA_LEVEL_RAW
488 ++ .set mt
489 ++
490 + /* Skip the VPE if its TC is not halted */
491 + mftc0 t0, CP0_TCHALT
492 + beqz t0, 2f
493 +@@ -495,6 +500,8 @@ LEAF(mips_cps_boot_vpes)
494 + ehb
495 + evpe
496 +
497 ++ .set pop
498 ++
499 + /* Check whether this VPE is meant to be running */
500 + li t0, 1
501 + sll t0, t0, a1
502 +@@ -509,7 +516,7 @@ LEAF(mips_cps_boot_vpes)
503 + 1: jr.hb t0
504 + nop
505 +
506 +-2: .set pop
507 ++2:
508 +
509 + #endif /* CONFIG_MIPS_MT_SMP */
510 +
511 +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
512 +index 702c678de116..e4a1581ce822 100644
513 +--- a/arch/mips/kernel/setup.c
514 ++++ b/arch/mips/kernel/setup.c
515 +@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
516 + unsigned long reserved_end;
517 + unsigned long mapstart = ~0UL;
518 + unsigned long bootmap_size;
519 ++ phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
520 + bool bootmap_valid = false;
521 + int i;
522 +
523 +@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
524 + max_low_pfn = 0;
525 +
526 + /*
527 +- * Find the highest page frame number we have available.
528 ++ * Find the highest page frame number we have available
529 ++ * and the lowest used RAM address
530 + */
531 + for (i = 0; i < boot_mem_map.nr_map; i++) {
532 + unsigned long start, end;
533 +@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
534 + end = PFN_DOWN(boot_mem_map.map[i].addr
535 + + boot_mem_map.map[i].size);
536 +
537 ++ ramstart = min(ramstart, boot_mem_map.map[i].addr);
538 ++
539 + #ifndef CONFIG_HIGHMEM
540 + /*
541 + * Skip highmem here so we get an accurate max_low_pfn if low
542 +@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
543 + mapstart = max(reserved_end, start);
544 + }
545 +
546 ++ /*
547 ++ * Reserve any memory between the start of RAM and PHYS_OFFSET
548 ++ */
549 ++ if (ramstart > PHYS_OFFSET)
550 ++ add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
551 ++ BOOT_MEM_RESERVED);
552 ++
553 + if (min_low_pfn >= max_low_pfn)
554 + panic("Incorrect memory mapping !!!");
555 + if (min_low_pfn > ARCH_PFN_OFFSET) {
556 +@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)
557 +
558 + add_memory_region(start, size, BOOT_MEM_RAM);
559 +
560 +- if (start && start > PHYS_OFFSET)
561 +- add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
562 +- BOOT_MEM_RESERVED);
563 + return 0;
564 + }
565 + early_param("mem", early_parse_mem);
566 +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
567 +index 88187c285c70..1c02e6900f78 100644
568 +--- a/arch/powerpc/include/asm/topology.h
569 ++++ b/arch/powerpc/include/asm/topology.h
570 +@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
571 + extern void sysfs_remove_device_from_node(struct device *dev, int nid);
572 + extern int numa_update_cpu_topology(bool cpus_locked);
573 +
574 ++static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
575 ++{
576 ++ numa_cpu_lookup_table[cpu] = node;
577 ++}
578 ++
579 + static inline int early_cpu_to_node(int cpu)
580 + {
581 + int nid;
582 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
583 +index 72be0c32e902..2010e4c827b7 100644
584 +--- a/arch/powerpc/kernel/process.c
585 ++++ b/arch/powerpc/kernel/process.c
586 +@@ -1509,14 +1509,15 @@ static int assign_thread_tidr(void)
587 + {
588 + int index;
589 + int err;
590 ++ unsigned long flags;
591 +
592 + again:
593 + if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
594 + return -ENOMEM;
595 +
596 +- spin_lock(&vas_thread_id_lock);
597 ++ spin_lock_irqsave(&vas_thread_id_lock, flags);
598 + err = ida_get_new_above(&vas_thread_ida, 1, &index);
599 +- spin_unlock(&vas_thread_id_lock);
600 ++ spin_unlock_irqrestore(&vas_thread_id_lock, flags);
601 +
602 + if (err == -EAGAIN)
603 + goto again;
604 +@@ -1524,9 +1525,9 @@ static int assign_thread_tidr(void)
605 + return err;
606 +
607 + if (index > MAX_THREAD_CONTEXT) {
608 +- spin_lock(&vas_thread_id_lock);
609 ++ spin_lock_irqsave(&vas_thread_id_lock, flags);
610 + ida_remove(&vas_thread_ida, index);
611 +- spin_unlock(&vas_thread_id_lock);
612 ++ spin_unlock_irqrestore(&vas_thread_id_lock, flags);
613 + return -ENOMEM;
614 + }
615 +
616 +@@ -1535,9 +1536,11 @@ static int assign_thread_tidr(void)
617 +
618 + static void free_thread_tidr(int id)
619 + {
620 +- spin_lock(&vas_thread_id_lock);
621 ++ unsigned long flags;
622 ++
623 ++ spin_lock_irqsave(&vas_thread_id_lock, flags);
624 + ida_remove(&vas_thread_ida, id);
625 +- spin_unlock(&vas_thread_id_lock);
626 ++ spin_unlock_irqrestore(&vas_thread_id_lock, flags);
627 + }
628 +
629 + /*
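The irqsave conversions above follow the usual locking rule: once a spinlock can be taken from interrupt context, every other acquirer must disable local interrupts while holding it, or an IRQ landing on the same CPU can try to retake the lock and deadlock. A generic kernel-style sketch of the pattern (names hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static int demo_counter;

    static void demo_bump(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);    /* also masks local IRQs */
        demo_counter++;
        spin_unlock_irqrestore(&demo_lock, flags);
    }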
630 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
631 +index adb6364f4091..09be66fcea68 100644
632 +--- a/arch/powerpc/mm/numa.c
633 ++++ b/arch/powerpc/mm/numa.c
634 +@@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void)
635 + numa_cpu_lookup_table[cpu] = -1;
636 + }
637 +
638 +-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
639 +-{
640 +- numa_cpu_lookup_table[cpu] = node;
641 +-}
642 +-
643 + static void map_cpu_to_node(int cpu, int node)
644 + {
645 + update_numa_cpu_lookup_table(cpu, node);
646 +diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
647 +index cfbbee941a76..17ae5c15a9e0 100644
648 +--- a/arch/powerpc/mm/pgtable-radix.c
649 ++++ b/arch/powerpc/mm/pgtable-radix.c
650 +@@ -17,6 +17,7 @@
651 + #include <linux/of_fdt.h>
652 + #include <linux/mm.h>
653 + #include <linux/string_helpers.h>
654 ++#include <linux/stop_machine.h>
655 +
656 + #include <asm/pgtable.h>
657 + #include <asm/pgalloc.h>
658 +@@ -671,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
659 + pud_clear(pud);
660 + }
661 +
662 ++struct change_mapping_params {
663 ++ pte_t *pte;
664 ++ unsigned long start;
665 ++ unsigned long end;
666 ++ unsigned long aligned_start;
667 ++ unsigned long aligned_end;
668 ++};
669 ++
670 ++static int stop_machine_change_mapping(void *data)
671 ++{
672 ++ struct change_mapping_params *params =
673 ++ (struct change_mapping_params *)data;
674 ++
675 ++ if (!data)
676 ++ return -1;
677 ++
678 ++ spin_unlock(&init_mm.page_table_lock);
679 ++ pte_clear(&init_mm, params->aligned_start, params->pte);
680 ++ create_physical_mapping(params->aligned_start, params->start);
681 ++ create_physical_mapping(params->end, params->aligned_end);
682 ++ spin_lock(&init_mm.page_table_lock);
683 ++ return 0;
684 ++}
685 ++
686 + static void remove_pte_table(pte_t *pte_start, unsigned long addr,
687 + unsigned long end)
688 + {
689 +@@ -699,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
690 + }
691 + }
692 +
693 ++/*
694 ++ * Helper to clear the PTE and potentially split the mapping.
695 ++ */
696 ++static void split_kernel_mapping(unsigned long addr, unsigned long end,
697 ++ unsigned long size, pte_t *pte)
698 ++{
699 ++ unsigned long mask = ~(size - 1);
700 ++ unsigned long aligned_start = addr & mask;
701 ++ unsigned long aligned_end = addr + size;
702 ++ struct change_mapping_params params;
703 ++ bool split_region = false;
704 ++
705 ++ if ((end - addr) < size) {
706 ++ /*
707 ++ * We're going to clear the PTE, but we have not yet flushed
708 ++ * the mapping, so it is time to remap and flush. If the
709 ++ * effects are visible outside the processor, or if we are
710 ++ * running in code close to the mapping we cleared, we are
711 ++ * in trouble.
712 ++ */
713 ++ if (overlaps_kernel_text(aligned_start, addr) ||
714 ++ overlaps_kernel_text(end, aligned_end)) {
715 ++ /*
716 ++ * Hack, just return, don't pte_clear
717 ++ */
718 ++ WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
719 ++ "text, not splitting\n", addr, end);
720 ++ return;
721 ++ }
722 ++ split_region = true;
723 ++ }
724 ++
725 ++ if (split_region) {
726 ++ params.pte = pte;
727 ++ params.start = addr;
728 ++ params.end = end;
729 ++ params.aligned_start = addr & ~(size - 1);
730 ++ params.aligned_end = min_t(unsigned long, aligned_end,
731 ++ (unsigned long)__va(memblock_end_of_DRAM()));
732 ++ stop_machine(stop_machine_change_mapping, &params, NULL);
733 ++ return;
734 ++ }
735 ++
736 ++ pte_clear(&init_mm, addr, pte);
737 ++}
738 ++
739 + static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
740 + unsigned long end)
741 + {
742 +@@ -714,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
743 + continue;
744 +
745 + if (pmd_huge(*pmd)) {
746 +- if (!IS_ALIGNED(addr, PMD_SIZE) ||
747 +- !IS_ALIGNED(next, PMD_SIZE)) {
748 +- WARN_ONCE(1, "%s: unaligned range\n", __func__);
749 +- continue;
750 +- }
751 +-
752 +- pte_clear(&init_mm, addr, (pte_t *)pmd);
753 ++ split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
754 + continue;
755 + }
756 +
757 +@@ -745,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
758 + continue;
759 +
760 + if (pud_huge(*pud)) {
761 +- if (!IS_ALIGNED(addr, PUD_SIZE) ||
762 +- !IS_ALIGNED(next, PUD_SIZE)) {
763 +- WARN_ONCE(1, "%s: unaligned range\n", __func__);
764 +- continue;
765 +- }
766 +-
767 +- pte_clear(&init_mm, addr, (pte_t *)pud);
768 ++ split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
769 + continue;
770 + }
771 +
772 +@@ -777,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
773 + continue;
774 +
775 + if (pgd_huge(*pgd)) {
776 +- if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
777 +- !IS_ALIGNED(next, PGDIR_SIZE)) {
778 +- WARN_ONCE(1, "%s: unaligned range\n", __func__);
779 +- continue;
780 +- }
781 +-
782 +- pte_clear(&init_mm, addr, (pte_t *)pgd);
783 ++ split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
784 + continue;
785 + }
786 +
787 +diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
788 +index 813ea22c3e00..eec1367c2f32 100644
789 +--- a/arch/powerpc/mm/pgtable_64.c
790 ++++ b/arch/powerpc/mm/pgtable_64.c
791 +@@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
792 + if (old & PATB_HR) {
793 + asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
794 + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
795 ++ asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
796 ++ "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
797 + trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
798 + } else {
799 + asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
800 +diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
801 +index 884f4b705b57..913a2b81b177 100644
802 +--- a/arch/powerpc/mm/tlb-radix.c
803 ++++ b/arch/powerpc/mm/tlb-radix.c
804 +@@ -600,14 +600,12 @@ void radix__flush_tlb_all(void)
805 + */
806 + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
807 + : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
808 +- trace_tlbie(0, 0, rb, rs, ric, prs, r);
809 + /*
810 + * now flush host entries by passing PRS = 0 and LPID == 0
811 + */
812 + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
813 + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
814 + asm volatile("eieio; tlbsync; ptesync": : :"memory");
815 +- trace_tlbie(0, 0, rb, 0, ric, prs, r);
816 + }
817 +
818 + void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
819 +diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
820 +index 2b3eb01ab110..b7c53a51c31b 100644
821 +--- a/arch/powerpc/platforms/powernv/vas-window.c
822 ++++ b/arch/powerpc/platforms/powernv/vas-window.c
823 +@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
824 + rc = PTR_ERR(txwin->paste_kaddr);
825 + goto free_window;
826 + }
827 ++ } else {
828 ++ /*
829 ++ * A user mapping must ensure that context switch issues
830 ++ * CP_ABORT for this thread.
831 ++ */
832 ++ rc = set_thread_uses_vas();
833 ++ if (rc)
834 ++ goto free_window;
835 + }
836 +
837 +- /*
838 +- * Now that we have a send window, ensure context switch issues
839 +- * CP_ABORT for this thread.
840 +- */
841 +- rc = -EINVAL;
842 +- if (set_thread_uses_vas() < 0)
843 +- goto free_window;
844 +-
845 + set_vinst_win(vinst, txwin);
846 +
847 + return txwin;
848 +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
849 +index a7d14aa7bb7c..09083ad82f7a 100644
850 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
851 ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
852 +@@ -36,6 +36,7 @@
853 + #include <asm/xics.h>
854 + #include <asm/xive.h>
855 + #include <asm/plpar_wrappers.h>
856 ++#include <asm/topology.h>
857 +
858 + #include "pseries.h"
859 + #include "offline_states.h"
860 +@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
861 + BUG_ON(cpu_online(cpu));
862 + set_cpu_present(cpu, false);
863 + set_hard_smp_processor_id(cpu, -1);
864 ++ update_numa_cpu_lookup_table(cpu, -1);
865 + break;
866 + }
867 + if (cpu >= nr_cpu_ids)
868 +diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
869 +index d9c4c9366049..091f1d0d0af1 100644
870 +--- a/arch/powerpc/sysdev/xive/spapr.c
871 ++++ b/arch/powerpc/sysdev/xive/spapr.c
872 +@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
873 +
874 + rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
875 + if (rc) {
876 +- pr_err("Error %lld getting queue info prio %d\n", rc, prio);
877 ++ pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
878 ++ target, prio);
879 + rc = -EIO;
880 + goto fail;
881 + }
882 +@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
883 + /* Configure and enable the queue in HW */
884 + rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
885 + if (rc) {
886 +- pr_err("Error %lld setting queue for prio %d\n", rc, prio);
887 ++ pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
888 ++ target, prio);
889 + rc = -EIO;
890 + } else {
891 + q->qpage = qpage;
892 +@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
893 + if (IS_ERR(qpage))
894 + return PTR_ERR(qpage);
895 +
896 +- return xive_spapr_configure_queue(cpu, q, prio, qpage,
897 +- xive_queue_shift);
898 ++ return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
899 ++ q, prio, qpage, xive_queue_shift);
900 + }
901 +
902 + static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
903 +@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
904 + struct xive_q *q = &xc->queue[prio];
905 + unsigned int alloc_order;
906 + long rc;
907 ++ int hw_cpu = get_hard_smp_processor_id(cpu);
908 +
909 +- rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
910 ++ rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
911 + if (rc)
912 +- pr_err("Error %ld setting queue for prio %d\n", rc, prio);
913 ++ pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
914 ++ hw_cpu, prio);
915 +
916 + alloc_order = xive_alloc_order(xive_queue_shift);
917 + free_pages((unsigned long)q->qpage, alloc_order);
918 +diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
919 +index 59eea9c65d3e..79b7a3438d54 100644
920 +--- a/arch/s390/kernel/compat_linux.c
921 ++++ b/arch/s390/kernel/compat_linux.c
922 +@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
923 +
924 + COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
925 + {
926 +- return sys_setgid((gid_t)gid);
927 ++ return sys_setgid(low2highgid(gid));
928 + }
929 +
930 + COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
931 +@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
932 +
933 + COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
934 + {
935 +- return sys_setuid((uid_t)uid);
936 ++ return sys_setuid(low2highuid(uid));
937 + }
938 +
939 + COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
940 +@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
941 +
942 + COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
943 + {
944 +- return sys_setfsuid((uid_t)uid);
945 ++ return sys_setfsuid(low2highuid(uid));
946 + }
947 +
948 + COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
949 + {
950 +- return sys_setfsgid((gid_t)gid);
951 ++ return sys_setfsgid(low2highgid(gid));
952 + }
953 +
954 + static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
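The reason the plain casts above were wrong: in the 16-bit ABI an id of 65535 is the encoding of -1 ("don't change this id"), and a bare (uid_t) cast zero-extends it into the perfectly valid uid 65535 instead. A sketch modeled on the low2highuid()/low2highgid() helpers in include/linux/highuid.h:

    #include <linux/types.h>

    static inline uid_t demo_low2highuid(u16 uid)
    {
        /* 0xffff is legacy -1: preserve it; anything else widens as-is */
        return uid == (u16)-1 ? (uid_t)-1 : (uid_t)uid;
    }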
955 +diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
956 +index 3f48f695d5e6..dce7092ab24a 100644
957 +--- a/arch/x86/entry/calling.h
958 ++++ b/arch/x86/entry/calling.h
959 +@@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with
960 +
961 + #define SIZEOF_PTREGS 21*8
962 +
963 +- .macro ALLOC_PT_GPREGS_ON_STACK
964 +- addq $-(15*8), %rsp
965 +- .endm
966 ++.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
967 ++ /*
968 ++ * Push registers and sanitize registers of values that a
969 ++ * speculation attack might otherwise want to exploit. The
970 ++ * lower registers are likely clobbered well before they
971 ++ * could be put to use in a speculative execution gadget.
972 ++ * Interleave XOR with PUSH for better uop scheduling:
973 ++ */
974 ++ pushq %rdi /* pt_regs->di */
975 ++ pushq %rsi /* pt_regs->si */
976 ++ pushq \rdx /* pt_regs->dx */
977 ++ pushq %rcx /* pt_regs->cx */
978 ++ pushq \rax /* pt_regs->ax */
979 ++ pushq %r8 /* pt_regs->r8 */
980 ++ xorq %r8, %r8 /* nospec r8 */
981 ++ pushq %r9 /* pt_regs->r9 */
982 ++ xorq %r9, %r9 /* nospec r9 */
983 ++ pushq %r10 /* pt_regs->r10 */
984 ++ xorq %r10, %r10 /* nospec r10 */
985 ++ pushq %r11 /* pt_regs->r11 */
986 ++ xorq %r11, %r11 /* nospec r11*/
987 ++ pushq %rbx /* pt_regs->rbx */
988 ++ xorl %ebx, %ebx /* nospec rbx*/
989 ++ pushq %rbp /* pt_regs->rbp */
990 ++ xorl %ebp, %ebp /* nospec rbp*/
991 ++ pushq %r12 /* pt_regs->r12 */
992 ++ xorq %r12, %r12 /* nospec r12*/
993 ++ pushq %r13 /* pt_regs->r13 */
994 ++ xorq %r13, %r13 /* nospec r13*/
995 ++ pushq %r14 /* pt_regs->r14 */
996 ++ xorq %r14, %r14 /* nospec r14*/
997 ++ pushq %r15 /* pt_regs->r15 */
998 ++ xorq %r15, %r15 /* nospec r15*/
999 ++ UNWIND_HINT_REGS
1000 ++.endm
1001 +
1002 +- .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
1003 +- .if \r11
1004 +- movq %r11, 6*8+\offset(%rsp)
1005 +- .endif
1006 +- .if \r8910
1007 +- movq %r10, 7*8+\offset(%rsp)
1008 +- movq %r9, 8*8+\offset(%rsp)
1009 +- movq %r8, 9*8+\offset(%rsp)
1010 +- .endif
1011 +- .if \rax
1012 +- movq %rax, 10*8+\offset(%rsp)
1013 +- .endif
1014 +- .if \rcx
1015 +- movq %rcx, 11*8+\offset(%rsp)
1016 +- .endif
1017 +- movq %rdx, 12*8+\offset(%rsp)
1018 +- movq %rsi, 13*8+\offset(%rsp)
1019 +- movq %rdi, 14*8+\offset(%rsp)
1020 +- UNWIND_HINT_REGS offset=\offset extra=0
1021 +- .endm
1022 +- .macro SAVE_C_REGS offset=0
1023 +- SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
1024 +- .endm
1025 +- .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
1026 +- SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
1027 +- .endm
1028 +- .macro SAVE_C_REGS_EXCEPT_R891011
1029 +- SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
1030 +- .endm
1031 +- .macro SAVE_C_REGS_EXCEPT_RCX_R891011
1032 +- SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
1033 +- .endm
1034 +- .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
1035 +- SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
1036 +- .endm
1037 +-
1038 +- .macro SAVE_EXTRA_REGS offset=0
1039 +- movq %r15, 0*8+\offset(%rsp)
1040 +- movq %r14, 1*8+\offset(%rsp)
1041 +- movq %r13, 2*8+\offset(%rsp)
1042 +- movq %r12, 3*8+\offset(%rsp)
1043 +- movq %rbp, 4*8+\offset(%rsp)
1044 +- movq %rbx, 5*8+\offset(%rsp)
1045 +- UNWIND_HINT_REGS offset=\offset
1046 +- .endm
1047 +-
1048 +- .macro POP_EXTRA_REGS
1049 ++.macro POP_REGS pop_rdi=1 skip_r11rcx=0
1050 + popq %r15
1051 + popq %r14
1052 + popq %r13
1053 + popq %r12
1054 + popq %rbp
1055 + popq %rbx
1056 +- .endm
1057 +-
1058 +- .macro POP_C_REGS
1059 ++ .if \skip_r11rcx
1060 ++ popq %rsi
1061 ++ .else
1062 + popq %r11
1063 ++ .endif
1064 + popq %r10
1065 + popq %r9
1066 + popq %r8
1067 + popq %rax
1068 ++ .if \skip_r11rcx
1069 ++ popq %rsi
1070 ++ .else
1071 + popq %rcx
1072 ++ .endif
1073 + popq %rdx
1074 + popq %rsi
1075 ++ .if \pop_rdi
1076 + popq %rdi
1077 +- .endm
1078 +-
1079 +- .macro icebp
1080 +- .byte 0xf1
1081 +- .endm
1082 ++ .endif
1083 ++.endm
1084 +
1085 + /*
1086 + * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
1087 +@@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with
1088 + * is just setting the LSB, which makes it an invalid stack address and is also
1089 + * a signal to the unwinder that it's a pt_regs pointer in disguise.
1090 + *
1091 +- * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
1092 ++ * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
1093 + * the original rbp.
1094 + */
1095 + .macro ENCODE_FRAME_POINTER ptregs_offset=0
1096 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
1097 +index c752abe89d80..4fd9044e72e7 100644
1098 +--- a/arch/x86/entry/entry_64.S
1099 ++++ b/arch/x86/entry/entry_64.S
1100 +@@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64)
1101 +
1102 + swapgs
1103 + /*
1104 +- * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
1105 ++ * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
1106 + * is not required to switch CR3.
1107 + */
1108 + movq %rsp, PER_CPU_VAR(rsp_scratch)
1109 +@@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64)
1110 + pushq %rcx /* pt_regs->ip */
1111 + GLOBAL(entry_SYSCALL_64_after_hwframe)
1112 + pushq %rax /* pt_regs->orig_ax */
1113 +- pushq %rdi /* pt_regs->di */
1114 +- pushq %rsi /* pt_regs->si */
1115 +- pushq %rdx /* pt_regs->dx */
1116 +- pushq %rcx /* pt_regs->cx */
1117 +- pushq $-ENOSYS /* pt_regs->ax */
1118 +- pushq %r8 /* pt_regs->r8 */
1119 +- pushq %r9 /* pt_regs->r9 */
1120 +- pushq %r10 /* pt_regs->r10 */
1121 +- pushq %r11 /* pt_regs->r11 */
1122 +- pushq %rbx /* pt_regs->rbx */
1123 +- pushq %rbp /* pt_regs->rbp */
1124 +- pushq %r12 /* pt_regs->r12 */
1125 +- pushq %r13 /* pt_regs->r13 */
1126 +- pushq %r14 /* pt_regs->r14 */
1127 +- pushq %r15 /* pt_regs->r15 */
1128 +- UNWIND_HINT_REGS
1129 ++
1130 ++ PUSH_AND_CLEAR_REGS rax=$-ENOSYS
1131 +
1132 + TRACE_IRQS_OFF
1133 +
1134 +@@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
1135 + syscall_return_via_sysret:
1136 + /* rcx and r11 are already restored (see code above) */
1137 + UNWIND_HINT_EMPTY
1138 +- POP_EXTRA_REGS
1139 +- popq %rsi /* skip r11 */
1140 +- popq %r10
1141 +- popq %r9
1142 +- popq %r8
1143 +- popq %rax
1144 +- popq %rsi /* skip rcx */
1145 +- popq %rdx
1146 +- popq %rsi
1147 ++ POP_REGS pop_rdi=0 skip_r11rcx=1
1148 +
1149 + /*
1150 + * Now all regs are restored except RSP and RDI.
1151 +@@ -559,9 +537,7 @@ END(irq_entries_start)
1152 + call switch_to_thread_stack
1153 + 1:
1154 +
1155 +- ALLOC_PT_GPREGS_ON_STACK
1156 +- SAVE_C_REGS
1157 +- SAVE_EXTRA_REGS
1158 ++ PUSH_AND_CLEAR_REGS
1159 + ENCODE_FRAME_POINTER
1160 +
1161 + testb $3, CS(%rsp)
1162 +@@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
1163 + ud2
1164 + 1:
1165 + #endif
1166 +- POP_EXTRA_REGS
1167 +- popq %r11
1168 +- popq %r10
1169 +- popq %r9
1170 +- popq %r8
1171 +- popq %rax
1172 +- popq %rcx
1173 +- popq %rdx
1174 +- popq %rsi
1175 ++ POP_REGS pop_rdi=0
1176 +
1177 + /*
1178 + * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
1179 +@@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
1180 + ud2
1181 + 1:
1182 + #endif
1183 +- POP_EXTRA_REGS
1184 +- POP_C_REGS
1185 ++ POP_REGS
1186 + addq $8, %rsp /* skip regs->orig_ax */
1187 + INTERRUPT_RETURN
1188 +
1189 +@@ -904,7 +871,9 @@ ENTRY(\sym)
1190 + pushq $-1 /* ORIG_RAX: no syscall to restart */
1191 + .endif
1192 +
1193 +- ALLOC_PT_GPREGS_ON_STACK
1194 ++ /* Save all registers in pt_regs */
1195 ++ PUSH_AND_CLEAR_REGS
1196 ++ ENCODE_FRAME_POINTER
1197 +
1198 + .if \paranoid < 2
1199 + testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
1200 +@@ -1117,9 +1086,7 @@ ENTRY(xen_failsafe_callback)
1201 + addq $0x30, %rsp
1202 + UNWIND_HINT_IRET_REGS
1203 + pushq $-1 /* orig_ax = -1 => not a system call */
1204 +- ALLOC_PT_GPREGS_ON_STACK
1205 +- SAVE_C_REGS
1206 +- SAVE_EXTRA_REGS
1207 ++ PUSH_AND_CLEAR_REGS
1208 + ENCODE_FRAME_POINTER
1209 + jmp error_exit
1210 + END(xen_failsafe_callback)
1211 +@@ -1156,16 +1123,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
1212 + #endif
1213 +
1214 + /*
1215 +- * Save all registers in pt_regs, and switch gs if needed.
1216 ++ * Switch gs if needed.
1217 + * Use slow, but surefire "are we in kernel?" check.
1218 + * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1219 + */
1220 + ENTRY(paranoid_entry)
1221 + UNWIND_HINT_FUNC
1222 + cld
1223 +- SAVE_C_REGS 8
1224 +- SAVE_EXTRA_REGS 8
1225 +- ENCODE_FRAME_POINTER 8
1226 + movl $1, %ebx
1227 + movl $MSR_GS_BASE, %ecx
1228 + rdmsr
1229 +@@ -1204,21 +1168,18 @@ ENTRY(paranoid_exit)
1230 + jmp .Lparanoid_exit_restore
1231 + .Lparanoid_exit_no_swapgs:
1232 + TRACE_IRQS_IRETQ_DEBUG
1233 ++ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1234 + .Lparanoid_exit_restore:
1235 + jmp restore_regs_and_return_to_kernel
1236 + END(paranoid_exit)
1237 +
1238 + /*
1239 +- * Save all registers in pt_regs, and switch gs if needed.
1240 ++ * Switch gs if needed.
1241 + * Return: EBX=0: came from user mode; EBX=1: otherwise
1242 + */
1243 + ENTRY(error_entry)
1244 +- UNWIND_HINT_FUNC
1245 ++ UNWIND_HINT_REGS offset=8
1246 + cld
1247 +- SAVE_C_REGS 8
1248 +- SAVE_EXTRA_REGS 8
1249 +- ENCODE_FRAME_POINTER 8
1250 +- xorl %ebx, %ebx
1251 + testb $3, CS+8(%rsp)
1252 + jz .Lerror_kernelspace
1253 +
1254 +@@ -1399,22 +1360,7 @@ ENTRY(nmi)
1255 + pushq 1*8(%rdx) /* pt_regs->rip */
1256 + UNWIND_HINT_IRET_REGS
1257 + pushq $-1 /* pt_regs->orig_ax */
1258 +- pushq %rdi /* pt_regs->di */
1259 +- pushq %rsi /* pt_regs->si */
1260 +- pushq (%rdx) /* pt_regs->dx */
1261 +- pushq %rcx /* pt_regs->cx */
1262 +- pushq %rax /* pt_regs->ax */
1263 +- pushq %r8 /* pt_regs->r8 */
1264 +- pushq %r9 /* pt_regs->r9 */
1265 +- pushq %r10 /* pt_regs->r10 */
1266 +- pushq %r11 /* pt_regs->r11 */
1267 +- pushq %rbx /* pt_regs->rbx */
1268 +- pushq %rbp /* pt_regs->rbp */
1269 +- pushq %r12 /* pt_regs->r12 */
1270 +- pushq %r13 /* pt_regs->r13 */
1271 +- pushq %r14 /* pt_regs->r14 */
1272 +- pushq %r15 /* pt_regs->r15 */
1273 +- UNWIND_HINT_REGS
1274 ++ PUSH_AND_CLEAR_REGS rdx=(%rdx)
1275 + ENCODE_FRAME_POINTER
1276 +
1277 + /*
1278 +@@ -1624,7 +1570,8 @@ end_repeat_nmi:
1279 + * frame to point back to repeat_nmi.
1280 + */
1281 + pushq $-1 /* ORIG_RAX: no syscall to restart */
1282 +- ALLOC_PT_GPREGS_ON_STACK
1283 ++ PUSH_AND_CLEAR_REGS
1284 ++ ENCODE_FRAME_POINTER
1285 +
1286 + /*
1287 + * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1288 +@@ -1648,8 +1595,7 @@ end_repeat_nmi:
1289 + nmi_swapgs:
1290 + SWAPGS_UNSAFE_STACK
1291 + nmi_restore:
1292 +- POP_EXTRA_REGS
1293 +- POP_C_REGS
1294 ++ POP_REGS
1295 +
1296 + /*
1297 + * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
1298 +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
1299 +index 98d5358e4041..fd65e016e413 100644
1300 +--- a/arch/x86/entry/entry_64_compat.S
1301 ++++ b/arch/x86/entry/entry_64_compat.S
1302 +@@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
1303 + pushq %rcx /* pt_regs->cx */
1304 + pushq $-ENOSYS /* pt_regs->ax */
1305 + pushq $0 /* pt_regs->r8 = 0 */
1306 ++ xorq %r8, %r8 /* nospec r8 */
1307 + pushq $0 /* pt_regs->r9 = 0 */
1308 ++ xorq %r9, %r9 /* nospec r9 */
1309 + pushq $0 /* pt_regs->r10 = 0 */
1310 ++ xorq %r10, %r10 /* nospec r10 */
1311 + pushq $0 /* pt_regs->r11 = 0 */
1312 ++ xorq %r11, %r11 /* nospec r11 */
1313 + pushq %rbx /* pt_regs->rbx */
1314 ++ xorl %ebx, %ebx /* nospec rbx */
1315 + pushq %rbp /* pt_regs->rbp (will be overwritten) */
1316 ++ xorl %ebp, %ebp /* nospec rbp */
1317 + pushq $0 /* pt_regs->r12 = 0 */
1318 ++ xorq %r12, %r12 /* nospec r12 */
1319 + pushq $0 /* pt_regs->r13 = 0 */
1320 ++ xorq %r13, %r13 /* nospec r13 */
1321 + pushq $0 /* pt_regs->r14 = 0 */
1322 ++ xorq %r14, %r14 /* nospec r14 */
1323 + pushq $0 /* pt_regs->r15 = 0 */
1324 ++ xorq %r15, %r15 /* nospec r15 */
1325 + cld
1326 +
1327 + /*
1328 +@@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
1329 + pushq %rbp /* pt_regs->cx (stashed in bp) */
1330 + pushq $-ENOSYS /* pt_regs->ax */
1331 + pushq $0 /* pt_regs->r8 = 0 */
1332 ++ xorq %r8, %r8 /* nospec r8 */
1333 + pushq $0 /* pt_regs->r9 = 0 */
1334 ++ xorq %r9, %r9 /* nospec r9 */
1335 + pushq $0 /* pt_regs->r10 = 0 */
1336 ++ xorq %r10, %r10 /* nospec r10 */
1337 + pushq $0 /* pt_regs->r11 = 0 */
1338 ++ xorq %r11, %r11 /* nospec r11 */
1339 + pushq %rbx /* pt_regs->rbx */
1340 ++ xorl %ebx, %ebx /* nospec rbx */
1341 + pushq %rbp /* pt_regs->rbp (will be overwritten) */
1342 ++ xorl %ebp, %ebp /* nospec rbp */
1343 + pushq $0 /* pt_regs->r12 = 0 */
1344 ++ xorq %r12, %r12 /* nospec r12 */
1345 + pushq $0 /* pt_regs->r13 = 0 */
1346 ++ xorq %r13, %r13 /* nospec r13 */
1347 + pushq $0 /* pt_regs->r14 = 0 */
1348 ++ xorq %r14, %r14 /* nospec r14 */
1349 + pushq $0 /* pt_regs->r15 = 0 */
1350 ++ xorq %r15, %r15 /* nospec r15 */
1351 +
1352 + /*
1353 + * User mode is traced as though IRQs are on, and SYSENTER
1354 +@@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
1355 + pushq %rcx /* pt_regs->cx */
1356 + pushq $-ENOSYS /* pt_regs->ax */
1357 + pushq $0 /* pt_regs->r8 = 0 */
1358 ++ xorq %r8, %r8 /* nospec r8 */
1359 + pushq $0 /* pt_regs->r9 = 0 */
1360 ++ xorq %r9, %r9 /* nospec r9 */
1361 + pushq $0 /* pt_regs->r10 = 0 */
1362 ++ xorq %r10, %r10 /* nospec r10 */
1363 + pushq $0 /* pt_regs->r11 = 0 */
1364 ++ xorq %r11, %r11 /* nospec r11 */
1365 + pushq %rbx /* pt_regs->rbx */
1366 ++ xorl %ebx, %ebx /* nospec rbx */
1367 + pushq %rbp /* pt_regs->rbp */
1368 ++ xorl %ebp, %ebp /* nospec rbp */
1369 + pushq %r12 /* pt_regs->r12 */
1370 ++ xorq %r12, %r12 /* nospec r12 */
1371 + pushq %r13 /* pt_regs->r13 */
1372 ++ xorq %r13, %r13 /* nospec r13 */
1373 + pushq %r14 /* pt_regs->r14 */
1374 ++ xorq %r14, %r14 /* nospec r14 */
1375 + pushq %r15 /* pt_regs->r15 */
1376 ++ xorq %r15, %r15 /* nospec r15 */
1377 + cld
1378 +
1379 + /*
1380 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1381 +index 731153a4681e..56457cb73448 100644
1382 +--- a/arch/x86/events/intel/core.c
1383 ++++ b/arch/x86/events/intel/core.c
1384 +@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
1385 + break;
1386 +
1387 + case INTEL_FAM6_SANDYBRIDGE_X:
1388 +- switch (cpu_data(cpu).x86_mask) {
1389 ++ switch (cpu_data(cpu).x86_stepping) {
1390 + case 6: rev = 0x618; break;
1391 + case 7: rev = 0x70c; break;
1392 + }
1393 +diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
1394 +index ae64d0b69729..cf372b90557e 100644
1395 +--- a/arch/x86/events/intel/lbr.c
1396 ++++ b/arch/x86/events/intel/lbr.c
1397 +@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
1398 + * on PMU interrupt
1399 + */
1400 + if (boot_cpu_data.x86_model == 28
1401 +- && boot_cpu_data.x86_mask < 10) {
1402 ++ && boot_cpu_data.x86_stepping < 10) {
1403 + pr_cont("LBR disabled due to erratum");
1404 + return;
1405 + }
1406 +diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
1407 +index a5604c352930..408879b0c0d4 100644
1408 +--- a/arch/x86/events/intel/p6.c
1409 ++++ b/arch/x86/events/intel/p6.c
1410 +@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
1411 +
1412 + static __init void p6_pmu_rdpmc_quirk(void)
1413 + {
1414 +- if (boot_cpu_data.x86_mask < 9) {
1415 ++ if (boot_cpu_data.x86_stepping < 9) {
1416 + /*
1417 + * PPro erratum 26; fixed in stepping 9 and above.
1418 + */
1419 +diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
1420 +index 8d0ec9df1cbe..f077401869ee 100644
1421 +--- a/arch/x86/include/asm/acpi.h
1422 ++++ b/arch/x86/include/asm/acpi.h
1423 +@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
1424 + if (boot_cpu_data.x86 == 0x0F &&
1425 + boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
1426 + boot_cpu_data.x86_model <= 0x05 &&
1427 +- boot_cpu_data.x86_mask < 0x0A)
1428 ++ boot_cpu_data.x86_stepping < 0x0A)
1429 + return 1;
1430 + else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
1431 + return 1;
1432 +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
1433 +index 30d406146016..e1259f043ae9 100644
1434 +--- a/arch/x86/include/asm/barrier.h
1435 ++++ b/arch/x86/include/asm/barrier.h
1436 +@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
1437 +
1438 + asm ("cmp %1,%2; sbb %0,%0;"
1439 + :"=r" (mask)
1440 +- :"r"(size),"r" (index)
1441 ++ :"g"(size),"r" (index)
1442 + :"cc");
1443 + return mask;
1444 + }
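The constraint tweak above ("r" to "g" for size) only changes how the operand may be materialized; the cmp/sbb pair itself computes an all-ones mask when index < size and zero otherwise, with no conditional branch the CPU could speculate past. The value, in plain C (the C form shows the result, not the branchless guarantee the asm provides):

    static inline unsigned long demo_index_mask(unsigned long index,
                                                unsigned long size)
    {
        /* cmp sets the carry flag iff index < size; sbb smears it to ~0UL */
        return index < size ? ~0UL : 0UL;
    }

    /* typical use: index &= demo_index_mask(index, size); */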
1445 +diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
1446 +index 34d99af43994..6804d6642767 100644
1447 +--- a/arch/x86/include/asm/bug.h
1448 ++++ b/arch/x86/include/asm/bug.h
1449 +@@ -5,23 +5,20 @@
1450 + #include <linux/stringify.h>
1451 +
1452 + /*
1453 +- * Since some emulators terminate on UD2, we cannot use it for WARN.
1454 +- * Since various instruction decoders disagree on the length of UD1,
1455 +- * we cannot use it either. So use UD0 for WARN.
1456 ++ * Despite that some emulators terminate on UD2, we use it for WARN().
1457 + *
1458 +- * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
1459 +- * our kernel decoder thinks it takes a ModRM byte, which seems consistent
1460 +- * with various things like the Intel SDM instruction encoding rules)
1461 ++ * Various instruction decoders/specs disagree on the encoding of
1462 ++ * UD0/UD1, so we avoid those.
1463 + */
1464 +
1465 +-#define ASM_UD0 ".byte 0x0f, 0xff"
1466 ++#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
1467 + #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */
1468 + #define ASM_UD2 ".byte 0x0f, 0x0b"
1469 +
1470 + #define INSN_UD0 0xff0f
1471 + #define INSN_UD2 0x0b0f
1472 +
1473 +-#define LEN_UD0 2
1474 ++#define LEN_UD2 2
1475 +
1476 + #ifdef CONFIG_GENERIC_BUG
1477 +
1478 +@@ -77,7 +74,11 @@ do { \
1479 + unreachable(); \
1480 + } while (0)
1481 +
1482 +-#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
1483 ++#define __WARN_FLAGS(flags) \
1484 ++do { \
1485 ++ _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
1486 ++ annotate_reachable(); \
1487 ++} while (0)
1488 +
1489 + #include <asm-generic/bug.h>
1490 +
1491 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
1492 +index 4d57894635f2..76b058533e47 100644
1493 +--- a/arch/x86/include/asm/nospec-branch.h
1494 ++++ b/arch/x86/include/asm/nospec-branch.h
1495 +@@ -6,6 +6,7 @@
1496 + #include <asm/alternative.h>
1497 + #include <asm/alternative-asm.h>
1498 + #include <asm/cpufeatures.h>
1499 ++#include <asm/msr-index.h>
1500 +
1501 + #ifdef __ASSEMBLY__
1502 +
1503 +@@ -164,10 +165,15 @@ static inline void vmexit_fill_RSB(void)
1504 +
1505 + static inline void indirect_branch_prediction_barrier(void)
1506 + {
1507 +- alternative_input("",
1508 +- "call __ibp_barrier",
1509 +- X86_FEATURE_USE_IBPB,
1510 +- ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
1511 ++ asm volatile(ALTERNATIVE("",
1512 ++ "movl %[msr], %%ecx\n\t"
1513 ++ "movl %[val], %%eax\n\t"
1514 ++ "movl $0, %%edx\n\t"
1515 ++ "wrmsr",
1516 ++ X86_FEATURE_USE_IBPB)
1517 ++ : : [msr] "i" (MSR_IA32_PRED_CMD),
1518 ++ [val] "i" (PRED_CMD_IBPB)
1519 ++ : "eax", "ecx", "edx", "memory");
1520 + }
1521 +
1522 + #endif /* __ASSEMBLY__ */
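The rewrite above inlines the MSR write instead of calling the __ibp_barrier() helper, whose declaration this patch removes from processor.h. With X86_FEATURE_USE_IBPB set, the alternative expands to the equivalent of this sketch:

    #include <asm/msr.h>
    #include <asm/msr-index.h>

    static inline void demo_ibpb(void)
    {
        /* low dword = PRED_CMD_IBPB, high dword = 0 */
        wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
    }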
1523 +diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
1524 +index 4baa6bceb232..d652a3808065 100644
1525 +--- a/arch/x86/include/asm/page_64.h
1526 ++++ b/arch/x86/include/asm/page_64.h
1527 +@@ -52,10 +52,6 @@ static inline void clear_page(void *page)
1528 +
1529 + void copy_page(void *to, void *from);
1530 +
1531 +-#ifdef CONFIG_X86_MCE
1532 +-#define arch_unmap_kpfn arch_unmap_kpfn
1533 +-#endif
1534 +-
1535 + #endif /* !__ASSEMBLY__ */
1536 +
1537 + #ifdef CONFIG_X86_VSYSCALL_EMULATION
1538 +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
1539 +index 892df375b615..554841fab717 100644
1540 +--- a/arch/x86/include/asm/paravirt.h
1541 ++++ b/arch/x86/include/asm/paravirt.h
1542 +@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
1543 + {
1544 + PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
1545 + }
1546 +-static inline void __flush_tlb_single(unsigned long addr)
1547 ++static inline void __flush_tlb_one_user(unsigned long addr)
1548 + {
1549 +- PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
1550 ++ PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
1551 + }
1552 +
1553 + static inline void flush_tlb_others(const struct cpumask *cpumask,
1554 +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
1555 +index 6ec54d01972d..f624f1f10316 100644
1556 +--- a/arch/x86/include/asm/paravirt_types.h
1557 ++++ b/arch/x86/include/asm/paravirt_types.h
1558 +@@ -217,7 +217,7 @@ struct pv_mmu_ops {
1559 + /* TLB operations */
1560 + void (*flush_tlb_user)(void);
1561 + void (*flush_tlb_kernel)(void);
1562 +- void (*flush_tlb_single)(unsigned long addr);
1563 ++ void (*flush_tlb_one_user)(unsigned long addr);
1564 + void (*flush_tlb_others)(const struct cpumask *cpus,
1565 + const struct flush_tlb_info *info);
1566 +
1567 +diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
1568 +index e67c0620aec2..e55466760ff8 100644
1569 +--- a/arch/x86/include/asm/pgtable_32.h
1570 ++++ b/arch/x86/include/asm/pgtable_32.h
1571 +@@ -61,7 +61,7 @@ void paging_init(void);
1572 + #define kpte_clear_flush(ptep, vaddr) \
1573 + do { \
1574 + pte_clear(&init_mm, (vaddr), (ptep)); \
1575 +- __flush_tlb_one((vaddr)); \
1576 ++ __flush_tlb_one_kernel((vaddr)); \
1577 + } while (0)
1578 +
1579 + #endif /* !__ASSEMBLY__ */
1580 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1581 +index 513f9604c192..44c2c4ec6d60 100644
1582 +--- a/arch/x86/include/asm/processor.h
1583 ++++ b/arch/x86/include/asm/processor.h
1584 +@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
1585 + __u8 x86; /* CPU family */
1586 + __u8 x86_vendor; /* CPU vendor */
1587 + __u8 x86_model;
1588 +- __u8 x86_mask;
1589 ++ __u8 x86_stepping;
1590 + #ifdef CONFIG_X86_64
1591 + /* Number of 4K pages in DTLB/ITLB combined(in pages): */
1592 + int x86_tlbsize;
1593 +@@ -109,7 +109,7 @@ struct cpuinfo_x86 {
1594 + char x86_vendor_id[16];
1595 + char x86_model_id[64];
1596 + /* in KB - valid for CPUS which support this call: */
1597 +- int x86_cache_size;
1598 ++ unsigned int x86_cache_size;
1599 + int x86_cache_alignment; /* In bytes */
1600 + /* Cache QoS architectural values: */
1601 + int x86_cache_max_rmid; /* max index */
1602 +@@ -969,7 +969,4 @@ bool xen_set_default_idle(void);
1603 +
1604 + void stop_this_cpu(void *dummy);
1605 + void df_debug(struct pt_regs *regs, long error_code);
1606 +-
1607 +-void __ibp_barrier(void);
1608 +-
1609 + #endif /* _ASM_X86_PROCESSOR_H */
1610 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
1611 +index 2b8f18ca5874..84137c22fdfa 100644
1612 +--- a/arch/x86/include/asm/tlbflush.h
1613 ++++ b/arch/x86/include/asm/tlbflush.h
1614 +@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
1615 + #else
1616 + #define __flush_tlb() __native_flush_tlb()
1617 + #define __flush_tlb_global() __native_flush_tlb_global()
1618 +-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
1619 ++#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
1620 + #endif
1621 +
1622 + static inline bool tlb_defer_switch_to_init_mm(void)
1623 +@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
1624 + /*
1625 + * flush one page in the user mapping
1626 + */
1627 +-static inline void __native_flush_tlb_single(unsigned long addr)
1628 ++static inline void __native_flush_tlb_one_user(unsigned long addr)
1629 + {
1630 + u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1631 +
1632 +@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
1633 + /*
1634 + * flush one page in the kernel mapping
1635 + */
1636 +-static inline void __flush_tlb_one(unsigned long addr)
1637 ++static inline void __flush_tlb_one_kernel(unsigned long addr)
1638 + {
1639 + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
1640 +- __flush_tlb_single(addr);
1641 ++
1642 ++ /*
1643 ++ * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
1644 ++ * paravirt equivalent. Even with PCID, this is sufficient: we only
1645 ++ * use PCID if we also use global PTEs for the kernel mapping, and
1646 ++ * INVLPG flushes global translations across all address spaces.
1647 ++ *
1648 ++ * If PTI is on, then the kernel is mapped with non-global PTEs, and
1649 ++ * __flush_tlb_one_user() will flush the given address for the current
1650 ++ * kernel address space and for its usermode counterpart, but it does
1651 ++ * not flush it for other address spaces.
1652 ++ */
1653 ++ __flush_tlb_one_user(addr);
1654 +
1655 + if (!static_cpu_has(X86_FEATURE_PTI))
1656 + return;
1657 +
1658 + /*
1659 +- * __flush_tlb_single() will have cleared the TLB entry for this ASID,
1660 +- * but since kernel space is replicated across all, we must also
1661 +- * invalidate all others.
1662 ++ * See above. We need to propagate the flush to all other address
1663 ++ * spaces. In principle, we only need to propagate it to kernelmode
1664 ++ * address spaces, but the extra bookkeeping we would need is not
1665 ++ * worth it.
1666 + */
1667 + invalidate_other_asid();
1668 + }
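
A minimal userspace C sketch of the control flow the comments above describe (illustrative only; flush_one_user(), invalidate_other_asids() and pti_enabled() are hypothetical stand-ins for __flush_tlb_one_user(), invalidate_other_asid() and the X86_FEATURE_PTI check):

#include <stdbool.h>
#include <stdio.h>

static void flush_one_user(unsigned long addr) { printf("INVLPG %#lx\n", addr); }
static void invalidate_other_asids(void)       { puts("invalidate other ASIDs"); }
static bool pti_enabled(void)                  { return true; }

/* Without PTI, one INVLPG on the (global) kernel PTE suffices; with PTI
 * the kernel is mapped non-global, so the flush only hit the current
 * address-space pair and the remaining ASIDs must be invalidated too. */
static void flush_tlb_one_kernel_sketch(unsigned long addr)
{
        flush_one_user(addr);
        if (pti_enabled())
                invalidate_other_asids();
}

int main(void)
{
        flush_tlb_one_kernel_sketch(0xffffffff81000000UL);
        return 0;
}
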
1669 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1670 +index 6db28f17ff28..c88e0b127810 100644
1671 +--- a/arch/x86/kernel/amd_nb.c
1672 ++++ b/arch/x86/kernel/amd_nb.c
1673 +@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
1674 + if (boot_cpu_data.x86 == 0x10 &&
1675 + boot_cpu_data.x86_model >= 0x8 &&
1676 + (boot_cpu_data.x86_model > 0x9 ||
1677 +- boot_cpu_data.x86_mask >= 0x1))
1678 ++ boot_cpu_data.x86_stepping >= 0x1))
1679 + amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
1680 +
1681 + if (boot_cpu_data.x86 == 0x15)
1682 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1683 +index 25ddf02598d2..b203af0855b5 100644
1684 +--- a/arch/x86/kernel/apic/apic.c
1685 ++++ b/arch/x86/kernel/apic/apic.c
1686 +@@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
1687 +
1688 + static u32 hsx_deadline_rev(void)
1689 + {
1690 +- switch (boot_cpu_data.x86_mask) {
1691 ++ switch (boot_cpu_data.x86_stepping) {
1692 + case 0x02: return 0x3a; /* EP */
1693 + case 0x04: return 0x0f; /* EX */
1694 + }
1695 +@@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void)
1696 +
1697 + static u32 bdx_deadline_rev(void)
1698 + {
1699 +- switch (boot_cpu_data.x86_mask) {
1700 ++ switch (boot_cpu_data.x86_stepping) {
1701 + case 0x02: return 0x00000011;
1702 + case 0x03: return 0x0700000e;
1703 + case 0x04: return 0x0f00000c;
1704 +@@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void)
1705 +
1706 + static u32 skx_deadline_rev(void)
1707 + {
1708 +- switch (boot_cpu_data.x86_mask) {
1709 ++ switch (boot_cpu_data.x86_stepping) {
1710 + case 0x03: return 0x01000136;
1711 + case 0x04: return 0x02000014;
1712 + }
1713 +diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
1714 +index e4b0d92b3ae0..2a7fd56e67b3 100644
1715 +--- a/arch/x86/kernel/apm_32.c
1716 ++++ b/arch/x86/kernel/apm_32.c
1717 +@@ -2389,6 +2389,7 @@ static int __init apm_init(void)
1718 + if (HZ != 100)
1719 + idle_period = (idle_period * HZ) / 100;
1720 + if (idle_threshold < 100) {
1721 ++ cpuidle_poll_state_init(&apm_idle_driver);
1722 + if (!cpuidle_register_driver(&apm_idle_driver))
1723 + if (cpuidle_register_device(&apm_cpuidle_device))
1724 + cpuidle_unregister_driver(&apm_idle_driver);
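
The new cpuidle_poll_state_init() call above fills in the driver's polling state before the driver and device register; registering first would publish an empty state table. A hedged sketch of that ordering with hypothetical stand-ins (idle_driver, poll_state_init() and register_driver() are not kernel APIs):

#include <stdio.h>

struct idle_driver { int nstates; };

/* Stand-in: populate state[0] with the polling loop, as the real
 * cpuidle_poll_state_init() does for a cpuidle driver. */
static void poll_state_init(struct idle_driver *d) { d->nstates = 1; }

static int register_driver(const struct idle_driver *d)
{
        return d->nstates ? 0 : -1;   /* an empty state table is useless */
}

int main(void)
{
        struct idle_driver drv = { 0 };

        poll_state_init(&drv);        /* must come first, as in the hunk */
        printf("register: %d\n", register_driver(&drv));
        return 0;
}
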
1725 +diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
1726 +index fa1261eefa16..f91ba53e06c8 100644
1727 +--- a/arch/x86/kernel/asm-offsets_32.c
1728 ++++ b/arch/x86/kernel/asm-offsets_32.c
1729 +@@ -18,7 +18,7 @@ void foo(void)
1730 + OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
1731 + OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
1732 + OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
1733 +- OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
1734 ++ OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
1735 + OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
1736 + OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
1737 + OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
1738 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
1739 +index ea831c858195..e7d5a7883632 100644
1740 +--- a/arch/x86/kernel/cpu/amd.c
1741 ++++ b/arch/x86/kernel/cpu/amd.c
1742 +@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
1743 + return;
1744 + }
1745 +
1746 +- if (c->x86_model == 6 && c->x86_mask == 1) {
1747 ++ if (c->x86_model == 6 && c->x86_stepping == 1) {
1748 + const int K6_BUG_LOOP = 1000000;
1749 + int n;
1750 + void (*f_vide)(void);
1751 +@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
1752 +
1753 + /* K6 with old style WHCR */
1754 + if (c->x86_model < 8 ||
1755 +- (c->x86_model == 8 && c->x86_mask < 8)) {
1756 ++ (c->x86_model == 8 && c->x86_stepping < 8)) {
1757 + /* We can only write allocate on the low 508Mb */
1758 + if (mbytes > 508)
1759 + mbytes = 508;
1760 +@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
1761 + return;
1762 + }
1763 +
1764 +- if ((c->x86_model == 8 && c->x86_mask > 7) ||
1765 ++ if ((c->x86_model == 8 && c->x86_stepping > 7) ||
1766 + c->x86_model == 9 || c->x86_model == 13) {
1767 + /* The more serious chips .. */
1768 +
1769 +@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
1770 + * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
1771 + * As per AMD technical note 27212 0.2
1772 + */
1773 +- if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
1774 ++ if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
1775 + rdmsr(MSR_K7_CLK_CTL, l, h);
1776 + if ((l & 0xfff00000) != 0x20000000) {
1777 + pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
1778 +@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
1779 + * but they are not certified as MP capable.
1780 + */
1781 + /* Athlon 660/661 is valid. */
1782 +- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
1783 +- (c->x86_mask == 1)))
1784 ++ if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
1785 ++ (c->x86_stepping == 1)))
1786 + return;
1787 +
1788 + /* Duron 670 is valid */
1789 +- if ((c->x86_model == 7) && (c->x86_mask == 0))
1790 ++ if ((c->x86_model == 7) && (c->x86_stepping == 0))
1791 + return;
1792 +
1793 + /*
1794 +@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
1795 + * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
1796 + * more.
1797 + */
1798 +- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
1799 +- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
1800 ++ if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
1801 ++ ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
1802 + (c->x86_model > 7))
1803 + if (cpu_has(c, X86_FEATURE_MP))
1804 + return;
1805 +@@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
1806 + /* Set MTRR capability flag if appropriate */
1807 + if (c->x86 == 5)
1808 + if (c->x86_model == 13 || c->x86_model == 9 ||
1809 +- (c->x86_model == 8 && c->x86_mask >= 8))
1810 ++ (c->x86_model == 8 && c->x86_stepping >= 8))
1811 + set_cpu_cap(c, X86_FEATURE_K6_MTRR);
1812 + #endif
1813 + #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
1814 +@@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
1815 + * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
1816 + * all up to and including B1.
1817 + */
1818 +- if (c->x86_model <= 1 && c->x86_mask <= 1)
1819 ++ if (c->x86_model <= 1 && c->x86_stepping <= 1)
1820 + set_cpu_cap(c, X86_FEATURE_CPB);
1821 + }
1822 +
1823 +@@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1824 + /* AMD errata T13 (order #21922) */
1825 + if ((c->x86 == 6)) {
1826 + /* Duron Rev A0 */
1827 +- if (c->x86_model == 3 && c->x86_mask == 0)
1828 ++ if (c->x86_model == 3 && c->x86_stepping == 0)
1829 + size = 64;
1830 + /* Tbird rev A1/A2 */
1831 + if (c->x86_model == 4 &&
1832 +- (c->x86_mask == 0 || c->x86_mask == 1))
1833 ++ (c->x86_stepping == 0 || c->x86_stepping == 1))
1834 + size = 256;
1835 + }
1836 + return size;
1837 +@@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1838 + }
1839 +
1840 + /* OSVW unavailable or ID unknown, match family-model-stepping range */
1841 +- ms = (cpu->x86_model << 4) | cpu->x86_mask;
1842 ++ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1843 + while ((range = *erratum++))
1844 + if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1845 + (ms >= AMD_MODEL_RANGE_START(range)) &&
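
The range match above packs model and stepping into one value, ms = (model << 4) | stepping, so a family/model/stepping window reduces to two integer comparisons. A self-contained illustration (struct erratum_range and the sample values are hypothetical; the kernel reads the same fields through its AMD_MODEL_RANGE_* accessors):

#include <stdio.h>

struct erratum_range { unsigned f, m_start, s_start, m_end, s_end; };

static int in_range(unsigned family, unsigned model, unsigned stepping,
                    const struct erratum_range *r)
{
        unsigned ms = (model << 4) | stepping;

        return family == r->f &&
               ms >= ((r->m_start << 4) | r->s_start) &&
               ms <= ((r->m_end   << 4) | r->s_end);
}

int main(void)
{
        /* e.g. family 0x10, model/stepping 2/1 through 0xff/0xf */
        struct erratum_range r = { 0x10, 0x2, 0x1, 0xff, 0xf };

        printf("%d\n", in_range(0x10, 0x4, 0x2, &r));   /* 1: inside range */
        return 0;
}
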
1846 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
1847 +index 71949bf2de5a..d71c8b54b696 100644
1848 +--- a/arch/x86/kernel/cpu/bugs.c
1849 ++++ b/arch/x86/kernel/cpu/bugs.c
1850 +@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1851 + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
1852 + return SPECTRE_V2_CMD_NONE;
1853 + else {
1854 +- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
1855 +- sizeof(arg));
1856 ++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1857 + if (ret < 0)
1858 + return SPECTRE_V2_CMD_AUTO;
1859 +
1860 +@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1861 + }
1862 +
1863 + if (i >= ARRAY_SIZE(mitigation_options)) {
1864 +- pr_err("unknown option (%s). Switching to AUTO select\n",
1865 +- mitigation_options[i].option);
1866 ++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1867 + return SPECTRE_V2_CMD_AUTO;
1868 + }
1869 + }
1870 +@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1871 + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
1872 + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
1873 + !IS_ENABLED(CONFIG_RETPOLINE)) {
1874 +- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1875 +- mitigation_options[i].option);
1876 ++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
1877 + return SPECTRE_V2_CMD_AUTO;
1878 + }
1879 +
1880 +@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
1881 + goto retpoline_auto;
1882 + break;
1883 + }
1884 +- pr_err("kernel not compiled with retpoline; no mitigation available!");
1885 ++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
1886 + return;
1887 +
1888 + retpoline_auto:
1889 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
1890 + retpoline_amd:
1891 + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1892 +- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
1893 ++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
1894 + goto retpoline_generic;
1895 + }
1896 + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
1897 +@@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void)
1898 + pr_info("%s\n", spectre_v2_strings[mode]);
1899 +
1900 + /*
1901 +- * If neither SMEP or KPTI are available, there is a risk of
1902 ++ * If neither SMEP nor PTI are available, there is a risk of
1903 + * hitting userspace addresses in the RSB after a context switch
1904 + * from a shallow call stack to a deeper one. To prevent this fill
1905 + * the entire RSB, even when using IBRS.
1906 +@@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void)
1907 + if ((!boot_cpu_has(X86_FEATURE_PTI) &&
1908 + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
1909 + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1910 +- pr_info("Filling RSB on context switch\n");
1911 ++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
1912 + }
1913 +
1914 + /* Initialize Indirect Branch Prediction Barrier if supported */
1915 + if (boot_cpu_has(X86_FEATURE_IBPB)) {
1916 + setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1917 +- pr_info("Enabling Indirect Branch Prediction Barrier\n");
1918 ++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
1919 + }
1920 + }
1921 +
1922 + #undef pr_fmt
1923 +
1924 + #ifdef CONFIG_SYSFS
1925 +-ssize_t cpu_show_meltdown(struct device *dev,
1926 +- struct device_attribute *attr, char *buf)
1927 ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1928 + {
1929 + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1930 + return sprintf(buf, "Not affected\n");
1931 +@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
1932 + return sprintf(buf, "Vulnerable\n");
1933 + }
1934 +
1935 +-ssize_t cpu_show_spectre_v1(struct device *dev,
1936 +- struct device_attribute *attr, char *buf)
1937 ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1938 + {
1939 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1940 + return sprintf(buf, "Not affected\n");
1941 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1942 + }
1943 +
1944 +-ssize_t cpu_show_spectre_v2(struct device *dev,
1945 +- struct device_attribute *attr, char *buf)
1946 ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1947 + {
1948 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1949 + return sprintf(buf, "Not affected\n");
1950 +@@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
1951 + spectre_v2_module_string());
1952 + }
1953 + #endif
1954 +-
1955 +-void __ibp_barrier(void)
1956 +-{
1957 +- __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
1958 +-}
1959 +-EXPORT_SYMBOL_GPL(__ibp_barrier);
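
Among the messages reworded above sits the RSB-fill decision: without SMEP or PTI, user-planted RSB entries could be consumed in the kernel after a switch from a shallow to a deeper call stack, so the RSB is refilled on context switch, and Skylake-era parts get the same treatment unconditionally. A compact sketch of just that predicate (the three booleans are stand-ins for the cpufeature and model checks):

#include <stdbool.h>
#include <stdio.h>

static bool has_pti = false, has_smep = false, skylake_era = false;

/* Mirrors the condition in spectre_v2_select_mitigation() above. */
static bool need_rsb_fill_on_ctxsw(void)
{
        return (!has_pti && !has_smep) || skylake_era;
}

int main(void)
{
        printf("fill RSB on context switch: %d\n", need_rsb_fill_on_ctxsw());
        return 0;
}
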
1960 +diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
1961 +index 68bc6d9b3132..595be776727d 100644
1962 +--- a/arch/x86/kernel/cpu/centaur.c
1963 ++++ b/arch/x86/kernel/cpu/centaur.c
1964 +@@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
1965 + clear_cpu_cap(c, X86_FEATURE_TSC);
1966 + break;
1967 + case 8:
1968 +- switch (c->x86_mask) {
1969 ++ switch (c->x86_stepping) {
1970 + default:
1971 + name = "2";
1972 + break;
1973 +@@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1974 + * - Note, it seems this may only be in engineering samples.
1975 + */
1976 + if ((c->x86 == 6) && (c->x86_model == 9) &&
1977 +- (c->x86_mask == 1) && (size == 65))
1978 ++ (c->x86_stepping == 1) && (size == 65))
1979 + size -= 1;
1980 + return size;
1981 + }
1982 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1983 +index d63f4b5706e4..824aee0117bb 100644
1984 +--- a/arch/x86/kernel/cpu/common.c
1985 ++++ b/arch/x86/kernel/cpu/common.c
1986 +@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
1987 + cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
1988 + c->x86 = x86_family(tfms);
1989 + c->x86_model = x86_model(tfms);
1990 +- c->x86_mask = x86_stepping(tfms);
1991 ++ c->x86_stepping = x86_stepping(tfms);
1992 +
1993 + if (cap0 & (1<<19)) {
1994 + c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1995 +@@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
1996 + int i;
1997 +
1998 + c->loops_per_jiffy = loops_per_jiffy;
1999 +- c->x86_cache_size = -1;
2000 ++ c->x86_cache_size = 0;
2001 + c->x86_vendor = X86_VENDOR_UNKNOWN;
2002 +- c->x86_model = c->x86_mask = 0; /* So far unknown... */
2003 ++ c->x86_model = c->x86_stepping = 0; /* So far unknown... */
2004 + c->x86_vendor_id[0] = '\0'; /* Unset */
2005 + c->x86_model_id[0] = '\0'; /* Unset */
2006 + c->x86_max_cores = 1;
2007 +@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
2008 +
2009 + pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2010 +
2011 +- if (c->x86_mask || c->cpuid_level >= 0)
2012 +- pr_cont(", stepping: 0x%x)\n", c->x86_mask);
2013 ++ if (c->x86_stepping || c->cpuid_level >= 0)
2014 ++ pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2015 + else
2016 + pr_cont(")\n");
2017 + }
2018 +diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
2019 +index 6b4bb335641f..8949b7ae6d92 100644
2020 +--- a/arch/x86/kernel/cpu/cyrix.c
2021 ++++ b/arch/x86/kernel/cpu/cyrix.c
2022 +@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
2023 +
2024 + /* common case step number/rev -- exceptions handled below */
2025 + c->x86_model = (dir1 >> 4) + 1;
2026 +- c->x86_mask = dir1 & 0xf;
2027 ++ c->x86_stepping = dir1 & 0xf;
2028 +
2029 + /* Now cook; the original recipe is by Channing Corn, from Cyrix.
2030 + * We do the same thing for each generation: we work out
2031 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
2032 +index 319bf989fad1..d19e903214b4 100644
2033 +--- a/arch/x86/kernel/cpu/intel.c
2034 ++++ b/arch/x86/kernel/cpu/intel.c
2035 +@@ -116,14 +116,13 @@ struct sku_microcode {
2036 + u32 microcode;
2037 + };
2038 + static const struct sku_microcode spectre_bad_microcodes[] = {
2039 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
2040 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
2041 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
2042 +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
2043 +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
2044 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
2045 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
2046 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
2047 ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
2048 ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
2049 + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
2050 + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
2051 +- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
2052 + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
2053 + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
2054 + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
2055 +@@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
2056 + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
2057 + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
2058 + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
2059 +- /* Updated in the 20180108 release; blacklist until we know otherwise */
2060 +- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
2061 + /* Observed in the wild */
2062 + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
2063 + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
2064 +@@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
2065 +
2066 + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
2067 + if (c->x86_model == spectre_bad_microcodes[i].model &&
2068 +- c->x86_mask == spectre_bad_microcodes[i].stepping)
2069 ++ c->x86_stepping == spectre_bad_microcodes[i].stepping)
2070 + return (c->microcode <= spectre_bad_microcodes[i].microcode);
2071 + }
2072 + return false;
2073 +@@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
2074 + * need the microcode to have already been loaded... so if it is
2075 + * not, recommend a BIOS update and disable large pages.
2076 + */
2077 +- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
2078 ++ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
2079 + c->microcode < 0x20e) {
2080 + pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
2081 + clear_cpu_cap(c, X86_FEATURE_PSE);
2082 +@@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
2083 +
2084 + /* CPUID workaround for 0F33/0F34 CPU */
2085 + if (c->x86 == 0xF && c->x86_model == 0x3
2086 +- && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
2087 ++ && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
2088 + c->x86_phys_bits = 36;
2089 +
2090 + /*
2091 +@@ -310,7 +307,7 @@ int ppro_with_ram_bug(void)
2092 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
2093 + boot_cpu_data.x86 == 6 &&
2094 + boot_cpu_data.x86_model == 1 &&
2095 +- boot_cpu_data.x86_mask < 8) {
2096 ++ boot_cpu_data.x86_stepping < 8) {
2097 + pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
2098 + return 1;
2099 + }
2100 +@@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
2101 + * Mask B, Pentium, but not Pentium MMX
2102 + */
2103 + if (c->x86 == 5 &&
2104 +- c->x86_mask >= 1 && c->x86_mask <= 4 &&
2105 ++ c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
2106 + c->x86_model <= 3) {
2107 + /*
2108 + * Remember we have B step Pentia with bugs
2109 +@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
2110 + * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
2111 + * model 3 mask 3
2112 + */
2113 +- if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
2114 ++ if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
2115 + clear_cpu_cap(c, X86_FEATURE_SEP);
2116 +
2117 + /*
2118 +@@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
2119 + * P4 Xeon erratum 037 workaround.
2120 + * Hardware prefetcher may cause stale data to be loaded into the cache.
2121 + */
2122 +- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
2123 ++ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
2124 + if (msr_set_bit(MSR_IA32_MISC_ENABLE,
2125 + MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
2126 + pr_info("CPU: C0 stepping P4 Xeon detected.\n");
2127 +@@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
2128 + * Specification Update").
2129 + */
2130 + if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
2131 +- (c->x86_mask < 0x6 || c->x86_mask == 0xb))
2132 ++ (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
2133 + set_cpu_bug(c, X86_BUG_11AP);
2134 +
2135 +
2136 +@@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
2137 + case 6:
2138 + if (l2 == 128)
2139 + p = "Celeron (Mendocino)";
2140 +- else if (c->x86_mask == 0 || c->x86_mask == 5)
2141 ++ else if (c->x86_stepping == 0 || c->x86_stepping == 5)
2142 + p = "Celeron-A";
2143 + break;
2144 +
2145 +diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
2146 +index 99442370de40..18dd8f22e353 100644
2147 +--- a/arch/x86/kernel/cpu/intel_rdt.c
2148 ++++ b/arch/x86/kernel/cpu/intel_rdt.c
2149 +@@ -771,7 +771,7 @@ static __init void rdt_quirks(void)
2150 + cache_alloc_hsw_probe();
2151 + break;
2152 + case INTEL_FAM6_SKYLAKE_X:
2153 +- if (boot_cpu_data.x86_mask <= 4)
2154 ++ if (boot_cpu_data.x86_stepping <= 4)
2155 + set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
2156 + }
2157 + }
2158 +diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
2159 +index aa0d5df9dc60..e956eb267061 100644
2160 +--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
2161 ++++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
2162 +@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
2163 +
2164 + extern struct mca_config mca_cfg;
2165 +
2166 ++#ifndef CONFIG_X86_64
2167 ++/*
2168 ++ * On 32-bit systems it would be difficult to safely unmap a poison page
2169 ++ * from the kernel 1:1 map because there are no non-canonical addresses that
2170 ++ * we can use to refer to the address without risking a speculative access.
2171 ++ * However, this isn't much of an issue because:
2172 ++ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
2173 ++ * are only mapped into the kernel as needed
2174 ++ * 2) Few people would run a 32-bit kernel on a machine that supports
2175 ++ * recoverable errors because they have too much memory to boot 32-bit.
2176 ++ */
2177 ++static inline void mce_unmap_kpfn(unsigned long pfn) {}
2178 ++#define mce_unmap_kpfn mce_unmap_kpfn
2179 ++#endif
2180 ++
2181 + #endif /* __X86_MCE_INTERNAL_H__ */
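
The header above pairs a no-op stub with "#define mce_unmap_kpfn mce_unmap_kpfn", so the later "#ifndef mce_unmap_kpfn" in mce.c compiles the real implementation out on 32-bit builds. The same idiom in miniature (USE_STUB and unmap_pfn are hypothetical; only the pattern matches):

#include <stdio.h>

#define USE_STUB 1   /* hypothetical stand-in for !CONFIG_X86_64 */

#if USE_STUB
/* Stub variant: define the function, then define its name as a macro of
 * itself so the generic fallback below sees the name as "already taken". */
static inline void unmap_pfn(unsigned long pfn) { (void)pfn; /* no-op */ }
#define unmap_pfn unmap_pfn
#endif

#ifndef unmap_pfn
static void unmap_pfn(unsigned long pfn)
{
        printf("really unmapping pfn %#lx\n", pfn);
}
#endif

int main(void)
{
        unmap_pfn(0x1234);   /* resolves to whichever variant was compiled */
        return 0;
}
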
2182 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
2183 +index 868e412b4f0c..2fe482f6ecd8 100644
2184 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
2185 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
2186 +@@ -106,6 +106,10 @@ static struct irq_work mce_irq_work;
2187 +
2188 + static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
2189 +
2190 ++#ifndef mce_unmap_kpfn
2191 ++static void mce_unmap_kpfn(unsigned long pfn);
2192 ++#endif
2193 ++
2194 + /*
2195 + * CPU/chipset specific EDAC code can register a notifier call here to print
2196 + * MCE errors in a human-readable form.
2197 +@@ -582,7 +586,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
2198 +
2199 + if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
2200 + pfn = mce->addr >> PAGE_SHIFT;
2201 +- memory_failure(pfn, MCE_VECTOR, 0);
2202 ++ if (memory_failure(pfn, MCE_VECTOR, 0))
2203 ++ mce_unmap_kpfn(pfn);
2204 + }
2205 +
2206 + return NOTIFY_OK;
2207 +@@ -1049,12 +1054,13 @@ static int do_memory_failure(struct mce *m)
2208 + ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
2209 + if (ret)
2210 + pr_err("Memory error not recovered");
2211 ++ else
2212 ++ mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
2213 + return ret;
2214 + }
2215 +
2216 +-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
2217 +-
2218 +-void arch_unmap_kpfn(unsigned long pfn)
2219 ++#ifndef mce_unmap_kpfn
2220 ++static void mce_unmap_kpfn(unsigned long pfn)
2221 + {
2222 + unsigned long decoy_addr;
2223 +
2224 +@@ -1065,7 +1071,7 @@ void arch_unmap_kpfn(unsigned long pfn)
2225 + * We would like to just call:
2226 + * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
2227 + * but doing that would radically increase the odds of a
2228 +- * speculative access to the posion page because we'd have
2229 ++ * speculative access to the poison page because we'd have
2230 + * the virtual address of the kernel 1:1 mapping sitting
2231 + * around in registers.
2232 + * Instead we get tricky. We create a non-canonical address
2233 +@@ -1090,7 +1096,6 @@ void arch_unmap_kpfn(unsigned long pfn)
2234 +
2235 + if (set_memory_np(decoy_addr, 1))
2236 + pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
2237 +-
2238 + }
2239 + #endif
2240 +
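
The function above avoids ever holding the poison page's real 1:1-map address in a register: it hands set_memory_np() a non-canonical alias instead. An illustrative userspace computation of such a decoy (PAGE_OFFSET here is an example constant, and flipping bit 63 is an assumption about the scheme; the point is that a high-bit flip makes the pointer non-canonical while the low bits still identify the page for the page-table update):

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_OFFSET 0xffff880000000000UL   /* example 1:1 map base */

int main(void)
{
        unsigned long pfn   = 0x12345;
        unsigned long real  = (pfn << PAGE_SHIFT) + PAGE_OFFSET;
        unsigned long decoy = real ^ (1UL << 63);   /* non-canonical alias */

        printf("real  %#lx\ndecoy %#lx\n", real, decoy);
        return 0;
}
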
2241 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
2242 +index f7c55b0e753a..a15db2b4e0d6 100644
2243 +--- a/arch/x86/kernel/cpu/microcode/intel.c
2244 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
2245 +@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
2246 + */
2247 + if (c->x86 == 6 &&
2248 + c->x86_model == INTEL_FAM6_BROADWELL_X &&
2249 +- c->x86_mask == 0x01 &&
2250 ++ c->x86_stepping == 0x01 &&
2251 + llc_size_per_core > 2621440 &&
2252 + c->microcode < 0x0b000021) {
2253 + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
2254 +@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
2255 + return UCODE_NFOUND;
2256 +
2257 + sprintf(name, "intel-ucode/%02x-%02x-%02x",
2258 +- c->x86, c->x86_model, c->x86_mask);
2259 ++ c->x86, c->x86_model, c->x86_stepping);
2260 +
2261 + if (request_firmware_direct(&firmware, name, device)) {
2262 + pr_debug("data file %s load failed\n", name);
2263 +@@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
2264 +
2265 + static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
2266 + {
2267 +- u64 llc_size = c->x86_cache_size * 1024;
2268 ++ u64 llc_size = c->x86_cache_size * 1024ULL;
2269 +
2270 + do_div(llc_size, c->x86_max_cores);
2271 +
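
The 1024ULL above is what forces the byte-size multiply into 64-bit arithmetic; with a plain 1024 the product is computed in 32 bits and wraps before being widened into the u64. A quick demonstration (the 33 GiB LLC is a made-up value chosen to overflow 32 bits):

#include <stdio.h>

int main(void)
{
        unsigned int cache_kb = 33 * 1024 * 1024;       /* 33 GiB, in KB */
        unsigned long long wrong = cache_kb * 1024;     /* 32-bit multiply wraps */
        unsigned long long right = cache_kb * 1024ULL;  /* widened before multiply */

        printf("wrong=%llu right=%llu\n", wrong, right);
        return 0;
}
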
2272 +diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
2273 +index fdc55215d44d..e12ee86906c6 100644
2274 +--- a/arch/x86/kernel/cpu/mtrr/generic.c
2275 ++++ b/arch/x86/kernel/cpu/mtrr/generic.c
2276 +@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
2277 + */
2278 + if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
2279 + boot_cpu_data.x86_model == 1 &&
2280 +- boot_cpu_data.x86_mask <= 7) {
2281 ++ boot_cpu_data.x86_stepping <= 7) {
2282 + if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
2283 + pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
2284 + return -EINVAL;
2285 +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
2286 +index 40d5a8a75212..7468de429087 100644
2287 +--- a/arch/x86/kernel/cpu/mtrr/main.c
2288 ++++ b/arch/x86/kernel/cpu/mtrr/main.c
2289 +@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
2290 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
2291 + boot_cpu_data.x86 == 0xF &&
2292 + boot_cpu_data.x86_model == 0x3 &&
2293 +- (boot_cpu_data.x86_mask == 0x3 ||
2294 +- boot_cpu_data.x86_mask == 0x4))
2295 ++ (boot_cpu_data.x86_stepping == 0x3 ||
2296 ++ boot_cpu_data.x86_stepping == 0x4))
2297 + phys_addr = 36;
2298 +
2299 + size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
2300 +diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
2301 +index e7ecedafa1c8..2c8522a39ed5 100644
2302 +--- a/arch/x86/kernel/cpu/proc.c
2303 ++++ b/arch/x86/kernel/cpu/proc.c
2304 +@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
2305 + c->x86_model,
2306 + c->x86_model_id[0] ? c->x86_model_id : "unknown");
2307 +
2308 +- if (c->x86_mask || c->cpuid_level >= 0)
2309 +- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
2310 ++ if (c->x86_stepping || c->cpuid_level >= 0)
2311 ++ seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
2312 + else
2313 + seq_puts(m, "stepping\t: unknown\n");
2314 + if (c->microcode)
2315 +@@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
2316 + }
2317 +
2318 + /* Cache size */
2319 +- if (c->x86_cache_size >= 0)
2320 +- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
2321 ++ if (c->x86_cache_size)
2322 ++ seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
2323 +
2324 + show_cpuinfo_core(m, c, cpu);
2325 + show_cpuinfo_misc(m, c);
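
These checks track x86_cache_size changing from int to unsigned int: against an unsigned field the old "-1 means unknown" sentinel makes 'x86_cache_size >= 0' a tautology, so 0 becomes the "unknown" value and a plain truthiness test replaces the comparison. A small illustration of the pitfall:

#include <stdio.h>

int main(void)
{
        unsigned int cache_size = 0;   /* 0 now means "unknown" */

        if (cache_size >= 0)           /* always true; compilers warn here */
                puts("old check can no longer detect 'unknown'");

        if (cache_size)                /* the replacement check */
                printf("cache size: %u KB\n", cache_size);
        else
                puts("cache size unknown");
        return 0;
}
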
2326 +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
2327 +index 1e82f787c160..c87560e1e3ef 100644
2328 +--- a/arch/x86/kernel/early-quirks.c
2329 ++++ b/arch/x86/kernel/early-quirks.c
2330 +@@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
2331 + INTEL_SKL_IDS(&gen9_early_ops),
2332 + INTEL_BXT_IDS(&gen9_early_ops),
2333 + INTEL_KBL_IDS(&gen9_early_ops),
2334 ++ INTEL_CFL_IDS(&gen9_early_ops),
2335 + INTEL_GLK_IDS(&gen9_early_ops),
2336 + INTEL_CNL_IDS(&gen9_early_ops),
2337 + };
2338 +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
2339 +index c29020907886..b59e4fb40fd9 100644
2340 +--- a/arch/x86/kernel/head_32.S
2341 ++++ b/arch/x86/kernel/head_32.S
2342 +@@ -37,7 +37,7 @@
2343 + #define X86 new_cpu_data+CPUINFO_x86
2344 + #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
2345 + #define X86_MODEL new_cpu_data+CPUINFO_x86_model
2346 +-#define X86_MASK new_cpu_data+CPUINFO_x86_mask
2347 ++#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
2348 + #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
2349 + #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
2350 + #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
2351 +@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
2352 + shrb $4,%al
2353 + movb %al,X86_MODEL
2354 + andb $0x0f,%cl # mask mask revision
2355 +- movb %cl,X86_MASK
2356 ++ movb %cl,X86_STEPPING
2357 + movl %edx,X86_CAPABILITY
2358 +
2359 + .Lis486:
2360 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
2361 +index 3a4b12809ab5..bc6bc6689e68 100644
2362 +--- a/arch/x86/kernel/mpparse.c
2363 ++++ b/arch/x86/kernel/mpparse.c
2364 +@@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
2365 + processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
2366 + processor.cpuflag = CPU_ENABLED;
2367 + processor.cpufeature = (boot_cpu_data.x86 << 8) |
2368 +- (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
2369 ++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
2370 + processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
2371 + processor.reserved[0] = 0;
2372 + processor.reserved[1] = 0;
2373 +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
2374 +index 041096bdef86..99dc79e76bdc 100644
2375 +--- a/arch/x86/kernel/paravirt.c
2376 ++++ b/arch/x86/kernel/paravirt.c
2377 +@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
2378 + __native_flush_tlb_global();
2379 + }
2380 +
2381 +-static void native_flush_tlb_single(unsigned long addr)
2382 ++static void native_flush_tlb_one_user(unsigned long addr)
2383 + {
2384 +- __native_flush_tlb_single(addr);
2385 ++ __native_flush_tlb_one_user(addr);
2386 + }
2387 +
2388 + struct static_key paravirt_steal_enabled;
2389 +@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
2390 +
2391 + .flush_tlb_user = native_flush_tlb,
2392 + .flush_tlb_kernel = native_flush_tlb_global,
2393 +- .flush_tlb_single = native_flush_tlb_single,
2394 ++ .flush_tlb_one_user = native_flush_tlb_one_user,
2395 + .flush_tlb_others = native_flush_tlb_others,
2396 +
2397 + .pgd_alloc = __paravirt_pgd_alloc,
2398 +diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
2399 +index 307d3bac5f04..11eda21eb697 100644
2400 +--- a/arch/x86/kernel/relocate_kernel_64.S
2401 ++++ b/arch/x86/kernel/relocate_kernel_64.S
2402 +@@ -68,6 +68,9 @@ relocate_kernel:
2403 + movq %cr4, %rax
2404 + movq %rax, CR4(%r11)
2405 +
2406 ++ /* Save CR4. Required to enable the right paging mode later. */
2407 ++ movq %rax, %r13
2408 ++
2409 + /* zero out flags, and disable interrupts */
2410 + pushq $0
2411 + popfq
2412 +@@ -126,8 +129,13 @@ identity_mapped:
2413 + /*
2414 + * Set cr4 to a known state:
2415 + * - physical address extension enabled
2416 ++ * - 5-level paging, if it was enabled before
2417 + */
2418 + movl $X86_CR4_PAE, %eax
2419 ++ testq $X86_CR4_LA57, %r13
2420 ++ jz 1f
2421 ++ orl $X86_CR4_LA57, %eax
2422 ++1:
2423 + movq %rax, %cr4
2424 +
2425 + jmp 1f
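
The assembly above saves CR4 into %r13 so the trampoline's "known state" can preserve LA57: clearing it while 5-level paging is live would change the paging mode at exactly the wrong moment. The recomputation restated as C (bit positions per the SDM; known_cr4() is an illustrative helper, not kernel code):

#include <stdio.h>

#define X86_CR4_PAE  (1UL << 5)
#define X86_CR4_LA57 (1UL << 12)

/* Start from a known state (PAE) and carry over only LA57. */
static unsigned long known_cr4(unsigned long saved_cr4)
{
        return X86_CR4_PAE | (saved_cr4 & X86_CR4_LA57);
}

int main(void)
{
        printf("%#lx\n", known_cr4(X86_CR4_PAE | X86_CR4_LA57)); /* keeps LA57 */
        printf("%#lx\n", known_cr4(X86_CR4_PAE));                /* plain PAE  */
        return 0;
}
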
2426 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2427 +index ed556d50d7ed..844279c3ff4a 100644
2428 +--- a/arch/x86/kernel/smpboot.c
2429 ++++ b/arch/x86/kernel/smpboot.c
2430 +@@ -1431,7 +1431,6 @@ static void remove_siblinginfo(int cpu)
2431 + cpumask_clear(cpu_llc_shared_mask(cpu));
2432 + cpumask_clear(topology_sibling_cpumask(cpu));
2433 + cpumask_clear(topology_core_cpumask(cpu));
2434 +- c->phys_proc_id = 0;
2435 + c->cpu_core_id = 0;
2436 + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
2437 + recompute_smt_state();
2438 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
2439 +index 446c9ef8cfc3..3d9b2308e7fa 100644
2440 +--- a/arch/x86/kernel/traps.c
2441 ++++ b/arch/x86/kernel/traps.c
2442 +@@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
2443 + break;
2444 +
2445 + case BUG_TRAP_TYPE_WARN:
2446 +- regs->ip += LEN_UD0;
2447 ++ regs->ip += LEN_UD2;
2448 + return 1;
2449 + }
2450 +
2451 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2452 +index 2b8eb4da4d08..cc83bdcb65d1 100644
2453 +--- a/arch/x86/kvm/mmu.c
2454 ++++ b/arch/x86/kvm/mmu.c
2455 +@@ -5058,7 +5058,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
2456 + typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
2457 +
2458 + /* The caller should hold mmu-lock before calling this function. */
2459 +-static bool
2460 ++static __always_inline bool
2461 + slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
2462 + slot_level_handler fn, int start_level, int end_level,
2463 + gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
2464 +@@ -5088,7 +5088,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
2465 + return flush;
2466 + }
2467 +
2468 +-static bool
2469 ++static __always_inline bool
2470 + slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2471 + slot_level_handler fn, int start_level, int end_level,
2472 + bool lock_flush_tlb)
2473 +@@ -5099,7 +5099,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2474 + lock_flush_tlb);
2475 + }
2476 +
2477 +-static bool
2478 ++static __always_inline bool
2479 + slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2480 + slot_level_handler fn, bool lock_flush_tlb)
2481 + {
2482 +@@ -5107,7 +5107,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2483 + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
2484 + }
2485 +
2486 +-static bool
2487 ++static __always_inline bool
2488 + slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2489 + slot_level_handler fn, bool lock_flush_tlb)
2490 + {
2491 +@@ -5115,7 +5115,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2492 + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
2493 + }
2494 +
2495 +-static bool
2496 ++static __always_inline bool
2497 + slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
2498 + slot_level_handler fn, bool lock_flush_tlb)
2499 + {
2500 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
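
Marking these walkers __always_inline lets the compiler see the constant slot_level_handler at each call site and emit a direct call instead of an indirect one, which matters once indirect branches go through a retpoline thunk. A toy model of the effect (walk() and add_one() are hypothetical):

#include <stdio.h>

typedef int (*handler_t)(int);

static int add_one(int x) { return x + 1; }

/* Once inlined at a call site where fn is a compile-time constant, the
 * indirect call through fn can be folded into a direct call. */
static inline __attribute__((always_inline))
int walk(handler_t fn, int start, int end)
{
        int flush = 0;

        for (int i = start; i <= end; i++)
                flush |= fn(i);
        return flush;
}

int main(void)
{
        printf("%d\n", walk(add_one, 0, 3));
        return 0;
}
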
2501 +index 6f623848260f..561d8937fac5 100644
2502 +--- a/arch/x86/kvm/vmx.c
2503 ++++ b/arch/x86/kvm/vmx.c
2504 +@@ -10131,7 +10131,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
2505 + if (cpu_has_vmx_msr_bitmap() &&
2506 + nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
2507 + nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
2508 +- ;
2509 ++ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
2510 ++ CPU_BASED_USE_MSR_BITMAPS);
2511 + else
2512 + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
2513 + CPU_BASED_USE_MSR_BITMAPS);
2514 +@@ -10220,8 +10221,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
2515 + * updated to reflect this when L1 (or its L2s) actually write to
2516 + * the MSR.
2517 + */
2518 +- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
2519 +- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
2520 ++ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
2521 ++ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
2522 +
2523 + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
2524 + !pred_cmd && !spec_ctrl)
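
The added '!' operators fix an inverted predicate: msr_write_intercepted_l01() answers "is the write intercepted?", while the merge logic wants "is the MSR passed through?". A minimal restatement (write_intercepted() is a stand-in; 0x48/0x49 are the architectural MSR_IA32_SPEC_CTRL/MSR_IA32_PRED_CMD indices):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: true when L0 intercepts writes to the MSR. */
static bool write_intercepted(int msr) { (void)msr; return true; }

int main(void)
{
        /* Merging the nested MSR bitmap is only worthwhile for MSRs that
         * L0 passes through, i.e. whose writes are NOT intercepted. */
        bool pred_cmd  = !write_intercepted(0x49);  /* MSR_IA32_PRED_CMD  */
        bool spec_ctrl = !write_intercepted(0x48);  /* MSR_IA32_SPEC_CTRL */

        printf("merge needed: %d\n", pred_cmd || spec_ctrl);
        return 0;
}
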
2525 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
2526 +index d6f848d1211d..2dd1fe13a37b 100644
2527 +--- a/arch/x86/lib/cpu.c
2528 ++++ b/arch/x86/lib/cpu.c
2529 +@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
2530 + {
2531 + unsigned int fam, model;
2532 +
2533 +- fam = x86_family(sig);
2534 ++ fam = x86_family(sig);
2535 +
2536 + model = (sig >> 4) & 0xf;
2537 +
2538 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
2539 +index 4a837289f2ad..60ae1fe3609f 100644
2540 +--- a/arch/x86/mm/init_64.c
2541 ++++ b/arch/x86/mm/init_64.c
2542 +@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
2543 + * It's enough to flush this one mapping.
2544 + * (PGE mappings get flushed as well)
2545 + */
2546 +- __flush_tlb_one(vaddr);
2547 ++ __flush_tlb_one_kernel(vaddr);
2548