From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Thu, 22 Feb 2018 23:24:44
Message-Id: 1519341871.0efeda7197763f237f87dab43a52676839e87f2d.mpagano@gentoo
commit: 0efeda7197763f237f87dab43a52676839e87f2d
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 22 23:24:31 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb 22 23:24:31 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0efeda71

Linux patch 4.15.5

0000_README | 4 +
1004_linux-4.15.5.patch | 6693 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6697 insertions(+)

diff --git a/0000_README b/0000_README
index ffe8729..f22a6fe 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.15.4.patch
From: http://www.kernel.org
Desc: Linux 4.15.4

+Patch: 1004_linux-4.15.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.15.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.15.5.patch b/1004_linux-4.15.5.patch
new file mode 100644
index 0000000..5340f07
--- /dev/null
+++ b/1004_linux-4.15.5.patch
@@ -0,0 +1,6693 @@
+diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
+index a122723907ac..99acc712f83a 100644
+--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
++++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
+@@ -64,6 +64,6 @@ Example:
+ reg = <0xe0000000 0x1000>;
+ interrupts = <0 35 0x4>;
+ dmas = <&dmahost 12 0 1>,
+- <&dmahost 13 0 1 0>;
++ <&dmahost 13 1 0>;
+ dma-names = "rx", "rx";
+ };
+diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
+index 75236c0c2ac2..d081ce0482cc 100644
+--- a/Documentation/filesystems/ext4.txt
++++ b/Documentation/filesystems/ext4.txt
+@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
+ data_err=abort Abort the journal if an error occurs in a file
+ data buffer in ordered mode.
+
+-grpid Give objects the same group ID as their creator.
++grpid New objects have the group ID of their parent.
+ bsdgroups
+
+ nogrpid (*) New objects have the group ID of their creator.
+diff --git a/Makefile b/Makefile
+index 8495e1ca052e..28c537fbe328 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 15
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
+index 7b8d90b7aeea..29b636fce23f 100644
+--- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
++++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
+@@ -150,11 +150,6 @@
+ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+-&charlcd {
+- interrupt-parent = <&intc>;
+- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+-};
+-
+ &serial0 {
+ interrupt-parent = <&intc>;
+ interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
+index 06713ec86f0d..d2174727df9a 100644
+--- a/arch/arm/boot/dts/exynos5410.dtsi
++++ b/arch/arm/boot/dts/exynos5410.dtsi
+@@ -333,7 +333,6 @@
+ &rtc {
+ clocks = <&clock CLK_RTC>;
+ clock-names = "rtc";
+- interrupt-parent = <&pmu_system_controller>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
+index c43adb7b4d7c..58ea0a4e7afa 100644
+--- a/arch/arm/boot/dts/lpc3250-ea3250.dts
++++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
+@@ -156,8 +156,8 @@
+ uda1380: uda1380@18 {
+ compatible = "nxp,uda1380";
+ reg = <0x18>;
+- power-gpio = <&gpio 0x59 0>;
+- reset-gpio = <&gpio 0x51 0>;
++ power-gpio = <&gpio 3 10 0>;
++ reset-gpio = <&gpio 3 2 0>;
+ dac-clk = "wspll";
+ };
+
+diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
+index c72eb9845603..1e1c2f517a82 100644
+--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
++++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
+@@ -81,8 +81,8 @@
+ uda1380: uda1380@18 {
+ compatible = "nxp,uda1380";
+ reg = <0x18>;
+- power-gpio = <&gpio 0x59 0>;
+- reset-gpio = <&gpio 0x51 0>;
++ power-gpio = <&gpio 3 10 0>;
++ reset-gpio = <&gpio 3 2 0>;
+ dac-clk = "wspll";
+ };
+
+diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
+index 965ddfbc9953..05557fce0f1d 100644
+--- a/arch/arm/boot/dts/mt2701.dtsi
++++ b/arch/arm/boot/dts/mt2701.dtsi
+@@ -604,6 +604,7 @@
+ compatible = "mediatek,mt2701-hifsys", "syscon";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ usb0: usb@1a1c0000 {
+@@ -688,6 +689,7 @@
+ compatible = "mediatek,mt2701-ethsys", "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ eth: ethernet@1b100000 {
+diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
+index 0640fb75bf59..3a442a16ea06 100644
+--- a/arch/arm/boot/dts/mt7623.dtsi
++++ b/arch/arm/boot/dts/mt7623.dtsi
+@@ -758,6 +758,7 @@
+ "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ eth: ethernet@1b100000 {
+diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+index 688a86378cee..7bf5aa2237c9 100644
+--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+@@ -204,7 +204,7 @@
+ bus-width = <4>;
+ max-frequency = <50000000>;
+ cap-sd-highspeed;
+- cd-gpios = <&pio 261 0>;
++ cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&mt6323_vmch_reg>;
+ vqmmc-supply = <&mt6323_vio18_reg>;
+ };
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 726c5d0dbd5b..b290a5abb901 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -463,6 +463,7 @@
+ compatible = "samsung,exynos4210-ohci";
+ reg = <0xec300000 0x100>;
+ interrupts = <23>;
++ interrupt-parent = <&vic1>;
+ clocks = <&clocks CLK_USB_HOST>;
+ clock-names = "usbhost";
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
+index 84101e4eebbf..0f5f379323a8 100644
+--- a/arch/arm/boot/dts/spear1310-evb.dts
++++ b/arch/arm/boot/dts/spear1310-evb.dts
+@@ -349,7 +349,7 @@
+ spi0: spi@e0100000 {
+ status = "okay";
+ num-cs = <3>;
+- cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
++ cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
+
+ stmpe610@0 {
+ compatible = "st,stmpe610";
+diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
+index 5f347054527d..d4dbc4098653 100644
+--- a/arch/arm/boot/dts/spear1340.dtsi
++++ b/arch/arm/boot/dts/spear1340.dtsi
+@@ -142,8 +142,8 @@
+ reg = <0xb4100000 0x1000>;
+ interrupts = <0 105 0x4>;
+ status = "disabled";
+- dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
+- <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
++ dmas = <&dwdma0 12 0 1>,
++ <&dwdma0 13 1 0>;
+ dma-names = "tx", "rx";
+ };
+
+diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
+index 17ea0abcdbd7..086b4b333249 100644
+--- a/arch/arm/boot/dts/spear13xx.dtsi
++++ b/arch/arm/boot/dts/spear13xx.dtsi
+@@ -100,7 +100,7 @@
+ reg = <0xb2800000 0x1000>;
+ interrupts = <0 29 0x4>;
+ status = "disabled";
+- dmas = <&dwdma0 0 0 0 0>;
++ dmas = <&dwdma0 0 0 0>;
+ dma-names = "data";
+ };
+
+@@ -290,8 +290,8 @@
+ #size-cells = <0>;
+ interrupts = <0 31 0x4>;
+ status = "disabled";
+- dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
+- <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
++ dmas = <&dwdma0 4 0 0>,
++ <&dwdma0 5 0 0>;
+ dma-names = "tx", "rx";
+ };
+
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
+index 6b32d20acc9f..00166eb9be86 100644
+--- a/arch/arm/boot/dts/spear600.dtsi
++++ b/arch/arm/boot/dts/spear600.dtsi
+@@ -194,6 +194,7 @@
+ rtc: rtc@fc900000 {
+ compatible = "st,spear600-rtc";
+ reg = <0xfc900000 0x1000>;
++ interrupt-parent = <&vic0>;
+ interrupts = <10>;
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+index 68aab50a73ab..733678b75b88 100644
+--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+@@ -750,6 +750,7 @@
+ reg = <0x10120000 0x1000>;
+ interrupt-names = "combined";
+ interrupts = <14>;
++ interrupt-parent = <&vica>;
+ clocks = <&clcdclk>, <&hclkclcd>;
+ clock-names = "clcdclk", "apb_pclk";
+ status = "disabled";
+diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
+index fa149837df14..11fdecd9312e 100644
+--- a/arch/arm/boot/dts/stih407.dtsi
++++ b/arch/arm/boot/dts/stih407.dtsi
+@@ -8,6 +8,7 @@
+ */
+ #include "stih407-clock.dtsi"
+ #include "stih407-family.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+ / {
+ soc {
+ sti-display-subsystem {
+@@ -122,7 +123,7 @@
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
+
+- hdmi,hpd-gpio = <&pio5 3>;
++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
+ reset-names = "hdmi";
+ resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+ ddc = <&hdmiddc>;
+diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
+index cffa50db5d72..68b5ff91d6a7 100644
+--- a/arch/arm/boot/dts/stih410.dtsi
++++ b/arch/arm/boot/dts/stih410.dtsi
+@@ -9,6 +9,7 @@
+ #include "stih410-clock.dtsi"
+ #include "stih407-family.dtsi"
+ #include "stih410-pinctrl.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+ / {
+ aliases {
+ bdisp0 = &bdisp0;
+@@ -213,7 +214,7 @@
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
+
+- hdmi,hpd-gpio = <&pio5 3>;
++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
+ reset-names = "hdmi";
+ resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+ ddc = <&hdmiddc>;
+diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
+index 107f37210fb9..83606087edc7 100644
+--- a/arch/arm/mach-pxa/tosa-bt.c
++++ b/arch/arm/mach-pxa/tosa-bt.c
+@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
+ },
+ };
+ module_platform_driver(tosa_bt_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Dmitry Baryshkov");
++MODULE_DESCRIPTION("Bluetooth built-in chip control");
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 6b2127a6ced1..b84c0ca4f84a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -906,6 +906,7 @@
+ "dsi_phy_regulator";
+
+ #clock-cells = <1>;
++ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+@@ -1435,8 +1436,8 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- qcom,ipc-1 = <&apcs 0 13>;
+- qcom,ipc-6 = <&apcs 0 19>;
++ qcom,ipc-1 = <&apcs 8 13>;
++ qcom,ipc-3 = <&apcs 8 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 07823595b7f0..52f15cd896e1 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+ },
++ {
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
++ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
++ .enable = qcom_enable_link_stack_sanitization,
++ },
++ {
++ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
++ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
++ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 0b5ab4d8b57d..30b5495b82b5 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -400,8 +400,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ u32 midr = read_cpuid_id();
+
+ /* Apply BTAC predictors mitigation to all Falkor chips */
+- if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
++ if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
++ ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
+ __qcom_hyp_sanitize_btac_predictors();
++ }
+ }
+
+ fp_enabled = __fpsimd_enabled();
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 08572f95bd8a..248f2e7b24ab 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -189,7 +189,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
+ dc cvac, cur_\()\type\()p // Ensure any existing dirty
+ dmb sy // lines are written back before
+ ldr \type, [cur_\()\type\()p] // loading the entry
+- tbz \type, #0, next_\()\type // Skip invalid entries
++ tbz \type, #0, skip_\()\type // Skip invalid and
++ tbnz \type, #11, skip_\()\type // non-global entries
+ .endm
+
+ .macro __idmap_kpti_put_pgtable_ent_ng, type
+@@ -249,8 +250,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
+ add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd: __idmap_kpti_get_pgtable_ent pgd
+ tbnz pgd, #1, walk_puds
+- __idmap_kpti_put_pgtable_ent_ng pgd
next_pgd:
++ __idmap_kpti_put_pgtable_ent_ng pgd
++skip_pgd:
+ add cur_pgdp, cur_pgdp, #8
+ cmp cur_pgdp, end_pgdp
+ b.ne do_pgd
+@@ -278,8 +280,9 @@ walk_puds:
+ add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud: __idmap_kpti_get_pgtable_ent pud
+ tbnz pud, #1, walk_pmds
+- __idmap_kpti_put_pgtable_ent_ng pud
next_pud:
++ __idmap_kpti_put_pgtable_ent_ng pud
++skip_pud:
+ add cur_pudp, cur_pudp, 8
+ cmp cur_pudp, end_pudp
+ b.ne do_pud
+@@ -298,8 +301,9 @@ walk_pmds:
+ add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd: __idmap_kpti_get_pgtable_ent pmd
+ tbnz pmd, #1, walk_ptes
+- __idmap_kpti_put_pgtable_ent_ng pmd
next_pmd:
++ __idmap_kpti_put_pgtable_ent_ng pmd
++skip_pmd:
+ add cur_pmdp, cur_pmdp, #8
+ cmp cur_pmdp, end_pmdp
+ b.ne do_pmd
+@@ -317,7 +321,7 @@ walk_ptes:
+ add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte: __idmap_kpti_get_pgtable_ent pte
+ __idmap_kpti_put_pgtable_ent_ng pte
+-next_pte:
++skip_pte:
+ add cur_ptep, cur_ptep, #8
+ cmp cur_ptep, end_ptep
+ b.ne do_pte
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 8e0b3702f1c0..efaa3b130f4d 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -119,12 +119,12 @@ config MIPS_GENERIC
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_SMARTMIPS
+- select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+- select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+- select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+- select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+- select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+- select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
++ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
++ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
++ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
++ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
++ select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
++ select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USE_OF
+ help
+ Select this to build a kernel which aims to support multiple boards,
+diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
+index e68e6e04063a..1025f937ab0e 100644
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -388,15 +388,16 @@ LEAF(mips_cps_boot_vpes)
+
+ #elif defined(CONFIG_MIPS_MT)
+
+- .set push
+- .set MIPS_ISA_LEVEL_RAW
+- .set mt
+-
+ /* If the core doesn't support MT then return */
+ has_mt t0, 5f
+
+ /* Enter VPE configuration state */
++ .set push
++ .set MIPS_ISA_LEVEL_RAW
++ .set mt
+ dvpe
++ .set pop
++
+ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+@@ -422,6 +423,10 @@ LEAF(mips_cps_boot_vpes)
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
++ .set push
++ .set MIPS_ISA_LEVEL_RAW
++ .set mt
++
+ /* Skip the VPE if its TC is not halted */
+ mftc0 t0, CP0_TCHALT
+ beqz t0, 2f
+@@ -495,6 +500,8 @@ LEAF(mips_cps_boot_vpes)
+ ehb
+ evpe
+
++ .set pop
++
+ /* Check whether this VPE is meant to be running */
+ li t0, 1
+ sll t0, t0, a1
+@@ -509,7 +516,7 @@ LEAF(mips_cps_boot_vpes)
+ 1: jr.hb t0
+ nop
+
+-2: .set pop
++2:
+
+ #endif /* CONFIG_MIPS_MT_SMP */
+
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 702c678de116..e4a1581ce822 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
+ unsigned long reserved_end;
+ unsigned long mapstart = ~0UL;
+ unsigned long bootmap_size;
++ phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
+ bool bootmap_valid = false;
+ int i;
+
+@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
+ max_low_pfn = 0;
+
+ /*
+- * Find the highest page frame number we have available.
++ * Find the highest page frame number we have available
++ * and the lowest used RAM address
+ */
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long start, end;
+@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
+ end = PFN_DOWN(boot_mem_map.map[i].addr
+ + boot_mem_map.map[i].size);
+
++ ramstart = min(ramstart, boot_mem_map.map[i].addr);
++
+ #ifndef CONFIG_HIGHMEM
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
+ mapstart = max(reserved_end, start);
+ }
+
++ /*
++ * Reserve any memory between the start of RAM and PHYS_OFFSET
++ */
++ if (ramstart > PHYS_OFFSET)
++ add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
++ BOOT_MEM_RESERVED);
++
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+ if (min_low_pfn > ARCH_PFN_OFFSET) {
+@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)
+
+ add_memory_region(start, size, BOOT_MEM_RAM);
+
+- if (start && start > PHYS_OFFSET)
+- add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
+- BOOT_MEM_RESERVED);
+ return 0;
+ }
+ early_param("mem", early_parse_mem);
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index 88187c285c70..1c02e6900f78 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
+ extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+ extern int numa_update_cpu_topology(bool cpus_locked);
+
++static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
++{
++ numa_cpu_lookup_table[cpu] = node;
++}
++
+ static inline int early_cpu_to_node(int cpu)
+ {
+ int nid;
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 72be0c32e902..2010e4c827b7 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1509,14 +1509,15 @@ static int assign_thread_tidr(void)
+ {
+ int index;
+ int err;
++ unsigned long flags;
+
+ again:
+ if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
+ return -ENOMEM;
+
+- spin_lock(&vas_thread_id_lock);
++ spin_lock_irqsave(&vas_thread_id_lock, flags);
+ err = ida_get_new_above(&vas_thread_ida, 1, &index);
+- spin_unlock(&vas_thread_id_lock);
++ spin_unlock_irqrestore(&vas_thread_id_lock, flags);
+
+ if (err == -EAGAIN)
+ goto again;
+@@ -1524,9 +1525,9 @@ static int assign_thread_tidr(void)
+ return err;
+
+ if (index > MAX_THREAD_CONTEXT) {
+- spin_lock(&vas_thread_id_lock);
++ spin_lock_irqsave(&vas_thread_id_lock, flags);
+ ida_remove(&vas_thread_ida, index);
+- spin_unlock(&vas_thread_id_lock);
++ spin_unlock_irqrestore(&vas_thread_id_lock, flags);
+ return -ENOMEM;
+ }
+
+@@ -1535,9 +1536,11 @@ static int assign_thread_tidr(void)
+
+ static void free_thread_tidr(int id)
+ {
+- spin_lock(&vas_thread_id_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&vas_thread_id_lock, flags);
+ ida_remove(&vas_thread_ida, id);
+- spin_unlock(&vas_thread_id_lock);
++ spin_unlock_irqrestore(&vas_thread_id_lock, flags);
+ }
+
+ /*
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index adb6364f4091..09be66fcea68 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void)
+ numa_cpu_lookup_table[cpu] = -1;
+ }
+
+-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+-{
+- numa_cpu_lookup_table[cpu] = node;
+-}
+-
+ static void map_cpu_to_node(int cpu, int node)
+ {
+ update_numa_cpu_lookup_table(cpu, node);
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index cfbbee941a76..17ae5c15a9e0 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -17,6 +17,7 @@
+ #include <linux/of_fdt.h>
+ #include <linux/mm.h>
+ #include <linux/string_helpers.h>
++#include <linux/stop_machine.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -671,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+ pud_clear(pud);
+ }
+
++struct change_mapping_params {
++ pte_t *pte;
++ unsigned long start;
++ unsigned long end;
++ unsigned long aligned_start;
++ unsigned long aligned_end;
++};
++
++static int stop_machine_change_mapping(void *data)
++{
++ struct change_mapping_params *params =
++ (struct change_mapping_params *)data;
++
++ if (!data)
++ return -1;
++
++ spin_unlock(&init_mm.page_table_lock);
++ pte_clear(&init_mm, params->aligned_start, params->pte);
++ create_physical_mapping(params->aligned_start, params->start);
++ create_physical_mapping(params->end, params->aligned_end);
++ spin_lock(&init_mm.page_table_lock);
++ return 0;
++}
++
+ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+ unsigned long end)
+ {
+@@ -699,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+ }
+ }
+
++/*
++ * clear the pte and potentially split the mapping helper
++ */
++static void split_kernel_mapping(unsigned long addr, unsigned long end,
++ unsigned long size, pte_t *pte)
++{
++ unsigned long mask = ~(size - 1);
++ unsigned long aligned_start = addr & mask;
++ unsigned long aligned_end = addr + size;
++ struct change_mapping_params params;
++ bool split_region = false;
++
++ if ((end - addr) < size) {
++ /*
++ * We're going to clear the PTE, but not flushed
++ * the mapping, time to remap and flush. The
++ * effects if visible outside the processor or
++ * if we are running in code close to the
++ * mapping we cleared, we are in trouble.
++ */
++ if (overlaps_kernel_text(aligned_start, addr) ||
++ overlaps_kernel_text(end, aligned_end)) {
++ /*
++ * Hack, just return, don't pte_clear
++ */
++ WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
++ "text, not splitting\n", addr, end);
++ return;
++ }
++ split_region = true;
++ }
++
++ if (split_region) {
++ params.pte = pte;
++ params.start = addr;
++ params.end = end;
++ params.aligned_start = addr & ~(size - 1);
++ params.aligned_end = min_t(unsigned long, aligned_end,
++ (unsigned long)__va(memblock_end_of_DRAM()));
++ stop_machine(stop_machine_change_mapping, &params, NULL);
++ return;
++ }
++
++ pte_clear(&init_mm, addr, pte);
++}
++
+ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+ unsigned long end)
+ {
+@@ -714,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+ continue;
+
+ if (pmd_huge(*pmd)) {
+- if (!IS_ALIGNED(addr, PMD_SIZE) ||
+- !IS_ALIGNED(next, PMD_SIZE)) {
+- WARN_ONCE(1, "%s: unaligned range\n", __func__);
+- continue;
+- }
+-
+- pte_clear(&init_mm, addr, (pte_t *)pmd);
++ split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
+ continue;
+ }
+
+@@ -745,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
+ continue;
+
+ if (pud_huge(*pud)) {
+- if (!IS_ALIGNED(addr, PUD_SIZE) ||
+- !IS_ALIGNED(next, PUD_SIZE)) {
+- WARN_ONCE(1, "%s: unaligned range\n", __func__);
+- continue;
+- }
+-
+- pte_clear(&init_mm, addr, (pte_t *)pud);
++ split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
+ continue;
+ }
+
+@@ -777,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
+ continue;
+
+ if (pgd_huge(*pgd)) {
+- if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+- !IS_ALIGNED(next, PGDIR_SIZE)) {
+- WARN_ONCE(1, "%s: unaligned range\n", __func__);
+- continue;
+- }
+-
+- pte_clear(&init_mm, addr, (pte_t *)pgd);
++ split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
+ continue;
+ }
+
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index 813ea22c3e00..eec1367c2f32 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+ if (old & PATB_HR) {
+ asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
++ asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
++ "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+ trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
+ } else {
+ asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index 884f4b705b57..913a2b81b177 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -600,14 +600,12 @@ void radix__flush_tlb_all(void)
+ */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
+- trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ /*
+ * now flush host entires by passing PRS = 0 and LPID == 0
+ */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+- trace_tlbie(0, 0, rb, 0, ric, prs, r);
+ }
+
+ void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
+diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
+index 2b3eb01ab110..b7c53a51c31b 100644
+--- a/arch/powerpc/platforms/powernv/vas-window.c
++++ b/arch/powerpc/platforms/powernv/vas-window.c
+@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
+ rc = PTR_ERR(txwin->paste_kaddr);
+ goto free_window;
+ }
++ } else {
++ /*
++ * A user mapping must ensure that context switch issues
++ * CP_ABORT for this thread.
++ */
++ rc = set_thread_uses_vas();
++ if (rc)
++ goto free_window;
+ }
+
+- /*
+- * Now that we have a send window, ensure context switch issues
+- * CP_ABORT for this thread.
+- */
+- rc = -EINVAL;
+- if (set_thread_uses_vas() < 0)
+- goto free_window;
+-
+ set_vinst_win(vinst, txwin);
+
+ return txwin;
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index a7d14aa7bb7c..09083ad82f7a 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -36,6 +36,7 @@
+ #include <asm/xics.h>
+ #include <asm/xive.h>
+ #include <asm/plpar_wrappers.h>
++#include <asm/topology.h>
+
+ #include "pseries.h"
+ #include "offline_states.h"
+@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
+ BUG_ON(cpu_online(cpu));
+ set_cpu_present(cpu, false);
+ set_hard_smp_processor_id(cpu, -1);
++ update_numa_cpu_lookup_table(cpu, -1);
+ break;
+ }
+ if (cpu >= nr_cpu_ids)
+diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
+index d9c4c9366049..091f1d0d0af1 100644
+--- a/arch/powerpc/sysdev/xive/spapr.c
++++ b/arch/powerpc/sysdev/xive/spapr.c
+@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
+
+ rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
+ if (rc) {
+- pr_err("Error %lld getting queue info prio %d\n", rc, prio);
++ pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
++ target, prio);
+ rc = -EIO;
+ goto fail;
+ }
+@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
+ /* Configure and enable the queue in HW */
+ rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
+ if (rc) {
+- pr_err("Error %lld setting queue for prio %d\n", rc, prio);
++ pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
++ target, prio);
+ rc = -EIO;
+ } else {
+ q->qpage = qpage;
+@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
+ if (IS_ERR(qpage))
+ return PTR_ERR(qpage);
+
+- return xive_spapr_configure_queue(cpu, q, prio, qpage,
+- xive_queue_shift);
++ return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
++ q, prio, qpage, xive_queue_shift);
+ }
+
+ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
+@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
+ struct xive_q *q = &xc->queue[prio];
+ unsigned int alloc_order;
+ long rc;
++ int hw_cpu = get_hard_smp_processor_id(cpu);
+
+- rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
++ rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
+ if (rc)
+- pr_err("Error %ld setting queue for prio %d\n", rc, prio);
++ pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
++ hw_cpu, prio);
+
+ alloc_order = xive_alloc_order(xive_queue_shift);
+ free_pages((unsigned long)q->qpage, alloc_order);
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index 59eea9c65d3e..79b7a3438d54 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
+
+ COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
+ {
+- return sys_setgid((gid_t)gid);
++ return sys_setgid(low2highgid(gid));
+ }
+
+ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
+@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
+
+ COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
+ {
+- return sys_setuid((uid_t)uid);
++ return sys_setuid(low2highuid(uid));
+ }
+
+ COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
+@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
+
+ COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
+ {
+- return sys_setfsuid((uid_t)uid);
++ return sys_setfsuid(low2highuid(uid));
+ }
+
+ COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
+ {
+- return sys_setfsgid((gid_t)gid);
++ return sys_setfsgid(low2highgid(gid));
+ }
+
+ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index 3f48f695d5e6..dce7092ab24a 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with
+
+ #define SIZEOF_PTREGS 21*8
+
+- .macro ALLOC_PT_GPREGS_ON_STACK
+- addq $-(15*8), %rsp
+- .endm
++.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
++ /*
++ * Push registers and sanitize registers of values that a
++ * speculation attack might otherwise want to exploit. The
++ * lower registers are likely clobbered well before they
++ * could be put to use in a speculative execution gadget.
++ * Interleave XOR with PUSH for better uop scheduling:
++ */
++ pushq %rdi /* pt_regs->di */
++ pushq %rsi /* pt_regs->si */
++ pushq \rdx /* pt_regs->dx */
++ pushq %rcx /* pt_regs->cx */
++ pushq \rax /* pt_regs->ax */
++ pushq %r8 /* pt_regs->r8 */
++ xorq %r8, %r8 /* nospec r8 */
++ pushq %r9 /* pt_regs->r9 */
++ xorq %r9, %r9 /* nospec r9 */
++ pushq %r10 /* pt_regs->r10 */
++ xorq %r10, %r10 /* nospec r10 */
++ pushq %r11 /* pt_regs->r11 */
++ xorq %r11, %r11 /* nospec r11*/
++ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx*/
++ pushq %rbp /* pt_regs->rbp */
++ xorl %ebp, %ebp /* nospec rbp*/
++ pushq %r12 /* pt_regs->r12 */
++ xorq %r12, %r12 /* nospec r12*/
++ pushq %r13 /* pt_regs->r13 */
++ xorq %r13, %r13 /* nospec r13*/
++ pushq %r14 /* pt_regs->r14 */
++ xorq %r14, %r14 /* nospec r14*/
++ pushq %r15 /* pt_regs->r15 */
++ xorq %r15, %r15 /* nospec r15*/
++ UNWIND_HINT_REGS
++.endm
+
+- .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
+- .if \r11
+- movq %r11, 6*8+\offset(%rsp)
+- .endif
+- .if \r8910
+- movq %r10, 7*8+\offset(%rsp)
+- movq %r9, 8*8+\offset(%rsp)
+- movq %r8, 9*8+\offset(%rsp)
+- .endif
+- .if \rax
+- movq %rax, 10*8+\offset(%rsp)
+- .endif
+- .if \rcx
+- movq %rcx, 11*8+\offset(%rsp)
+- .endif
+- movq %rdx, 12*8+\offset(%rsp)
+- movq %rsi, 13*8+\offset(%rsp)
+- movq %rdi, 14*8+\offset(%rsp)
+- UNWIND_HINT_REGS offset=\offset extra=0
+- .endm
+- .macro SAVE_C_REGS offset=0
+- SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+- .endm
+- .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
+- SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
+- .endm
+- .macro SAVE_C_REGS_EXCEPT_R891011
+- SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
+- .endm
+- .macro SAVE_C_REGS_EXCEPT_RCX_R891011
+- SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
+- .endm
+- .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
+- SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
+- .endm
+-
+- .macro SAVE_EXTRA_REGS offset=0
+- movq %r15, 0*8+\offset(%rsp)
+- movq %r14, 1*8+\offset(%rsp)
+- movq %r13, 2*8+\offset(%rsp)
+- movq %r12, 3*8+\offset(%rsp)
+- movq %rbp, 4*8+\offset(%rsp)
+- movq %rbx, 5*8+\offset(%rsp)
+- UNWIND_HINT_REGS offset=\offset
+- .endm
+-
+- .macro POP_EXTRA_REGS
++.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+- .endm
+-
+- .macro POP_C_REGS
++ .if \skip_r11rcx
++ popq %rsi
++ .else
+ popq %r11
++ .endif
+ popq %r10
+ popq %r9
+ popq %r8
+ popq %rax
++ .if \skip_r11rcx
++ popq %rsi
++ .else
+ popq %rcx
++ .endif
+ popq %rdx
+ popq %rsi
++ .if \pop_rdi
+ popq %rdi
+- .endm
+-
+- .macro icebp
+- .byte 0xf1
+- .endm
++ .endif
++.endm
+
+ /*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
+@@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+- * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
++ * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
+ * the original rbp.
+ */
+ .macro ENCODE_FRAME_POINTER ptregs_offset=0
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index c752abe89d80..4fd9044e72e7 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64)
+
+ swapgs
+ /*
+- * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
++ * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
+ * is not required to switch CR3.
+ */
+ movq %rsp, PER_CPU_VAR(rsp_scratch)
+@@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64)
+ pushq %rcx /* pt_regs->ip */
+ GLOBAL(entry_SYSCALL_64_after_hwframe)
+ pushq %rax /* pt_regs->orig_ax */
+- pushq %rdi /* pt_regs->di */
+- pushq %rsi /* pt_regs->si */
+- pushq %rdx /* pt_regs->dx */
+- pushq %rcx /* pt_regs->cx */
+- pushq $-ENOSYS /* pt_regs->ax */
+- pushq %r8 /* pt_regs->r8 */
+- pushq %r9 /* pt_regs->r9 */
+- pushq %r10 /* pt_regs->r10 */
+- pushq %r11 /* pt_regs->r11 */
+- pushq %rbx /* pt_regs->rbx */
+- pushq %rbp /* pt_regs->rbp */
+- pushq %r12 /* pt_regs->r12 */
+- pushq %r13 /* pt_regs->r13 */
+- pushq %r14 /* pt_regs->r14 */
+- pushq %r15 /* pt_regs->r15 */
+- UNWIND_HINT_REGS
++
++ PUSH_AND_CLEAR_REGS rax=$-ENOSYS
+
+ TRACE_IRQS_OFF
+
+@@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
+ syscall_return_via_sysret:
+ /* rcx and r11 are already restored (see code above) */
+ UNWIND_HINT_EMPTY
+- POP_EXTRA_REGS
+- popq %rsi /* skip r11 */
+- popq %r10
+- popq %r9
+- popq %r8
+- popq %rax
+- popq %rsi /* skip rcx */
+- popq %rdx
+- popq %rsi
++ POP_REGS pop_rdi=0 skip_r11rcx=1
+
+ /*
+ * Now all regs are restored except RSP and RDI.
+@@ -559,9 +537,7 @@ END(irq_entries_start)
+ call switch_to_thread_stack
+ 1:
+
+- ALLOC_PT_GPREGS_ON_STACK
+- SAVE_C_REGS
+- SAVE_EXTRA_REGS
++ PUSH_AND_CLEAR_REGS
+ ENCODE_FRAME_POINTER
+
+ testb $3, CS(%rsp)
+@@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+ ud2
+ 1:
+ #endif
+- POP_EXTRA_REGS
+- popq %r11
+- popq %r10
+- popq %r9
+- popq %r8
+- popq %rax
+- popq %rcx
+- popq %rdx
+- popq %rsi
++ POP_REGS pop_rdi=0
+
+ /*
+ * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
+@@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
+ ud2
+ 1:
+ #endif
+- POP_EXTRA_REGS
+- POP_C_REGS
++ POP_REGS
+ addq $8, %rsp /* skip regs->orig_ax */
+ INTERRUPT_RETURN
+
+@@ -904,7 +871,9 @@ ENTRY(\sym)
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+ .endif
+
+- ALLOC_PT_GPREGS_ON_STACK
++ /* Save all registers in pt_regs */
++ PUSH_AND_CLEAR_REGS
++ ENCODE_FRAME_POINTER
+
+ .if \paranoid < 2
+ testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
+@@ -1117,9 +1086,7 @@ ENTRY(xen_failsafe_callback)
+ addq $0x30, %rsp
+ UNWIND_HINT_IRET_REGS
+ pushq $-1 /* orig_ax = -1 => not a system call */
+- ALLOC_PT_GPREGS_ON_STACK
+- SAVE_C_REGS
+- SAVE_EXTRA_REGS
++ PUSH_AND_CLEAR_REGS
+ ENCODE_FRAME_POINTER
+ jmp error_exit
+ END(xen_failsafe_callback)
+@@ -1156,16 +1123,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
+ #endif
+
+ /*
+- * Save all registers in pt_regs, and switch gs if needed.
++ * Switch gs if needed.
+ * Use slow, but surefire "are we in kernel?" check.
+ * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ */
+ ENTRY(paranoid_entry)
+ UNWIND_HINT_FUNC
+ cld
+- SAVE_C_REGS 8
+- SAVE_EXTRA_REGS 8
+- ENCODE_FRAME_POINTER 8
+ movl $1, %ebx
+ movl $MSR_GS_BASE, %ecx
+ rdmsr
+@@ -1204,21 +1168,18 @@ ENTRY(paranoid_exit)
+ jmp .Lparanoid_exit_restore
+ .Lparanoid_exit_no_swapgs:
+ TRACE_IRQS_IRETQ_DEBUG
++ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
+ .Lparanoid_exit_restore:
+ jmp restore_regs_and_return_to_kernel
+ END(paranoid_exit)
+
+ /*
+- * Save all registers in pt_regs, and switch gs if needed.
++ * Switch gs if needed.
+ * Return: EBX=0: came from user mode; EBX=1: otherwise
+ */
+ ENTRY(error_entry)
+- UNWIND_HINT_FUNC
++ UNWIND_HINT_REGS offset=8
+ cld
+- SAVE_C_REGS 8
+- SAVE_EXTRA_REGS 8
+- ENCODE_FRAME_POINTER 8
+- xorl %ebx, %ebx
+ testb $3, CS+8(%rsp)
+ jz .Lerror_kernelspace
+
+@@ -1399,22 +1360,7 @@ ENTRY(nmi)
+ pushq 1*8(%rdx) /* pt_regs->rip */
+ UNWIND_HINT_IRET_REGS
+ pushq $-1 /* pt_regs->orig_ax */
+- pushq %rdi /* pt_regs->di */
+- pushq %rsi /* pt_regs->si */
+- pushq (%rdx) /* pt_regs->dx */
+- pushq %rcx /* pt_regs->cx */
+- pushq %rax /* pt_regs->ax */
+- pushq %r8 /* pt_regs->r8 */
+- pushq %r9 /* pt_regs->r9 */
+- pushq %r10 /* pt_regs->r10 */
+- pushq %r11 /* pt_regs->r11 */
+- pushq %rbx /* pt_regs->rbx */
+- pushq %rbp /* pt_regs->rbp */
+- pushq %r12 /* pt_regs->r12 */
+- pushq %r13 /* pt_regs->r13 */
+- pushq %r14 /* pt_regs->r14 */
+- pushq %r15 /* pt_regs->r15 */
+- UNWIND_HINT_REGS
++ PUSH_AND_CLEAR_REGS rdx=(%rdx)
+ ENCODE_FRAME_POINTER
+
+ /*
+@@ -1624,7 +1570,8 @@ end_repeat_nmi:
+ * frame to point back to repeat_nmi.
+ */
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+- ALLOC_PT_GPREGS_ON_STACK
++ PUSH_AND_CLEAR_REGS
++ ENCODE_FRAME_POINTER
+
+ /*
+ * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
+@@ -1648,8 +1595,7 @@ end_repeat_nmi:
+ nmi_swapgs:
+ SWAPGS_UNSAFE_STACK
+ nmi_restore:
+- POP_EXTRA_REGS
+- POP_C_REGS
++ POP_REGS
+
+ /*
+ * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 98d5358e4041..fd65e016e413 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
++ xorq %r15, %r15 /* nospec r15 */
+ cld
+
+ /*
+@@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
+ pushq %rbp /* pt_regs->cx (stashed in bp) */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
++ xorq %r15, %r15 /* nospec r15 */
+
+ /*
+ * User mode is traced as though IRQs are on, and SYSENTER
+@@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
++ xorq %r15, %r15 /* nospec r15 */
+ cld
+
+ /*
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 731153a4681e..56457cb73448 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
+ break;
+
+ case INTEL_FAM6_SANDYBRIDGE_X:
+- switch (cpu_data(cpu).x86_mask) {
++ switch (cpu_data(cpu).x86_stepping) {
+ case 6: rev = 0x618; break;
+ case 7: rev = 0x70c; break;
+ }
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index ae64d0b69729..cf372b90557e 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
+ * on PMU interrupt
+ */
+ if (boot_cpu_data.x86_model == 28
+- && boot_cpu_data.x86_mask < 10) {
++ && boot_cpu_data.x86_stepping < 10) {
+ pr_cont("LBR disabled due to erratum");
+ return;
+ }
+diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
+index a5604c352930..408879b0c0d4 100644
+--- a/arch/x86/events/intel/p6.c
++++ b/arch/x86/events/intel/p6.c
+@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
+
+ static __init void p6_pmu_rdpmc_quirk(void)
+ {
+- if (boot_cpu_data.x86_mask < 9) {
++ if (boot_cpu_data.x86_stepping < 9) {
+ /*
+ * PPro erratum 26; fixed in stepping 9 and above.
+ */
+diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
+index 8d0ec9df1cbe..f077401869ee 100644
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
+ if (boot_cpu_data.x86 == 0x0F &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86_model <= 0x05 &&
+- boot_cpu_data.x86_mask < 0x0A)
++ boot_cpu_data.x86_stepping < 0x0A)
+ return 1;
+ else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
+ return 1;
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 30d406146016..e1259f043ae9 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+
+ asm ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+- :"r"(size),"r" (index)
++ :"g"(size),"r" (index)
+ :"cc");
+ return mask;
+ }
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index 34d99af43994..6804d6642767 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -5,23 +5,20 @@
+ #include <linux/stringify.h>
+
+ /*
+- * Since some emulators terminate on UD2, we cannot use it for WARN.
+- * Since various instruction decoders disagree on the length of UD1,
+- * we cannot use it either. So use UD0 for WARN.
++ * Despite that some emulators terminate on UD2, we use it for WARN().
+ *
+- * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
+- * our kernel decoder thinks it takes a ModRM byte, which seems consistent
+- * with various things like the Intel SDM instruction encoding rules)
++ * Since various instruction decoders/specs disagree on the encoding of
++ * UD0/UD1.
+ */
+
+-#define ASM_UD0 ".byte 0x0f, 0xff"
++#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
+ #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */
+ #define ASM_UD2 ".byte 0x0f, 0x0b"
+
+ #define INSN_UD0 0xff0f
+ #define INSN_UD2 0x0b0f
+
+-#define LEN_UD0 2
++#define LEN_UD2 2
+
+ #ifdef CONFIG_GENERIC_BUG
+
+@@ -77,7 +74,11 @@ do { \
+ unreachable(); \
+ } while (0)
+
+-#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
++#define __WARN_FLAGS(flags) \
++do { \
++ _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
++ annotate_reachable(); \
++} while (0)
+
+ #include <asm-generic/bug.h>
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 4d57894635f2..76b058533e47 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -6,6 +6,7 @@
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
++#include <asm/msr-index.h>
+
+ #ifdef __ASSEMBLY__
+
+@@ -164,10 +165,15 @@ static inline void vmexit_fill_RSB(void)
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- alternative_input("",
+- "call __ibp_barrier",
+- X86_FEATURE_USE_IBPB,
+- ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
++ asm volatile(ALTERNATIVE("",
++ "movl %[msr], %%ecx\n\t"
++ "movl %[val], %%eax\n\t"
++ "movl $0, %%edx\n\t"
++ "wrmsr",
++ X86_FEATURE_USE_IBPB)
++ : : [msr] "i" (MSR_IA32_PRED_CMD),
++ [val] "i" (PRED_CMD_IBPB)
++ : "eax", "ecx", "edx", "memory");
+ }
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
+index 4baa6bceb232..d652a3808065 100644
+--- a/arch/x86/include/asm/page_64.h
++++ b/arch/x86/include/asm/page_64.h
+@@ -52,10 +52,6 @@ static inline void clear_page(void *page)
+
+ void copy_page(void *to, void *from);
+
+-#ifdef CONFIG_X86_MCE
+-#define arch_unmap_kpfn arch_unmap_kpfn
+-#endif
+-
+ #endif /* !__ASSEMBLY__ */
+
+ #ifdef CONFIG_X86_VSYSCALL_EMULATION
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 892df375b615..554841fab717 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
+ {
+ PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
+ }
+-static inline void __flush_tlb_single(unsigned long addr)
++static inline void __flush_tlb_one_user(unsigned long addr)
+ {
+- PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
++ PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
+ }
+
+ static inline void flush_tlb_others(const struct cpumask *cpumask,
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 6ec54d01972d..f624f1f10316 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -217,7 +217,7 @@ struct pv_mmu_ops {
+ /* TLB operations */
+ void (*flush_tlb_user)(void);
+ void (*flush_tlb_kernel)(void);
+- void (*flush_tlb_single)(unsigned long addr);
++ void (*flush_tlb_one_user)(unsigned long addr);
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
+
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index e67c0620aec2..e55466760ff8 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -61,7 +61,7 @@ void paging_init(void);
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
+- __flush_tlb_one((vaddr)); \
++ __flush_tlb_one_kernel((vaddr)); \
+ } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 513f9604c192..44c2c4ec6d60 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+- __u8 x86_mask;
++ __u8 x86_stepping;
+ #ifdef CONFIG_X86_64
+ /* Number of 4K pages in DTLB/ITLB combined(in pages): */
+ int x86_tlbsize;
+@@ -109,7 +109,7 @@ struct cpuinfo_x86 {
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ /* in KB - valid for CPUS which support this call: */
+- int x86_cache_size;
++ unsigned int x86_cache_size;
+ int x86_cache_alignment; /* In bytes */
+ /* Cache QoS architectural values: */
+ int x86_cache_max_rmid; /* max index */
+@@ -969,7 +969,4 @@ bool xen_set_default_idle(void);
+
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
+-
+-void __ibp_barrier(void);
+-
+ #endif /* _ASM_X86_PROCESSOR_H */
1610 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
1611 +index 2b8f18ca5874..84137c22fdfa 100644
1612 +--- a/arch/x86/include/asm/tlbflush.h
1613 ++++ b/arch/x86/include/asm/tlbflush.h
1614 +@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
1615 + #else
1616 + #define __flush_tlb() __native_flush_tlb()
1617 + #define __flush_tlb_global() __native_flush_tlb_global()
1618 +-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
1619 ++#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
1620 + #endif
1621 +
1622 + static inline bool tlb_defer_switch_to_init_mm(void)
1623 +@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
1624 + /*
1625 + * flush one page in the user mapping
1626 + */
1627 +-static inline void __native_flush_tlb_single(unsigned long addr)
1628 ++static inline void __native_flush_tlb_one_user(unsigned long addr)
1629 + {
1630 + u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1631 +
1632 +@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
1633 + /*
1634 + * flush one page in the kernel mapping
1635 + */
1636 +-static inline void __flush_tlb_one(unsigned long addr)
1637 ++static inline void __flush_tlb_one_kernel(unsigned long addr)
1638 + {
1639 + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
1640 +- __flush_tlb_single(addr);
1641 ++
1642 ++ /*
1643 ++ * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
1644 ++ * paravirt equivalent. Even with PCID, this is sufficient: we only
1645 ++ * use PCID if we also use global PTEs for the kernel mapping, and
1646 ++ * INVLPG flushes global translations across all address spaces.
1647 ++ *
1648 ++ * If PTI is on, then the kernel is mapped with non-global PTEs, and
1649 ++ * __flush_tlb_one_user() will flush the given address for the current
1650 ++ * kernel address space and for its usermode counterpart, but it does
1651 ++ * not flush it for other address spaces.
1652 ++ */
1653 ++ __flush_tlb_one_user(addr);
1654 +
1655 + if (!static_cpu_has(X86_FEATURE_PTI))
1656 + return;
1657 +
1658 + /*
1659 +- * __flush_tlb_single() will have cleared the TLB entry for this ASID,
1660 +- * but since kernel space is replicated across all, we must also
1661 +- * invalidate all others.
1662 ++ * See above. We need to propagate the flush to all other address
1663 ++ * spaces. In principle, we only need to propagate it to kernelmode
1664 ++ * address spaces, but the extra bookkeeping we would need is not
1665 ++ * worth it.
1666 + */
1667 + invalidate_other_asid();
1668 + }
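
The hunk above is the substance of the TLB rename: __flush_tlb_one_user()
invalidates only the current ASID (and, under PTI, its usermode twin),
while __flush_tlb_one_kernel() additionally propagates the flush to the
other address spaces in which the kernel is mapped. A minimal caller-side
sketch of the rule the later hunks apply, with a hypothetical function
name (the real callers are in ioremap.c, pgtable_32.c, etc. below):

	/* After editing a kernel PTE, flush with the _kernel variant so
	 * the new translation is visible in every address space. */
	static void example_update_kernel_pte(pte_t *ptep, unsigned long vaddr,
					      pte_t pte)
	{
		set_pte(ptep, pte);
		__flush_tlb_one_kernel(vaddr);	/* not __flush_tlb_one_user() */
	}
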
1669 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1670 +index 6db28f17ff28..c88e0b127810 100644
1671 +--- a/arch/x86/kernel/amd_nb.c
1672 ++++ b/arch/x86/kernel/amd_nb.c
1673 +@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
1674 + if (boot_cpu_data.x86 == 0x10 &&
1675 + boot_cpu_data.x86_model >= 0x8 &&
1676 + (boot_cpu_data.x86_model > 0x9 ||
1677 +- boot_cpu_data.x86_mask >= 0x1))
1678 ++ boot_cpu_data.x86_stepping >= 0x1))
1679 + amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
1680 +
1681 + if (boot_cpu_data.x86 == 0x15)
1682 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1683 +index 25ddf02598d2..b203af0855b5 100644
1684 +--- a/arch/x86/kernel/apic/apic.c
1685 ++++ b/arch/x86/kernel/apic/apic.c
1686 +@@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
1687 +
1688 + static u32 hsx_deadline_rev(void)
1689 + {
1690 +- switch (boot_cpu_data.x86_mask) {
1691 ++ switch (boot_cpu_data.x86_stepping) {
1692 + case 0x02: return 0x3a; /* EP */
1693 + case 0x04: return 0x0f; /* EX */
1694 + }
1695 +@@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void)
1696 +
1697 + static u32 bdx_deadline_rev(void)
1698 + {
1699 +- switch (boot_cpu_data.x86_mask) {
1700 ++ switch (boot_cpu_data.x86_stepping) {
1701 + case 0x02: return 0x00000011;
1702 + case 0x03: return 0x0700000e;
1703 + case 0x04: return 0x0f00000c;
1704 +@@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void)
1705 +
1706 + static u32 skx_deadline_rev(void)
1707 + {
1708 +- switch (boot_cpu_data.x86_mask) {
1709 ++ switch (boot_cpu_data.x86_stepping) {
1710 + case 0x03: return 0x01000136;
1711 + case 0x04: return 0x02000014;
1712 + }
1713 +diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
1714 +index e4b0d92b3ae0..2a7fd56e67b3 100644
1715 +--- a/arch/x86/kernel/apm_32.c
1716 ++++ b/arch/x86/kernel/apm_32.c
1717 +@@ -2389,6 +2389,7 @@ static int __init apm_init(void)
1718 + if (HZ != 100)
1719 + idle_period = (idle_period * HZ) / 100;
1720 + if (idle_threshold < 100) {
1721 ++ cpuidle_poll_state_init(&apm_idle_driver);
1722 + if (!cpuidle_register_driver(&apm_idle_driver))
1723 + if (cpuidle_register_device(&apm_cpuidle_device))
1724 + cpuidle_unregister_driver(&apm_idle_driver);
1725 +diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
1726 +index fa1261eefa16..f91ba53e06c8 100644
1727 +--- a/arch/x86/kernel/asm-offsets_32.c
1728 ++++ b/arch/x86/kernel/asm-offsets_32.c
1729 +@@ -18,7 +18,7 @@ void foo(void)
1730 + OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
1731 + OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
1732 + OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
1733 +- OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
1734 ++ OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
1735 + OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
1736 + OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
1737 + OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
1738 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
1739 +index ea831c858195..e7d5a7883632 100644
1740 +--- a/arch/x86/kernel/cpu/amd.c
1741 ++++ b/arch/x86/kernel/cpu/amd.c
1742 +@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
1743 + return;
1744 + }
1745 +
1746 +- if (c->x86_model == 6 && c->x86_mask == 1) {
1747 ++ if (c->x86_model == 6 && c->x86_stepping == 1) {
1748 + const int K6_BUG_LOOP = 1000000;
1749 + int n;
1750 + void (*f_vide)(void);
1751 +@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
1752 +
1753 + /* K6 with old style WHCR */
1754 + if (c->x86_model < 8 ||
1755 +- (c->x86_model == 8 && c->x86_mask < 8)) {
1756 ++ (c->x86_model == 8 && c->x86_stepping < 8)) {
1757 + /* We can only write allocate on the low 508Mb */
1758 + if (mbytes > 508)
1759 + mbytes = 508;
1760 +@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
1761 + return;
1762 + }
1763 +
1764 +- if ((c->x86_model == 8 && c->x86_mask > 7) ||
1765 ++ if ((c->x86_model == 8 && c->x86_stepping > 7) ||
1766 + c->x86_model == 9 || c->x86_model == 13) {
1767 + /* The more serious chips .. */
1768 +
1769 +@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
1770 + * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
1771 + * As per AMD technical note 27212 0.2
1772 + */
1773 +- if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
1774 ++ if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
1775 + rdmsr(MSR_K7_CLK_CTL, l, h);
1776 + if ((l & 0xfff00000) != 0x20000000) {
1777 + pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
1778 +@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
1779 + * but they are not certified as MP capable.
1780 + */
1781 + /* Athlon 660/661 is valid. */
1782 +- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
1783 +- (c->x86_mask == 1)))
1784 ++ if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
1785 ++ (c->x86_stepping == 1)))
1786 + return;
1787 +
1788 + /* Duron 670 is valid */
1789 +- if ((c->x86_model == 7) && (c->x86_mask == 0))
1790 ++ if ((c->x86_model == 7) && (c->x86_stepping == 0))
1791 + return;
1792 +
1793 + /*
1794 +@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
1795 + * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
1796 + * more.
1797 + */
1798 +- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
1799 +- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
1800 ++ if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
1801 ++ ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
1802 + (c->x86_model > 7))
1803 + if (cpu_has(c, X86_FEATURE_MP))
1804 + return;
1805 +@@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
1806 + /* Set MTRR capability flag if appropriate */
1807 + if (c->x86 == 5)
1808 + if (c->x86_model == 13 || c->x86_model == 9 ||
1809 +- (c->x86_model == 8 && c->x86_mask >= 8))
1810 ++ (c->x86_model == 8 && c->x86_stepping >= 8))
1811 + set_cpu_cap(c, X86_FEATURE_K6_MTRR);
1812 + #endif
1813 + #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
1814 +@@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
1815 + * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
1816 + * all up to and including B1.
1817 + */
1818 +- if (c->x86_model <= 1 && c->x86_mask <= 1)
1819 ++ if (c->x86_model <= 1 && c->x86_stepping <= 1)
1820 + set_cpu_cap(c, X86_FEATURE_CPB);
1821 + }
1822 +
1823 +@@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1824 + /* AMD errata T13 (order #21922) */
1825 + if ((c->x86 == 6)) {
1826 + /* Duron Rev A0 */
1827 +- if (c->x86_model == 3 && c->x86_mask == 0)
1828 ++ if (c->x86_model == 3 && c->x86_stepping == 0)
1829 + size = 64;
1830 + /* Tbird rev A1/A2 */
1831 + if (c->x86_model == 4 &&
1832 +- (c->x86_mask == 0 || c->x86_mask == 1))
1833 ++ (c->x86_stepping == 0 || c->x86_stepping == 1))
1834 + size = 256;
1835 + }
1836 + return size;
1837 +@@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1838 + }
1839 +
1840 + /* OSVW unavailable or ID unknown, match family-model-stepping range */
1841 +- ms = (cpu->x86_model << 4) | cpu->x86_mask;
1842 ++ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1843 + while ((range = *erratum++))
1844 + if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1845 + (ms >= AMD_MODEL_RANGE_START(range)) &&
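
For reference, the OSVW fallback above packs model and stepping into a
single byte-wide key, ms = (model << 4) | stepping, and range-checks it
per erratum. A worked example with hypothetical values:

	unsigned int ms = (0x8 << 4) | 0x1;	/* model 0x8, stepping 0x1 -> 0x81 */
	/* matched iff the family equals AMD_MODEL_RANGE_FAMILY(range) and
	 * AMD_MODEL_RANGE_START(range) <= ms <= AMD_MODEL_RANGE_END(range) */
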
1846 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
1847 +index 71949bf2de5a..d71c8b54b696 100644
1848 +--- a/arch/x86/kernel/cpu/bugs.c
1849 ++++ b/arch/x86/kernel/cpu/bugs.c
1850 +@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1851 + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
1852 + return SPECTRE_V2_CMD_NONE;
1853 + else {
1854 +- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
1855 +- sizeof(arg));
1856 ++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1857 + if (ret < 0)
1858 + return SPECTRE_V2_CMD_AUTO;
1859 +
1860 +@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1861 + }
1862 +
1863 + if (i >= ARRAY_SIZE(mitigation_options)) {
1864 +- pr_err("unknown option (%s). Switching to AUTO select\n",
1865 +- mitigation_options[i].option);
1866 ++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1867 + return SPECTRE_V2_CMD_AUTO;
1868 + }
1869 + }
1870 +@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1871 + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
1872 + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
1873 + !IS_ENABLED(CONFIG_RETPOLINE)) {
1874 +- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1875 +- mitigation_options[i].option);
1876 ++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
1877 + return SPECTRE_V2_CMD_AUTO;
1878 + }
1879 +
1880 +@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
1881 + goto retpoline_auto;
1882 + break;
1883 + }
1884 +- pr_err("kernel not compiled with retpoline; no mitigation available!");
1885 ++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
1886 + return;
1887 +
1888 + retpoline_auto:
1889 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
1890 + retpoline_amd:
1891 + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1892 +- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
1893 ++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
1894 + goto retpoline_generic;
1895 + }
1896 + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
1897 +@@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void)
1898 + pr_info("%s\n", spectre_v2_strings[mode]);
1899 +
1900 + /*
1901 +- * If neither SMEP or KPTI are available, there is a risk of
1902 ++ * If neither SMEP nor PTI are available, there is a risk of
1903 + * hitting userspace addresses in the RSB after a context switch
1904 + * from a shallow call stack to a deeper one. To prevent this fill
1905 + * the entire RSB, even when using IBRS.
1906 +@@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void)
1907 + if ((!boot_cpu_has(X86_FEATURE_PTI) &&
1908 + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
1909 + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1910 +- pr_info("Filling RSB on context switch\n");
1911 ++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
1912 + }
1913 +
1914 + /* Initialize Indirect Branch Prediction Barrier if supported */
1915 + if (boot_cpu_has(X86_FEATURE_IBPB)) {
1916 + setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1917 +- pr_info("Enabling Indirect Branch Prediction Barrier\n");
1918 ++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
1919 + }
1920 + }
1921 +
1922 + #undef pr_fmt
1923 +
1924 + #ifdef CONFIG_SYSFS
1925 +-ssize_t cpu_show_meltdown(struct device *dev,
1926 +- struct device_attribute *attr, char *buf)
1927 ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1928 + {
1929 + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1930 + return sprintf(buf, "Not affected\n");
1931 +@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
1932 + return sprintf(buf, "Vulnerable\n");
1933 + }
1934 +
1935 +-ssize_t cpu_show_spectre_v1(struct device *dev,
1936 +- struct device_attribute *attr, char *buf)
1937 ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1938 + {
1939 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1940 + return sprintf(buf, "Not affected\n");
1941 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1942 + }
1943 +
1944 +-ssize_t cpu_show_spectre_v2(struct device *dev,
1945 +- struct device_attribute *attr, char *buf)
1946 ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1947 + {
1948 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1949 + return sprintf(buf, "Not affected\n");
1950 +@@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
1951 + spectre_v2_module_string());
1952 + }
1953 + #endif
1954 +-
1955 +-void __ibp_barrier(void)
1956 +-{
1957 +- __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
1958 +-}
1959 +-EXPORT_SYMBOL_GPL(__ibp_barrier);
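
The out-of-line __ibp_barrier() and its export are dropped because the
barrier is now emitted inline. Going by the corresponding upstream
cleanup, the replacement in nospec-branch.h looks roughly like the sketch
below; treat the exact form as an assumption, not a quote of this tree:

	static inline void indirect_branch_prediction_barrier(void)
	{
		u64 val = PRED_CMD_IBPB;

		alternative_msr_write(MSR_IA32_PRED_CMD, val,
				      X86_FEATURE_USE_IBPB);
	}
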
1960 +diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
1961 +index 68bc6d9b3132..595be776727d 100644
1962 +--- a/arch/x86/kernel/cpu/centaur.c
1963 ++++ b/arch/x86/kernel/cpu/centaur.c
1964 +@@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
1965 + clear_cpu_cap(c, X86_FEATURE_TSC);
1966 + break;
1967 + case 8:
1968 +- switch (c->x86_mask) {
1969 ++ switch (c->x86_stepping) {
1970 + default:
1971 + name = "2";
1972 + break;
1973 +@@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1974 + * - Note, it seems this may only be in engineering samples.
1975 + */
1976 + if ((c->x86 == 6) && (c->x86_model == 9) &&
1977 +- (c->x86_mask == 1) && (size == 65))
1978 ++ (c->x86_stepping == 1) && (size == 65))
1979 + size -= 1;
1980 + return size;
1981 + }
1982 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1983 +index d63f4b5706e4..824aee0117bb 100644
1984 +--- a/arch/x86/kernel/cpu/common.c
1985 ++++ b/arch/x86/kernel/cpu/common.c
1986 +@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
1987 + cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
1988 + c->x86 = x86_family(tfms);
1989 + c->x86_model = x86_model(tfms);
1990 +- c->x86_mask = x86_stepping(tfms);
1991 ++ c->x86_stepping = x86_stepping(tfms);
1992 +
1993 + if (cap0 & (1<<19)) {
1994 + c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1995 +@@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
1996 + int i;
1997 +
1998 + c->loops_per_jiffy = loops_per_jiffy;
1999 +- c->x86_cache_size = -1;
2000 ++ c->x86_cache_size = 0;
2001 + c->x86_vendor = X86_VENDOR_UNKNOWN;
2002 +- c->x86_model = c->x86_mask = 0; /* So far unknown... */
2003 ++ c->x86_model = c->x86_stepping = 0; /* So far unknown... */
2004 + c->x86_vendor_id[0] = '\0'; /* Unset */
2005 + c->x86_model_id[0] = '\0'; /* Unset */
2006 + c->x86_max_cores = 1;
2007 +@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
2008 +
2009 + pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2010 +
2011 +- if (c->x86_mask || c->cpuid_level >= 0)
2012 +- pr_cont(", stepping: 0x%x)\n", c->x86_mask);
2013 ++ if (c->x86_stepping || c->cpuid_level >= 0)
2014 ++ pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2015 + else
2016 + pr_cont(")\n");
2017 + }
2018 +diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
2019 +index 6b4bb335641f..8949b7ae6d92 100644
2020 +--- a/arch/x86/kernel/cpu/cyrix.c
2021 ++++ b/arch/x86/kernel/cpu/cyrix.c
2022 +@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
2023 +
2024 + /* common case step number/rev -- exceptions handled below */
2025 + c->x86_model = (dir1 >> 4) + 1;
2026 +- c->x86_mask = dir1 & 0xf;
2027 ++ c->x86_stepping = dir1 & 0xf;
2028 +
2029 + /* Now cook; the original recipe is by Channing Corn, from Cyrix.
2030 + * We do the same thing for each generation: we work out
2031 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
2032 +index 319bf989fad1..d19e903214b4 100644
2033 +--- a/arch/x86/kernel/cpu/intel.c
2034 ++++ b/arch/x86/kernel/cpu/intel.c
2035 +@@ -116,14 +116,13 @@ struct sku_microcode {
2036 + u32 microcode;
2037 + };
2038 + static const struct sku_microcode spectre_bad_microcodes[] = {
2039 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
2040 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
2041 +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
2042 +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
2043 +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
2044 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
2045 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
2046 ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
2047 ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
2048 ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
2049 + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
2050 + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
2051 +- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
2052 + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
2053 + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
2054 + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
2055 +@@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
2056 + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
2057 + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
2058 + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
2059 +- /* Updated in the 20180108 release; blacklist until we know otherwise */
2060 +- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
2061 + /* Observed in the wild */
2062 + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
2063 + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
2064 +@@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
2065 +
2066 + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
2067 + if (c->x86_model == spectre_bad_microcodes[i].model &&
2068 +- c->x86_mask == spectre_bad_microcodes[i].stepping)
2069 ++ c->x86_stepping == spectre_bad_microcodes[i].stepping)
2070 + return (c->microcode <= spectre_bad_microcodes[i].microcode);
2071 + }
2072 + return false;
2073 +@@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
2074 + * need the microcode to have already been loaded... so if it is
2075 + * not, recommend a BIOS update and disable large pages.
2076 + */
2077 +- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
2078 ++ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
2079 + c->microcode < 0x20e) {
2080 + pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
2081 + clear_cpu_cap(c, X86_FEATURE_PSE);
2082 +@@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
2083 +
2084 + /* CPUID workaround for 0F33/0F34 CPU */
2085 + if (c->x86 == 0xF && c->x86_model == 0x3
2086 +- && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
2087 ++ && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
2088 + c->x86_phys_bits = 36;
2089 +
2090 + /*
2091 +@@ -310,7 +307,7 @@ int ppro_with_ram_bug(void)
2092 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
2093 + boot_cpu_data.x86 == 6 &&
2094 + boot_cpu_data.x86_model == 1 &&
2095 +- boot_cpu_data.x86_mask < 8) {
2096 ++ boot_cpu_data.x86_stepping < 8) {
2097 + pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
2098 + return 1;
2099 + }
2100 +@@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
2101 + * Mask B, Pentium, but not Pentium MMX
2102 + */
2103 + if (c->x86 == 5 &&
2104 +- c->x86_mask >= 1 && c->x86_mask <= 4 &&
2105 ++ c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
2106 + c->x86_model <= 3) {
2107 + /*
2108 + * Remember we have B step Pentia with bugs
2109 +@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
2110 + * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
2111 + * model 3 mask 3
2112 + */
2113 +- if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
2114 ++ if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
2115 + clear_cpu_cap(c, X86_FEATURE_SEP);
2116 +
2117 + /*
2118 +@@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
2119 + * P4 Xeon erratum 037 workaround.
2120 + * Hardware prefetcher may cause stale data to be loaded into the cache.
2121 + */
2122 +- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
2123 ++ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
2124 + if (msr_set_bit(MSR_IA32_MISC_ENABLE,
2125 + MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
2126 + pr_info("CPU: C0 stepping P4 Xeon detected.\n");
2127 +@@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
2128 + * Specification Update").
2129 + */
2130 + if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
2131 +- (c->x86_mask < 0x6 || c->x86_mask == 0xb))
2132 ++ (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
2133 + set_cpu_bug(c, X86_BUG_11AP);
2134 +
2135 +
2136 +@@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
2137 + case 6:
2138 + if (l2 == 128)
2139 + p = "Celeron (Mendocino)";
2140 +- else if (c->x86_mask == 0 || c->x86_mask == 5)
2141 ++ else if (c->x86_stepping == 0 || c->x86_stepping == 5)
2142 + p = "Celeron-A";
2143 + break;
2144 +
2145 +diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
2146 +index 99442370de40..18dd8f22e353 100644
2147 +--- a/arch/x86/kernel/cpu/intel_rdt.c
2148 ++++ b/arch/x86/kernel/cpu/intel_rdt.c
2149 +@@ -771,7 +771,7 @@ static __init void rdt_quirks(void)
2150 + cache_alloc_hsw_probe();
2151 + break;
2152 + case INTEL_FAM6_SKYLAKE_X:
2153 +- if (boot_cpu_data.x86_mask <= 4)
2154 ++ if (boot_cpu_data.x86_stepping <= 4)
2155 + set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
2156 + }
2157 + }
2158 +diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
2159 +index aa0d5df9dc60..e956eb267061 100644
2160 +--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
2161 ++++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
2162 +@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
2163 +
2164 + extern struct mca_config mca_cfg;
2165 +
2166 ++#ifndef CONFIG_X86_64
2167 ++/*
2168 ++ * On 32-bit systems it would be difficult to safely unmap a poison page
2169 ++ * from the kernel 1:1 map because there are no non-canonical addresses that
2170 ++ * we can use to refer to the address without risking a speculative access.
2171 ++ * However, this isn't much of an issue because:
2172 ++ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
2173 ++ * are only mapped into the kernel as needed
2174 ++ * 2) Few people would run a 32-bit kernel on a machine that supports
2175 ++ * recoverable errors because they have too much memory to boot 32-bit.
2176 ++ */
2177 ++static inline void mce_unmap_kpfn(unsigned long pfn) {}
2178 ++#define mce_unmap_kpfn mce_unmap_kpfn
2179 ++#endif
2180 ++
2181 + #endif /* __X86_MCE_INTERNAL_H__ */
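
The stub above leans on a small preprocessor idiom: defining a macro with
the same name as the function marks the symbol as "already provided", so a
consumer can supply a default only when the name is undefined. A generic
sketch with a hypothetical name; the next hunk applies exactly this
pattern in mce.c:

	/* header, for configs with a trivial implementation: */
	static inline void helper(unsigned long arg) { }
	#define helper helper			/* signal: override present */

	/* consumer .c file: */
	#ifndef helper
	static void helper(unsigned long arg);	/* real version defined later */
	#endif
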
2182 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
2183 +index 868e412b4f0c..2fe482f6ecd8 100644
2184 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
2185 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
2186 +@@ -106,6 +106,10 @@ static struct irq_work mce_irq_work;
2187 +
2188 + static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
2189 +
2190 ++#ifndef mce_unmap_kpfn
2191 ++static void mce_unmap_kpfn(unsigned long pfn);
2192 ++#endif
2193 ++
2194 + /*
2195 + * CPU/chipset specific EDAC code can register a notifier call here to print
2196 + * MCE errors in a human-readable form.
2197 +@@ -582,7 +586,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
2198 +
2199 + if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
2200 + pfn = mce->addr >> PAGE_SHIFT;
2201 +- memory_failure(pfn, MCE_VECTOR, 0);
2202 ++ if (memory_failure(pfn, MCE_VECTOR, 0))
2203 ++ mce_unmap_kpfn(pfn);
2204 + }
2205 +
2206 + return NOTIFY_OK;
2207 +@@ -1049,12 +1054,13 @@ static int do_memory_failure(struct mce *m)
2208 + ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
2209 + if (ret)
2210 + pr_err("Memory error not recovered");
2211 ++ else
2212 ++ mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
2213 + return ret;
2214 + }
2215 +
2216 +-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
2217 +-
2218 +-void arch_unmap_kpfn(unsigned long pfn)
2219 ++#ifndef mce_unmap_kpfn
2220 ++static void mce_unmap_kpfn(unsigned long pfn)
2221 + {
2222 + unsigned long decoy_addr;
2223 +
2224 +@@ -1065,7 +1071,7 @@ void arch_unmap_kpfn(unsigned long pfn)
2225 + * We would like to just call:
2226 + * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
2227 + * but doing that would radically increase the odds of a
2228 +- * speculative access to the posion page because we'd have
2229 ++ * speculative access to the poison page because we'd have
2230 + * the virtual address of the kernel 1:1 mapping sitting
2231 + * around in registers.
2232 + * Instead we get tricky. We create a non-canonical address
2233 +@@ -1090,7 +1096,6 @@ void arch_unmap_kpfn(unsigned long pfn)
2234 +
2235 + if (set_memory_np(decoy_addr, 1))
2236 + pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
2237 +-
2238 + }
2239 + #endif
2240 +
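
The "non-canonical decoy" described in the comment is worth spelling out:
on x86_64, flipping bit 63 of the 1:1-map virtual address yields an alias
the CPU can never successfully dereference, even speculatively, while
set_memory_np() tolerates it because it only uses the address
arithmetically to locate the page-table entry. The core of the trick, as
in the upstream commit (shown here purely for illustration):

	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
	if (set_memory_np(decoy_addr, 1))	/* unmap via the decoy alias */
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
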
2241 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
2242 +index f7c55b0e753a..a15db2b4e0d6 100644
2243 +--- a/arch/x86/kernel/cpu/microcode/intel.c
2244 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
2245 +@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
2246 + */
2247 + if (c->x86 == 6 &&
2248 + c->x86_model == INTEL_FAM6_BROADWELL_X &&
2249 +- c->x86_mask == 0x01 &&
2250 ++ c->x86_stepping == 0x01 &&
2251 + llc_size_per_core > 2621440 &&
2252 + c->microcode < 0x0b000021) {
2253 + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
2254 +@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
2255 + return UCODE_NFOUND;
2256 +
2257 + sprintf(name, "intel-ucode/%02x-%02x-%02x",
2258 +- c->x86, c->x86_model, c->x86_mask);
2259 ++ c->x86, c->x86_model, c->x86_stepping);
2260 +
2261 + if (request_firmware_direct(&firmware, name, device)) {
2262 + pr_debug("data file %s load failed\n", name);
2263 +@@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
2264 +
2265 + static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
2266 + {
2267 +- u64 llc_size = c->x86_cache_size * 1024;
2268 ++ u64 llc_size = c->x86_cache_size * 1024ULL;
2269 +
2270 + do_div(llc_size, c->x86_max_cores);
2271 +
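
The 1024ULL suffix pairs with the x86_cache_size type change earlier in
this patch: with two 32-bit operands, "kb * 1024" is evaluated in 32-bit
arithmetic and only then widened to u64, so it can wrap before the
assignment. A self-contained illustration (the cache size is hypothetical):

	unsigned int kb = 4194304;	/* hypothetical 4 GiB LLC, in KB */
	u64 bad  = kb * 1024;		/* 32-bit multiply wraps to 0 */
	u64 good = kb * 1024ULL;	/* done in 64 bits: 0x100000000 */
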
2272 +diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
2273 +index fdc55215d44d..e12ee86906c6 100644
2274 +--- a/arch/x86/kernel/cpu/mtrr/generic.c
2275 ++++ b/arch/x86/kernel/cpu/mtrr/generic.c
2276 +@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
2277 + */
2278 + if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
2279 + boot_cpu_data.x86_model == 1 &&
2280 +- boot_cpu_data.x86_mask <= 7) {
2281 ++ boot_cpu_data.x86_stepping <= 7) {
2282 + if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
2283 + pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
2284 + return -EINVAL;
2285 +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
2286 +index 40d5a8a75212..7468de429087 100644
2287 +--- a/arch/x86/kernel/cpu/mtrr/main.c
2288 ++++ b/arch/x86/kernel/cpu/mtrr/main.c
2289 +@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
2290 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
2291 + boot_cpu_data.x86 == 0xF &&
2292 + boot_cpu_data.x86_model == 0x3 &&
2293 +- (boot_cpu_data.x86_mask == 0x3 ||
2294 +- boot_cpu_data.x86_mask == 0x4))
2295 ++ (boot_cpu_data.x86_stepping == 0x3 ||
2296 ++ boot_cpu_data.x86_stepping == 0x4))
2297 + phys_addr = 36;
2298 +
2299 + size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
2300 +diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
2301 +index e7ecedafa1c8..2c8522a39ed5 100644
2302 +--- a/arch/x86/kernel/cpu/proc.c
2303 ++++ b/arch/x86/kernel/cpu/proc.c
2304 +@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
2305 + c->x86_model,
2306 + c->x86_model_id[0] ? c->x86_model_id : "unknown");
2307 +
2308 +- if (c->x86_mask || c->cpuid_level >= 0)
2309 +- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
2310 ++ if (c->x86_stepping || c->cpuid_level >= 0)
2311 ++ seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
2312 + else
2313 + seq_puts(m, "stepping\t: unknown\n");
2314 + if (c->microcode)
2315 +@@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
2316 + }
2317 +
2318 + /* Cache size */
2319 +- if (c->x86_cache_size >= 0)
2320 +- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
2321 ++ if (c->x86_cache_size)
2322 ++ seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
2323 +
2324 + show_cpuinfo_core(m, c, cpu);
2325 + show_cpuinfo_misc(m, c);
2326 +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
2327 +index 1e82f787c160..c87560e1e3ef 100644
2328 +--- a/arch/x86/kernel/early-quirks.c
2329 ++++ b/arch/x86/kernel/early-quirks.c
2330 +@@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
2331 + INTEL_SKL_IDS(&gen9_early_ops),
2332 + INTEL_BXT_IDS(&gen9_early_ops),
2333 + INTEL_KBL_IDS(&gen9_early_ops),
2334 ++ INTEL_CFL_IDS(&gen9_early_ops),
2335 + INTEL_GLK_IDS(&gen9_early_ops),
2336 + INTEL_CNL_IDS(&gen9_early_ops),
2337 + };
2338 +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
2339 +index c29020907886..b59e4fb40fd9 100644
2340 +--- a/arch/x86/kernel/head_32.S
2341 ++++ b/arch/x86/kernel/head_32.S
2342 +@@ -37,7 +37,7 @@
2343 + #define X86 new_cpu_data+CPUINFO_x86
2344 + #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
2345 + #define X86_MODEL new_cpu_data+CPUINFO_x86_model
2346 +-#define X86_MASK new_cpu_data+CPUINFO_x86_mask
2347 ++#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
2348 + #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
2349 + #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
2350 + #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
2351 +@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
2352 + shrb $4,%al
2353 + movb %al,X86_MODEL
2354 + andb $0x0f,%cl # mask mask revision
2355 +- movb %cl,X86_MASK
2356 ++ movb %cl,X86_STEPPING
2357 + movl %edx,X86_CAPABILITY
2358 +
2359 + .Lis486:
2360 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
2361 +index 3a4b12809ab5..bc6bc6689e68 100644
2362 +--- a/arch/x86/kernel/mpparse.c
2363 ++++ b/arch/x86/kernel/mpparse.c
2364 +@@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
2365 + processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
2366 + processor.cpuflag = CPU_ENABLED;
2367 + processor.cpufeature = (boot_cpu_data.x86 << 8) |
2368 +- (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
2369 ++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
2370 + processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
2371 + processor.reserved[0] = 0;
2372 + processor.reserved[1] = 0;
2373 +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
2374 +index 041096bdef86..99dc79e76bdc 100644
2375 +--- a/arch/x86/kernel/paravirt.c
2376 ++++ b/arch/x86/kernel/paravirt.c
2377 +@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
2378 + __native_flush_tlb_global();
2379 + }
2380 +
2381 +-static void native_flush_tlb_single(unsigned long addr)
2382 ++static void native_flush_tlb_one_user(unsigned long addr)
2383 + {
2384 +- __native_flush_tlb_single(addr);
2385 ++ __native_flush_tlb_one_user(addr);
2386 + }
2387 +
2388 + struct static_key paravirt_steal_enabled;
2389 +@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
2390 +
2391 + .flush_tlb_user = native_flush_tlb,
2392 + .flush_tlb_kernel = native_flush_tlb_global,
2393 +- .flush_tlb_single = native_flush_tlb_single,
2394 ++ .flush_tlb_one_user = native_flush_tlb_one_user,
2395 + .flush_tlb_others = native_flush_tlb_others,
2396 +
2397 + .pgd_alloc = __paravirt_pgd_alloc,
2398 +diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
2399 +index 307d3bac5f04..11eda21eb697 100644
2400 +--- a/arch/x86/kernel/relocate_kernel_64.S
2401 ++++ b/arch/x86/kernel/relocate_kernel_64.S
2402 +@@ -68,6 +68,9 @@ relocate_kernel:
2403 + movq %cr4, %rax
2404 + movq %rax, CR4(%r11)
2405 +
2406 ++ /* Save CR4. Required to enable the right paging mode later. */
2407 ++ movq %rax, %r13
2408 ++
2409 + /* zero out flags, and disable interrupts */
2410 + pushq $0
2411 + popfq
2412 +@@ -126,8 +129,13 @@ identity_mapped:
2413 + /*
2414 + * Set cr4 to a known state:
2415 + * - physical address extension enabled
2416 ++ * - 5-level paging, if it was enabled before
2417 + */
2418 + movl $X86_CR4_PAE, %eax
2419 ++ testq $X86_CR4_LA57, %r13
2420 ++ jz 1f
2421 ++ orl $X86_CR4_LA57, %eax
2422 ++1:
2423 + movq %rax, %cr4
2424 +
2425 + jmp 1f
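
In C terms, the two hunks above amount to: snapshot CR4 while the old page
tables are still live, then rebuild it with PAE always set and LA57
carried over iff the kernel was already running with 5-level paging
(otherwise the identity-mapped trampoline would enable the wrong paging
mode). A sketch:

	unsigned long saved_cr4 = native_read_cr4();	/* before the switch */

	unsigned long cr4 = X86_CR4_PAE;
	if (saved_cr4 & X86_CR4_LA57)
		cr4 |= X86_CR4_LA57;
	native_write_cr4(cr4);
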
2426 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2427 +index ed556d50d7ed..844279c3ff4a 100644
2428 +--- a/arch/x86/kernel/smpboot.c
2429 ++++ b/arch/x86/kernel/smpboot.c
2430 +@@ -1431,7 +1431,6 @@ static void remove_siblinginfo(int cpu)
2431 + cpumask_clear(cpu_llc_shared_mask(cpu));
2432 + cpumask_clear(topology_sibling_cpumask(cpu));
2433 + cpumask_clear(topology_core_cpumask(cpu));
2434 +- c->phys_proc_id = 0;
2435 + c->cpu_core_id = 0;
2436 + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
2437 + recompute_smt_state();
2438 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
2439 +index 446c9ef8cfc3..3d9b2308e7fa 100644
2440 +--- a/arch/x86/kernel/traps.c
2441 ++++ b/arch/x86/kernel/traps.c
2442 +@@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
2443 + break;
2444 +
2445 + case BUG_TRAP_TYPE_WARN:
2446 +- regs->ip += LEN_UD0;
2447 ++ regs->ip += LEN_UD2;
2448 + return 1;
2449 + }
2450 +
2451 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2452 +index 2b8eb4da4d08..cc83bdcb65d1 100644
2453 +--- a/arch/x86/kvm/mmu.c
2454 ++++ b/arch/x86/kvm/mmu.c
2455 +@@ -5058,7 +5058,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
2456 + typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
2457 +
2458 + /* The caller should hold mmu-lock before calling this function. */
2459 +-static bool
2460 ++static __always_inline bool
2461 + slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
2462 + slot_level_handler fn, int start_level, int end_level,
2463 + gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
2464 +@@ -5088,7 +5088,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
2465 + return flush;
2466 + }
2467 +
2468 +-static bool
2469 ++static __always_inline bool
2470 + slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2471 + slot_level_handler fn, int start_level, int end_level,
2472 + bool lock_flush_tlb)
2473 +@@ -5099,7 +5099,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2474 + lock_flush_tlb);
2475 + }
2476 +
2477 +-static bool
2478 ++static __always_inline bool
2479 + slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2480 + slot_level_handler fn, bool lock_flush_tlb)
2481 + {
2482 +@@ -5107,7 +5107,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2483 + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
2484 + }
2485 +
2486 +-static bool
2487 ++static __always_inline bool
2488 + slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2489 + slot_level_handler fn, bool lock_flush_tlb)
2490 + {
2491 +@@ -5115,7 +5115,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
2492 + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
2493 + }
2494 +
2495 +-static bool
2496 ++static __always_inline bool
2497 + slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
2498 + slot_level_handler fn, bool lock_flush_tlb)
2499 + {
2500 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2501 +index 6f623848260f..561d8937fac5 100644
2502 +--- a/arch/x86/kvm/vmx.c
2503 ++++ b/arch/x86/kvm/vmx.c
2504 +@@ -10131,7 +10131,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
2505 + if (cpu_has_vmx_msr_bitmap() &&
2506 + nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
2507 + nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
2508 +- ;
2509 ++ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
2510 ++ CPU_BASED_USE_MSR_BITMAPS);
2511 + else
2512 + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
2513 + CPU_BASED_USE_MSR_BITMAPS);
2514 +@@ -10220,8 +10221,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
2515 + * updated to reflect this when L1 (or its L2s) actually write to
2516 + * the MSR.
2517 + */
2518 +- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
2519 +- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
2520 ++ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
2521 ++ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
2522 +
2523 + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
2524 + !pred_cmd && !spec_ctrl)
2525 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
2526 +index d6f848d1211d..2dd1fe13a37b 100644
2527 +--- a/arch/x86/lib/cpu.c
2528 ++++ b/arch/x86/lib/cpu.c
2529 +@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
2530 + {
2531 + unsigned int fam, model;
2532 +
2533 +- fam = x86_family(sig);
2534 ++ fam = x86_family(sig);
2535 +
2536 + model = (sig >> 4) & 0xf;
2537 +
2538 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
2539 +index 4a837289f2ad..60ae1fe3609f 100644
2540 +--- a/arch/x86/mm/init_64.c
2541 ++++ b/arch/x86/mm/init_64.c
2542 +@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
2543 + * It's enough to flush this one mapping.
2544 + * (PGE mappings get flushed as well)
2545 + */
2546 +- __flush_tlb_one(vaddr);
2547 ++ __flush_tlb_one_kernel(vaddr);
2548 + }
2549 +
2550 + void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
2551 +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
2552 +index c45b6ec5357b..e2db83bebc3b 100644
2553 +--- a/arch/x86/mm/ioremap.c
2554 ++++ b/arch/x86/mm/ioremap.c
2555 +@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
2556 + set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
2557 + else
2558 + pte_clear(&init_mm, addr, pte);
2559 +- __flush_tlb_one(addr);
2560 ++ __flush_tlb_one_kernel(addr);
2561 + }
2562 +diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
2563 +index 58477ec3d66d..7c8686709636 100644
2564 +--- a/arch/x86/mm/kmmio.c
2565 ++++ b/arch/x86/mm/kmmio.c
2566 +@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
2567 + return -1;
2568 + }
2569 +
2570 +- __flush_tlb_one(f->addr);
2571 ++ __flush_tlb_one_kernel(f->addr);
2572 + return 0;
2573 + }
2574 +
2575 +diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
2576 +index c3c5274410a9..9bb7f0ab9fe6 100644
2577 +--- a/arch/x86/mm/pgtable_32.c
2578 ++++ b/arch/x86/mm/pgtable_32.c
2579 +@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
2580 + * It's enough to flush this one mapping.
2581 + * (PGE mappings get flushed as well)
2582 + */
2583 +- __flush_tlb_one(vaddr);
2584 ++ __flush_tlb_one_kernel(vaddr);
2585 + }
2586 +
2587 + unsigned long __FIXADDR_TOP = 0xfffff000;
2588 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
2589 +index 012d02624848..0c936435ea93 100644
2590 +--- a/arch/x86/mm/tlb.c
2591 ++++ b/arch/x86/mm/tlb.c
2592 +@@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
2593 + * flush that changes context.tlb_gen from 2 to 3. If they get
2594 + * processed on this CPU in reverse order, we'll see
2595 + * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
2596 +- * If we were to use __flush_tlb_single() and set local_tlb_gen to
2597 ++ * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
2598 + * 3, we'd be break the invariant: we'd update local_tlb_gen above
2599 + * 1 without the full flush that's needed for tlb_gen 2.
2600 + *
2601 +@@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
2602 +
2603 + addr = f->start;
2604 + while (addr < f->end) {
2605 +- __flush_tlb_single(addr);
2606 ++ __flush_tlb_one_user(addr);
2607 + addr += PAGE_SIZE;
2608 + }
2609 + if (local)
2610 +@@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info)
2611 +
2612 + /* flush range by one by one 'invlpg' */
2613 + for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
2614 +- __flush_tlb_one(addr);
2615 ++ __flush_tlb_one_kernel(addr);
2616 + }
2617 +
2618 + void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2619 +diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
2620 +index 8538a6723171..7d5d53f36a7a 100644
2621 +--- a/arch/x86/platform/uv/tlb_uv.c
2622 ++++ b/arch/x86/platform/uv/tlb_uv.c
2623 +@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
2624 + local_flush_tlb();
2625 + stat->d_alltlb++;
2626 + } else {
2627 +- __flush_tlb_single(msg->address);
2628 ++ __flush_tlb_one_user(msg->address);
2629 + stat->d_onetlb++;
2630 + }
2631 + stat->d_requestee++;
2632 +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
2633 +index d85076223a69..aae88fec9941 100644
2634 +--- a/arch/x86/xen/mmu_pv.c
2635 ++++ b/arch/x86/xen/mmu_pv.c
2636 +@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
2637 + preempt_enable();
2638 + }
2639 +
2640 +-static void xen_flush_tlb_single(unsigned long addr)
2641 ++static void xen_flush_tlb_one_user(unsigned long addr)
2642 + {
2643 + struct mmuext_op *op;
2644 + struct multicall_space mcs;
2645 +
2646 +- trace_xen_mmu_flush_tlb_single(addr);
2647 ++ trace_xen_mmu_flush_tlb_one_user(addr);
2648 +
2649 + preempt_disable();
2650 +
2651 +@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2652 +
2653 + .flush_tlb_user = xen_flush_tlb,
2654 + .flush_tlb_kernel = xen_flush_tlb,
2655 +- .flush_tlb_single = xen_flush_tlb_single,
2656 ++ .flush_tlb_one_user = xen_flush_tlb_one_user,
2657 + .flush_tlb_others = xen_flush_tlb_others,
2658 +
2659 + .pgd_alloc = xen_pgd_alloc,
2660 +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
2661 +index 13b4f19b9131..159a897151d6 100644
2662 +--- a/arch/x86/xen/p2m.c
2663 ++++ b/arch/x86/xen/p2m.c
2664 +@@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
2665 + int i, ret = 0;
2666 + pte_t *pte;
2667 +
2668 ++ if (xen_feature(XENFEAT_auto_translated_physmap))
2669 ++ return 0;
2670 ++
2671 + if (kmap_ops) {
2672 + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
2673 + kmap_ops, count);
2674 +@@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
2675 + {
2676 + int i, ret = 0;
2677 +
2678 ++ if (xen_feature(XENFEAT_auto_translated_physmap))
2679 ++ return 0;
2680 ++
2681 + for (i = 0; i < count; i++) {
2682 + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
2683 + unsigned long pfn = page_to_pfn(pages[i]);
2684 +diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
2685 +index 497cc55a0c16..96f26e026783 100644
2686 +--- a/arch/x86/xen/xen-head.S
2687 ++++ b/arch/x86/xen/xen-head.S
2688 +@@ -9,7 +9,9 @@
2689 +
2690 + #include <asm/boot.h>
2691 + #include <asm/asm.h>
2692 ++#include <asm/msr.h>
2693 + #include <asm/page_types.h>
2694 ++#include <asm/percpu.h>
2695 + #include <asm/unwind_hints.h>
2696 +
2697 + #include <xen/interface/elfnote.h>
2698 +@@ -35,6 +37,20 @@ ENTRY(startup_xen)
2699 + mov %_ASM_SI, xen_start_info
2700 + mov $init_thread_union+THREAD_SIZE, %_ASM_SP
2701 +
2702 ++#ifdef CONFIG_X86_64
2703 ++ /* Set up %gs.
2704 ++ *
2705 ++ * The base of %gs always points to the bottom of the irqstack
2706 ++ * union. If the stack protector canary is enabled, it is
2707 ++ * located at %gs:40. Note that, on SMP, the boot cpu uses
2708 ++ * init data section till per cpu areas are set up.
2709 ++ */
2710 ++ movl $MSR_GS_BASE,%ecx
2711 ++ movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
2712 ++ cdq
2713 ++ wrmsr
2714 ++#endif
2715 ++
2716 + jmp xen_start_kernel
2717 + END(startup_xen)
2718 + __FINIT
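
The added stanza mirrors the native 64-bit boot path: MSR_GS_BASE must
point at the boot CPU's irq_stack_union before the first C function runs,
or a stack-protector kernel faults reading the canary at %gs:40 (the cdq
works because kernel image addresses are sign-extended). A C rendering,
assuming the usual INIT_PER_CPU_VAR name mangling:

	/* equivalent of the movl/movq/cdq/wrmsr sequence above */
	wrmsrl(MSR_GS_BASE, (unsigned long)&init_per_cpu__irq_stack_union);
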
2719 +diff --git a/block/blk-wbt.c b/block/blk-wbt.c
2720 +index ae8de9780085..f92fc84b5e2c 100644
2721 +--- a/block/blk-wbt.c
2722 ++++ b/block/blk-wbt.c
2723 +@@ -697,7 +697,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q)
2724 +
2725 + static int wbt_data_dir(const struct request *rq)
2726 + {
2727 +- return rq_data_dir(rq);
2728 ++ const int op = req_op(rq);
2729 ++
2730 ++ if (op == REQ_OP_READ)
2731 ++ return READ;
2732 ++ else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH)
2733 ++ return WRITE;
2734 ++
2735 ++ /* don't account */
2736 ++ return -1;
2737 + }
2738 +
2739 + int wbt_init(struct request_queue *q)
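
Writeback throttling previously used rq_data_dir(), which calls every
non-read operation a write, discards included. The fixed helper
classifies explicitly, so only genuine reads, writes and flushes feed the
throttling statistics:

	REQ_OP_READ   -> READ	(tracked as a read)
	REQ_OP_WRITE  -> WRITE	(tracked as a write)
	REQ_OP_FLUSH  -> WRITE	(flushes throttle like writes)
	anything else -> -1	(e.g. REQ_OP_DISCARD: not accounted)
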
2740 +diff --git a/drivers/base/core.c b/drivers/base/core.c
2741 +index 110230d86527..6835736daf2d 100644
2742 +--- a/drivers/base/core.c
2743 ++++ b/drivers/base/core.c
2744 +@@ -313,6 +313,9 @@ static void __device_link_del(struct device_link *link)
2745 + dev_info(link->consumer, "Dropping the link to %s\n",
2746 + dev_name(link->supplier));
2747 +
2748 ++ if (link->flags & DL_FLAG_PM_RUNTIME)
2749 ++ pm_runtime_drop_link(link->consumer);
2750 ++
2751 + list_del(&link->s_node);
2752 + list_del(&link->c_node);
2753 + device_link_free(link);
2754 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
2755 +index cc93522a6d41..1bbf14338bdb 100644
2756 +--- a/drivers/block/rbd.c
2757 ++++ b/drivers/block/rbd.c
2758 +@@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v)
2759 + #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
2760 + #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
2761 + #define RBD_FEATURE_DATA_POOL (1ULL<<7)
2762 ++#define RBD_FEATURE_OPERATIONS (1ULL<<8)
2763 +
2764 + #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
2765 + RBD_FEATURE_STRIPINGV2 | \
2766 + RBD_FEATURE_EXCLUSIVE_LOCK | \
2767 +- RBD_FEATURE_DATA_POOL)
2768 ++ RBD_FEATURE_DATA_POOL | \
2769 ++ RBD_FEATURE_OPERATIONS)
2770 +
2771 + /* Features supported by this (client software) implementation. */
2772 +
2773 +diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
2774 +index d1f5bb534e0e..6e9df558325b 100644
2775 +--- a/drivers/char/hw_random/via-rng.c
2776 ++++ b/drivers/char/hw_random/via-rng.c
2777 +@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
2778 + /* Enable secondary noise source on CPUs where it is present. */
2779 +
2780 + /* Nehemiah stepping 8 and higher */
2781 +- if ((c->x86_model == 9) && (c->x86_mask > 7))
2782 ++ if ((c->x86_model == 9) && (c->x86_stepping > 7))
2783 + lo |= VIA_NOISESRC2;
2784 +
2785 + /* Esther */
2786 +diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
2787 +index 3a2ca0f79daf..d0c34df0529c 100644
2788 +--- a/drivers/cpufreq/acpi-cpufreq.c
2789 ++++ b/drivers/cpufreq/acpi-cpufreq.c
2790 +@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
2791 + if (c->x86_vendor == X86_VENDOR_INTEL) {
2792 + if ((c->x86 == 15) &&
2793 + (c->x86_model == 6) &&
2794 +- (c->x86_mask == 8)) {
2795 ++ (c->x86_stepping == 8)) {
2796 + pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
2797 + return -ENODEV;
2798 + }
2799 +diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
2800 +index c46a12df40dd..d5e27bc7585a 100644
2801 +--- a/drivers/cpufreq/longhaul.c
2802 ++++ b/drivers/cpufreq/longhaul.c
2803 +@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
2804 + break;
2805 +
2806 + case 7:
2807 +- switch (c->x86_mask) {
2808 ++ switch (c->x86_stepping) {
2809 + case 0:
2810 + longhaul_version = TYPE_LONGHAUL_V1;
2811 + cpu_model = CPU_SAMUEL2;
2812 +@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
2813 + break;
2814 + case 1 ... 15:
2815 + longhaul_version = TYPE_LONGHAUL_V2;
2816 +- if (c->x86_mask < 8) {
2817 ++ if (c->x86_stepping < 8) {
2818 + cpu_model = CPU_SAMUEL2;
2819 + cpuname = "C3 'Samuel 2' [C5B]";
2820 + } else {
2821 +@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
2822 + numscales = 32;
2823 + memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
2824 + memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
2825 +- switch (c->x86_mask) {
2826 ++ switch (c->x86_stepping) {
2827 + case 0 ... 1:
2828 + cpu_model = CPU_NEHEMIAH;
2829 + cpuname = "C3 'Nehemiah A' [C5XLOE]";
2830 +diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
2831 +index fd77812313f3..a25741b1281b 100644
2832 +--- a/drivers/cpufreq/p4-clockmod.c
2833 ++++ b/drivers/cpufreq/p4-clockmod.c
2834 +@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
2835 + #endif
2836 +
2837 + /* Errata workaround */
2838 +- cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
2839 ++ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
2840 + switch (cpuid) {
2841 + case 0x0f07:
2842 + case 0x0f0a:
2843 +diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
2844 +index 80ac313e6c59..302e9ce793a0 100644
2845 +--- a/drivers/cpufreq/powernow-k7.c
2846 ++++ b/drivers/cpufreq/powernow-k7.c
2847 +@@ -131,7 +131,7 @@ static int check_powernow(void)
2848 + return 0;
2849 + }
2850 +
2851 +- if ((c->x86_model == 6) && (c->x86_mask == 0)) {
2852 ++ if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
2853 + pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
2854 + have_a0 = 1;
2855 + }
2856 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
2857 +index b6d7c4c98d0a..da7fdb4b661a 100644
2858 +--- a/drivers/cpufreq/powernv-cpufreq.c
2859 ++++ b/drivers/cpufreq/powernv-cpufreq.c
2860 +@@ -288,9 +288,9 @@ static int init_powernv_pstates(void)
2861 +
2862 + if (id == pstate_max)
2863 + powernv_pstate_info.max = i;
2864 +- else if (id == pstate_nominal)
2865 ++ if (id == pstate_nominal)
2866 + powernv_pstate_info.nominal = i;
2867 +- else if (id == pstate_min)
2868 ++ if (id == pstate_min)
2869 + powernv_pstate_info.min = i;
2870 +
2871 + if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
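
Dropping the "else" chain handles firmware that reports one pstate id in
more than one role. Hypothetical example: if pstate_max == pstate_nominal,
the old code stopped after recording powernv_pstate_info.max and left
.nominal at its stale default, whereas the independent tests now record
every role the id matches.
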
2872 +diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
2873 +index 41bc5397f4bb..4fa5adf16c70 100644
2874 +--- a/drivers/cpufreq/speedstep-centrino.c
2875 ++++ b/drivers/cpufreq/speedstep-centrino.c
2876 +@@ -37,7 +37,7 @@ struct cpu_id
2877 + {
2878 + __u8 x86; /* CPU family */
2879 + __u8 x86_model; /* model */
2880 +- __u8 x86_mask; /* stepping */
2881 ++ __u8 x86_stepping; /* stepping */
2882 + };
2883 +
2884 + enum {
2885 +@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
2886 + {
2887 + if ((c->x86 == x->x86) &&
2888 + (c->x86_model == x->x86_model) &&
2889 +- (c->x86_mask == x->x86_mask))
2890 ++ (c->x86_stepping == x->x86_stepping))
2891 + return 1;
2892 + return 0;
2893 + }
2894 +diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
2895 +index 8085ec9000d1..e3a9962ee410 100644
2896 +--- a/drivers/cpufreq/speedstep-lib.c
2897 ++++ b/drivers/cpufreq/speedstep-lib.c
2898 +@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
2899 + ebx = cpuid_ebx(0x00000001);
2900 + ebx &= 0x000000FF;
2901 +
2902 +- pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
2903 ++ pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
2904 +
2905 +- switch (c->x86_mask) {
2906 ++ switch (c->x86_stepping) {
2907 + case 4:
2908 + /*
2909 + * B-stepping [M-P4-M]
2910 +@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
2911 + msr_lo, msr_hi);
2912 + if ((msr_hi & (1<<18)) &&
2913 + (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
2914 +- if (c->x86_mask == 0x01) {
2915 ++ if (c->x86_stepping == 0x01) {
2916 + pr_debug("early PIII version\n");
2917 + return SPEEDSTEP_CPU_PIII_C_EARLY;
2918 + } else
2919 +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
2920 +index 4b6642a25df5..1c6cbda56afe 100644
2921 +--- a/drivers/crypto/padlock-aes.c
2922 ++++ b/drivers/crypto/padlock-aes.c
2923 +@@ -512,7 +512,7 @@ static int __init padlock_init(void)
2924 +
2925 + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
2926 +
2927 +- if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
2928 ++ if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
2929 + ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
2930 + cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
2931 + printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
2932 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
2933 +index 0d01d1624252..63d636424161 100644
2934 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
2935 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
2936 +@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
2937 + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
2938 + ss = algt->ss;
2939 +
2940 +- spin_lock(&ss->slock);
2941 ++ spin_lock_bh(&ss->slock);
2942 +
2943 + writel(mode, ss->base + SS_CTL);
2944 +
2945 +@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
2946 + }
2947 +
2948 + writel(0, ss->base + SS_CTL);
2949 +- spin_unlock(&ss->slock);
2950 +- return dlen;
2951 ++ spin_unlock_bh(&ss->slock);
2952 ++ return 0;
2953 + }
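
Two separate fixes land in this hunk: the crypto_rng "generate" callback
must return 0 on success (returning dlen, as before, is treated as an
error by the crypto API), and the lock is converted to the BH-disabling
variant, presumably because the same lock can also be taken from
bottom-half context. A caller-side sketch (buffer sizes hypothetical):

	u8 seed[16], out[32];
	/* 0 on success, negative errno on failure */
	int err = crypto_rng_generate(tfm, seed, sizeof(seed),
				      out, sizeof(out));
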
2954 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
2955 +index 78fb496ecb4e..99c4021fc33b 100644
2956 +--- a/drivers/devfreq/devfreq.c
2957 ++++ b/drivers/devfreq/devfreq.c
2958 +@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
2959 + devfreq = devfreq_add_device(dev, profile, governor_name, data);
2960 + if (IS_ERR(devfreq)) {
2961 + devres_free(ptr);
2962 +- return ERR_PTR(-ENOMEM);
2963 ++ return devfreq;
2964 + }
2965 +
2966 + *ptr = devfreq;
2967 +diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
2968 +index b44d9d7db347..012fa3d1f407 100644
2969 +--- a/drivers/dma-buf/reservation.c
2970 ++++ b/drivers/dma-buf/reservation.c
2971 +@@ -455,13 +455,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
2972 + unsigned long timeout)
2973 + {
2974 + struct dma_fence *fence;
2975 +- unsigned seq, shared_count, i = 0;
2976 ++ unsigned seq, shared_count;
2977 + long ret = timeout ? timeout : 1;
2978 ++ int i;
2979 +
2980 + retry:
2981 + shared_count = 0;
2982 + seq = read_seqcount_begin(&obj->seq);
2983 + rcu_read_lock();
2984 ++ i = -1;
2985 +
2986 + fence = rcu_dereference(obj->fence_excl);
2987 + if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
2988 +@@ -477,14 +479,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
2989 + fence = NULL;
2990 + }
2991 +
2992 +- if (!fence && wait_all) {
2993 ++ if (wait_all) {
2994 + struct reservation_object_list *fobj =
2995 + rcu_dereference(obj->fence);
2996 +
2997 + if (fobj)
2998 + shared_count = fobj->shared_count;
2999 +
3000 +- for (i = 0; i < shared_count; ++i) {
3001 ++ for (i = 0; !fence && i < shared_count; ++i) {
3002 + struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
3003 +
3004 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
3005 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
3006 +index 8b16ec595fa7..329cb96f886f 100644
3007 +--- a/drivers/edac/amd64_edac.c
3008 ++++ b/drivers/edac/amd64_edac.c
3009 +@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3010 + struct amd64_family_type *fam_type = NULL;
3011 +
3012 + pvt->ext_model = boot_cpu_data.x86_model >> 4;
3013 +- pvt->stepping = boot_cpu_data.x86_mask;
3014 ++ pvt->stepping = boot_cpu_data.x86_stepping;
3015 + pvt->model = boot_cpu_data.x86_model;
3016 + pvt->fam = boot_cpu_data.x86;
3017 +
3018 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
3019 +index 58888400f1b8..caebdbebdcd8 100644
3020 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
3021 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
3022 +@@ -40,7 +40,7 @@ struct smu_table_entry {
3023 + uint32_t table_addr_high;
3024 + uint32_t table_addr_low;
3025 + uint8_t *table;
3026 +- uint32_t handle;
3027 ++ unsigned long handle;
3028 + };
3029 +
3030 + struct smu_table_array {
3031 +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
3032 +index 9555a3542022..831b73392d82 100644
3033 +--- a/drivers/gpu/drm/ast/ast_mode.c
3034 ++++ b/drivers/gpu/drm/ast/ast_mode.c
3035 +@@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc)
3036 + {
3037 + struct ast_private *ast = crtc->dev->dev_private;
3038 + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
3039 ++ ast_crtc_load_lut(crtc);
3040 + }
3041 +
3042 +
3043 +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
3044 +index aad468d170a7..d9c0f7573905 100644
3045 +--- a/drivers/gpu/drm/drm_auth.c
3046 ++++ b/drivers/gpu/drm/drm_auth.c
3047 +@@ -230,6 +230,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
3048 + if (!dev->master)
3049 + goto out_unlock;
3050 +
3051 ++ if (file_priv->master->lessor != NULL) {
3052 ++ DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id);
3053 ++ ret = -EINVAL;
3054 ++ goto out_unlock;
3055 ++ }
3056 ++
3057 + ret = 0;
3058 + drm_drop_master(dev, file_priv);
3059 + out_unlock:
3060 +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
3061 +index 4756b3c9bf2c..9a9214ae0fb5 100644
3062 +--- a/drivers/gpu/drm/qxl/qxl_display.c
3063 ++++ b/drivers/gpu/drm/qxl/qxl_display.c
3064 +@@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
3065 + {
3066 + struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
3067 +
3068 ++ qxl_bo_unref(&qxl_crtc->cursor_bo);
3069 + drm_crtc_cleanup(crtc);
3070 + kfree(qxl_crtc);
3071 + }
3072 +@@ -495,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
3073 + return 0;
3074 + }
3075 +
3076 ++static int qxl_primary_apply_cursor(struct drm_plane *plane)
3077 ++{
3078 ++ struct drm_device *dev = plane->dev;
3079 ++ struct qxl_device *qdev = dev->dev_private;
3080 ++ struct drm_framebuffer *fb = plane->state->fb;
3081 ++ struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
3082 ++ struct qxl_cursor_cmd *cmd;
3083 ++ struct qxl_release *release;
3084 ++ int ret = 0;
3085 ++
3086 ++ if (!qcrtc->cursor_bo)
3087 ++ return 0;
3088 ++
3089 ++ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
3090 ++ QXL_RELEASE_CURSOR_CMD,
3091 ++ &release, NULL);
3092 ++ if (ret)
3093 ++ return ret;
3094 ++
3095 ++ ret = qxl_release_list_add(release, qcrtc->cursor_bo);
3096 ++ if (ret)
3097 ++ goto out_free_release;
3098 ++
3099 ++ ret = qxl_release_reserve_list(release, false);
3100 ++ if (ret)
3101 ++ goto out_free_release;
3102 ++
3103 ++ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
3104 ++ cmd->type = QXL_CURSOR_SET;
3105 ++ cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
3106 ++ cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;
3107 ++
3108 ++ cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
3109 ++
3110 ++ cmd->u.set.visible = 1;
3111 ++ qxl_release_unmap(qdev, release, &cmd->release_info);
3112 ++
3113 ++ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
3114 ++ qxl_release_fence_buffer_objects(release);
3115 ++
3116 ++ return ret;
3117 ++
3118 ++out_free_release:
3119 ++ qxl_release_free(qdev, release);
3120 ++ return ret;
3121 ++}
3122 ++
3123 + static void qxl_primary_atomic_update(struct drm_plane *plane,
3124 + struct drm_plane_state *old_state)
3125 + {
3126 +@@ -510,6 +558,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
3127 + .x2 = qfb->base.width,
3128 + .y2 = qfb->base.height
3129 + };
3130 ++ int ret;
3131 + bool same_shadow = false;
3132 +
3133 + if (old_state->fb) {
3134 +@@ -531,6 +580,11 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
3135 + if (!same_shadow)
3136 + qxl_io_destroy_primary(qdev);
3137 + bo_old->is_primary = false;
3138 ++
3139 ++ ret = qxl_primary_apply_cursor(plane);
3140 ++ if (ret)
3141 ++ DRM_ERROR(
3142 ++ "could not set cursor after creating primary");
3143 + }
3144 +
3145 + if (!bo->is_primary) {
3146 +@@ -571,11 +625,12 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
3147 + struct drm_device *dev = plane->dev;
3148 + struct qxl_device *qdev = dev->dev_private;
3149 + struct drm_framebuffer *fb = plane->state->fb;
3150 ++ struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
3151 + struct qxl_release *release;
3152 + struct qxl_cursor_cmd *cmd;
3153 + struct qxl_cursor *cursor;
3154 + struct drm_gem_object *obj;
3155 +- struct qxl_bo *cursor_bo, *user_bo = NULL;
3156 ++ struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
3157 + int ret;
3158 + void *user_ptr;
3159 + int size = 64*64*4;
3160 +@@ -628,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
3161 + cmd->u.set.shape = qxl_bo_physical_address(qdev,
3162 + cursor_bo, 0);
3163 + cmd->type = QXL_CURSOR_SET;
3164 ++
3165 ++ qxl_bo_unref(&qcrtc->cursor_bo);
3166 ++ qcrtc->cursor_bo = cursor_bo;
3167 ++ cursor_bo = NULL;
3168 + } else {
3169 +
3170 + ret = qxl_release_reserve_list(release, true);
3171 +@@ -645,6 +704,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
3172 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
3173 + qxl_release_fence_buffer_objects(release);
3174 +
3175 ++ qxl_bo_unref(&cursor_bo);
3176 ++
3177 + return;
3178 +
3179 + out_backoff:
3180 +diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
3181 +index 08752c0ffb35..00a1a66b052a 100644
3182 +--- a/drivers/gpu/drm/qxl/qxl_drv.h
3183 ++++ b/drivers/gpu/drm/qxl/qxl_drv.h
3184 +@@ -111,6 +111,8 @@ struct qxl_bo_list {
3185 + struct qxl_crtc {
3186 + struct drm_crtc base;
3187 + int index;
3188 ++
3189 ++ struct qxl_bo *cursor_bo;
3190 + };
3191 +
3192 + struct qxl_output {
3193 +diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
3194 +index d34d1cf33895..95f4db70dd22 100644
3195 +--- a/drivers/gpu/drm/radeon/radeon_uvd.c
3196 ++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
3197 +@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
3198 + /* calc dclk divider with current vco freq */
3199 + dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
3200 + pd_min, pd_even);
3201 +- if (vclk_div > pd_max)
3202 ++ if (dclk_div > pd_max)
3203 + break; /* vco is too big, it has to stop */
3204 +
3205 + /* calc score with current vco freq */
3206 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
3207 +index ee3e74266a13..97a0a639dad9 100644
3208 +--- a/drivers/gpu/drm/radeon/si_dpm.c
3209 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
3210 +@@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3211 + (rdev->pdev->device == 0x6667)) {
3212 + max_sclk = 75000;
3213 + }
3214 ++ if ((rdev->pdev->revision == 0xC3) ||
3215 ++ (rdev->pdev->device == 0x6665)) {
3216 ++ max_sclk = 60000;
3217 ++ max_mclk = 80000;
3218 ++ }
3219 + } else if (rdev->family == CHIP_OLAND) {
3220 + if ((rdev->pdev->revision == 0xC7) ||
3221 + (rdev->pdev->revision == 0x80) ||
3222 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
3223 +index c088703777e2..68eed684dff5 100644
3224 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
3225 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
3226 +@@ -175,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
3227 + list_add_tail(&bo->lru, &man->lru[bo->priority]);
3228 + kref_get(&bo->list_kref);
3229 +
3230 +- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
3231 ++ if (bo->ttm && !(bo->ttm->page_flags &
3232 ++ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
3233 + list_add_tail(&bo->swap,
3234 + &bo->glob->swap_lru[bo->priority]);
3235 + kref_get(&bo->list_kref);
3236 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
3237 +index c8ebb757e36b..b17d0d38f290 100644
3238 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
3239 ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
3240 +@@ -299,7 +299,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
3241 +
3242 + static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
3243 + unsigned long offset,
3244 +- void *buf, int len, int write)
3245 ++ uint8_t *buf, int len, int write)
3246 + {
3247 + unsigned long page = offset >> PAGE_SHIFT;
3248 + unsigned long bytes_left = len;
3249 +@@ -328,6 +328,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
3250 + ttm_bo_kunmap(&map);
3251 +
3252 + page++;
3253 ++ buf += bytes;
3254 + bytes_left -= bytes;
3255 + offset = 0;
3256 + } while (bytes_left);
3257 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
3258 +index c13a4fd86b3c..a42744c7665b 100644
3259 +--- a/drivers/hwmon/coretemp.c
3260 ++++ b/drivers/hwmon/coretemp.c
3261 +@@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
3262 + for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
3263 + const struct tjmax_model *tm = &tjmax_model_table[i];
3264 + if (c->x86_model == tm->model &&
3265 +- (tm->mask == ANY || c->x86_mask == tm->mask))
3266 ++ (tm->mask == ANY || c->x86_stepping == tm->mask))
3267 + return tm->tjmax;
3268 + }
3269 +
3270 + /* Early chips have no MSR for TjMax */
3271 +
3272 +- if (c->x86_model == 0xf && c->x86_mask < 4)
3273 ++ if (c->x86_model == 0xf && c->x86_stepping < 4)
3274 + usemsr_ee = 0;
3275 +
3276 + if (c->x86_model > 0xe && usemsr_ee) {
3277 +@@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu)
3278 + * Readings might stop update when processor visited too deep sleep,
3279 + * fixed for stepping D0 (6EC).
3280 + */
3281 +- if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
3282 ++ if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
3283 + pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
3284 + return -ENODEV;
3285 + }
3286 +diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
3287 +index ef91b8a67549..84e91286fc4f 100644
3288 +--- a/drivers/hwmon/hwmon-vid.c
3289 ++++ b/drivers/hwmon/hwmon-vid.c
3290 +@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
3291 + if (c->x86 < 6) /* Any CPU with family lower than 6 */
3292 + return 0; /* doesn't have VID */
3293 +
3294 +- vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
3295 ++ vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
3296 + if (vrm_ret == 134)
3297 + vrm_ret = get_via_model_d_vrm();
3298 + if (vrm_ret == 0)
3299 +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
3300 +index 0721e175664a..b960015cb073 100644
3301 +--- a/drivers/hwmon/k10temp.c
3302 ++++ b/drivers/hwmon/k10temp.c
3303 +@@ -226,7 +226,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
3304 + * and AM3 formats, but that's the best we can do.
3305 + */
3306 + return boot_cpu_data.x86_model < 4 ||
3307 +- (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
3308 ++ (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
3309 + }
3310 +
3311 + static int k10temp_probe(struct pci_dev *pdev,
3312 +diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
3313 +index 5a632bcf869b..e59f9113fb93 100644
3314 +--- a/drivers/hwmon/k8temp.c
3315 ++++ b/drivers/hwmon/k8temp.c
3316 +@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
3317 + return -ENOMEM;
3318 +
3319 + model = boot_cpu_data.x86_model;
3320 +- stepping = boot_cpu_data.x86_mask;
3321 ++ stepping = boot_cpu_data.x86_stepping;
3322 +
3323 + /* feature available since SH-C0, exclude older revisions */
3324 + if ((model == 4 && stepping == 0) ||
3325 +diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
3326 +index 465520627e4b..d7d042a20ab4 100644
3327 +--- a/drivers/infiniband/core/device.c
3328 ++++ b/drivers/infiniband/core/device.c
3329 +@@ -462,7 +462,6 @@ int ib_register_device(struct ib_device *device,
3330 + struct ib_udata uhw = {.outlen = 0, .inlen = 0};
3331 + struct device *parent = device->dev.parent;
3332 +
3333 +- WARN_ON_ONCE(!parent);
3334 + WARN_ON_ONCE(device->dma_device);
3335 + if (device->dev.dma_ops) {
3336 + /*
3337 +@@ -471,16 +470,25 @@ int ib_register_device(struct ib_device *device,
3338 + * into device->dev.
3339 + */
3340 + device->dma_device = &device->dev;
3341 +- if (!device->dev.dma_mask)
3342 +- device->dev.dma_mask = parent->dma_mask;
3343 +- if (!device->dev.coherent_dma_mask)
3344 +- device->dev.coherent_dma_mask =
3345 +- parent->coherent_dma_mask;
3346 ++ if (!device->dev.dma_mask) {
3347 ++ if (parent)
3348 ++ device->dev.dma_mask = parent->dma_mask;
3349 ++ else
3350 ++ WARN_ON_ONCE(true);
3351 ++ }
3352 ++ if (!device->dev.coherent_dma_mask) {
3353 ++ if (parent)
3354 ++ device->dev.coherent_dma_mask =
3355 ++ parent->coherent_dma_mask;
3356 ++ else
3357 ++ WARN_ON_ONCE(true);
3358 ++ }
3359 + } else {
3360 + /*
3361 + * The caller did not provide custom DMA operations. Use the
3362 + * DMA mapping operations of the parent device.
3363 + */
3364 ++ WARN_ON_ONCE(!parent);
3365 + device->dma_device = parent;
3366 + }
3367 +
3368 +diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
3369 +index e30d86fa1855..8ae1308eecc7 100644
3370 +--- a/drivers/infiniband/core/sysfs.c
3371 ++++ b/drivers/infiniband/core/sysfs.c
3372 +@@ -1276,7 +1276,6 @@ int ib_device_register_sysfs(struct ib_device *device,
3373 + int ret;
3374 + int i;
3375 +
3376 +- WARN_ON_ONCE(!device->dev.parent);
3377 + ret = dev_set_name(class_dev, "%s", device->name);
3378 + if (ret)
3379 + return ret;
3380 +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
3381 +index 4b64dd02e090..3205800f9579 100644
3382 +--- a/drivers/infiniband/core/user_mad.c
3383 ++++ b/drivers/infiniband/core/user_mad.c
3384 +@@ -500,7 +500,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
3385 + }
3386 +
3387 + memset(&ah_attr, 0, sizeof ah_attr);
3388 +- ah_attr.type = rdma_ah_find_type(file->port->ib_dev,
3389 ++ ah_attr.type = rdma_ah_find_type(agent->device,
3390 + file->port->port_num);
3391 + rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
3392 + rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
3393 +diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
3394 +index c3ee5d9b336d..cca70d36ee15 100644
3395 +--- a/drivers/infiniband/core/uverbs_std_types.c
3396 ++++ b/drivers/infiniband/core/uverbs_std_types.c
3397 +@@ -315,7 +315,7 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
3398 + cq->uobject = &obj->uobject;
3399 + cq->comp_handler = ib_uverbs_comp_handler;
3400 + cq->event_handler = ib_uverbs_cq_event_handler;
3401 +- cq->cq_context = &ev_file->ev_queue;
3402 ++ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
3403 + obj->uobject.object = cq;
3404 + obj->uobject.user_handle = user_handle;
3405 + atomic_set(&cq->usecnt, 0);
3406 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
3407 +index 8c8a16791a3f..5caf37ba7fff 100644
3408 +--- a/drivers/infiniband/hw/mlx4/main.c
3409 ++++ b/drivers/infiniband/hw/mlx4/main.c
3410 +@@ -2995,9 +2995,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
3411 + kfree(ibdev->ib_uc_qpns_bitmap);
3412 +
3413 + err_steer_qp_release:
3414 +- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
3415 +- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3416 +- ibdev->steer_qpn_count);
3417 ++ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3418 ++ ibdev->steer_qpn_count);
3419 + err_counter:
3420 + for (i = 0; i < ibdev->num_ports; ++i)
3421 + mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
3422 +@@ -3102,11 +3101,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3423 + ibdev->iboe.nb.notifier_call = NULL;
3424 + }
3425 +
3426 +- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3427 +- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3428 +- ibdev->steer_qpn_count);
3429 +- kfree(ibdev->ib_uc_qpns_bitmap);
3430 +- }
3431 ++ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3432 ++ ibdev->steer_qpn_count);
3433 ++ kfree(ibdev->ib_uc_qpns_bitmap);
3434 +
3435 + iounmap(ibdev->uar_map);
3436 + for (p = 0; p < ibdev->num_ports; ++p)
3437 +diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
3438 +index 8f5754fb8579..e4a9ba1dd9ba 100644
3439 +--- a/drivers/infiniband/hw/qib/qib_rc.c
3440 ++++ b/drivers/infiniband/hw/qib/qib_rc.c
3441 +@@ -434,13 +434,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
3442 + qp->s_state = OP(COMPARE_SWAP);
3443 + put_ib_ateth_swap(wqe->atomic_wr.swap,
3444 + &ohdr->u.atomic_eth);
3445 +- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
3446 +- &ohdr->u.atomic_eth);
3447 ++ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
3448 ++ &ohdr->u.atomic_eth);
3449 + } else {
3450 + qp->s_state = OP(FETCH_ADD);
3451 + put_ib_ateth_swap(wqe->atomic_wr.compare_add,
3452 + &ohdr->u.atomic_eth);
3453 +- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
3454 ++ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
3455 + }
3456 + put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
3457 + &ohdr->u.atomic_eth);
3458 +diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
3459 +index d7472a442a2c..96c3a6c5c4b5 100644
3460 +--- a/drivers/infiniband/sw/rxe/rxe_loc.h
3461 ++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
3462 +@@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
3463 +
3464 + void rxe_release(struct kref *kref);
3465 +
3466 +-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
3467 + int rxe_completer(void *arg);
3468 + int rxe_requester(void *arg);
3469 + int rxe_responder(void *arg);
3470 +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
3471 +index 4469592b839d..137d6c0c49d4 100644
3472 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c
3473 ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
3474 +@@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
3475 + }
3476 +
3477 + /* called when the last reference to the qp is dropped */
3478 +-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
3479 ++static void rxe_qp_do_cleanup(struct work_struct *work)
3480 + {
3481 +- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
3482 ++ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
3483 +
3484 + rxe_drop_all_mcast_groups(qp);
3485 +
3486 +@@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
3487 + kernel_sock_shutdown(qp->sk, SHUT_RDWR);
3488 + sock_release(qp->sk);
3489 + }
3490 ++
3491 ++/* called when the last reference to the qp is dropped */
3492 ++void rxe_qp_cleanup(struct rxe_pool_entry *arg)
3493 ++{
3494 ++ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
3495 ++
3496 ++ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
3497 ++}
3498 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
3499 +index 26a7f923045b..7bdaf71b8221 100644
3500 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
3501 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
3502 +@@ -594,15 +594,8 @@ int rxe_requester(void *arg)
3503 + rxe_add_ref(qp);
3504 +
3505 + next_wqe:
3506 +- if (unlikely(!qp->valid)) {
3507 +- rxe_drain_req_pkts(qp, true);
3508 ++ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
3509 + goto exit;
3510 +- }
3511 +-
3512 +- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
3513 +- rxe_drain_req_pkts(qp, true);
3514 +- goto exit;
3515 +- }
3516 +
3517 + if (unlikely(qp->req.state == QP_STATE_RESET)) {
3518 + qp->req.wqe_index = consumer_index(qp->sq.queue);
3519 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
3520 +index 4240866a5331..01f926fd9029 100644
3521 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
3522 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
3523 +@@ -1210,7 +1210,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
3524 + }
3525 + }
3526 +
3527 +-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
3528 ++static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
3529 + {
3530 + struct sk_buff *skb;
3531 +
3532 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
3533 +index d03002b9d84d..7210a784abb4 100644
3534 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
3535 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
3536 +@@ -814,6 +814,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
3537 + (queue_count(qp->sq.queue) > 1);
3538 +
3539 + rxe_run_task(&qp->req.task, must_sched);
3540 ++ if (unlikely(qp->req.state == QP_STATE_ERROR))
3541 ++ rxe_run_task(&qp->comp.task, 1);
3542 +
3543 + return err;
3544 + }
3545 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
3546 +index 0c2dbe45c729..1019f5e7dbdd 100644
3547 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
3548 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
3549 +@@ -35,6 +35,7 @@
3550 + #define RXE_VERBS_H
3551 +
3552 + #include <linux/interrupt.h>
3553 ++#include <linux/workqueue.h>
3554 + #include <rdma/rdma_user_rxe.h>
3555 + #include "rxe_pool.h"
3556 + #include "rxe_task.h"
3557 +@@ -281,6 +282,8 @@ struct rxe_qp {
3558 + struct timer_list rnr_nak_timer;
3559 +
3560 + spinlock_t state_lock; /* guard requester and completer */
3561 ++
3562 ++ struct execute_work cleanup_work;
3563 + };
3564 +
3565 + enum rxe_mem_state {
3566 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3567 +index de17b7193299..1c42b00d3be2 100644
3568 +--- a/drivers/md/dm.c
3569 ++++ b/drivers/md/dm.c
3570 +@@ -817,7 +817,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
3571 + queue_io(md, bio);
3572 + } else {
3573 + /* done with normal IO or empty flush */
3574 +- bio->bi_status = io_error;
3575 ++ if (io_error)
3576 ++ bio->bi_status = io_error;
3577 + bio_endio(bio);
3578 + }
3579 + }
3580 +diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
3581 +index ba80376a3b86..d097eb04a0e9 100644
3582 +--- a/drivers/media/tuners/r820t.c
3583 ++++ b/drivers/media/tuners/r820t.c
3584 +@@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
3585 + return 0;
3586 + }
3587 +
3588 +-static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
3589 ++static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
3590 + {
3591 +- return r820t_write(priv, reg, &val, 1);
3592 ++ u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
3593 ++
3594 ++ return r820t_write(priv, reg, &tmp, 1);
3595 + }
3596 +
3597 + static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
3598 +@@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
3599 + return -EINVAL;
3600 + }
3601 +
3602 +-static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
3603 ++static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
3604 + u8 bit_mask)
3605 + {
3606 ++ u8 tmp = val;
3607 + int rc = r820t_read_cache_reg(priv, reg);
3608 +
3609 + if (rc < 0)
3610 + return rc;
3611 +
3612 +- val = (rc & ~bit_mask) | (val & bit_mask);
3613 ++ tmp = (rc & ~bit_mask) | (tmp & bit_mask);
3614 +
3615 +- return r820t_write(priv, reg, &val, 1);
3616 ++ return r820t_write(priv, reg, &tmp, 1);
3617 + }
3618 +
3619 + static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
3620 +diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
3621 +index 229dc18f0581..768972af8b85 100644
3622 +--- a/drivers/mmc/host/bcm2835.c
3623 ++++ b/drivers/mmc/host/bcm2835.c
3624 +@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
3625 + char pio_limit_string[20];
3626 + int ret;
3627 +
3628 +- mmc->f_max = host->max_clk;
3629 ++ if (!mmc->f_max || mmc->f_max > host->max_clk)
3630 ++ mmc->f_max = host->max_clk;
3631 + mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
3632 +
3633 + mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
3634 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
3635 +index e0862d3f65b3..730fbe01726d 100644
3636 +--- a/drivers/mmc/host/meson-gx-mmc.c
3637 ++++ b/drivers/mmc/host/meson-gx-mmc.c
3638 +@@ -716,22 +716,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
3639 + static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
3640 + {
3641 + struct meson_host *host = mmc_priv(mmc);
3642 +- int ret;
3643 +-
3644 +- /*
3645 +- * If this is the initial tuning, try to get a sane Rx starting
3646 +- * phase before doing the actual tuning.
3647 +- */
3648 +- if (!mmc->doing_retune) {
3649 +- ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
3650 +-
3651 +- if (ret)
3652 +- return ret;
3653 +- }
3654 +-
3655 +- ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
3656 +- if (ret)
3657 +- return ret;
3658 +
3659 + return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
3660 + }
3661 +@@ -762,9 +746,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
3662 + if (!IS_ERR(mmc->supply.vmmc))
3663 + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
3664 +
3665 +- /* Reset phases */
3666 ++ /* Reset rx phase */
3667 + clk_set_phase(host->rx_clk, 0);
3668 +- clk_set_phase(host->tx_clk, 270);
3669 +
3670 + break;
3671 +
3672 +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
3673 +index 1f424374bbbb..4ffa6b173a21 100644
3674 +--- a/drivers/mmc/host/sdhci-of-esdhc.c
3675 ++++ b/drivers/mmc/host/sdhci-of-esdhc.c
3676 +@@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
3677 +
3678 + static void esdhc_reset(struct sdhci_host *host, u8 mask)
3679 + {
3680 ++ u32 val;
3681 ++
3682 + sdhci_reset(host, mask);
3683 +
3684 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3685 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3686 ++
3687 ++ if (mask & SDHCI_RESET_ALL) {
3688 ++ val = sdhci_readl(host, ESDHC_TBCTL);
3689 ++ val &= ~ESDHC_TB_EN;
3690 ++ sdhci_writel(host, val, ESDHC_TBCTL);
3691 ++ }
3692 + }
3693 +
3694 + /* The SCFG, Supplemental Configuration Unit, provides SoC specific
3695 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3696 +index e9290a3439d5..d24306b2b839 100644
3697 +--- a/drivers/mmc/host/sdhci.c
3698 ++++ b/drivers/mmc/host/sdhci.c
3699 +@@ -21,6 +21,7 @@
3700 + #include <linux/dma-mapping.h>
3701 + #include <linux/slab.h>
3702 + #include <linux/scatterlist.h>
3703 ++#include <linux/sizes.h>
3704 + #include <linux/swiotlb.h>
3705 + #include <linux/regulator/consumer.h>
3706 + #include <linux/pm_runtime.h>
3707 +@@ -502,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
3708 + if (data->host_cookie == COOKIE_PRE_MAPPED)
3709 + return data->sg_count;
3710 +
3711 +- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
3712 +- mmc_get_dma_dir(data));
3713 ++ /* Bounce write requests to the bounce buffer */
3714 ++ if (host->bounce_buffer) {
3715 ++ unsigned int length = data->blksz * data->blocks;
3716 ++
3717 ++ if (length > host->bounce_buffer_size) {
3718 ++ pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
3719 ++ mmc_hostname(host->mmc), length,
3720 ++ host->bounce_buffer_size);
3721 ++ return -EIO;
3722 ++ }
3723 ++ if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
3724 ++ /* Copy the data to the bounce buffer */
3725 ++ sg_copy_to_buffer(data->sg, data->sg_len,
3726 ++ host->bounce_buffer,
3727 ++ length);
3728 ++ }
3729 ++ /* Switch ownership to the DMA */
3730 ++ dma_sync_single_for_device(host->mmc->parent,
3731 ++ host->bounce_addr,
3732 ++ host->bounce_buffer_size,
3733 ++ mmc_get_dma_dir(data));
3734 ++ /* Just a dummy value */
3735 ++ sg_count = 1;
3736 ++ } else {
3737 ++ /* Just access the data directly from memory */
3738 ++ sg_count = dma_map_sg(mmc_dev(host->mmc),
3739 ++ data->sg, data->sg_len,
3740 ++ mmc_get_dma_dir(data));
3741 ++ }
3742 +
3743 + if (sg_count == 0)
3744 + return -ENOSPC;
3745 +@@ -673,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
3746 + }
3747 + }
3748 +
3749 ++static u32 sdhci_sdma_address(struct sdhci_host *host)
3750 ++{
3751 ++ if (host->bounce_buffer)
3752 ++ return host->bounce_addr;
3753 ++ else
3754 ++ return sg_dma_address(host->data->sg);
3755 ++}
3756 ++
3757 + static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
3758 + {
3759 + u8 count;
3760 +@@ -858,8 +894,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
3761 + SDHCI_ADMA_ADDRESS_HI);
3762 + } else {
3763 + WARN_ON(sg_cnt != 1);
3764 +- sdhci_writel(host, sg_dma_address(data->sg),
3765 +- SDHCI_DMA_ADDRESS);
3766 ++ sdhci_writel(host, sdhci_sdma_address(host),
3767 ++ SDHCI_DMA_ADDRESS);
3768 + }
3769 + }
3770 +
3771 +@@ -2248,7 +2284,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
3772 +
3773 + mrq->data->host_cookie = COOKIE_UNMAPPED;
3774 +
3775 +- if (host->flags & SDHCI_REQ_USE_DMA)
3776 ++ /*
3777 ++ * No pre-mapping in the pre hook if we're using the bounce buffer,
3778 ++ * for that we would need two bounce buffers since one buffer is
3779 ++ * in flight when this is getting called.
3780 ++ */
3781 ++ if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
3782 + sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
3783 + }
3784 +
3785 +@@ -2352,8 +2393,45 @@ static bool sdhci_request_done(struct sdhci_host *host)
3786 + struct mmc_data *data = mrq->data;
3787 +
3788 + if (data && data->host_cookie == COOKIE_MAPPED) {
3789 +- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
3790 +- mmc_get_dma_dir(data));
3791 ++ if (host->bounce_buffer) {
3792 ++ /*
3793 ++ * On reads, copy the bounced data into the
3794 ++ * sglist
3795 ++ */
3796 ++ if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3797 ++ unsigned int length = data->bytes_xfered;
3798 ++
3799 ++ if (length > host->bounce_buffer_size) {
3800 ++ pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3801 ++ mmc_hostname(host->mmc),
3802 ++ host->bounce_buffer_size,
3803 ++ data->bytes_xfered);
3804 ++ /* Cap it down and continue */
3805 ++ length = host->bounce_buffer_size;
3806 ++ }
3807 ++ dma_sync_single_for_cpu(
3808 ++ host->mmc->parent,
3809 ++ host->bounce_addr,
3810 ++ host->bounce_buffer_size,
3811 ++ DMA_FROM_DEVICE);
3812 ++ sg_copy_from_buffer(data->sg,
3813 ++ data->sg_len,
3814 ++ host->bounce_buffer,
3815 ++ length);
3816 ++ } else {
3817 ++ /* No copying, just switch ownership */
3818 ++ dma_sync_single_for_cpu(
3819 ++ host->mmc->parent,
3820 ++ host->bounce_addr,
3821 ++ host->bounce_buffer_size,
3822 ++ mmc_get_dma_dir(data));
3823 ++ }
3824 ++ } else {
3825 ++ /* Unmap the raw data */
3826 ++ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3827 ++ data->sg_len,
3828 ++ mmc_get_dma_dir(data));
3829 ++ }
3830 + data->host_cookie = COOKIE_UNMAPPED;
3831 + }
3832 + }
3833 +@@ -2636,7 +2714,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3834 + */
3835 + if (intmask & SDHCI_INT_DMA_END) {
3836 + u32 dmastart, dmanow;
3837 +- dmastart = sg_dma_address(host->data->sg);
3838 ++
3839 ++ dmastart = sdhci_sdma_address(host);
3840 + dmanow = dmastart + host->data->bytes_xfered;
3841 + /*
3842 + * Force update to the next DMA block boundary.
3843 +@@ -3217,6 +3296,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3844 + }
3845 + EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3846 +
3847 ++static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3848 ++{
3849 ++ struct mmc_host *mmc = host->mmc;
3850 ++ unsigned int max_blocks;
3851 ++ unsigned int bounce_size;
3852 ++ int ret;
3853 ++
3854 ++ /*
3855 ++ * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3856 ++ * has diminishing returns, this is probably because SD/MMC
3857 ++ * cards are usually optimized to handle this size of requests.
3858 ++ */
3859 ++ bounce_size = SZ_64K;
3860 ++ /*
3861 ++ * Adjust downwards to maximum request size if this is less
3862 ++ * than our segment size, else hammer down the maximum
3863 ++ * request size to the maximum buffer size.
3864 ++ */
3865 ++ if (mmc->max_req_size < bounce_size)
3866 ++ bounce_size = mmc->max_req_size;
3867 ++ max_blocks = bounce_size / 512;
3868 ++
3869 ++ /*
3870 ++ * When we just support one segment, we can get significant
3871 ++ * speedups by the help of a bounce buffer to group scattered
3872 ++ * reads/writes together.
3873 ++ */
3874 ++ host->bounce_buffer = devm_kmalloc(mmc->parent,
3875 ++ bounce_size,
3876 ++ GFP_KERNEL);
3877 ++ if (!host->bounce_buffer) {
3878 ++ pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3879 ++ mmc_hostname(mmc),
3880 ++ bounce_size);
3881 ++ /*
3882 ++ * Exiting with zero here makes sure we proceed with
3883 ++ * mmc->max_segs == 1.
3884 ++ */
3885 ++ return 0;
3886 ++ }
3887 ++
3888 ++ host->bounce_addr = dma_map_single(mmc->parent,
3889 ++ host->bounce_buffer,
3890 ++ bounce_size,
3891 ++ DMA_BIDIRECTIONAL);
3892 ++ ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3893 ++ if (ret)
3894 ++ /* Again fall back to max_segs == 1 */
3895 ++ return 0;
3896 ++ host->bounce_buffer_size = bounce_size;
3897 ++
3898 ++ /* Lie about this since we're bouncing */
3899 ++ mmc->max_segs = max_blocks;
3900 ++ mmc->max_seg_size = bounce_size;
3901 ++ mmc->max_req_size = bounce_size;
3902 ++
3903 ++ pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3904 ++ mmc_hostname(mmc), max_blocks, bounce_size);
3905 ++
3906 ++ return 0;
3907 ++}
3908 ++
3909 + int sdhci_setup_host(struct sdhci_host *host)
3910 + {
3911 + struct mmc_host *mmc;
3912 +@@ -3713,6 +3854,13 @@ int sdhci_setup_host(struct sdhci_host *host)
3913 + */
3914 + mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3915 +
3916 ++ if (mmc->max_segs == 1) {
3917 ++ /* This may alter mmc->*_blk_* parameters */
3918 ++ ret = sdhci_allocate_bounce_buffer(host);
3919 ++ if (ret)
3920 ++ return ret;
3921 ++ }
3922 ++
3923 + return 0;
3924 +
3925 + unreg:
3926 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
3927 +index 54bc444c317f..1d7d61e25dbf 100644
3928 +--- a/drivers/mmc/host/sdhci.h
3929 ++++ b/drivers/mmc/host/sdhci.h
3930 +@@ -440,6 +440,9 @@ struct sdhci_host {
3931 +
3932 + int irq; /* Device IRQ */
3933 + void __iomem *ioaddr; /* Mapped address */
3934 ++ char *bounce_buffer; /* For packing SDMA reads/writes */
3935 ++ dma_addr_t bounce_addr;
3936 ++ unsigned int bounce_buffer_size;
3937 +
3938 + const struct sdhci_ops *ops; /* Low level hw interface */
3939 +
3940 +diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
3941 +index 8037d4b48a05..e2583a539b41 100644
3942 +--- a/drivers/mtd/nand/vf610_nfc.c
3943 ++++ b/drivers/mtd/nand/vf610_nfc.c
3944 +@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
3945 + if (mtd->oobsize > 64)
3946 + mtd->oobsize = 64;
3947 +
3948 +- /*
3949 +- * mtd->ecclayout is not specified here because we're using the
3950 +- * default large page ECC layout defined in NAND core.
3951 +- */
3952 ++ /* Use default large page ECC layout defined in NAND core */
3953 ++ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
3954 + if (chip->ecc.strength == 32) {
3955 + nfc->ecc_mode = ECC_60_BYTE;
3956 + chip->ecc.bytes = 60;
3957 +diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
3958 +index 634b2f41cc9e..908acd4624e8 100644
3959 +--- a/drivers/net/ethernet/marvell/mvpp2.c
3960 ++++ b/drivers/net/ethernet/marvell/mvpp2.c
3961 +@@ -7127,6 +7127,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
3962 + int id = port->id;
3963 + bool allmulti = dev->flags & IFF_ALLMULTI;
3964 +
3965 ++retry:
3966 + mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
3967 + mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
3968 + mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
3969 +@@ -7134,9 +7135,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
3970 + /* Remove all port->id's mcast enries */
3971 + mvpp2_prs_mcast_del_all(priv, id);
3972 +
3973 +- if (allmulti && !netdev_mc_empty(dev)) {
3974 +- netdev_for_each_mc_addr(ha, dev)
3975 +- mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
3976 ++ if (!allmulti) {
3977 ++ netdev_for_each_mc_addr(ha, dev) {
3978 ++ if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
3979 ++ allmulti = true;
3980 ++ goto retry;
3981 ++ }
3982 ++ }
3983 + }
3984 + }
3985 +
3986 +diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
3987 +index 769598f7b6c8..3aaf4bad6c5a 100644
3988 +--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
3989 ++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
3990 +@@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
3991 + u64 in_param = 0;
3992 + int err;
3993 +
3994 ++ if (!cnt)
3995 ++ return;
3996 ++
3997 + if (mlx4_is_mfunc(dev)) {
3998 + set_param_l(&in_param, base_qpn);
3999 + set_param_h(&in_param, cnt);
4000 +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
4001 +index cd314946452c..9511f5fe62f4 100644
4002 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
4003 ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
4004 +@@ -2781,7 +2781,10 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
4005 + {
4006 + struct pcie_service_card *card = adapter->card;
4007 +
4008 +- pci_reset_function(card->dev);
4009 ++ /* We can't afford to wait here; remove() might be waiting on us. If we
4010 ++ * can't grab the device lock, maybe we'll get another chance later.
4011 ++ */
4012 ++ pci_try_reset_function(card->dev);
4013 + }
4014 +
4015 + static void mwifiex_pcie_work(struct work_struct *work)
4016 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4017 +index 43e18c4c1e68..999ddd947b2a 100644
4018 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4019 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4020 +@@ -1123,7 +1123,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
4021 + }
4022 + if (0 == tmp) {
4023 + read_addr = REG_DBI_RDATA + addr % 4;
4024 +- ret = rtl_read_word(rtlpriv, read_addr);
4025 ++ ret = rtl_read_byte(rtlpriv, read_addr);
4026 + }
4027 + return ret;
4028 + }
4029 +@@ -1165,7 +1165,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
4030 + }
4031 +
4032 + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
4033 +- _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
4034 ++ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
4035 ++ ASPM_L1_LATENCY << 3);
4036 +
4037 + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
4038 + _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
4039 +diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
4040 +index 92d4859ec906..2a37125b2ef5 100644
4041 +--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
4042 ++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
4043 +@@ -99,6 +99,7 @@
4044 + #define RTL_USB_MAX_RX_COUNT 100
4045 + #define QBSS_LOAD_SIZE 5
4046 + #define MAX_WMMELE_LENGTH 64
4047 ++#define ASPM_L1_LATENCY 7
4048 +
4049 + #define TOTAL_CAM_ENTRY 32
4050 +
4051 +diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
4052 +index 5bee3af47588..39405598b22d 100644
4053 +--- a/drivers/pci/dwc/pci-keystone.c
4054 ++++ b/drivers/pci/dwc/pci-keystone.c
4055 +@@ -178,7 +178,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
4056 + }
4057 +
4058 + /* interrupt controller is in a child node */
4059 +- *np_temp = of_find_node_by_name(np_pcie, controller);
4060 ++ *np_temp = of_get_child_by_name(np_pcie, controller);
4061 + if (!(*np_temp)) {
4062 + dev_err(dev, "Node for %s is absent\n", controller);
4063 + return -EINVAL;
4064 +@@ -187,6 +187,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
4065 + temp = of_irq_count(*np_temp);
4066 + if (!temp) {
4067 + dev_err(dev, "No IRQ entries in %s\n", controller);
4068 ++ of_node_put(*np_temp);
4069 + return -EINVAL;
4070 + }
4071 +
4072 +@@ -204,6 +205,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
4073 + break;
4074 + }
4075 +
4076 ++ of_node_put(*np_temp);
4077 ++
4078 + if (temp) {
4079 + *num_irqs = temp;
4080 + return 0;
4081 +diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
4082 +index a5073a921a04..32228d41f746 100644
4083 +--- a/drivers/pci/host/pcie-iproc-platform.c
4084 ++++ b/drivers/pci/host/pcie-iproc-platform.c
4085 +@@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
4086 + pcie->need_ob_cfg = true;
4087 + }
4088 +
4089 ++ /*
4090 ++ * DT nodes are not used by all platforms that use the iProc PCIe
4091 ++ * core driver. For platforms that require explict inbound mapping
4092 ++ * configuration, "dma-ranges" would have been present in DT
4093 ++ */
4094 ++ pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
4095 ++
4096 + /* PHY use is optional */
4097 + pcie->phy = devm_phy_get(dev, "pcie-phy");
4098 + if (IS_ERR(pcie->phy)) {
4099 +diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
4100 +index 935909bbe5c4..75836067f538 100644
4101 +--- a/drivers/pci/host/pcie-iproc.c
4102 ++++ b/drivers/pci/host/pcie-iproc.c
4103 +@@ -1378,9 +1378,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
4104 + }
4105 + }
4106 +
4107 +- ret = iproc_pcie_map_dma_ranges(pcie);
4108 +- if (ret && ret != -ENOENT)
4109 +- goto err_power_off_phy;
4110 ++ if (pcie->need_ib_cfg) {
4111 ++ ret = iproc_pcie_map_dma_ranges(pcie);
4112 ++ if (ret && ret != -ENOENT)
4113 ++ goto err_power_off_phy;
4114 ++ }
4115 +
4116 + #ifdef CONFIG_ARM
4117 + pcie->sysdata.private_data = pcie;
4118 +diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
4119 +index a6b55cec9a66..4ac6282f2bfd 100644
4120 +--- a/drivers/pci/host/pcie-iproc.h
4121 ++++ b/drivers/pci/host/pcie-iproc.h
4122 +@@ -74,6 +74,7 @@ struct iproc_msi;
4123 + * @ob: outbound mapping related parameters
4124 + * @ob_map: outbound mapping related parameters specific to the controller
4125 + *
4126 ++ * @need_ib_cfg: indicates SW needs to configure the inbound mapping window
4127 + * @ib: inbound mapping related parameters
4128 + * @ib_map: outbound mapping region related parameters
4129 + *
4130 +@@ -101,6 +102,7 @@ struct iproc_pcie {
4131 + struct iproc_pcie_ob ob;
4132 + const struct iproc_pcie_ob_map *ob_map;
4133 +
4134 ++ bool need_ib_cfg;
4135 + struct iproc_pcie_ib ib;
4136 + const struct iproc_pcie_ib_map *ib_map;
4137 +
4138 +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
4139 +index 7bab0606f1a9..a89d8b990228 100644
4140 +--- a/drivers/pci/hotplug/pciehp_hpc.c
4141 ++++ b/drivers/pci/hotplug/pciehp_hpc.c
4142 +@@ -848,6 +848,13 @@ struct controller *pcie_init(struct pcie_device *dev)
4143 + if (pdev->hotplug_user_indicators)
4144 + slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
4145 +
4146 ++ /*
4147 ++ * We assume no Thunderbolt controllers support Command Complete events,
4148 ++ * but some controllers falsely claim they do.
4149 ++ */
4150 ++ if (pdev->is_thunderbolt)
4151 ++ slot_cap |= PCI_EXP_SLTCAP_NCCS;
4152 ++
4153 + ctrl->slot_cap = slot_cap;
4154 + mutex_init(&ctrl->ctrl_lock);
4155 + init_waitqueue_head(&ctrl->queue);
4156 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4157 +index 10684b17d0bd..d22750ea7444 100644
4158 +--- a/drivers/pci/quirks.c
4159 ++++ b/drivers/pci/quirks.c
4160 +@@ -1636,8 +1636,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev)
4161 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
4162 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
4163 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
4164 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
4165 +
4166 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
4167 +
4168 + /*
4169 + * It's possible for the MSI to get corrupted if shpc and acpi
4170 +diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
4171 +index 623d322447a2..7c4eb86c851e 100644
4172 +--- a/drivers/platform/x86/apple-gmux.c
4173 ++++ b/drivers/platform/x86/apple-gmux.c
4174 +@@ -24,7 +24,6 @@
4175 + #include <linux/delay.h>
4176 + #include <linux/pci.h>
4177 + #include <linux/vga_switcheroo.h>
4178 +-#include <linux/vgaarb.h>
4179 + #include <acpi/video.h>
4180 + #include <asm/io.h>
4181 +
4182 +@@ -54,7 +53,6 @@ struct apple_gmux_data {
4183 + bool indexed;
4184 + struct mutex index_lock;
4185 +
4186 +- struct pci_dev *pdev;
4187 + struct backlight_device *bdev;
4188 +
4189 + /* switcheroo data */
4190 +@@ -599,23 +597,6 @@ static int gmux_resume(struct device *dev)
4191 + return 0;
4192 + }
4193 +
4194 +-static struct pci_dev *gmux_get_io_pdev(void)
4195 +-{
4196 +- struct pci_dev *pdev = NULL;
4197 +-
4198 +- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
4199 +- u16 cmd;
4200 +-
4201 +- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4202 +- if (!(cmd & PCI_COMMAND_IO))
4203 +- continue;
4204 +-
4205 +- return pdev;
4206 +- }
4207 +-
4208 +- return NULL;
4209 +-}
4210 +-
4211 + static int is_thunderbolt(struct device *dev, void *data)
4212 + {
4213 + return to_pci_dev(dev)->is_thunderbolt;
4214 +@@ -631,7 +612,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
4215 + int ret = -ENXIO;
4216 + acpi_status status;
4217 + unsigned long long gpe;
4218 +- struct pci_dev *pdev = NULL;
4219 +
4220 + if (apple_gmux_data)
4221 + return -EBUSY;
4222 +@@ -682,7 +662,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
4223 + ver_minor = (version >> 16) & 0xff;
4224 + ver_release = (version >> 8) & 0xff;
4225 + } else {
4226 +- pr_info("gmux device not present or IO disabled\n");
4227 ++ pr_info("gmux device not present\n");
4228 + ret = -ENODEV;
4229 + goto err_release;
4230 + }
4231 +@@ -690,23 +670,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
4232 + pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
4233 + ver_release, (gmux_data->indexed ? "indexed" : "classic"));
4234 +
4235 +- /*
4236 +- * Apple systems with gmux are EFI based and normally don't use
4237 +- * VGA. In addition changing IO+MEM ownership between IGP and dGPU
4238 +- * disables IO/MEM used for backlight control on some systems.
4239 +- * Lock IO+MEM to GPU with active IO to prevent switch.
4240 +- */
4241 +- pdev = gmux_get_io_pdev();
4242 +- if (pdev && vga_tryget(pdev,
4243 +- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) {
4244 +- pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n",
4245 +- pci_name(pdev));
4246 +- ret = -EBUSY;
4247 +- goto err_release;
4248 +- } else if (pdev)
4249 +- pr_info("locked IO for PCI:%s\n", pci_name(pdev));
4250 +- gmux_data->pdev = pdev;
4251 +-
4252 + memset(&props, 0, sizeof(props));
4253 + props.type = BACKLIGHT_PLATFORM;
4254 + props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
4255 +@@ -822,10 +785,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
4256 + err_notify:
4257 + backlight_device_unregister(bdev);
4258 + err_release:
4259 +- if (gmux_data->pdev)
4260 +- vga_put(gmux_data->pdev,
4261 +- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
4262 +- pci_dev_put(pdev);
4263 + release_region(gmux_data->iostart, gmux_data->iolen);
4264 + err_free:
4265 + kfree(gmux_data);
4266 +@@ -845,11 +804,6 @@ static void gmux_remove(struct pnp_dev *pnp)
4267 + &gmux_notify_handler);
4268 + }
4269 +
4270 +- if (gmux_data->pdev) {
4271 +- vga_put(gmux_data->pdev,
4272 +- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
4273 +- pci_dev_put(gmux_data->pdev);
4274 +- }
4275 + backlight_device_unregister(gmux_data->bdev);
4276 +
4277 + release_region(gmux_data->iostart, gmux_data->iolen);
4278 +diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
4279 +index daa68acbc900..c0c8945603cb 100644
4280 +--- a/drivers/platform/x86/wmi.c
4281 ++++ b/drivers/platform/x86/wmi.c
4282 +@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)
4283 + goto probe_failure;
4284 + }
4285 +
4286 +- buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
4287 ++ buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);
4288 + if (!buf) {
4289 + ret = -ENOMEM;
4290 + goto probe_string_failure;
4291 +diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
4292 +index e2a946c0e667..304e891e35fc 100644
4293 +--- a/drivers/rtc/rtc-opal.c
4294 ++++ b/drivers/rtc/rtc-opal.c
4295 +@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
4296 + static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
4297 + {
4298 + long rc = OPAL_BUSY;
4299 ++ int retries = 10;
4300 + u32 y_m_d;
4301 + u64 h_m_s_ms;
4302 + __be32 __y_m_d;
4303 +@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
4304 + rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
4305 + if (rc == OPAL_BUSY_EVENT)
4306 + opal_poll_events(NULL);
4307 +- else
4308 ++ else if (retries-- && (rc == OPAL_HARDWARE
4309 ++ || rc == OPAL_INTERNAL_ERROR))
4310 + msleep(10);
4311 ++ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
4312 ++ break;
4313 + }
4314 +
4315 + if (rc != OPAL_SUCCESS)
4316 +@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
4317 + static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
4318 + {
4319 + long rc = OPAL_BUSY;
4320 ++ int retries = 10;
4321 + u32 y_m_d = 0;
4322 + u64 h_m_s_ms = 0;
4323 +
4324 +@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
4325 + rc = opal_rtc_write(y_m_d, h_m_s_ms);
4326 + if (rc == OPAL_BUSY_EVENT)
4327 + opal_poll_events(NULL);
4328 +- else
4329 ++ else if (retries-- && (rc == OPAL_HARDWARE
4330 ++ || rc == OPAL_INTERNAL_ERROR))
4331 + msleep(10);
4332 ++ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
4333 ++ break;
4334 + }
4335 +
4336 + return rc == OPAL_SUCCESS ? 0 : -EIO;
4337 +diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
4338 +index 0f42a225a664..e6b779930230 100644
4339 +--- a/drivers/scsi/smartpqi/Makefile
4340 ++++ b/drivers/scsi/smartpqi/Makefile
4341 +@@ -1,3 +1,3 @@
4342 + ccflags-y += -I.
4343 +-obj-m += smartpqi.o
4344 ++obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
4345 + smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
4346 +diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
4347 +index f9bc8ec6fb6b..9518ffd8b8ba 100644
4348 +--- a/drivers/target/iscsi/iscsi_target_auth.c
4349 ++++ b/drivers/target/iscsi/iscsi_target_auth.c
4350 +@@ -421,7 +421,8 @@ static int chap_server_compute_md5(
4351 + auth_ret = 0;
4352 + out:
4353 + kzfree(desc);
4354 +- crypto_free_shash(tfm);
4355 ++ if (tfm)
4356 ++ crypto_free_shash(tfm);
4357 + kfree(challenge);
4358 + kfree(challenge_binhex);
4359 + return auth_ret;
4360 +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
4361 +index b686e2ce9c0e..8a5e8d17a942 100644
4362 +--- a/drivers/target/iscsi/iscsi_target_nego.c
4363 ++++ b/drivers/target/iscsi/iscsi_target_nego.c
4364 +@@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
4365 + if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
4366 + write_unlock_bh(&sk->sk_callback_lock);
4367 + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
4368 ++ if (iscsi_target_sk_data_ready == conn->orig_data_ready)
4369 ++ return;
4370 ++ conn->orig_data_ready(sk);
4371 + return;
4372 + }
4373 +
4374 +diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
4375 +index f699abab1787..65812a2f60b4 100644
4376 +--- a/drivers/usb/Kconfig
4377 ++++ b/drivers/usb/Kconfig
4378 +@@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO
4379 + config USB_EHCI_BIG_ENDIAN_DESC
4380 + bool
4381 +
4382 ++config USB_UHCI_BIG_ENDIAN_MMIO
4383 ++ bool
4384 ++ default y if SPARC_LEON
4385 ++
4386 ++config USB_UHCI_BIG_ENDIAN_DESC
4387 ++ bool
4388 ++ default y if SPARC_LEON
4389 ++
4390 + menuconfig USB_SUPPORT
4391 + bool "USB support"
4392 + depends on HAS_IOMEM
4393 +diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
4394 +index b80a94e632af..2763a640359f 100644
4395 +--- a/drivers/usb/host/Kconfig
4396 ++++ b/drivers/usb/host/Kconfig
4397 +@@ -625,14 +625,6 @@ config USB_UHCI_ASPEED
4398 + bool
4399 + default y if ARCH_ASPEED
4400 +
4401 +-config USB_UHCI_BIG_ENDIAN_MMIO
4402 +- bool
4403 +- default y if SPARC_LEON
4404 +-
4405 +-config USB_UHCI_BIG_ENDIAN_DESC
4406 +- bool
4407 +- default y if SPARC_LEON
4408 +-
4409 + config USB_FHCI_HCD
4410 + tristate "Freescale QE USB Host Controller support"
4411 + depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
4412 +diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
4413 +index 9269d5685239..b90ef96e43d6 100644
4414 +--- a/drivers/video/console/dummycon.c
4415 ++++ b/drivers/video/console/dummycon.c
4416 +@@ -67,7 +67,6 @@ const struct consw dummy_con = {
4417 + .con_switch = DUMMY,
4418 + .con_blank = DUMMY,
4419 + .con_font_set = DUMMY,
4420 +- .con_font_get = DUMMY,
4421 + .con_font_default = DUMMY,
4422 + .con_font_copy = DUMMY,
4423 + };
4424 +diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
4425 +index e06358da4b99..3dee267d7c75 100644
4426 +--- a/drivers/video/fbdev/atmel_lcdfb.c
4427 ++++ b/drivers/video/fbdev/atmel_lcdfb.c
4428 +@@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
4429 + goto put_display_node;
4430 + }
4431 +
4432 +- timings_np = of_find_node_by_name(display_np, "display-timings");
4433 ++ timings_np = of_get_child_by_name(display_np, "display-timings");
4434 + if (!timings_np) {
4435 + dev_err(dev, "failed to find display-timings node\n");
4436 + ret = -ENODEV;
4437 +@@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
4438 + fb_add_videomode(&fb_vm, &info->modelist);
4439 + }
4440 +
4441 ++ /*
4442 ++ * FIXME: Make sure we are not referencing any fields in display_np
4443 ++ * and timings_np and drop our references to them before returning to
4444 ++ * avoid leaking the nodes on probe deferral and driver unbind.
4445 ++ */
4446 ++
4447 + return 0;
4448 +
4449 + put_timings_node:
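
The atmel_lcdfb change matters because of_find_node_by_name() searches the whole flattened tree starting after the given node (and drops the reference passed in), while of_get_child_by_name() inspects only direct children and leaves the parent's refcount untouched, which is what a "look up my own subnode" pattern wants. A hedged kernel-context sketch of the corrected lookup, not taken from the driver:

    #include <linux/errno.h>
    #include <linux/of.h>

    /* Sketch: find a required child node without unbalancing refcounts. */
    static int lookup_timings(struct device_node *display_np)
    {
            struct device_node *timings_np;

            /* Direct-child lookup; does NOT consume display_np's reference. */
            timings_np = of_get_child_by_name(display_np, "display-timings");
            if (!timings_np)
                    return -ENODEV;

            /* ... parse the timings ... */

            of_node_put(timings_np);    /* drop the child reference when done */
            return 0;
    }
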
4450 +diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
4451 +index 6082f653c68a..67773e8bbb95 100644
4452 +--- a/drivers/video/fbdev/geode/video_gx.c
4453 ++++ b/drivers/video/fbdev/geode/video_gx.c
4454 +@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
4455 + int timeout = 1000;
4456 +
4457 + /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
4458 +- if (cpu_data(0).x86_mask == 1) {
4459 ++ if (cpu_data(0).x86_stepping == 1) {
4460 + pll_table = gx_pll_table_14MHz;
4461 + pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
4462 + } else {
4463 +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
4464 +index 149c5e7efc89..092981171df1 100644
4465 +--- a/drivers/xen/xenbus/xenbus.h
4466 ++++ b/drivers/xen/xenbus/xenbus.h
4467 +@@ -76,6 +76,7 @@ struct xb_req_data {
4468 + struct list_head list;
4469 + wait_queue_head_t wq;
4470 + struct xsd_sockmsg msg;
4471 ++ uint32_t caller_req_id;
4472 + enum xsd_sockmsg_type type;
4473 + char *body;
4474 + const struct kvec *vec;
4475 +diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
4476 +index 5b081a01779d..d239fc3c5e3d 100644
4477 +--- a/drivers/xen/xenbus/xenbus_comms.c
4478 ++++ b/drivers/xen/xenbus/xenbus_comms.c
4479 +@@ -309,6 +309,7 @@ static int process_msg(void)
4480 + goto out;
4481 +
4482 + if (req->state == xb_req_state_wait_reply) {
4483 ++ req->msg.req_id = req->caller_req_id;
4484 + req->msg.type = state.msg.type;
4485 + req->msg.len = state.msg.len;
4486 + req->body = state.body;
4487 +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
4488 +index 3e59590c7254..3f3b29398ab8 100644
4489 +--- a/drivers/xen/xenbus/xenbus_xs.c
4490 ++++ b/drivers/xen/xenbus/xenbus_xs.c
4491 +@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
4492 + req->state = xb_req_state_queued;
4493 + init_waitqueue_head(&req->wq);
4494 +
4495 ++ /* Save the caller req_id and restore it later in the reply */
4496 ++ req->caller_req_id = req->msg.req_id;
4497 + req->msg.req_id = xs_request_enter(req);
4498 +
4499 + mutex_lock(&xb_write_mutex);
4500 +@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
4501 + req->num_vecs = num_vecs;
4502 + req->cb = xs_wake_up;
4503 +
4504 ++ msg.req_id = 0;
4505 + msg.tx_id = t.id;
4506 + msg.type = type;
4507 + msg.len = 0;
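
Across the three xenbus hunks: xenstore overwrites msg.req_id with its own lookup key while a request is in flight, so the fix stashes the caller's id in the new caller_req_id field and copies it back into the reply header, keeping request and reply ids matched for clients that check them. A tiny userspace model of the save/rewrite/restore round trip (all names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct msg { uint32_t req_id; };

    struct req {
        struct msg msg;
        uint32_t caller_req_id;      /* mirrors the new xb_req_data field */
    };

    static uint32_t next_internal_id = 100;

    static void send_req(struct req *r)
    {
        r->caller_req_id = r->msg.req_id;    /* save the caller's id      */
        r->msg.req_id = next_internal_id++;  /* rewrite for internal use  */
    }

    static void complete_req(struct req *r)
    {
        r->msg.req_id = r->caller_req_id;    /* restore before replying   */
    }

    int main(void)
    {
        struct req r = { .msg = { .req_id = 7 } };
        send_req(&r);
        complete_req(&r);
        printf("caller sees id %u\n", r.msg.req_id);   /* 7, not 100 */
        return 0;
    }
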
4508 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4509 +index 0f57602092cf..c04183cc2117 100644
4510 +--- a/fs/btrfs/inode.c
4511 ++++ b/fs/btrfs/inode.c
4512 +@@ -1330,8 +1330,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
4513 + leaf = path->nodes[0];
4514 + if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4515 + ret = btrfs_next_leaf(root, path);
4516 +- if (ret < 0)
4517 ++ if (ret < 0) {
4518 ++ if (cow_start != (u64)-1)
4519 ++ cur_offset = cow_start;
4520 + goto error;
4521 ++ }
4522 + if (ret > 0)
4523 + break;
4524 + leaf = path->nodes[0];
4525 +@@ -3366,6 +3369,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
4526 + ret = btrfs_orphan_reserve_metadata(trans, inode);
4527 + ASSERT(!ret);
4528 + if (ret) {
4529 ++ /*
4530 ++ * dec doesn't need spin_lock as ->orphan_block_rsv
4531 ++ * would be released only if ->orphan_inodes is
4532 ++ * zero.
4533 ++ */
4534 + atomic_dec(&root->orphan_inodes);
4535 + clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
4536 + &inode->runtime_flags);
4537 +@@ -3380,12 +3388,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
4538 + if (insert >= 1) {
4539 + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
4540 + if (ret) {
4541 +- atomic_dec(&root->orphan_inodes);
4542 + if (reserve) {
4543 + clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
4544 + &inode->runtime_flags);
4545 + btrfs_orphan_release_metadata(inode);
4546 + }
4547 ++ /*
4548 ++ * btrfs_orphan_commit_root may race with us and set
4549 ++ * ->orphan_block_rsv to zero, in order to avoid that,
4550 ++ * decrease ->orphan_inodes after everything is done.
4551 ++ */
4552 ++ atomic_dec(&root->orphan_inodes);
4553 + if (ret != -EEXIST) {
4554 + clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4555 + &inode->runtime_flags);
4556 +@@ -3417,28 +3430,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
4557 + {
4558 + struct btrfs_root *root = inode->root;
4559 + int delete_item = 0;
4560 +- int release_rsv = 0;
4561 + int ret = 0;
4562 +
4563 +- spin_lock(&root->orphan_lock);
4564 + if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4565 + &inode->runtime_flags))
4566 + delete_item = 1;
4567 +
4568 ++ if (delete_item && trans)
4569 ++ ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
4570 ++
4571 + if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
4572 + &inode->runtime_flags))
4573 +- release_rsv = 1;
4574 +- spin_unlock(&root->orphan_lock);
4575 ++ btrfs_orphan_release_metadata(inode);
4576 +
4577 +- if (delete_item) {
4578 ++ /*
4579 ++ * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
4580 ++ * to zero, in order to avoid that, decrease ->orphan_inodes after
4581 ++ * everything is done.
4582 ++ */
4583 ++ if (delete_item)
4584 + atomic_dec(&root->orphan_inodes);
4585 +- if (trans)
4586 +- ret = btrfs_del_orphan_item(trans, root,
4587 +- btrfs_ino(inode));
4588 +- }
4589 +-
4590 +- if (release_rsv)
4591 +- btrfs_orphan_release_metadata(inode);
4592 +
4593 + return ret;
4594 + }
4595 +@@ -5263,7 +5274,7 @@ void btrfs_evict_inode(struct inode *inode)
4596 + trace_btrfs_inode_evict(inode);
4597 +
4598 + if (!root) {
4599 +- kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4600 ++ clear_inode(inode);
4601 + return;
4602 + }
4603 +
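
The orphan hunks above all enforce one ordering rule, stated in the new comments: btrfs_orphan_commit_root() may free ->orphan_block_rsv once ->orphan_inodes reaches zero, so this path must finish every access to that state before it decrements the counter. A compact C11 model of "drop the gating count last" (illustrative, not btrfs code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static atomic_int users;
    static int *shared_rsv;          /* stand-in for ->orphan_block_rsv */

    static void put_user(void)
    {
        /* Finish ALL work that touches shared_rsv first; once the count
         * hits zero, a concurrent observer is allowed to free it. */
        if (atomic_fetch_sub(&users, 1) == 1) {
            free(shared_rsv);
            shared_rsv = NULL;
        }
    }

    int main(void)
    {
        shared_rsv = malloc(sizeof(int));
        atomic_store(&users, 1);
        put_user();
        printf("rsv %s\n", shared_rsv ? "live" : "freed");
        return 0;
    }
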
4604 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4605 +index 7bf9b31561db..b5e1afb30f36 100644
4606 +--- a/fs/btrfs/tree-log.c
4607 ++++ b/fs/btrfs/tree-log.c
4608 +@@ -28,6 +28,7 @@
4609 + #include "hash.h"
4610 + #include "compression.h"
4611 + #include "qgroup.h"
4612 ++#include "inode-map.h"
4613 +
4614 + /* magic values for the inode_only field in btrfs_log_inode:
4615 + *
4616 +@@ -2494,6 +2495,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
4617 + clean_tree_block(fs_info, next);
4618 + btrfs_wait_tree_block_writeback(next);
4619 + btrfs_tree_unlock(next);
4620 ++ } else {
4621 ++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
4622 ++ clear_extent_buffer_dirty(next);
4623 + }
4624 +
4625 + WARN_ON(root_owner !=
4626 +@@ -2574,6 +2578,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
4627 + clean_tree_block(fs_info, next);
4628 + btrfs_wait_tree_block_writeback(next);
4629 + btrfs_tree_unlock(next);
4630 ++ } else {
4631 ++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
4632 ++ clear_extent_buffer_dirty(next);
4633 + }
4634 +
4635 + WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
4636 +@@ -2652,6 +2659,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
4637 + clean_tree_block(fs_info, next);
4638 + btrfs_wait_tree_block_writeback(next);
4639 + btrfs_tree_unlock(next);
4640 ++ } else {
4641 ++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
4642 ++ clear_extent_buffer_dirty(next);
4643 + }
4644 +
4645 + WARN_ON(log->root_key.objectid !=
4646 +@@ -3040,13 +3050,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
4647 +
4648 + while (1) {
4649 + ret = find_first_extent_bit(&log->dirty_log_pages,
4650 +- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
4651 ++ 0, &start, &end,
4652 ++ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
4653 + NULL);
4654 + if (ret)
4655 + break;
4656 +
4657 + clear_extent_bits(&log->dirty_log_pages, start, end,
4658 +- EXTENT_DIRTY | EXTENT_NEW);
4659 ++ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
4660 + }
4661 +
4662 + /*
4663 +@@ -5705,6 +5716,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
4664 + path);
4665 + }
4666 +
4667 ++ if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
4668 ++ struct btrfs_root *root = wc.replay_dest;
4669 ++
4670 ++ btrfs_release_path(path);
4671 ++
4672 ++ /*
4673 ++ * We have just replayed everything, and the highest
4674 ++ * objectid of fs roots probably has changed in case
4675 ++ * some inode_item's got replayed.
4676 ++ *
4677 ++ * root->objectid_mutex is not acquired as log replay
4678 ++ * could only happen during mount.
4679 ++ */
4680 ++ ret = btrfs_find_highest_objectid(root,
4681 ++ &root->highest_objectid);
4682 ++ }
4683 ++
4684 + key.offset = found_key.offset - 1;
4685 + wc.replay_dest->log_root = NULL;
4686 + free_extent_buffer(log->node);
4687 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4688 +index 534a9130f625..4c2f8b57bdc7 100644
4689 +--- a/fs/ext4/inode.c
4690 ++++ b/fs/ext4/inode.c
4691 +@@ -3767,10 +3767,18 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
4692 + /* Credits for sb + inode write */
4693 + handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
4694 + if (IS_ERR(handle)) {
4695 +- /* This is really bad luck. We've written the data
4696 +- * but cannot extend i_size. Bail out and pretend
4697 +- * the write failed... */
4698 +- ret = PTR_ERR(handle);
4699 ++ /*
4700 ++ * We wrote the data but cannot extend
4701 ++ * i_size. Bail out. In async io case, we do
4702 ++ * not return error here because we have
4703 ++ * already submitted the corresponding
4704 ++ * bio. Returning error here makes the caller
4705 ++ * think that this IO is done and failed
4706 ++ * resulting in race with bio's completion
4707 ++ * handler.
4708 ++ */
4709 ++ if (!ret)
4710 ++ ret = PTR_ERR(handle);
4711 + if (inode->i_nlink)
4712 + ext4_orphan_del(NULL, inode);
4713 +
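
The ext4 change boils down to a keep-the-first-status idiom: once the bio is submitted, the earlier ret (the async in-flight status) must survive, so the journal-start failure is recorded only when no status exists yet — hence `if (!ret) ret = PTR_ERR(handle);`. A runnable distillation:

    #include <stdio.h>

    static int do_io(int io_status, int journal_err)
    {
        int ret = io_status;         /* e.g. -EIOCBQUEUED for async I/O */

        if (journal_err) {
            if (!ret)                /* only overwrite a "no status" 0  */
                ret = journal_err;
            /* a nonzero ret (bio already submitted) is left intact */
        }
        return ret;
    }

    int main(void)
    {
        printf("%d\n", do_io(0, -12));     /* sync path: report -ENOMEM     */
        printf("%d\n", do_io(-529, -12));  /* async path: keep -EIOCBQUEUED */
        return 0;
    }
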
4714 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4715 +index 7c46693a14d7..71594382e195 100644
4716 +--- a/fs/ext4/super.c
4717 ++++ b/fs/ext4/super.c
4718 +@@ -742,6 +742,7 @@ __acquires(bitlock)
4719 + }
4720 +
4721 + ext4_unlock_group(sb, grp);
4722 ++ ext4_commit_super(sb, 1);
4723 + ext4_handle_error(sb);
4724 + /*
4725 + * We only get here in the ERRORS_RO case; relocking the group
4726 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
4727 +index d5f0d96169c5..8c50d6878aa5 100644
4728 +--- a/fs/gfs2/bmap.c
4729 ++++ b/fs/gfs2/bmap.c
4730 +@@ -736,7 +736,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
4731 + __be64 *ptr;
4732 + sector_t lblock;
4733 + sector_t lend;
4734 +- int ret;
4735 ++ int ret = 0;
4736 + int eob;
4737 + unsigned int len;
4738 + struct buffer_head *bh;
4739 +@@ -748,12 +748,14 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
4740 + goto out;
4741 + }
4742 +
4743 +- if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) {
4744 +- gfs2_stuffed_iomap(inode, iomap);
4745 +- if (pos >= iomap->length)
4746 +- return -ENOENT;
4747 +- ret = 0;
4748 +- goto out;
4749 ++ if (gfs2_is_stuffed(ip)) {
4750 ++ if (flags & IOMAP_REPORT) {
4751 ++ gfs2_stuffed_iomap(inode, iomap);
4752 ++ if (pos >= iomap->length)
4753 ++ ret = -ENOENT;
4754 ++ goto out;
4755 ++ }
4756 ++ BUG_ON(!(flags & IOMAP_WRITE));
4757 + }
4758 +
4759 + lblock = pos >> inode->i_blkbits;
4760 +@@ -764,7 +766,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
4761 + iomap->type = IOMAP_HOLE;
4762 + iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
4763 + iomap->flags = IOMAP_F_MERGED;
4764 +- bmap_lock(ip, 0);
4765 ++ bmap_lock(ip, flags & IOMAP_WRITE);
4766 +
4767 + /*
4768 + * Directory data blocks have a struct gfs2_meta_header header, so the
4769 +@@ -807,27 +809,28 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
4770 + iomap->flags |= IOMAP_F_BOUNDARY;
4771 + iomap->length = (u64)len << inode->i_blkbits;
4772 +
4773 +- ret = 0;
4774 +-
4775 + out_release:
4776 + release_metapath(&mp);
4777 +- bmap_unlock(ip, 0);
4778 ++ bmap_unlock(ip, flags & IOMAP_WRITE);
4779 + out:
4780 + trace_gfs2_iomap_end(ip, iomap, ret);
4781 + return ret;
4782 +
4783 + do_alloc:
4784 +- if (!(flags & IOMAP_WRITE)) {
4785 +- if (pos >= i_size_read(inode)) {
4786 ++ if (flags & IOMAP_WRITE) {
4787 ++ ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
4788 ++ } else if (flags & IOMAP_REPORT) {
4789 ++ loff_t size = i_size_read(inode);
4790 ++ if (pos >= size)
4791 + ret = -ENOENT;
4792 +- goto out_release;
4793 +- }
4794 +- ret = 0;
4795 +- iomap->length = hole_size(inode, lblock, &mp);
4796 +- goto out_release;
4797 ++ else if (height <= ip->i_height)
4798 ++ iomap->length = hole_size(inode, lblock, &mp);
4799 ++ else
4800 ++ iomap->length = size - pos;
4801 ++ } else {
4802 ++ if (height <= ip->i_height)
4803 ++ iomap->length = hole_size(inode, lblock, &mp);
4804 + }
4805 +-
4806 +- ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
4807 + goto out_release;
4808 + }
4809 +
4810 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
4811 +index 8b08044b3120..c0681814c379 100644
4812 +--- a/fs/jbd2/transaction.c
4813 ++++ b/fs/jbd2/transaction.c
4814 +@@ -495,8 +495,10 @@ void jbd2_journal_free_reserved(handle_t *handle)
4815 + EXPORT_SYMBOL(jbd2_journal_free_reserved);
4816 +
4817 + /**
4818 +- * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
4819 ++ * int jbd2_journal_start_reserved() - start reserved handle
4820 + * @handle: handle to start
4821 ++ * @type: for handle statistics
4822 ++ * @line_no: for handle statistics
4823 + *
4824 + * Start handle that has been previously reserved with jbd2_journal_reserve().
4825 + * This attaches @handle to the running transaction (or creates one if there's
4826 +@@ -626,6 +628,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
4827 + * int jbd2_journal_restart() - restart a handle .
4828 + * @handle: handle to restart
4829 + * @nblocks: nr credits requested
4830 ++ * @gfp_mask: memory allocation flags (for start_this_handle)
4831 + *
4832 + * Restart a handle for a multi-transaction filesystem
4833 + * operation.
4834 +diff --git a/fs/mbcache.c b/fs/mbcache.c
4835 +index b8b8b9ced9f8..46b23bb432fe 100644
4836 +--- a/fs/mbcache.c
4837 ++++ b/fs/mbcache.c
4838 +@@ -94,6 +94,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
4839 + entry->e_key = key;
4840 + entry->e_value = value;
4841 + entry->e_reusable = reusable;
4842 ++ entry->e_referenced = 0;
4843 + head = mb_cache_entry_head(cache, key);
4844 + hlist_bl_lock(head);
4845 + hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
4846 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
4847 +index 4689940a953c..5193218f5889 100644
4848 +--- a/fs/ocfs2/dlmglue.c
4849 ++++ b/fs/ocfs2/dlmglue.c
4850 +@@ -2486,6 +2486,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
4851 + ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
4852 + if (ret == -EAGAIN) {
4853 + unlock_page(page);
4854 ++ /*
4855 ++ * If we can't get inode lock immediately, we should not return
4856 ++ * directly here, since this will lead to a softlockup problem.
4857 ++ * The method is to get a blocking lock and immediately unlock
4858 ++ * before returning; this can avoid CPU resource waste due to
4859 ++ * lots of retries, and benefits fairness in getting the lock.
4860 ++ */
4861 ++ if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
4862 ++ ocfs2_inode_unlock(inode, ex);
4863 + ret = AOP_TRUNCATED_PAGE;
4864 + }
4865 +
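
The ocfs2 pattern generalizes: when a nonblocking attempt keeps losing, briefly acquire the lock in blocking mode and release it, so the task sleeps in the lock's fair queue once instead of spinning on retries. A pthread sketch of the same idea (illustrative):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

    /* Try once without blocking; on contention, wait our turn and then
     * ask the caller to retry, instead of hammering trylock in a loop
     * (the softlockup case the comment describes). */
    static int take_nonblock_fair(void)
    {
        if (pthread_mutex_trylock(&lk) == 0)
            return 0;                /* got it immediately */

        pthread_mutex_lock(&lk);     /* sleep until it is our turn...      */
        pthread_mutex_unlock(&lk);   /* ...then release and report a retry */
        return -EAGAIN;              /* caller restarts, like AOP_TRUNCATED_PAGE */
    }

    int main(void)
    {
        return take_nonblock_fair();
    }
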
4866 +diff --git a/fs/seq_file.c b/fs/seq_file.c
4867 +index 4be761c1a03d..eea09f6d8830 100644
4868 +--- a/fs/seq_file.c
4869 ++++ b/fs/seq_file.c
4870 +@@ -181,8 +181,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
4871 + * if request is to read from zero offset, reset iterator to first
4872 + * record as it might have been already advanced by previous requests
4873 + */
4874 +- if (*ppos == 0)
4875 ++ if (*ppos == 0) {
4876 + m->index = 0;
4877 ++ m->version = 0;
4878 ++ m->count = 0;
4879 ++ }
4880 +
4881 + /* Don't assume *ppos is where we left it */
4882 + if (unlikely(*ppos != m->read_pos)) {
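
With only m->index reset, a read at offset 0 could still drain a stale buffered record (m->count) or take the version-based fast path; clearing all three makes a pread() at offset 0 genuinely restart the iterator. A userspace check of that contract against any seq_file-backed proc file:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        char a[64], b[64];
        int fd = open("/proc/self/stat", O_RDONLY);

        if (fd < 0)
            return 1;
        /* Two reads at offset 0 must yield identical leading bytes. */
        ssize_t n1 = pread(fd, a, sizeof(a), 0);
        ssize_t n2 = pread(fd, b, sizeof(b), 0);
        printf("%s\n", (n1 > 0 && n1 == n2 && !memcmp(a, b, n1))
               ? "rewind ok" : "rewind broken");
        close(fd);
        return 0;
    }
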
4883 +diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
4884 +index 972a25633525..c65e4489006d 100644
4885 +--- a/include/drm/i915_pciids.h
4886 ++++ b/include/drm/i915_pciids.h
4887 +@@ -392,6 +392,12 @@
4888 + INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
4889 + INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */
4890 +
4891 ++#define INTEL_CFL_IDS(info) \
4892 ++ INTEL_CFL_S_GT1_IDS(info), \
4893 ++ INTEL_CFL_S_GT2_IDS(info), \
4894 ++ INTEL_CFL_H_GT2_IDS(info), \
4895 ++ INTEL_CFL_U_GT3_IDS(info)
4896 ++
4897 + /* CNL U 2+2 */
4898 + #define INTEL_CNL_U_GT2_IDS(info) \
4899 + INTEL_VGA_DEVICE(0x5A52, info), \
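
INTEL_CFL_IDS() simply concatenates the per-SKU Coffee Lake lists so a driver can pull in the whole family with one table entry. A hedged sketch of how such a macro is typically consumed in a pci_device_id table (the per-family info struct shown here is illustrative; i915's real one is driver-private):

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <drm/i915_pciids.h>

    static const struct { int gen; } coffeelake_info = { .gen = 9 };

    static const struct pci_device_id pciidlist[] = {
            INTEL_CFL_IDS(&coffeelake_info),   /* all CFL S/H/U SKUs */
            { 0 }                              /* table terminator   */
    };
    MODULE_DEVICE_TABLE(pci, pciidlist);
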
4900 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
4901 +index 631354acfa72..73bc63e0a1c4 100644
4902 +--- a/include/linux/compiler-gcc.h
4903 ++++ b/include/linux/compiler-gcc.h
4904 +@@ -167,8 +167,6 @@
4905 +
4906 + #if GCC_VERSION >= 40100
4907 + # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
4908 +-
4909 +-#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
4910 + #endif
4911 +
4912 + #if GCC_VERSION >= 40300
4913 +@@ -196,6 +194,11 @@
4914 + #endif /* __CHECKER__ */
4915 + #endif /* GCC_VERSION >= 40300 */
4916 +
4917 ++#if GCC_VERSION >= 40400
4918 ++#define __optimize(level) __attribute__((__optimize__(level)))
4919 ++#define __nostackprotector __optimize("no-stack-protector")
4920 ++#endif /* GCC_VERSION >= 40400 */
4921 ++
4922 + #if GCC_VERSION >= 40500
4923 +
4924 + #ifndef __CHECKER__
4925 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4926 +index 52e611ab9a6c..5ff818e9a836 100644
4927 +--- a/include/linux/compiler.h
4928 ++++ b/include/linux/compiler.h
4929 +@@ -271,6 +271,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
4930 +
4931 + #endif /* __ASSEMBLY__ */
4932 +
4933 ++#ifndef __optimize
4934 ++# define __optimize(level)
4935 ++#endif
4936 ++
4937 + /* Compile time object size, -1 for unknown */
4938 + #ifndef __compiletime_object_size
4939 + # define __compiletime_object_size(obj) -1
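
These two hunks give the tree a portable __optimize(level) wrapper around GCC's function-level optimize attribute (GCC >= 4.4), with compiler.h supplying an empty fallback so the annotation is harmless on other compilers. A hedged usage sketch:

    #include <linux/compiler.h>

    /* Ask GCC to build just this function at -O2; on compilers without
     * the attribute, __optimize() expands to nothing. */
    static int __optimize("O2") sum(const int *a, int n)
    {
            int s = 0, i;

            for (i = 0; i < n; i++)
                    s += a[i];
            return s;
    }
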
4940 +diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
4941 +index 8f7788d23b57..a6989e02d0a0 100644
4942 +--- a/include/linux/cpuidle.h
4943 ++++ b/include/linux/cpuidle.h
4944 +@@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
4945 + }
4946 + #endif
4947 +
4948 +-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
4949 ++#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
4950 + void cpuidle_poll_state_init(struct cpuidle_driver *drv);
4951 + #else
4952 + static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
4953 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
4954 +index 296d1e0ea87b..b708e5169d1d 100644
4955 +--- a/include/linux/jbd2.h
4956 ++++ b/include/linux/jbd2.h
4957 +@@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
4958 + #define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
4959 +
4960 + /**
4961 +- * struct jbd_inode is the structure linking inodes in ordered mode
4962 +- * present in a transaction so that we can sync them during commit.
4963 ++ * struct jbd_inode - The jbd_inode type is the structure linking inodes in
4964 ++ * ordered mode present in a transaction so that we can sync them during commit.
4965 + */
4966 + struct jbd2_inode {
4967 +- /* Which transaction does this inode belong to? Either the running
4968 +- * transaction or the committing one. [j_list_lock] */
4969 ++ /**
4970 ++ * @i_transaction:
4971 ++ *
4972 ++ * Which transaction does this inode belong to? Either the running
4973 ++ * transaction or the committing one. [j_list_lock]
4974 ++ */
4975 + transaction_t *i_transaction;
4976 +
4977 +- /* Pointer to the running transaction modifying inode's data in case
4978 +- * there is already a committing transaction touching it. [j_list_lock] */
4979 ++ /**
4980 ++ * @i_next_transaction:
4981 ++ *
4982 ++ * Pointer to the running transaction modifying inode's data in case
4983 ++ * there is already a committing transaction touching it. [j_list_lock]
4984 ++ */
4985 + transaction_t *i_next_transaction;
4986 +
4987 +- /* List of inodes in the i_transaction [j_list_lock] */
4988 ++ /**
4989 ++ * @i_list: List of inodes in the i_transaction [j_list_lock]
4990 ++ */
4991 + struct list_head i_list;
4992 +
4993 +- /* VFS inode this inode belongs to [constant during the lifetime
4994 +- * of the structure] */
4995 ++ /**
4996 ++ * @i_vfs_inode:
4997 ++ *
4998 ++ * VFS inode this inode belongs to [constant for lifetime of structure]
4999 ++ */
5000 + struct inode *i_vfs_inode;
5001 +
5002 +- /* Flags of inode [j_list_lock] */
5003 ++ /**
5004 ++ * @i_flags: Flags of inode [j_list_lock]
5005 ++ */
5006 + unsigned long i_flags;
5007 + };
5008 +
5009 +@@ -447,12 +462,20 @@ struct jbd2_revoke_table_s;
5010 + * struct handle_s - The handle_s type is the concrete type associated with
5011 + * handle_t.
5012 + * @h_transaction: Which compound transaction is this update a part of?
5013 ++ * @h_journal: Which journal handle belongs to - used iff h_reserved set.
5014 ++ * @h_rsv_handle: Handle reserved for finishing the logical operation.
5015 + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
5016 +- * @h_ref: Reference count on this handle
5017 +- * @h_err: Field for caller's use to track errors through large fs operations
5018 +- * @h_sync: flag for sync-on-close
5019 +- * @h_jdata: flag to force data journaling
5020 +- * @h_aborted: flag indicating fatal error on handle
5021 ++ * @h_ref: Reference count on this handle.
5022 ++ * @h_err: Field for caller's use to track errors through large fs operations.
5023 ++ * @h_sync: Flag for sync-on-close.
5024 ++ * @h_jdata: Flag to force data journaling.
5025 ++ * @h_reserved: Flag for handle for reserved credits.
5026 ++ * @h_aborted: Flag indicating fatal error on handle.
5027 ++ * @h_type: For handle statistics.
5028 ++ * @h_line_no: For handle statistics.
5029 ++ * @h_start_jiffies: Handle Start time.
5030 ++ * @h_requested_credits: Holds @h_buffer_credits after handle is started.
5031 ++ * @saved_alloc_context: Saved context while transaction is open.
5032 + **/
5033 +
5034 + /* Docbook can't yet cope with the bit fields, but will leave the documentation
5035 +@@ -462,32 +485,23 @@ struct jbd2_revoke_table_s;
5036 + struct jbd2_journal_handle
5037 + {
5038 + union {
5039 +- /* Which compound transaction is this update a part of? */
5040 + transaction_t *h_transaction;
5041 + /* Which journal handle belongs to - used iff h_reserved set */
5042 + journal_t *h_journal;
5043 + };
5044 +
5045 +- /* Handle reserved for finishing the logical operation */
5046 + handle_t *h_rsv_handle;
5047 +-
5048 +- /* Number of remaining buffers we are allowed to dirty: */
5049 + int h_buffer_credits;
5050 +-
5051 +- /* Reference count on this handle */
5052 + int h_ref;
5053 +-
5054 +- /* Field for caller's use to track errors through large fs */
5055 +- /* operations */
5056 + int h_err;
5057 +
5058 + /* Flags [no locking] */
5059 +- unsigned int h_sync: 1; /* sync-on-close */
5060 +- unsigned int h_jdata: 1; /* force data journaling */
5061 +- unsigned int h_reserved: 1; /* handle with reserved credits */
5062 +- unsigned int h_aborted: 1; /* fatal error on handle */
5063 +- unsigned int h_type: 8; /* for handle statistics */
5064 +- unsigned int h_line_no: 16; /* for handle statistics */
5065 ++ unsigned int h_sync: 1;
5066 ++ unsigned int h_jdata: 1;
5067 ++ unsigned int h_reserved: 1;
5068 ++ unsigned int h_aborted: 1;
5069 ++ unsigned int h_type: 8;
5070 ++ unsigned int h_line_no: 16;
5071 +
5072 + unsigned long h_start_jiffies;
5073 + unsigned int h_requested_credits;
5074 +@@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end)
5075 + /**
5076 + * struct journal_s - The journal_s type is the concrete type associated with
5077 + * journal_t.
5078 +- * @j_flags: General journaling state flags
5079 +- * @j_errno: Is there an outstanding uncleared error on the journal (from a
5080 +- * prior abort)?
5081 +- * @j_sb_buffer: First part of superblock buffer
5082 +- * @j_superblock: Second part of superblock buffer
5083 +- * @j_format_version: Version of the superblock format
5084 +- * @j_state_lock: Protect the various scalars in the journal
5085 +- * @j_barrier_count: Number of processes waiting to create a barrier lock
5086 +- * @j_barrier: The barrier lock itself
5087 +- * @j_running_transaction: The current running transaction..
5088 +- * @j_committing_transaction: the transaction we are pushing to disk
5089 +- * @j_checkpoint_transactions: a linked circular list of all transactions
5090 +- * waiting for checkpointing
5091 +- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
5092 +- * to start committing, or for a barrier lock to be released
5093 +- * @j_wait_done_commit: Wait queue for waiting for commit to complete
5094 +- * @j_wait_commit: Wait queue to trigger commit
5095 +- * @j_wait_updates: Wait queue to wait for updates to complete
5096 +- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
5097 +- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
5098 +- * @j_head: Journal head - identifies the first unused block in the journal
5099 +- * @j_tail: Journal tail - identifies the oldest still-used block in the
5100 +- * journal.
5101 +- * @j_free: Journal free - how many free blocks are there in the journal?
5102 +- * @j_first: The block number of the first usable block
5103 +- * @j_last: The block number one beyond the last usable block
5104 +- * @j_dev: Device where we store the journal
5105 +- * @j_blocksize: blocksize for the location where we store the journal.
5106 +- * @j_blk_offset: starting block offset for into the device where we store the
5107 +- * journal
5108 +- * @j_fs_dev: Device which holds the client fs. For internal journal this will
5109 +- * be equal to j_dev
5110 +- * @j_reserved_credits: Number of buffers reserved from the running transaction
5111 +- * @j_maxlen: Total maximum capacity of the journal region on disk.
5112 +- * @j_list_lock: Protects the buffer lists and internal buffer state.
5113 +- * @j_inode: Optional inode where we store the journal. If present, all journal
5114 +- * block numbers are mapped into this inode via bmap().
5115 +- * @j_tail_sequence: Sequence number of the oldest transaction in the log
5116 +- * @j_transaction_sequence: Sequence number of the next transaction to grant
5117 +- * @j_commit_sequence: Sequence number of the most recently committed
5118 +- * transaction
5119 +- * @j_commit_request: Sequence number of the most recent transaction wanting
5120 +- * commit
5121 +- * @j_uuid: Uuid of client object.
5122 +- * @j_task: Pointer to the current commit thread for this journal
5123 +- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
5124 +- * single compound commit transaction
5125 +- * @j_commit_interval: What is the maximum transaction lifetime before we begin
5126 +- * a commit?
5127 +- * @j_commit_timer: The timer used to wakeup the commit thread
5128 +- * @j_revoke_lock: Protect the revoke table
5129 +- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
5130 +- * current transaction.
5131 +- * @j_revoke_table: alternate revoke tables for j_revoke
5132 +- * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
5133 +- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
5134 +- * number that will fit in j_blocksize
5135 +- * @j_last_sync_writer: most recent pid which did a synchronous write
5136 +- * @j_history_lock: Protect the transactions statistics history
5137 +- * @j_proc_entry: procfs entry for the jbd statistics directory
5138 +- * @j_stats: Overall statistics
5139 +- * @j_private: An opaque pointer to fs-private information.
5140 +- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
5141 + */
5142 +-
5143 + struct journal_s
5144 + {
5145 +- /* General journaling state flags [j_state_lock] */
5146 ++ /**
5147 ++ * @j_flags: General journaling state flags [j_state_lock]
5148 ++ */
5149 + unsigned long j_flags;
5150 +
5151 +- /*
5152 ++ /**
5153 ++ * @j_errno:
5154 ++ *
5155 + * Is there an outstanding uncleared error on the journal (from a prior
5156 + * abort)? [j_state_lock]
5157 + */
5158 + int j_errno;
5159 +
5160 +- /* The superblock buffer */
5161 ++ /**
5162 ++ * @j_sb_buffer: The first part of the superblock buffer.
5163 ++ */
5164 + struct buffer_head *j_sb_buffer;
5165 ++
5166 ++ /**
5167 ++ * @j_superblock: The second part of the superblock buffer.
5168 ++ */
5169 + journal_superblock_t *j_superblock;
5170 +
5171 +- /* Version of the superblock format */
5172 ++ /**
5173 ++ * @j_format_version: Version of the superblock format.
5174 ++ */
5175 + int j_format_version;
5176 +
5177 +- /*
5178 +- * Protect the various scalars in the journal
5179 ++ /**
5180 ++ * @j_state_lock: Protect the various scalars in the journal.
5181 + */
5182 + rwlock_t j_state_lock;
5183 +
5184 +- /*
5185 ++ /**
5186 ++ * @j_barrier_count:
5187 ++ *
5188 + * Number of processes waiting to create a barrier lock [j_state_lock]
5189 + */
5190 + int j_barrier_count;
5191 +
5192 +- /* The barrier lock itself */
5193 ++ /**
5194 ++ * @j_barrier: The barrier lock itself.
5195 ++ */
5196 + struct mutex j_barrier;
5197 +
5198 +- /*
5199 ++ /**
5200 ++ * @j_running_transaction:
5201 ++ *
5202 + * Transactions: The current running transaction...
5203 + * [j_state_lock] [caller holding open handle]
5204 + */
5205 + transaction_t *j_running_transaction;
5206 +
5207 +- /*
5208 ++ /**
5209 ++ * @j_committing_transaction:
5210 ++ *
5211 + * the transaction we are pushing to disk
5212 + * [j_state_lock] [caller holding open handle]
5213 + */
5214 + transaction_t *j_committing_transaction;
5215 +
5216 +- /*
5217 ++ /**
5218 ++ * @j_checkpoint_transactions:
5219 ++ *
5220 + * ... and a linked circular list of all transactions waiting for
5221 + * checkpointing. [j_list_lock]
5222 + */
5223 + transaction_t *j_checkpoint_transactions;
5224 +
5225 +- /*
5226 ++ /**
5227 ++ * @j_wait_transaction_locked:
5228 ++ *
5229 + * Wait queue for waiting for a locked transaction to start committing,
5230 +- * or for a barrier lock to be released
5231 ++ * or for a barrier lock to be released.
5232 + */
5233 + wait_queue_head_t j_wait_transaction_locked;
5234 +
5235 +- /* Wait queue for waiting for commit to complete */
5236 ++ /**
5237 ++ * @j_wait_done_commit: Wait queue for waiting for commit to complete.
5238 ++ */
5239 + wait_queue_head_t j_wait_done_commit;
5240 +
5241 +- /* Wait queue to trigger commit */
5242 ++ /**
5243 ++ * @j_wait_commit: Wait queue to trigger commit.
5244 ++ */
5245 + wait_queue_head_t j_wait_commit;
5246 +
5247 +- /* Wait queue to wait for updates to complete */
5248 ++ /**
5249 ++ * @j_wait_updates: Wait queue to wait for updates to complete.
5250 ++ */
5251 + wait_queue_head_t j_wait_updates;
5252 +
5253 +- /* Wait queue to wait for reserved buffer credits to drop */
5254 ++ /**
5255 ++ * @j_wait_reserved:
5256 ++ *
5257 ++ * Wait queue to wait for reserved buffer credits to drop.
5258 ++ */
5259 + wait_queue_head_t j_wait_reserved;
5260 +
5261 +- /* Semaphore for locking against concurrent checkpoints */
5262 ++ /**
5263 ++ * @j_checkpoint_mutex:
5264 ++ *
5265 ++ * Semaphore for locking against concurrent checkpoints.
5266 ++ */
5267 + struct mutex j_checkpoint_mutex;
5268 +
5269 +- /*
5270 ++ /**
5271 ++ * @j_chkpt_bhs:
5272 ++ *
5273 + * List of buffer heads used by the checkpoint routine. This
5274 + * was moved from jbd2_log_do_checkpoint() to reduce stack
5275 + * usage. Access to this array is controlled by the
5276 +- * j_checkpoint_mutex. [j_checkpoint_mutex]
5277 ++ * @j_checkpoint_mutex. [j_checkpoint_mutex]
5278 + */
5279 + struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
5280 +-
5281 +- /*
5282 ++
5283 ++ /**
5284 ++ * @j_head:
5285 ++ *
5286 + * Journal head: identifies the first unused block in the journal.
5287 + * [j_state_lock]
5288 + */
5289 + unsigned long j_head;
5290 +
5291 +- /*
5292 ++ /**
5293 ++ * @j_tail:
5294 ++ *
5295 + * Journal tail: identifies the oldest still-used block in the journal.
5296 + * [j_state_lock]
5297 + */
5298 + unsigned long j_tail;
5299 +
5300 +- /*
5301 ++ /**
5302 ++ * @j_free:
5303 ++ *
5304 + * Journal free: how many free blocks are there in the journal?
5305 + * [j_state_lock]
5306 + */
5307 + unsigned long j_free;
5308 +
5309 +- /*
5310 +- * Journal start and end: the block numbers of the first usable block
5311 +- * and one beyond the last usable block in the journal. [j_state_lock]
5312 ++ /**
5313 ++ * @j_first:
5314 ++ *
5315 ++ * The block number of the first usable block in the journal
5316 ++ * [j_state_lock].
5317 + */
5318 + unsigned long j_first;
5319 ++
5320 ++ /**
5321 ++ * @j_last:
5322 ++ *
5323 ++ * The block number one beyond the last usable block in the journal
5324 ++ * [j_state_lock].
5325 ++ */
5326 + unsigned long j_last;
5327 +
5328 +- /*
5329 +- * Device, blocksize and starting block offset for the location where we
5330 +- * store the journal.
5331 ++ /**
5332 ++ * @j_dev: Device where we store the journal.
5333 + */
5334 + struct block_device *j_dev;
5335 ++
5336 ++ /**
5337 ++ * @j_blocksize: Block size for the location where we store the journal.
5338 ++ */
5339 + int j_blocksize;
5340 ++
5341 ++ /**
5342 ++ * @j_blk_offset:
5343 ++ *
5344 ++ * Starting block offset into the device where we store the journal.
5345 ++ */
5346 + unsigned long long j_blk_offset;
5347 ++
5348 ++ /**
5349 ++ * @j_devname: Journal device name.
5350 ++ */
5351 + char j_devname[BDEVNAME_SIZE+24];
5352 +
5353 +- /*
5354 ++ /**
5355 ++ * @j_fs_dev:
5356 ++ *
5357 + * Device which holds the client fs. For internal journal this will be
5358 + * equal to j_dev.
5359 + */
5360 + struct block_device *j_fs_dev;
5361 +
5362 +- /* Total maximum capacity of the journal region on disk. */
5363 ++ /**
5364 ++ * @j_maxlen: Total maximum capacity of the journal region on disk.
5365 ++ */
5366 + unsigned int j_maxlen;
5367 +
5368 +- /* Number of buffers reserved from the running transaction */
5369 ++ /**
5370 ++ * @j_reserved_credits:
5371 ++ *
5372 ++ * Number of buffers reserved from the running transaction.
5373 ++ */
5374 + atomic_t j_reserved_credits;
5375 +
5376 +- /*
5377 +- * Protects the buffer lists and internal buffer state.
5378 ++ /**
5379 ++ * @j_list_lock: Protects the buffer lists and internal buffer state.
5380 + */
5381 + spinlock_t j_list_lock;
5382 +
5383 +- /* Optional inode where we store the journal. If present, all */
5384 +- /* journal block numbers are mapped into this inode via */
5385 +- /* bmap(). */
5386 ++ /**
5387 ++ * @j_inode:
5388 ++ *
5389 ++ * Optional inode where we store the journal. If present, all
5390 ++ * journal block numbers are mapped into this inode via bmap().
5391 ++ */
5392 + struct inode *j_inode;
5393 +
5394 +- /*
5395 ++ /**
5396 ++ * @j_tail_sequence:
5397 ++ *
5398 + * Sequence number of the oldest transaction in the log [j_state_lock]
5399 + */
5400 + tid_t j_tail_sequence;
5401 +
5402 +- /*
5403 ++ /**
5404 ++ * @j_transaction_sequence:
5405 ++ *
5406 + * Sequence number of the next transaction to grant [j_state_lock]
5407 + */
5408 + tid_t j_transaction_sequence;
5409 +
5410 +- /*
5411 ++ /**
5412 ++ * @j_commit_sequence:
5413 ++ *
5414 + * Sequence number of the most recently committed transaction
5415 + * [j_state_lock].
5416 + */
5417 + tid_t j_commit_sequence;
5418 +
5419 +- /*
5420 ++ /**
5421 ++ * @j_commit_request:
5422 ++ *
5423 + * Sequence number of the most recent transaction wanting commit
5424 + * [j_state_lock]
5425 + */
5426 + tid_t j_commit_request;
5427 +
5428 +- /*
5429 ++ /**
5430 ++ * @j_uuid:
5431 ++ *
5432 + * Journal uuid: identifies the object (filesystem, LVM volume etc)
5433 + * backed by this journal. This will eventually be replaced by an array
5434 + * of uuids, allowing us to index multiple devices within a single
5435 +@@ -958,85 +997,151 @@ struct journal_s
5436 + */
5437 + __u8 j_uuid[16];
5438 +
5439 +- /* Pointer to the current commit thread for this journal */
5440 ++ /**
5441 ++ * @j_task: Pointer to the current commit thread for this journal.
5442 ++ */
5443 + struct task_struct *j_task;
5444 +
5445 +- /*
5446 ++ /**
5447 ++ * @j_max_transaction_buffers:
5448 ++ *
5449 + * Maximum number of metadata buffers to allow in a single compound
5450 +- * commit transaction
5451 ++ * commit transaction.
5452 + */
5453 + int j_max_transaction_buffers;
5454 +
5455 +- /*
5456 ++ /**
5457 ++ * @j_commit_interval:
5458 ++ *
5459 + * What is the maximum transaction lifetime before we begin a commit?
5460 + */
5461 + unsigned long j_commit_interval;
5462 +
5463 +- /* The timer used to wakeup the commit thread: */
5464 ++ /**
5465 ++ * @j_commit_timer: The timer used to wakeup the commit thread.
5466 ++ */
5467 + struct timer_list j_commit_timer;
5468 +
5469 +- /*
5470 +- * The revoke table: maintains the list of revoked blocks in the
5471 +- * current transaction. [j_revoke_lock]
5472 ++ /**
5473 ++ * @j_revoke_lock: Protect the revoke table.
5474 + */
5475 + spinlock_t j_revoke_lock;
5476 ++
5477 ++ /**
5478 ++ * @j_revoke:
5479 ++ *
5480 ++ * The revoke table - maintains the list of revoked blocks in the
5481 ++ * current transaction.
5482 ++ */
5483 + struct jbd2_revoke_table_s *j_revoke;
5484 ++
5485 ++ /**
5486 ++ * @j_revoke_table: Alternate revoke tables for j_revoke.
5487 ++ */
5488 + struct jbd2_revoke_table_s *j_revoke_table[2];
5489 +
5490 +- /*
5491 +- * array of bhs for jbd2_journal_commit_transaction
5492 ++ /**
5493 ++ * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
5494 + */
5495 + struct buffer_head **j_wbuf;
5496 ++
5497 ++ /**
5498 ++ * @j_wbufsize:
5499 ++ *
5500 ++ * Size of @j_wbuf array.
5501 ++ */
5502 + int j_wbufsize;
5503 +
5504 +- /*
5505 +- * this is the pid of hte last person to run a synchronous operation
5506 +- * through the journal
5507 ++ /**
5508 ++ * @j_last_sync_writer:
5509 ++ *
5510 ++ * The pid of the last person to run a synchronous operation
5511 ++ * through the journal.
5512 + */
5513 + pid_t j_last_sync_writer;
5514 +
5515 +- /*
5516 +- * the average amount of time in nanoseconds it takes to commit a
5517 ++ /**
5518 ++ * @j_average_commit_time:
5519 ++ *
5520 ++ * The average amount of time in nanoseconds it takes to commit a
5521 + * transaction to disk. [j_state_lock]
5522 + */
5523 + u64 j_average_commit_time;
5524 +
5525 +- /*
5526 +- * minimum and maximum times that we should wait for
5527 +- * additional filesystem operations to get batched into a
5528 +- * synchronous handle in microseconds
5529 ++ /**
5530 ++ * @j_min_batch_time:
5531 ++ *
5532 ++ * Minimum time that we should wait for additional filesystem operations
5533 ++ * to get batched into a synchronous handle in microseconds.
5534 + */
5535 + u32 j_min_batch_time;
5536 ++
5537 ++ /**
5538 ++ * @j_max_batch_time:
5539 ++ *
5540 ++ * Maximum time that we should wait for additional filesystem operations
5541 ++ * to get batched into a synchronous handle in microseconds.
5542 ++ */
5543 + u32 j_max_batch_time;
5544 +
5545 +- /* This function is called when a transaction is closed */
5546 ++ /**
5547 ++ * @j_commit_callback:
5548 ++ *
5549 ++ * This function is called when a transaction is closed.
5550 ++ */
5551 + void (*j_commit_callback)(journal_t *,
5552 + transaction_t *);
5553 +
5554 + /*
5555 + * Journal statistics
5556 + */
5557 ++
5558 ++ /**
5559 ++ * @j_history_lock: Protect the transactions statistics history.
5560 ++ */
5561 + spinlock_t j_history_lock;
5562 ++
5563 ++ /**
5564 ++ * @j_proc_entry: procfs entry for the jbd statistics directory.
5565 ++ */
5566 + struct proc_dir_entry *j_proc_entry;
5567 ++
5568 ++ /**
5569 ++ * @j_stats: Overall statistics.
5570 ++ */
5571 + struct transaction_stats_s j_stats;
5572 +
5573 +- /* Failed journal commit ID */
5574 ++ /**
5575 ++ * @j_failed_commit: Failed journal commit ID.
5576 ++ */
5577 + unsigned int j_failed_commit;
5578 +
5579 +- /*
5580 ++ /**
5581 ++ * @j_private:
5582 ++ *
5583 + * An opaque pointer to fs-private information. ext3 puts its
5584 +- * superblock pointer here
5585 ++ * superblock pointer here.
5586 + */
5587 + void *j_private;
5588 +
5589 +- /* Reference to checksum algorithm driver via cryptoapi */
5590 ++ /**
5591 ++ * @j_chksum_driver:
5592 ++ *
5593 ++ * Reference to checksum algorithm driver via cryptoapi.
5594 ++ */
5595 + struct crypto_shash *j_chksum_driver;
5596 +
5597 +- /* Precomputed journal UUID checksum for seeding other checksums */
5598 ++ /**
5599 ++ * @j_csum_seed:
5600 ++ *
5601 ++ * Precomputed journal UUID checksum for seeding other checksums.
5602 ++ */
5603 + __u32 j_csum_seed;
5604 +
5605 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
5606 +- /*
5607 ++ /**
5608 ++ * @j_trans_commit_map:
5609 ++ *
5610 + * Lockdep entity to track transaction commit dependencies. Handles
5611 + * hold this "lock" for read, when we wait for commit, we acquire the
5612 + * "lock" for writing. This matches the properties of jbd2 journalling
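
The jbd2.h rework converts one big header-block comment, whose @field list had drifted out of sync with the struct, into per-member kernel-doc that scripts/kernel-doc can check against the actual fields. The shape being adopted, in miniature:

    /**
     * struct sample - one-line summary of the structure
     */
    struct sample {
            /**
             * @count:
             *
             * Longer description of the member, including locking rules
             * such as [my_lock], goes in a block like this.
             */
            int count;

            /** @name: Short single-line member description. */
            const char *name;
    };
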
5613 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
5614 +index a0610427e168..b82c4ae92411 100644
5615 +--- a/include/linux/mlx5/driver.h
5616 ++++ b/include/linux/mlx5/driver.h
5617 +@@ -1238,7 +1238,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
5618 + int eqn;
5619 + int err;
5620 +
5621 +- err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
5622 ++ err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
5623 + if (err)
5624 + return NULL;
5625 +
5626 +diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
5627 +index c30b32e3c862..10191c28fc04 100644
5628 +--- a/include/linux/mm_inline.h
5629 ++++ b/include/linux/mm_inline.h
5630 +@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
5631 +
5632 + #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
5633 +
5634 +-#ifdef arch_unmap_kpfn
5635 +-extern void arch_unmap_kpfn(unsigned long pfn);
5636 +-#else
5637 +-static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
5638 +-#endif
5639 +-
5640 + #endif
5641 +diff --git a/include/linux/nospec.h b/include/linux/nospec.h
5642 +index b99bced39ac2..fbc98e2c8228 100644
5643 +--- a/include/linux/nospec.h
5644 ++++ b/include/linux/nospec.h
5645 +@@ -19,20 +19,6 @@
5646 + static inline unsigned long array_index_mask_nospec(unsigned long index,
5647 + unsigned long size)
5648 + {
5649 +- /*
5650 +- * Warn developers about inappropriate array_index_nospec() usage.
5651 +- *
5652 +- * Even if the CPU speculates past the WARN_ONCE branch, the
5653 +- * sign bit of @index is taken into account when generating the
5654 +- * mask.
5655 +- *
5656 +- * This warning is compiled out when the compiler can infer that
5657 +- * @index and @size are less than LONG_MAX.
5658 +- */
5659 +- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
5660 +- "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
5661 +- return 0;
5662 +-
5663 + /*
5664 + * Always calculate and emit the mask even if the compiler
5665 + * thinks the mask is not needed. The compiler does not take
5666 +@@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
5667 + }
5668 + #endif
5669 +
5670 ++/*
5671 ++ * Warn developers about inappropriate array_index_nospec() usage.
5672 ++ *
5673 ++ * Even if the CPU speculates past the WARN_ONCE branch, the
5674 ++ * sign bit of @index is taken into account when generating the
5675 ++ * mask.
5676 ++ *
5677 ++ * This warning is compiled out when the compiler can infer that
5678 ++ * @index and @size are less than LONG_MAX.
5679 ++ */
5680 ++#define array_index_mask_nospec_check(index, size) \
5681 ++({ \
5682 ++ if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
5683 ++ "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
5684 ++ _mask = 0; \
5685 ++ else \
5686 ++ _mask = array_index_mask_nospec(index, size); \
5687 ++ _mask; \
5688 ++})
5689 ++
5690 + /*
5691 + * array_index_nospec - sanitize an array index after a bounds check
5692 + *
5693 +@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
5694 + ({ \
5695 + typeof(index) _i = (index); \
5696 + typeof(size) _s = (size); \
5697 +- unsigned long _mask = array_index_mask_nospec(_i, _s); \
5698 ++ unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
5699 + \
5700 + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
5701 + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
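
Moving the WARN_ONCE into array_index_mask_nospec_check() keeps the mask helper itself minimal — it must stay cheap and inlinable on the speculative path — while preserving the misuse warning at every array_index_nospec() call site. The caller pattern the header documents looks like this (sketch):

    #include <linux/nospec.h>

    int table[16];

    int read_entry(unsigned int idx)
    {
            if (idx >= 16)                       /* architectural bounds check */
                    return -1;
            idx = array_index_nospec(idx, 16);   /* clamp speculation too */
            return table[idx];
    }
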
5702 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
5703 +index fd84cda5ed7c..0d6a110dae7c 100644
5704 +--- a/include/rdma/ib_verbs.h
5705 ++++ b/include/rdma/ib_verbs.h
5706 +@@ -983,9 +983,9 @@ struct ib_wc {
5707 + u32 invalidate_rkey;
5708 + } ex;
5709 + u32 src_qp;
5710 ++ u32 slid;
5711 + int wc_flags;
5712 + u16 pkey_index;
5713 +- u32 slid;
5714 + u8 sl;
5715 + u8 dlid_path_bits;
5716 + u8 port_num; /* valid only for DR SMPs on switches */
5717 +diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
5718 +index b8adf05c534e..7dd8f34c37df 100644
5719 +--- a/include/trace/events/xen.h
5720 ++++ b/include/trace/events/xen.h
5721 +@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
5722 + TP_printk("%s", "")
5723 + );
5724 +
5725 +-TRACE_EVENT(xen_mmu_flush_tlb_single,
5726 ++TRACE_EVENT(xen_mmu_flush_tlb_one_user,
5727 + TP_PROTO(unsigned long addr),
5728 + TP_ARGS(addr),
5729 + TP_STRUCT__entry(
5730 +diff --git a/kernel/memremap.c b/kernel/memremap.c
5731 +index 403ab9cdb949..4712ce646e04 100644
5732 +--- a/kernel/memremap.c
5733 ++++ b/kernel/memremap.c
5734 +@@ -301,7 +301,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
5735 +
5736 + /* pages are dead and unused, undo the arch mapping */
5737 + align_start = res->start & ~(SECTION_SIZE - 1);
5738 +- align_size = ALIGN(resource_size(res), SECTION_SIZE);
5739 ++ align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
5740 ++ - align_start;
5741 +
5742 + mem_hotplug_begin();
5743 + arch_remove_memory(align_start, align_size);
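
The memremap fix is pure arithmetic: ALIGN(resource_size(res), SECTION_SIZE) only rounds the length, so when res->start is not section-aligned the computed range stops one section short of the resource's end. A runnable worked example (SECTION_SIZE of 128 MiB is illustrative, matching x86_64):

    #include <stdio.h>

    #define SECTION_SIZE (128UL << 20)
    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long start = (3UL << 27) + (1UL << 20); /* 385 MiB: unaligned */
        unsigned long size  = SECTION_SIZE;              /* crosses 2 sections */
        unsigned long align_start = start & ~(SECTION_SIZE - 1);

        unsigned long old_sz = ALIGN(size, SECTION_SIZE);
        unsigned long new_sz = ALIGN(start + size, SECTION_SIZE) - align_start;

        printf("old covers %lu MiB, new covers %lu MiB\n",
               old_sz >> 20, new_sz >> 20);    /* 128 vs 256 */
        return 0;
    }
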
5744 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
5745 +index 61e7f0678d33..a764aec3c9a1 100644
5746 +--- a/kernel/trace/trace_events_filter.c
5747 ++++ b/kernel/trace/trace_events_filter.c
5748 +@@ -400,7 +400,6 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
5749 + for (i = 0; i < len; i++) {
5750 + if (buff[i] == '*') {
5751 + if (!i) {
5752 +- *search = buff + 1;
5753 + type = MATCH_END_ONLY;
5754 + } else if (i == len - 1) {
5755 + if (type == MATCH_END_ONLY)
5756 +@@ -410,14 +409,14 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
5757 + buff[i] = 0;
5758 + break;
5759 + } else { /* pattern continues, use full glob */
5760 +- type = MATCH_GLOB;
5761 +- break;
5762 ++ return MATCH_GLOB;
5763 + }
5764 + } else if (strchr("[?\\", buff[i])) {
5765 +- type = MATCH_GLOB;
5766 +- break;
5767 ++ return MATCH_GLOB;
5768 + }
5769 + }
5770 ++ if (buff[0] == '*')
5771 ++ *search = buff + 1;
5772 +
5773 + return type;
5774 + }
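
After the filter-parse fix, any interior '*' or any of "[?\\" immediately yields MATCH_GLOB with the pattern left intact, and the leading-'*' adjustment of *search happens once at the end so it can no longer leak into the glob case. A standalone model of the resulting classification — my reconstruction of the rules, not the kernel source:

    #include <stdio.h>
    #include <string.h>

    enum t { FULL, FRONT_ONLY, END_ONLY, MIDDLE_ONLY, GLOB };
    static const char *name[] = { "FULL", "FRONT", "END", "MIDDLE", "GLOB" };

    static enum t classify(const char *p)
    {
        size_t len = strlen(p), i;
        enum t ty = FULL;

        for (i = 0; i < len; i++) {
            if (p[i] == '*') {
                if (i == 0)
                    ty = END_ONLY;                       /* "*foo"  */
                else if (i == len - 1)
                    ty = (ty == END_ONLY) ? MIDDLE_ONLY  /* "*foo*" */
                                          : FRONT_ONLY;  /* "foo*"  */
                else
                    return GLOB;                         /* "f*o"   */
            } else if (strchr("[?\\", p[i])) {
                return GLOB;                             /* "f?o" etc. */
            }
        }
        return ty;
    }

    int main(void)
    {
        const char *tests[] = { "foo", "*foo", "foo*", "*foo*", "f*o", "f?o" };
        for (size_t i = 0; i < 6; i++)
            printf("%-6s -> %s\n", tests[i], name[classify(tests[i])]);
        return 0;
    }
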
5775 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
5776 +index 40592e7b3568..268029ae1be6 100644
5777 +--- a/kernel/trace/trace_uprobe.c
5778 ++++ b/kernel/trace/trace_uprobe.c
5779 +@@ -608,7 +608,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
5780 +
5781 + /* Don't print "0x (null)" when offset is 0 */
5782 + if (tu->offset) {
5783 +- seq_printf(m, "0x%p", (void *)tu->offset);
5784 ++ seq_printf(m, "0x%px", (void *)tu->offset);
5785 + } else {
5786 + switch (sizeof(void *)) {
5787 + case 4:
5788 +diff --git a/lib/swiotlb.c b/lib/swiotlb.c
5789 +index cea19aaf303c..0d7f46fb993a 100644
5790 +--- a/lib/swiotlb.c
5791 ++++ b/lib/swiotlb.c
5792 +@@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
5793 +
5794 + not_found:
5795 + spin_unlock_irqrestore(&io_tlb_lock, flags);
5796 +- if (printk_ratelimit())
5797 ++ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
5798 + dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
5799 + return SWIOTLB_MAP_ERROR;
5800 + found:
5801 +@@ -713,6 +713,7 @@ void *
5802 + swiotlb_alloc_coherent(struct device *hwdev, size_t size,
5803 + dma_addr_t *dma_handle, gfp_t flags)
5804 + {
5805 ++ bool warn = !(flags & __GFP_NOWARN);
5806 + dma_addr_t dev_addr;
5807 + void *ret;
5808 + int order = get_order(size);
5809 +@@ -738,8 +739,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
5810 + * GFP_DMA memory; fall back on map_single(), which
5811 + * will grab memory from the lowest available address range.
5812 + */
5813 +- phys_addr_t paddr = map_single(hwdev, 0, size,
5814 +- DMA_FROM_DEVICE, 0);
5815 ++ phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE,
5816 ++ warn ? 0 : DMA_ATTR_NO_WARN);
5817 + if (paddr == SWIOTLB_MAP_ERROR)
5818 + goto err_warn;
5819 +
5820 +@@ -769,9 +770,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
5821 + return ret;
5822 +
5823 + err_warn:
5824 +- pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
5825 +- dev_name(hwdev), size);
5826 +- dump_stack();
5827 ++ if (warn && printk_ratelimit()) {
5828 ++ pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
5829 ++ dev_name(hwdev), size);
5830 ++ dump_stack();
5831 ++ }
5832 +
5833 + return NULL;
5834 + }
5835 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
5836 +index 4acdf393a801..c85fa0038848 100644
5837 +--- a/mm/memory-failure.c
5838 ++++ b/mm/memory-failure.c
5839 +@@ -1146,8 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
5840 + return 0;
5841 + }
5842 +
5843 +- arch_unmap_kpfn(pfn);
5844 +-
5845 + orig_head = hpage = compound_head(p);
5846 + num_poisoned_pages_inc();
5847 +
5848 +diff --git a/mm/memory.c b/mm/memory.c
5849 +index 793004608332..93e51ad41ba3 100644
5850 +--- a/mm/memory.c
5851 ++++ b/mm/memory.c
5852 +@@ -81,7 +81,7 @@
5853 +
5854 + #include "internal.h"
5855 +
5856 +-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
5857 ++#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
5858 + #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
5859 + #endif
5860 +
5861 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5862 +index 76c9688b6a0a..d23818c5465a 100644
5863 +--- a/mm/page_alloc.c
5864 ++++ b/mm/page_alloc.c
5865 +@@ -1177,9 +1177,10 @@ static void free_one_page(struct zone *zone,
5866 + }
5867 +
5868 + static void __meminit __init_single_page(struct page *page, unsigned long pfn,
5869 +- unsigned long zone, int nid)
5870 ++ unsigned long zone, int nid, bool zero)
5871 + {
5872 +- mm_zero_struct_page(page);
5873 ++ if (zero)
5874 ++ mm_zero_struct_page(page);
5875 + set_page_links(page, zone, nid, pfn);
5876 + init_page_count(page);
5877 + page_mapcount_reset(page);
5878 +@@ -1194,9 +1195,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
5879 + }
5880 +
5881 + static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
5882 +- int nid)
5883 ++ int nid, bool zero)
5884 + {
5885 +- return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
5886 ++ return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
5887 + }
5888 +
5889 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
5890 +@@ -1217,7 +1218,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
5891 + if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
5892 + break;
5893 + }
5894 +- __init_single_pfn(pfn, zid, nid);
5895 ++ __init_single_pfn(pfn, zid, nid, true);
5896 + }
5897 + #else
5898 + static inline void init_reserved_page(unsigned long pfn)
5899 +@@ -1514,7 +1515,7 @@ static unsigned long __init deferred_init_range(int nid, int zid,
5900 + page++;
5901 + else
5902 + page = pfn_to_page(pfn);
5903 +- __init_single_page(page, pfn, zid, nid);
5904 ++ __init_single_page(page, pfn, zid, nid, true);
5905 + cond_resched();
5906 + }
5907 + }
5908 +@@ -5393,15 +5394,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5909 + * can be created for invalid pages (for alignment)
5910 + * check here not to call set_pageblock_migratetype() against
5911 + * pfn out of zone.
5912 ++ *
5913 ++ * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
5914 ++ * because this is done early in sparse_add_one_section
5915 + */
5916 + if (!(pfn & (pageblock_nr_pages - 1))) {
5917 + struct page *page = pfn_to_page(pfn);
5918 +
5919 +- __init_single_page(page, pfn, zone, nid);
5920 ++ __init_single_page(page, pfn, zone, nid,
5921 ++ context != MEMMAP_HOTPLUG);
5922 + set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5923 + cond_resched();
5924 + } else {
5925 +- __init_single_pfn(pfn, zone, nid);
5926 ++ __init_single_pfn(pfn, zone, nid,
5927 ++ context != MEMMAP_HOTPLUG);
5928 + }
5929 + }
5930 + }
5931 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
5932 +index f3a4efcf1456..3aa5a93ad107 100644
5933 +--- a/net/9p/trans_virtio.c
5934 ++++ b/net/9p/trans_virtio.c
5935 +@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
5936 + spin_unlock_irqrestore(&chan->lock, flags);
5937 + /* Wakeup if anyone waiting for VirtIO ring space. */
5938 + wake_up(chan->vc_wq);
5939 +- p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
5940 ++ if (len)
5941 ++ p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
5942 + }
5943 + }
5944 +
5945 +diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
5946 +index 8ca9915befc8..aae3565c3a92 100644
5947 +--- a/net/mpls/af_mpls.c
5948 ++++ b/net/mpls/af_mpls.c
5949 +@@ -8,6 +8,7 @@
5950 + #include <linux/ipv6.h>
5951 + #include <linux/mpls.h>
5952 + #include <linux/netconf.h>
5953 ++#include <linux/nospec.h>
5954 + #include <linux/vmalloc.h>
5955 + #include <linux/percpu.h>
5956 + #include <net/ip.h>
5957 +@@ -935,24 +936,27 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
5958 + return err;
5959 + }
5960 +
5961 +-static bool mpls_label_ok(struct net *net, unsigned int index,
5962 ++static bool mpls_label_ok(struct net *net, unsigned int *index,
5963 + struct netlink_ext_ack *extack)
5964 + {
5965 ++ bool is_ok = true;
5966 ++
5967 + /* Reserved labels may not be set */
5968 +- if (index < MPLS_LABEL_FIRST_UNRESERVED) {
5969 ++ if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
5970 + NL_SET_ERR_MSG(extack,
5971 + "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
5972 +- return false;
5973 ++ is_ok = false;
5974 + }
5975 +
5976 + /* The full 20 bit range may not be supported. */
5977 +- if (index >= net->mpls.platform_labels) {
5978 ++ if (is_ok && *index >= net->mpls.platform_labels) {
5979 + NL_SET_ERR_MSG(extack,
5980 + "Label >= configured maximum in platform_labels");
5981 +- return false;
5982 ++ is_ok = false;
5983 + }
5984 +
5985 +- return true;
5986 ++ *index = array_index_nospec(*index, net->mpls.platform_labels);
5987 ++ return is_ok;
5988 + }
5989 +
5990 + static int mpls_route_add(struct mpls_route_config *cfg,
5991 +@@ -975,7 +979,7 @@ static int mpls_route_add(struct mpls_route_config *cfg,
5992 + index = find_free_label(net);
5993 + }
5994 +
5995 +- if (!mpls_label_ok(net, index, extack))
5996 ++ if (!mpls_label_ok(net, &index, extack))
5997 + goto errout;
5998 +
5999 + /* Append makes no sense with mpls */
6000 +@@ -1052,7 +1056,7 @@ static int mpls_route_del(struct mpls_route_config *cfg,
6001 +
6002 + index = cfg->rc_label;
6003 +
6004 +- if (!mpls_label_ok(net, index, extack))
6005 ++ if (!mpls_label_ok(net, &index, extack))
6006 + goto errout;
6007 +
6008 + mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
6009 +@@ -1810,7 +1814,7 @@ static int rtm_to_route_config(struct sk_buff *skb,
6010 + goto errout;
6011 +
6012 + if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
6013 +- cfg->rc_label, extack))
6014 ++ &cfg->rc_label, extack))
6015 + goto errout;
6016 + break;
6017 + }
6018 +@@ -2137,7 +2141,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
6019 + goto errout;
6020 + }
6021 +
6022 +- if (!mpls_label_ok(net, in_label, extack)) {
6023 ++ if (!mpls_label_ok(net, &in_label, extack)) {
6024 + err = -EINVAL;
6025 + goto errout;
6026 + }
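
mpls_label_ok() now takes the label index by pointer so the array_index_nospec() clamp it applies is the value every caller actually indexes with afterwards; returning a bool while leaving the caller's copy unsanitized would keep the speculative out-of-bounds load reachable. The shape of the pattern (sketch, names illustrative):

    #include <linux/nospec.h>
    #include <linux/types.h>

    static bool index_ok(unsigned int *index, unsigned int max)
    {
            bool ok = (*index < max);

            /* Clamp unconditionally; callers use *index from here on. */
            *index = array_index_nospec(*index, max);
            return ok;
    }
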
6027 +diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
6028 +index a3f2ab283aeb..852b838d37b3 100644
6029 +--- a/net/sunrpc/xprtrdma/rpc_rdma.c
6030 ++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
6031 +@@ -143,7 +143,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
6032 + if (xdr->page_len) {
6033 + remaining = xdr->page_len;
6034 + offset = offset_in_page(xdr->page_base);
6035 +- count = 0;
6036 ++ count = RPCRDMA_MIN_SEND_SGES;
6037 + while (remaining) {
6038 + remaining -= min_t(unsigned int,
6039 + PAGE_SIZE - offset, remaining);
6040 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
6041 +index 8607c029c0dd..8cd7ee4fa0cd 100644
6042 +--- a/net/sunrpc/xprtrdma/verbs.c
6043 ++++ b/net/sunrpc/xprtrdma/verbs.c
6044 +@@ -509,7 +509,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
6045 + pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
6046 + return -ENOMEM;
6047 + }
6048 +- ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;
6049 ++ ia->ri_max_send_sges = max_sge;
6050 +
6051 + if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
6052 + dprintk("RPC: %s: insufficient wqe's available\n",
6053 +@@ -1476,6 +1476,9 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
6054 + static void
6055 + rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
6056 + {
6057 ++ if (!rb)
6058 ++ return;
6059 ++
6060 + if (!rpcrdma_regbuf_is_mapped(rb))
6061 + return;
6062 +
6063 +@@ -1491,9 +1494,6 @@ rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
6064 + void
6065 + rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
6066 + {
6067 +- if (!rb)
6068 +- return;
6069 +-
6070 + rpcrdma_dma_unmap_regbuf(rb);
6071 + kfree(rb);
6072 + }
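The two verbs.c hunks at the end move the NULL guard from rpcrdma_free_regbuf() into rpcrdma_dma_unmap_regbuf(), so any caller that unmaps without freeing is also safe with a never-allocated buffer, following the same convention as kfree(NULL). A small illustration of the idiom, with hypothetical names:

    #include <linux/slab.h>

    struct demo_buf {
            bool mapped;
            /* ... dma handle, backing memory ... */
    };

    /* NULL-tolerant teardown: the innermost helper does the check,
     * so every layer above it may pass a buffer that never existed. */
    static void demo_unmap(struct demo_buf *b)
    {
            if (!b || !b->mapped)
                    return;
            /* ... dma unmap of b's handle would go here ... */
            b->mapped = false;
    }

    static void demo_free(struct demo_buf *b)
    {
            demo_unmap(b);  /* safe even when b == NULL */
            kfree(b);       /* kfree(NULL) is a no-op as well */
    }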
6073 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
6074 +index d01913404581..a42cbbf2c8d9 100644
6075 +--- a/sound/core/seq/seq_clientmgr.c
6076 ++++ b/sound/core/seq/seq_clientmgr.c
6077 +@@ -1003,7 +1003,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
6078 + {
6079 + struct snd_seq_client *client = file->private_data;
6080 + int written = 0, len;
6081 +- int err = -EINVAL;
6082 ++ int err;
6083 + struct snd_seq_event event;
6084 +
6085 + if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
6086 +@@ -1018,11 +1018,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
6087 +
6088 + /* allocate the pool now if the pool is not allocated yet */
6089 + if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
6090 +- if (snd_seq_pool_init(client->pool) < 0)
6091 ++ mutex_lock(&client->ioctl_mutex);
6092 ++ err = snd_seq_pool_init(client->pool);
6093 ++ mutex_unlock(&client->ioctl_mutex);
6094 ++ if (err < 0)
6095 + return -ENOMEM;
6096 + }
6097 +
6098 + /* only process whole events */
6099 ++ err = -EINVAL;
6100 + while (count >= sizeof(struct snd_seq_event)) {
6101 + /* Read in the event header from the user */
6102 + len = sizeof(event);
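The seq_clientmgr.c hunk closes a race between snd_seq_write() and the pool-resize ioctl: lazy pool initialisation now runs under client->ioctl_mutex, and since err is reused for the pool-init result, the -EINVAL preset moves down to just before the whole-event loop. A minimal userspace analogue of the locking fix, with hypothetical names:

    #include <pthread.h>
    #include <stdlib.h>

    struct pool {
            pthread_mutex_t lock;   /* plays the role of ioctl_mutex */
            void *mem;
            size_t size;
    };

    /* Lazily initialise a shared pool under the same lock the resize
     * path takes; otherwise two threads can both observe mem == NULL
     * and initialise or resize it concurrently. */
    static int pool_ensure(struct pool *p)
    {
            int err = 0;

            pthread_mutex_lock(&p->lock);
            if (!p->mem) {
                    p->mem = malloc(p->size);
                    if (!p->mem)
                            err = -1;
            }
            pthread_mutex_unlock(&p->lock);
            return err;
    }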
6103 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6104 +index 1750e00c5bb4..4ff1f0ca52fc 100644
6105 +--- a/sound/pci/hda/patch_realtek.c
6106 ++++ b/sound/pci/hda/patch_realtek.c
6107 +@@ -3378,6 +3378,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
6108 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
6109 + }
6110 +
6111 ++static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
6112 ++ const struct hda_fixup *fix,
6113 ++ int action)
6114 ++{
6115 ++ unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
6116 ++ unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
6117 ++
6118 ++ if (cfg_headphone && cfg_headset_mic == 0x411111f0)
6119 ++ snd_hda_codec_set_pincfg(codec, 0x19,
6120 ++ (cfg_headphone & ~AC_DEFCFG_DEVICE) |
6121 ++ (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
6122 ++}
6123 ++
6124 + static void alc269_fixup_hweq(struct hda_codec *codec,
6125 + const struct hda_fixup *fix, int action)
6126 + {
6127 +@@ -4850,6 +4863,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
6128 + }
6129 + }
6130 +
6131 ++static void alc_fixup_tpt470_dock(struct hda_codec *codec,
6132 ++ const struct hda_fixup *fix, int action)
6133 ++{
6134 ++ static const struct hda_pintbl pincfgs[] = {
6135 ++ { 0x17, 0x21211010 }, /* dock headphone */
6136 ++ { 0x19, 0x21a11010 }, /* dock mic */
6137 ++ { }
6138 ++ };
6139 ++ struct alc_spec *spec = codec->spec;
6140 ++
6141 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
6142 ++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
6143 ++ /* Enable DOCK device */
6144 ++ snd_hda_codec_write(codec, 0x17, 0,
6145 ++ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
6146 ++ /* Enable DOCK device */
6147 ++ snd_hda_codec_write(codec, 0x19, 0,
6148 ++ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
6149 ++ snd_hda_apply_pincfgs(codec, pincfgs);
6150 ++ }
6151 ++}
6152 ++
6153 + static void alc_shutup_dell_xps13(struct hda_codec *codec)
6154 + {
6155 + struct alc_spec *spec = codec->spec;
6156 +@@ -5229,6 +5264,7 @@ enum {
6157 + ALC269_FIXUP_LIFEBOOK_EXTMIC,
6158 + ALC269_FIXUP_LIFEBOOK_HP_PIN,
6159 + ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
6160 ++ ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
6161 + ALC269_FIXUP_AMIC,
6162 + ALC269_FIXUP_DMIC,
6163 + ALC269VB_FIXUP_AMIC,
6164 +@@ -5324,6 +5360,7 @@ enum {
6165 + ALC700_FIXUP_INTEL_REFERENCE,
6166 + ALC274_FIXUP_DELL_BIND_DACS,
6167 + ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
6168 ++ ALC298_FIXUP_TPT470_DOCK,
6169 + };
6170 +
6171 + static const struct hda_fixup alc269_fixups[] = {
6172 +@@ -5434,6 +5471,10 @@ static const struct hda_fixup alc269_fixups[] = {
6173 + .type = HDA_FIXUP_FUNC,
6174 + .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
6175 + },
6176 ++ [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
6177 ++ .type = HDA_FIXUP_FUNC,
6178 ++ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
6179 ++ },
6180 + [ALC269_FIXUP_AMIC] = {
6181 + .type = HDA_FIXUP_PINS,
6182 + .v.pins = (const struct hda_pintbl[]) {
6183 +@@ -6149,6 +6190,12 @@ static const struct hda_fixup alc269_fixups[] = {
6184 + .chained = true,
6185 + .chain_id = ALC274_FIXUP_DELL_BIND_DACS
6186 + },
6187 ++ [ALC298_FIXUP_TPT470_DOCK] = {
6188 ++ .type = HDA_FIXUP_FUNC,
6189 ++ .v.func = alc_fixup_tpt470_dock,
6190 ++ .chained = true,
6191 ++ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
6192 ++ },
6193 + };
6194 +
6195 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6196 +@@ -6199,6 +6246,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6197 + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6198 + SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6199 + SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6200 ++ SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6201 ++ SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6202 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6203 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6204 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
6205 +@@ -6300,6 +6349,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6206 + SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
6207 + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
6208 + SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
6209 ++ SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
6210 + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
6211 + SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
6212 + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
6213 +@@ -6328,8 +6378,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6214 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
6215 + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
6216 + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
6217 ++ SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6218 ++ SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6219 + SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
6220 + SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
6221 ++ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
6222 ++ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6223 ++ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6224 ++ SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6225 ++ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6226 ++ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6227 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6228 + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6229 + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6230 +@@ -6350,7 +6408,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6231 + SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
6232 + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
6233 + SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
6234 ++ SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6235 ++ SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6236 ++ SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6237 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6238 ++ SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6239 ++ SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6240 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
6241 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
6242 + SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
6243 +@@ -6612,6 +6675,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6244 + {0x12, 0xb7a60130},
6245 + {0x14, 0x90170110},
6246 + {0x21, 0x02211020}),
6247 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6248 ++ {0x12, 0x90a60130},
6249 ++ {0x14, 0x90170110},
6250 ++ {0x14, 0x01011020},
6251 ++ {0x21, 0x0221101f}),
6252 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6253 + ALC256_STANDARD_PINS),
6254 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
6255 +@@ -6681,6 +6749,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6256 + {0x12, 0x90a60120},
6257 + {0x14, 0x90170110},
6258 + {0x21, 0x0321101f}),
6259 ++ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
6260 ++ {0x12, 0xb7a60130},
6261 ++ {0x14, 0x90170110},
6262 ++ {0x21, 0x04211020}),
6263 + SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
6264 + ALC290_STANDARD_PINS,
6265 + {0x15, 0x04211040},
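Most of the patch_realtek.c material is quirk-table data, but the two new fixup callbacks show the usual HDA pin-config dance: the U7x7 fixup reads a pin's default configuration, rewrites its device-type field and writes it back, while the TPT470 dock fixup unlocks the dock pins with AC_VERB_SET_CONFIG_DEFAULT_BYTES_3 and then applies a pin table. A sketch of the field-rewrite step, assuming the AC_DEFCFG_* masks from the HDA headers:

    /* Derive a headset-mic config for pin 0x19 from the headphone
     * pin's config, as the U7x7 fixup above does (sketch). */
    static void fixup_headset_mic(struct hda_codec *codec)
    {
            unsigned int cfg = snd_hda_codec_get_pincfg(codec, 0x21);

            cfg &= ~AC_DEFCFG_DEVICE;                        /* clear device type */
            cfg |= AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT; /* mark it as mic-in */
            snd_hda_codec_set_pincfg(codec, 0x19, cfg);
    }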
6266 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
6267 +index 2b4ceda36291..20b28a5a1456 100644
6268 +--- a/sound/usb/mixer.c
6269 ++++ b/sound/usb/mixer.c
6270 +@@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
6271 + int validx, int *value_ret)
6272 + {
6273 + struct snd_usb_audio *chip = cval->head.mixer->chip;
6274 +- unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
6275 ++ /* enough space for one range */
6276 ++ unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
6277 + unsigned char *val;
6278 +- int idx = 0, ret, size;
6279 ++ int idx = 0, ret, val_size, size;
6280 + __u8 bRequest;
6281 +
6282 ++ val_size = uac2_ctl_value_size(cval->val_type);
6283 ++
6284 + if (request == UAC_GET_CUR) {
6285 + bRequest = UAC2_CS_CUR;
6286 +- size = uac2_ctl_value_size(cval->val_type);
6287 ++ size = val_size;
6288 + } else {
6289 + bRequest = UAC2_CS_RANGE;
6290 +- size = sizeof(buf);
6291 ++ size = sizeof(__u16) + 3 * val_size;
6292 + }
6293 +
6294 + memset(buf, 0, sizeof(buf));
6295 +@@ -390,16 +393,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
6296 + val = buf + sizeof(__u16);
6297 + break;
6298 + case UAC_GET_MAX:
6299 +- val = buf + sizeof(__u16) * 2;
6300 ++ val = buf + sizeof(__u16) + val_size;
6301 + break;
6302 + case UAC_GET_RES:
6303 +- val = buf + sizeof(__u16) * 3;
6304 ++ val = buf + sizeof(__u16) + val_size * 2;
6305 + break;
6306 + default:
6307 + return -EINVAL;
6308 + }
6309 +
6310 +- *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
6311 ++ *value_ret = convert_signed_value(cval,
6312 ++ snd_usb_combine_bytes(val, val_size));
6313 +
6314 + return 0;
6315 + }
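The mixer.c hunk corrects the parsing of UAC2 RANGE replies: the payload is a 16-bit wNumSubRanges followed by MIN, MAX and RES fields whose width is the control's value size (1, 2 or 4 bytes), not a fixed __u16. A sketch of the offset arithmetic the fix implements, where val_size would come from uac2_ctl_value_size():

    /* Field offsets inside a one-sub-range UAC2 RANGE reply (sketch). */
    static unsigned char *range_field(unsigned char *buf, int request,
                                      int val_size)
    {
            switch (request) {
            case UAC_GET_MIN:
                    return buf + sizeof(__u16);            /* after wNumSubRanges */
            case UAC_GET_MAX:
                    return buf + sizeof(__u16) + val_size;
            case UAC_GET_RES:
                    return buf + sizeof(__u16) + 2 * val_size;
            default:
                    return NULL;
            }
    }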
6316 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
6317 +index b9c9a19f9588..3cbfae6604f9 100644
6318 +--- a/sound/usb/pcm.c
6319 ++++ b/sound/usb/pcm.c
6320 +@@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
6321 + ep = 0x86;
6322 + iface = usb_ifnum_to_if(dev, 2);
6323 +
6324 ++ if (!iface || iface->num_altsetting == 0)
6325 ++ return -EINVAL;
6326 ++
6327 ++ alts = &iface->altsetting[1];
6328 ++ goto add_sync_ep;
6329 ++ case USB_ID(0x1397, 0x0002):
6330 ++ ep = 0x81;
6331 ++ iface = usb_ifnum_to_if(dev, 1);
6332 ++
6333 + if (!iface || iface->num_altsetting == 0)
6334 + return -EINVAL;
6335 +
6336 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
6337 +index a66ef5777887..ea8f3de92fa4 100644
6338 +--- a/sound/usb/quirks.c
6339 ++++ b/sound/usb/quirks.c
6340 +@@ -1363,8 +1363,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
6341 + return SNDRV_PCM_FMTBIT_DSD_U32_BE;
6342 + break;
6343 +
6344 +- /* Amanero Combo384 USB interface with native DSD support */
6345 +- case USB_ID(0x16d0, 0x071a):
6346 ++ /* Amanero Combo384 USB based DACs with native DSD support */
6347 ++ case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
6348 ++ case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
6349 ++ case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
6350 ++ case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
6351 + if (fp->altsetting == 2) {
6352 + switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
6353 + case 0x199:
6354 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
6355 +index 2e458eb45586..c7fb5c2392ee 100644
6356 +--- a/tools/objtool/check.c
6357 ++++ b/tools/objtool/check.c
6358 +@@ -1935,13 +1935,19 @@ static bool ignore_unreachable_insn(struct instruction *insn)
6359 + if (is_kasan_insn(insn) || is_ubsan_insn(insn))
6360 + return true;
6361 +
6362 +- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
6363 +- insn = insn->jump_dest;
6364 +- continue;
6365 ++ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
6366 ++ if (insn->jump_dest &&
6367 ++ insn->jump_dest->func == insn->func) {
6368 ++ insn = insn->jump_dest;
6369 ++ continue;
6370 ++ }
6371 ++
6372 ++ break;
6373 + }
6374 +
6375 + if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
6376 + break;
6377 ++
6378 + insn = list_next_entry(insn, list);
6379 + }
6380 +
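The objtool hunk stops ignore_unreachable_insn() from following an unconditional jump out of the current function: the walk only continues when the destination belongs to the same function, and otherwise stops rather than wandering into a sibling call's body. The fixed walk has roughly this shape (a sketch using the field names from the hunk):

    for (;;) {
            if (is_kasan_insn(insn) || is_ubsan_insn(insn))
                    return true;

            if (insn->type == INSN_JUMP_UNCONDITIONAL) {
                    if (insn->jump_dest &&
                        insn->jump_dest->func == insn->func) {
                            insn = insn->jump_dest;  /* intra-function jump */
                            continue;
                    }
                    break;  /* jump leaves the function, e.g. a tail call */
            }

            if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
                    break;  /* fell off the end of the function */

            insn = list_next_entry(insn, list);
    }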
6381 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
6382 +index 24dbf634e2dd..0b457e8e0f0c 100644
6383 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
6384 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
6385 +@@ -1717,7 +1717,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
6386 +
6387 + if (nr == __NR_getpid)
6388 + change_syscall(_metadata, tracee, __NR_getppid);
6389 +- if (nr == __NR_open)
6390 ++ if (nr == __NR_openat)
6391 + change_syscall(_metadata, tracee, -1);
6392 + }
6393 +
6394 +@@ -1792,7 +1792,7 @@ TEST_F(TRACE_syscall, ptrace_syscall_dropped)
6395 + true);
6396 +
6397 + /* Tracer should skip the open syscall, resulting in EPERM. */
6398 +- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open));
6399 ++ EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
6400 + }
6401 +
6402 + TEST_F(TRACE_syscall, syscall_allowed)
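The seccomp selftest now traces __NR_openat instead of __NR_open: modern glibc routes open(3) through the openat(2) syscall, and newer architectures may not define __NR_open at all, so the tracer has to watch the syscall the tracee really issues. A runnable probe of the same point, assuming Linux with glibc:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* Issue openat(2) directly, as the fixed test does. */
            long fd = syscall(__NR_openat, AT_FDCWD, "/dev/null", O_RDONLY);

            if (fd < 0) {
                    perror("openat");
                    return 1;
            }
            close((int)fd);
            return 0;
    }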
6403 +diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
6404 +index a65b016d4c13..1097f04e4d80 100644
6405 +--- a/tools/testing/selftests/vm/compaction_test.c
6406 ++++ b/tools/testing/selftests/vm/compaction_test.c
6407 +@@ -137,6 +137,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
6408 + printf("No of huge pages allocated = %d\n",
6409 + (atoi(nr_hugepages)));
6410 +
6411 ++ lseek(fd, 0, SEEK_SET);
6412 ++
6413 + if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
6414 + != strlen(initial_nr_hugepages)) {
6415 + perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
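The compaction_test fix is a classic file-offset bug: after the first write(2) to /proc/sys/vm/nr_hugepages the descriptor's offset is no longer zero, so restoring the original value needs an lseek() back to the start. A standalone, runnable illustration (requires root, and really changes the tunable; the values are examples):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *a = "20", *b = "0";
            int fd = open("/proc/sys/vm/nr_hugepages", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, a, strlen(a)) != (ssize_t)strlen(a))
                    perror("first write");
            lseek(fd, 0, SEEK_SET);  /* the fix: rewind before rewriting */
            if (write(fd, b, strlen(b)) != (ssize_t)strlen(b))
                    perror("second write");
            close(fd);
            return 0;
    }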
6416 +diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
6417 +index 5d4f10ac2af2..aa6e2d7f6a1f 100644
6418 +--- a/tools/testing/selftests/x86/Makefile
6419 ++++ b/tools/testing/selftests/x86/Makefile
6420 +@@ -5,16 +5,26 @@ include ../lib.mk
6421 +
6422 + .PHONY: all all_32 all_64 warn_32bit_failure clean
6423 +
6424 +-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
6425 +- check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
6426 ++UNAME_M := $(shell uname -m)
6427 ++CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
6428 ++CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
6429 ++
6430 ++TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
6431 ++ check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
6432 + protection_keys test_vdso test_vsyscall
6433 + TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
6434 + test_FCMOV test_FCOMI test_FISTTP \
6435 + vdso_restorer
6436 +-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
6437 ++TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
6438 ++# Some selftests require 32bit support enabled also on 64bit systems
6439 ++TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
6440 +
6441 +-TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
6442 ++TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED)
6443 + TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
6444 ++ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11)
6445 ++TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED)
6446 ++endif
6447 ++
6448 + BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
6449 + BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
6450 +
6451 +@@ -23,18 +33,16 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
6452 +
6453 + CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
6454 +
6455 +-UNAME_M := $(shell uname -m)
6456 +-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
6457 +-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
6458 +-
6459 + ifeq ($(CAN_BUILD_I386),1)
6460 + all: all_32
6461 + TEST_PROGS += $(BINARIES_32)
6462 ++EXTRA_CFLAGS += -DCAN_BUILD_32
6463 + endif
6464 +
6465 + ifeq ($(CAN_BUILD_X86_64),1)
6466 + all: all_64
6467 + TEST_PROGS += $(BINARIES_64)
6468 ++EXTRA_CFLAGS += -DCAN_BUILD_64
6469 + endif
6470 +
6471 + all_32: $(BINARIES_32)
6472 +diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
6473 +index ec0f6b45ce8b..9c0325e1ea68 100644
6474 +--- a/tools/testing/selftests/x86/mpx-mini-test.c
6475 ++++ b/tools/testing/selftests/x86/mpx-mini-test.c
6476 +@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
6477 + return si->si_upper;
6478 + }
6479 + #else
6480 ++
6481 ++/*
6482 ++ * This deals with old version of _sigfault in some distros:
6483 ++ *
6484 ++
6485 ++old _sigfault:
6486 ++ struct {
6487 ++ void *si_addr;
6488 ++ } _sigfault;
6489 ++
6490 ++new _sigfault:
6491 ++ struct {
6492 ++ void __user *_addr;
6493 ++ int _trapno;
6494 ++ short _addr_lsb;
6495 ++ union {
6496 ++ struct {
6497 ++ void __user *_lower;
6498 ++ void __user *_upper;
6499 ++ } _addr_bnd;
6500 ++ __u32 _pkey;
6501 ++ };
6502 ++ } _sigfault;
6503 ++ *
6504 ++ */
6505 ++
6506 + static inline void **__si_bounds_hack(siginfo_t *si)
6507 + {
6508 + void *sigfault = &si->_sifields._sigfault;
6509 + void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
6510 +- void **__si_lower = end_sigfault;
6511 ++ int *trapno = (int*)end_sigfault;
6512 ++ /* skip _trapno and _addr_lsb */
6513 ++ void **__si_lower = (void**)(trapno + 2);
6514 +
6515 + return __si_lower;
6516 + }
6517 +@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
6518 +
6519 + static inline void *__si_bounds_upper(siginfo_t *si)
6520 + {
6521 +- return (*__si_bounds_hack(si)) + sizeof(void *);
6522 ++ return *(__si_bounds_hack(si) + 1);
6523 + }
6524 + #endif
6525 +
6526 +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
6527 +index bc1b0735bb50..f15aa5a76fe3 100644
6528 +--- a/tools/testing/selftests/x86/protection_keys.c
6529 ++++ b/tools/testing/selftests/x86/protection_keys.c
6530 +@@ -393,34 +393,6 @@ pid_t fork_lazy_child(void)
6531 + return forkret;
6532 + }
6533 +
6534 +-void davecmp(void *_a, void *_b, int len)
6535 +-{
6536 +- int i;
6537 +- unsigned long *a = _a;
6538 +- unsigned long *b = _b;
6539 +-
6540 +- for (i = 0; i < len / sizeof(*a); i++) {
6541 +- if (a[i] == b[i])
6542 +- continue;
6543 +-
6544 +- dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
6545 +- }
6546 +-}
6547 +-
6548 +-void dumpit(char *f)
6549 +-{
6550 +- int fd = open(f, O_RDONLY);
6551 +- char buf[100];
6552 +- int nr_read;
6553 +-
6554 +- dprintf2("maps fd: %d\n", fd);
6555 +- do {
6556 +- nr_read = read(fd, &buf[0], sizeof(buf));
6557 +- write(1, buf, nr_read);
6558 +- } while (nr_read > 0);
6559 +- close(fd);
6560 +-}
6561 +-
6562 + #define PKEY_DISABLE_ACCESS 0x1
6563 + #define PKEY_DISABLE_WRITE 0x2
6564 +
6565 +diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
6566 +index a48da95c18fd..ddfdd635de16 100644
6567 +--- a/tools/testing/selftests/x86/single_step_syscall.c
6568 ++++ b/tools/testing/selftests/x86/single_step_syscall.c
6569 +@@ -119,7 +119,9 @@ static void check_result(void)
6570 +
6571 + int main()
6572 + {
6573 ++#ifdef CAN_BUILD_32
6574 + int tmp;
6575 ++#endif
6576 +
6577 + sethandler(SIGTRAP, sigtrap, 0);
6578 +
6579 +@@ -139,12 +141,13 @@ int main()
6580 + : : "c" (post_nop) : "r11");
6581 + check_result();
6582 + #endif
6583 +-
6584 ++#ifdef CAN_BUILD_32
6585 + printf("[RUN]\tSet TF and check int80\n");
6586 + set_eflags(get_eflags() | X86_EFLAGS_TF);
6587 + asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
6588 + : INT80_CLOBBERS);
6589 + check_result();
6590 ++#endif
6591 +
6592 + /*
6593 + * This test is particularly interesting if fast syscalls use
6594 +diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
6595 +index bf0d687c7db7..64f11c8d9b76 100644
6596 +--- a/tools/testing/selftests/x86/test_mremap_vdso.c
6597 ++++ b/tools/testing/selftests/x86/test_mremap_vdso.c
6598 +@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
6599 + vdso_size += PAGE_SIZE;
6600 + }
6601 +
6602 ++#ifdef __i386__
6603 + /* Glibc is likely to explode now - exit with raw syscall */
6604 + asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
6605 ++#else /* __x86_64__ */
6606 ++ syscall(SYS_exit, ret);
6607 ++#endif
6608 + } else {
6609 + int status;
6610 +
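test_mremap_vdso.c used int $0x80 unconditionally to exit, which on a 64-bit build takes the 32-bit compat entry path and only works when compat support is configured in. The fix keeps the raw gate for i386 and uses syscall(SYS_exit, ...) on x86_64; either way glibc's normal exit path is avoided, since the vDSO has just been mremap()ed out from under it. A sketch of the arch-switched exit:

    #include <sys/syscall.h>
    #include <unistd.h>

    static void raw_exit(int status)
    {
    #ifdef __i386__
            /* 32-bit: legacy syscall gate, bypassing glibc entirely */
            asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (status));
    #else
            /* 64-bit: the thin syscall(2) wrapper is safe here */
            syscall(SYS_exit, status);
    #endif
            __builtin_unreachable();
    }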
6611 +diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
6612 +index 29973cde06d3..235259011704 100644
6613 +--- a/tools/testing/selftests/x86/test_vdso.c
6614 ++++ b/tools/testing/selftests/x86/test_vdso.c
6615 +@@ -26,20 +26,59 @@
6616 + # endif
6617 + #endif
6618 +
6619 ++/* max length of lines in /proc/self/maps - anything longer is skipped here */
6620 ++#define MAPS_LINE_LEN 128
6621 ++
6622 + int nerrs = 0;
6623 +
6624 ++typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
6625 ++
6626 ++getcpu_t vgetcpu;
6627 ++getcpu_t vdso_getcpu;
6628 ++
6629 ++static void *vsyscall_getcpu(void)
6630 ++{
6631 + #ifdef __x86_64__
6632 +-# define VSYS(x) (x)
6633 ++ FILE *maps;
6634 ++ char line[MAPS_LINE_LEN];
6635 ++ bool found = false;
6636 ++
6637 ++ maps = fopen("/proc/self/maps", "r");
6638 ++ if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */
6639 ++ return NULL;
6640 ++
6641 ++ while (fgets(line, MAPS_LINE_LEN, maps)) {
6642 ++ char r, x;
6643 ++ void *start, *end;
6644 ++ char name[MAPS_LINE_LEN];
6645 ++
6646 ++ /* sscanf() is safe here as strlen(name) >= strlen(line) */
6647 ++ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
6648 ++ &start, &end, &r, &x, name) != 5)
6649 ++ continue;
6650 ++
6651 ++ if (strcmp(name, "[vsyscall]"))
6652 ++ continue;
6653 ++
6654 ++ /* assume entries are OK, as we test vDSO here not vsyscall */
6655 ++ found = true;
6656 ++ break;
6657 ++ }
6658 ++
6659 ++ fclose(maps);
6660 ++
6661 ++ if (!found) {
6662 ++ printf("Warning: failed to find vsyscall getcpu\n");
6663 ++ return NULL;
6664 ++ }
6665 ++ return (void *) (0xffffffffff600800);
6666 + #else
6667 +-# define VSYS(x) 0
6668 ++ return NULL;
6669 + #endif
6670 ++}
6671 +
6672 +-typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
6673 +-
6674 +-const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
6675 +-getcpu_t vdso_getcpu;
6676 +
6677 +-void fill_function_pointers()
6678 ++static void fill_function_pointers()
6679 + {
6680 + void *vdso = dlopen("linux-vdso.so.1",
6681 + RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
6682 +@@ -54,6 +93,8 @@ void fill_function_pointers()
6683 + vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
6684 + if (!vdso_getcpu)
6685 + printf("Warning: failed to find getcpu in vDSO\n");
6686 ++
6687 ++ vgetcpu = (getcpu_t) vsyscall_getcpu();
6688 + }
6689 +
6690 + static long sys_getcpu(unsigned * cpu, unsigned * node,
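The test_vdso.c rework replaces a hard-coded vsyscall address with a runtime check: vgetcpu is only set when /proc/self/maps actually contains a [vsyscall] mapping, since kernels booted with vsyscall=none (or built without legacy vsyscall support) leave the page unmapped. A simplified, runnable version of the detection, matching on the map name only where the test also parses the address fields:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool have_vsyscall(void)
    {
            char line[256];
            bool found = false;
            FILE *maps = fopen("/proc/self/maps", "r");

            if (!maps)
                    return false;
            while (fgets(line, sizeof(line), maps)) {
                    if (strstr(line, "[vsyscall]")) {
                            found = true;
                            break;
                    }
            }
            fclose(maps);
            return found;
    }

    int main(void)
    {
            printf("vsyscall page %s\n",
                   have_vsyscall() ? "present" : "absent");
            return 0;
    }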
6691 +diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
6692 +index 7a744fa7b786..be81621446f0 100644
6693 +--- a/tools/testing/selftests/x86/test_vsyscall.c
6694 ++++ b/tools/testing/selftests/x86/test_vsyscall.c
6695 +@@ -33,6 +33,9 @@
6696 + # endif
6697 + #endif
6698 +
6699 ++/* max length of lines in /proc/self/maps - anything longer is skipped here */
6700 ++#define MAPS_LINE_LEN 128
6701 ++
6702 + static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
6703 + int flags)
6704 + {
6705 +@@ -98,7 +101,7 @@ static int init_vsys(void)
6706 + #ifdef __x86_64__
6707 + int nerrs = 0;
6708 + FILE *maps;
6709 +- char line[128];
6710 ++ char line[MAPS_LINE_LEN];
6711 + bool found = false;
6712 +
6713 + maps = fopen("/proc/self/maps", "r");
6714 +@@ -108,10 +111,12 @@ static int init_vsys(void)
6715 + return 0;
6716 + }
6717 +
6718 +- while (fgets(line, sizeof(line), maps)) {
6719 ++ while (fgets(line, MAPS_LINE_LEN, maps)) {
6720 + char r, x;
6721 + void *start, *end;
6722 +- char name[128];
6723 ++ char name[MAPS_LINE_LEN];
6724 ++
6725 ++ /* sscanf() is safe here as strlen(name) >= strlen(line) */
6726 + if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
6727 + &start, &end, &r, &x, name) != 5)
6728 + continue;