Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Wed, 21 Nov 2018 12:29:00 +0000
Message-Id: 1542803307.f038ea3a40fec1a50410f3e39b1ca402f8d6543c.mpagano@gentoo
1 commit: f038ea3a40fec1a50410f3e39b1ca402f8d6543c
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Nov 21 12:28:27 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 21 12:28:27 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f038ea3a
7
8 proj/linux-patches: Linux patch 4.18.20
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1019_linux-4.18.20.patch | 4811 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4815 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 4d0ed54..805997e 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -119,6 +119,10 @@ Patch: 1018_linux-4.18.19.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.18.19
23
24 +Patch: 1019_linux-4.18.20.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.18.20
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1019_linux-4.18.20.patch b/1019_linux-4.18.20.patch
33 new file mode 100644
34 index 0000000..6ea25b7
35 --- /dev/null
36 +++ b/1019_linux-4.18.20.patch
37 @@ -0,0 +1,4811 @@
38 +diff --git a/Makefile b/Makefile
39 +index 71642133ba22..5f6697c4dbbc 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 18
46 +-SUBLEVEL = 19
47 ++SUBLEVEL = 20
48 + EXTRAVERSION =
49 + NAME = Merciless Moray
50 +
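
For reference, the three Makefile variables bumped here compose the release string (4.18.20) and feed LINUX_VERSION_CODE in <linux/version.h>. A minimal user-space sketch of the standard encoding (the macro is restated locally, so only libc is assumed):

#include <stdio.h>

/* Same encoding as KERNEL_VERSION() in <linux/version.h>:
 * 16 bits of VERSION, 8 of PATCHLEVEL, 8 of SUBLEVEL. */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
        printf("4.18.20 -> LINUX_VERSION_CODE 0x%06x\n",
               KERNEL_VERSION(4, 18, 20));
        return 0;
}
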
51 +diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
52 +index 6a8c53dec57e..b7c77bb1bfd2 100644
53 +--- a/arch/alpha/include/asm/termios.h
54 ++++ b/arch/alpha/include/asm/termios.h
55 +@@ -73,9 +73,15 @@
56 + })
57 +
58 + #define user_termios_to_kernel_termios(k, u) \
59 +- copy_from_user(k, u, sizeof(struct termios))
60 ++ copy_from_user(k, u, sizeof(struct termios2))
61 +
62 + #define kernel_termios_to_user_termios(u, k) \
63 ++ copy_to_user(u, k, sizeof(struct termios2))
64 ++
65 ++#define user_termios_to_kernel_termios_1(k, u) \
66 ++ copy_from_user(k, u, sizeof(struct termios))
67 ++
68 ++#define kernel_termios_to_user_termios_1(u, k) \
69 + copy_to_user(u, k, sizeof(struct termios))
70 +
71 + #endif /* _ALPHA_TERMIOS_H */
72 +diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
73 +index 3729d92d3fa8..dc8c20ac7191 100644
74 +--- a/arch/alpha/include/uapi/asm/ioctls.h
75 ++++ b/arch/alpha/include/uapi/asm/ioctls.h
76 +@@ -32,6 +32,11 @@
77 + #define TCXONC _IO('t', 30)
78 + #define TCFLSH _IO('t', 31)
79 +
80 ++#define TCGETS2 _IOR('T', 42, struct termios2)
81 ++#define TCSETS2 _IOW('T', 43, struct termios2)
82 ++#define TCSETSW2 _IOW('T', 44, struct termios2)
83 ++#define TCSETSF2 _IOW('T', 45, struct termios2)
84 ++
85 + #define TIOCSWINSZ _IOW('t', 103, struct winsize)
86 + #define TIOCGWINSZ _IOR('t', 104, struct winsize)
87 + #define TIOCSTART _IO('t', 110) /* start output, like ^Q */
88 +diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
89 +index de6c8360fbe3..4575ba34a0ea 100644
90 +--- a/arch/alpha/include/uapi/asm/termbits.h
91 ++++ b/arch/alpha/include/uapi/asm/termbits.h
92 +@@ -26,6 +26,19 @@ struct termios {
93 + speed_t c_ospeed; /* output speed */
94 + };
95 +
96 ++/* Alpha has identical termios and termios2 */
97 ++
98 ++struct termios2 {
99 ++ tcflag_t c_iflag; /* input mode flags */
100 ++ tcflag_t c_oflag; /* output mode flags */
101 ++ tcflag_t c_cflag; /* control mode flags */
102 ++ tcflag_t c_lflag; /* local mode flags */
103 ++ cc_t c_cc[NCCS]; /* control characters */
104 ++ cc_t c_line; /* line discipline (== c_cc[19]) */
105 ++ speed_t c_ispeed; /* input speed */
106 ++ speed_t c_ospeed; /* output speed */
107 ++};
108 ++
109 + /* Alpha has matching termios and ktermios */
110 +
111 + struct ktermios {
112 +@@ -152,6 +165,7 @@ struct ktermios {
113 + #define B3000000 00034
114 + #define B3500000 00035
115 + #define B4000000 00036
116 ++#define BOTHER 00037
117 +
118 + #define CSIZE 00001400
119 + #define CS5 00000000
120 +@@ -169,6 +183,9 @@ struct ktermios {
121 + #define CMSPAR 010000000000 /* mark or space (stick) parity */
122 + #define CRTSCTS 020000000000 /* flow control */
123 +
124 ++#define CIBAUD 07600000
125 ++#define IBSHIFT 16
126 ++
127 + /* c_lflag bits */
128 + #define ISIG 0x00000080
129 + #define ICANON 0x00000100
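
The termios2 plumbing added above is what lets Alpha user space request arbitrary baud rates with BOTHER, as other architectures already could. A minimal user-space sketch of that pattern, assuming headers from a patched kernel; set_custom_baud and the device path are illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <asm/termbits.h>       /* struct termios2, TCGETS2/TCSETS2, BOTHER */

/* Request an arbitrary line rate via the termios2 interface. */
static int set_custom_baud(int fd, unsigned int baud)
{
        struct termios2 tio;

        if (ioctl(fd, TCGETS2, &tio) < 0)
                return -1;

        tio.c_cflag &= ~CBAUD;  /* drop the legacy Bxxx speed bits */
        tio.c_cflag |= BOTHER;  /* speed comes from c_ispeed/c_ospeed */
        tio.c_ispeed = baud;
        tio.c_ospeed = baud;

        return ioctl(fd, TCSETS2, &tio);
}

int main(void)
{
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

        if (fd < 0)
                return 1;
        /* 74880 has no Bxxx constant, so termios2 is the only way in. */
        return set_custom_baud(fd, 74880) ? 1 : 0;
}
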
130 +diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h
131 +index fdc46bb09cc1..3c12a6fb0b61 100644
132 +--- a/arch/arm/boot/dts/imx6ull-pinfunc.h
133 ++++ b/arch/arm/boot/dts/imx6ull-pinfunc.h
134 +@@ -14,14 +14,23 @@
135 + * The pin function ID is a tuple of
136 + * <mux_reg conf_reg input_reg mux_mode input_val>
137 + */
138 ++/* signals common for i.MX6UL and i.MX6ULL */
139 ++#undef MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX
140 ++#define MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6
141 ++#undef MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX
142 ++#define MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7
143 ++#undef MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS
144 ++#define MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5
145 ++#undef MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS
146 ++#define MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6
147 ++#undef MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS
148 ++#define MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7
149 ++
150 ++/* signals for i.MX6ULL only */
151 + #define MX6ULL_PAD_UART1_TX_DATA__UART5_DTE_RX 0x0084 0x0310 0x0644 0x9 0x4
152 + #define MX6ULL_PAD_UART1_RX_DATA__UART5_DCE_RX 0x0088 0x0314 0x0644 0x9 0x5
153 + #define MX6ULL_PAD_UART1_CTS_B__UART5_DCE_RTS 0x008C 0x0318 0x0640 0x9 0x3
154 + #define MX6ULL_PAD_UART1_RTS_B__UART5_DTE_RTS 0x0090 0x031C 0x0640 0x9 0x4
155 +-#define MX6ULL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6
156 +-#define MX6ULL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7
157 +-#define MX6ULL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5
158 +-#define MX6ULL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6
159 + #define MX6ULL_PAD_ENET2_RX_DATA0__EPDC_SDDO08 0x00E4 0x0370 0x0000 0x9 0x0
160 + #define MX6ULL_PAD_ENET2_RX_DATA1__EPDC_SDDO09 0x00E8 0x0374 0x0000 0x9 0x0
161 + #define MX6ULL_PAD_ENET2_RX_EN__EPDC_SDDO10 0x00EC 0x0378 0x0000 0x9 0x0
162 +@@ -55,7 +64,6 @@
163 + #define MX6ULL_PAD_CSI_DATA00__ESAI_TX_HF_CLK 0x01E4 0x0470 0x0000 0x9 0x0
164 + #define MX6ULL_PAD_CSI_DATA01__ESAI_RX_HF_CLK 0x01E8 0x0474 0x0000 0x9 0x0
165 + #define MX6ULL_PAD_CSI_DATA02__ESAI_RX_FS 0x01EC 0x0478 0x0000 0x9 0x0
166 +-#define MX6ULL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7
167 + #define MX6ULL_PAD_CSI_DATA03__ESAI_RX_CLK 0x01F0 0x047C 0x0000 0x9 0x0
168 + #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0
169 + #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0
170 +diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
171 +index 200ebda47e0c..254dcad97e67 100644
172 +--- a/arch/arm/configs/imx_v6_v7_defconfig
173 ++++ b/arch/arm/configs/imx_v6_v7_defconfig
174 +@@ -406,6 +406,7 @@ CONFIG_ZISOFS=y
175 + CONFIG_UDF_FS=m
176 + CONFIG_MSDOS_FS=m
177 + CONFIG_VFAT_FS=y
178 ++CONFIG_TMPFS_POSIX_ACL=y
179 + CONFIG_JFFS2_FS=y
180 + CONFIG_UBIFS_FS=y
181 + CONFIG_NFS_FS=y
182 +diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
183 +index 6fe52819e014..339eb17c9808 100644
184 +--- a/arch/arm/mm/proc-v7.S
185 ++++ b/arch/arm/mm/proc-v7.S
186 +@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
187 + hvc #0
188 + ldmfd sp!, {r0 - r3}
189 + b cpu_v7_switch_mm
190 +-ENDPROC(cpu_v7_smc_switch_mm)
191 ++ENDPROC(cpu_v7_hvc_switch_mm)
192 + #endif
193 + ENTRY(cpu_v7_iciallu_switch_mm)
194 + mov r3, #0
195 +diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
196 +index 3989876ab699..6c8bd13d64b8 100644
197 +--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
198 ++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
199 +@@ -131,6 +131,9 @@
200 + reset-names = "stmmaceth";
201 + clocks = <&clkmgr STRATIX10_EMAC0_CLK>;
202 + clock-names = "stmmaceth";
203 ++ tx-fifo-depth = <16384>;
204 ++ rx-fifo-depth = <16384>;
205 ++ snps,multicast-filter-bins = <256>;
206 + status = "disabled";
207 + };
208 +
209 +@@ -144,6 +147,9 @@
210 + reset-names = "stmmaceth";
211 + clocks = <&clkmgr STRATIX10_EMAC1_CLK>;
212 + clock-names = "stmmaceth";
213 ++ tx-fifo-depth = <16384>;
214 ++ rx-fifo-depth = <16384>;
215 ++ snps,multicast-filter-bins = <256>;
216 + status = "disabled";
217 + };
218 +
219 +@@ -157,6 +163,9 @@
220 + reset-names = "stmmaceth";
221 + clocks = <&clkmgr STRATIX10_EMAC2_CLK>;
222 + clock-names = "stmmaceth";
223 ++ tx-fifo-depth = <16384>;
224 ++ rx-fifo-depth = <16384>;
225 ++ snps,multicast-filter-bins = <256>;
226 + status = "disabled";
227 + };
228 +
229 +diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
230 +index f9b1ef12db48..fb1b9ddd9f51 100644
231 +--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
232 ++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
233 +@@ -76,7 +76,7 @@
234 + phy-mode = "rgmii";
235 + phy-handle = <&phy0>;
236 +
237 +- max-frame-size = <3800>;
238 ++ max-frame-size = <9000>;
239 +
240 + mdio0 {
241 + #address-cells = <1>;
242 +diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
243 +index 3644b68c0ccc..be9f727a9328 100644
244 +--- a/arch/mips/include/asm/mach-loongson64/irq.h
245 ++++ b/arch/mips/include/asm/mach-loongson64/irq.h
246 +@@ -10,7 +10,7 @@
247 + #define MIPS_CPU_IRQ_BASE 56
248 +
249 + #define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
250 +-#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
251 ++#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
252 + #define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
253 +
254 + #define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
255 +diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
256 +index d455363d51c3..4c07a43a3242 100644
257 +--- a/arch/mips/kernel/crash.c
258 ++++ b/arch/mips/kernel/crash.c
259 +@@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs)
260 + if (!cpu_online(cpu))
261 + return;
262 +
263 ++ /* We won't be sent IPIs any more. */
264 ++ set_cpu_online(cpu, false);
265 ++
266 + local_irq_disable();
267 + if (!cpumask_test_cpu(cpu, &cpus_in_crash))
268 + crash_save_cpu(regs, cpu);
269 +diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
270 +index 8b574bcd39ba..4b3726e4fe3a 100644
271 +--- a/arch/mips/kernel/machine_kexec.c
272 ++++ b/arch/mips/kernel/machine_kexec.c
273 +@@ -118,6 +118,9 @@ machine_kexec(struct kimage *image)
274 + *ptr = (unsigned long) phys_to_virt(*ptr);
275 + }
276 +
277 ++ /* Mark offline BEFORE disabling local irq. */
278 ++ set_cpu_online(smp_processor_id(), false);
279 ++
280 + /*
281 + * we do not want to be bothered.
282 + */
283 +diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
284 +index cbeb20f9fc95..5605061f5f98 100644
285 +--- a/arch/mips/loongson64/loongson-3/irq.c
286 ++++ b/arch/mips/loongson64/loongson-3/irq.c
287 +@@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending)
288 + }
289 + }
290 +
291 +-static struct irqaction cascade_irqaction = {
292 +- .handler = no_action,
293 +- .flags = IRQF_NO_SUSPEND,
294 +- .name = "cascade",
295 +-};
296 +-
297 +-static inline void mask_loongson_irq(struct irq_data *d)
298 +-{
299 +- clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
300 +- irq_disable_hazard();
301 +-
302 +- /* Workaround: UART IRQ may deliver to any core */
303 +- if (d->irq == LOONGSON_UART_IRQ) {
304 +- int cpu = smp_processor_id();
305 +- int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
306 +- int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
307 +- u64 intenclr_addr = smp_group[node_id] |
308 +- (u64)(&LOONGSON_INT_ROUTER_INTENCLR);
309 +- u64 introuter_lpc_addr = smp_group[node_id] |
310 +- (u64)(&LOONGSON_INT_ROUTER_LPC);
311 +-
312 +- *(volatile u32 *)intenclr_addr = 1 << 10;
313 +- *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
314 +- }
315 +-}
316 +-
317 +-static inline void unmask_loongson_irq(struct irq_data *d)
318 +-{
319 +- /* Workaround: UART IRQ may deliver to any core */
320 +- if (d->irq == LOONGSON_UART_IRQ) {
321 +- int cpu = smp_processor_id();
322 +- int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
323 +- int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
324 +- u64 intenset_addr = smp_group[node_id] |
325 +- (u64)(&LOONGSON_INT_ROUTER_INTENSET);
326 +- u64 introuter_lpc_addr = smp_group[node_id] |
327 +- (u64)(&LOONGSON_INT_ROUTER_LPC);
328 +-
329 +- *(volatile u32 *)intenset_addr = 1 << 10;
330 +- *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
331 +- }
332 +-
333 +- set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
334 +- irq_enable_hazard();
335 +-}
336 ++static inline void mask_loongson_irq(struct irq_data *d) { }
337 ++static inline void unmask_loongson_irq(struct irq_data *d) { }
338 +
339 + /* For MIPS IRQs which shared by all cores */
340 + static struct irq_chip loongson_irq_chip = {
341 +@@ -183,12 +140,11 @@ void __init mach_init_irq(void)
342 + chip->irq_set_affinity = plat_set_irq_affinity;
343 +
344 + irq_set_chip_and_handler(LOONGSON_UART_IRQ,
345 +- &loongson_irq_chip, handle_level_irq);
346 +-
347 +- /* setup HT1 irq */
348 +- setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
349 ++ &loongson_irq_chip, handle_percpu_irq);
350 ++ irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
351 ++ &loongson_irq_chip, handle_percpu_irq);
352 +
353 +- set_c0_status(STATUSF_IP2 | STATUSF_IP6);
354 ++ set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
355 + }
356 +
357 + #ifdef CONFIG_HOTPLUG_CPU
358 +diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
359 +index f1e92bf743c2..3c3b1e6abb53 100644
360 +--- a/arch/mips/pci/pci-legacy.c
361 ++++ b/arch/mips/pci/pci-legacy.c
362 +@@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose)
363 + if (pci_has_flag(PCI_PROBE_ONLY)) {
364 + pci_bus_claim_resources(bus);
365 + } else {
366 ++ struct pci_bus *child;
367 ++
368 + pci_bus_size_bridges(bus);
369 + pci_bus_assign_resources(bus);
370 ++ list_for_each_entry(child, &bus->children, node)
371 ++ pcie_bus_configure_settings(child);
372 + }
373 + pci_bus_add_devices(bus);
374 + }
375 +diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
376 +index fb96206de317..2510ff9381d0 100644
377 +--- a/arch/powerpc/Makefile
378 ++++ b/arch/powerpc/Makefile
379 +@@ -244,7 +244,11 @@ cpu-as-$(CONFIG_4xx) += -Wa,-m405
380 + cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
381 + cpu-as-$(CONFIG_E200) += -Wa,-me200
382 + cpu-as-$(CONFIG_E500) += -Wa,-me500
383 +-cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
384 ++
385 ++# When using '-many -mpower4' gas will first try and find a matching power4
386 ++# mnemonic and failing that it will allow any valid mnemonic that GAS knows
387 ++# about. GCC will pass -many to GAS when assembling, clang does not.
388 ++cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many
389 + cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
390 +
391 + KBUILD_AFLAGS += $(cpu-as-y)
392 +diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
393 +index dcf2f15e6797..32dfe6d083f3 100644
394 +--- a/arch/powerpc/boot/crt0.S
395 ++++ b/arch/powerpc/boot/crt0.S
396 +@@ -47,8 +47,10 @@ p_end: .long _end
397 + p_pstack: .long _platform_stack_top
398 + #endif
399 +
400 +- .weak _zimage_start
401 + .globl _zimage_start
402 ++ /* Clang appears to require the .weak directive to be after the symbol
403 ++ * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
404 ++ .weak _zimage_start
405 + _zimage_start:
406 + .globl _zimage_start_lib
407 + _zimage_start_lib:
408 +diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
409 +index 4f547752ae79..193f53116c7a 100644
410 +--- a/arch/powerpc/include/asm/mmu-8xx.h
411 ++++ b/arch/powerpc/include/asm/mmu-8xx.h
412 +@@ -34,20 +34,12 @@
413 + * respectively NA for All or X for Supervisor and no access for User.
414 + * Then we use the APG to say whether accesses are according to Page rules or
415 + * "all Supervisor" rules (Access to all)
416 +- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
417 +- * When that bit is not set access is done iaw "all user"
418 +- * which means no access iaw page rules.
419 +- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
420 +- * 0x => No access => 11 (all accesses performed as user iaw page definition)
421 +- * 10 => No user => 01 (all accesses performed according to page definition)
422 +- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
423 ++ * Therefore, we define 2 APG groups. lsb is _PMD_USER
424 ++ * 0 => No user => 01 (all accesses performed according to page definition)
425 ++ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
426 + * We define all 16 groups so that all other bits of APG can take any value
427 + */
428 +-#ifdef CONFIG_SWAP
429 +-#define MI_APG_INIT 0xf4f4f4f4
430 +-#else
431 + #define MI_APG_INIT 0x44444444
432 +-#endif
433 +
434 + /* The effective page number register. When read, contains the information
435 + * about the last instruction TLB miss. When MI_RPN is written, bits in
436 +@@ -115,20 +107,12 @@
437 + * Supervisor and no access for user and NA for ALL.
438 + * Then we use the APG to say whether accesses are according to Page rules or
439 + * "all Supervisor" rules (Access to all)
440 +- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
441 +- * When that bit is not set access is done iaw "all user"
442 +- * which means no access iaw page rules.
443 +- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
444 +- * 0x => No access => 11 (all accesses performed as user iaw page definition)
445 +- * 10 => No user => 01 (all accesses performed according to page definition)
446 +- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
447 ++ * Therefore, we define 2 APG groups. lsb is _PMD_USER
448 ++ * 0 => No user => 01 (all accesses performed according to page definition)
449 ++ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
450 + * We define all 16 groups so that all other bits of APG can take any value
451 + */
452 +-#ifdef CONFIG_SWAP
453 +-#define MD_APG_INIT 0xf4f4f4f4
454 +-#else
455 + #define MD_APG_INIT 0x44444444
456 +-#endif
457 +
458 + /* The effective page number register. When read, contains the information
459 + * about the last instruction TLB miss. When MD_RPN is written, bits in
460 +@@ -180,12 +164,6 @@
461 + */
462 + #define SPRN_M_TW 799
463 +
464 +-/* APGs */
465 +-#define M_APG0 0x00000000
466 +-#define M_APG1 0x00000020
467 +-#define M_APG2 0x00000040
468 +-#define M_APG3 0x00000060
469 +-
470 + #ifdef CONFIG_PPC_MM_SLICES
471 + #include <asm/nohash/32/slice.h>
472 + #define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
473 +diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
474 +index 5746809cfaad..bf0a02038cad 100644
475 +--- a/arch/powerpc/kernel/eeh.c
476 ++++ b/arch/powerpc/kernel/eeh.c
477 +@@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
478 + int n = 0, l = 0;
479 + char buffer[128];
480 +
481 ++ if (!pdn) {
482 ++ pr_warn("EEH: Note: No error log for absent device.\n");
483 ++ return 0;
484 ++ }
485 ++
486 + n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
487 + pdn->phb->global_number, pdn->busno,
488 + PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
489 +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
490 +index 6cab07e76732..19bdc65d05b8 100644
491 +--- a/arch/powerpc/kernel/head_8xx.S
492 ++++ b/arch/powerpc/kernel/head_8xx.S
493 +@@ -354,13 +354,14 @@ _ENTRY(ITLBMiss_cmp)
494 + #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
495 + mtcr r12
496 + #endif
497 +-
498 +-#ifdef CONFIG_SWAP
499 +- rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1
500 +-#endif
501 + /* Load the MI_TWC with the attributes for this "segment." */
502 + mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
503 +
504 ++#ifdef CONFIG_SWAP
505 ++ rlwinm r11, r10, 32-5, _PAGE_PRESENT
506 ++ and r11, r11, r10
507 ++ rlwimi r10, r11, 0, _PAGE_PRESENT
508 ++#endif
509 + li r11, RPN_PATTERN | 0x200
510 + /* The Linux PTE won't go exactly into the MMU TLB.
511 + * Software indicator bits 20 and 23 must be clear.
512 +@@ -471,14 +472,22 @@ _ENTRY(DTLBMiss_jmp)
513 + * above.
514 + */
515 + rlwimi r11, r10, 0, _PAGE_GUARDED
516 +-#ifdef CONFIG_SWAP
517 +- /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0
518 +- * on that bit will represent a Non Access group
519 +- */
520 +- rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1
521 +-#endif
522 + mtspr SPRN_MD_TWC, r11
523 +
524 ++ /* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
525 ++ * We also need to know if the insn is a load/store, so:
526 ++ * Clear _PAGE_PRESENT and load that which will
527 ++ * trap into DTLB Error with store bit set accordingly.
528 ++ */
529 ++ /* PRESENT=0x1, ACCESSED=0x20
530 ++ * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
531 ++ * r10 = (r10 & ~PRESENT) | r11;
532 ++ */
533 ++#ifdef CONFIG_SWAP
534 ++ rlwinm r11, r10, 32-5, _PAGE_PRESENT
535 ++ and r11, r11, r10
536 ++ rlwimi r10, r11, 0, _PAGE_PRESENT
537 ++#endif
538 + /* The Linux PTE won't go exactly into the MMU TLB.
539 + * Software indicator bits 24, 25, 26, and 27 must be
540 + * set. All other Linux PTE bits control the behavior
541 +@@ -638,8 +647,8 @@ InstructionBreakpoint:
542 + */
543 + DTLBMissIMMR:
544 + mtcr r12
545 +- /* Set 512k byte guarded page and mark it valid and accessed */
546 +- li r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2
547 ++ /* Set 512k byte guarded page and mark it valid */
548 ++ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
549 + mtspr SPRN_MD_TWC, r10
550 + mfspr r10, SPRN_IMMR /* Get current IMMR */
551 + rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
552 +@@ -657,8 +666,8 @@ _ENTRY(dtlb_miss_exit_2)
553 +
554 + DTLBMissLinear:
555 + mtcr r12
556 +- /* Set 8M byte page and mark it valid and accessed */
557 +- li r11, MD_PS8MEG | MD_SVALID | M_APG2
558 ++ /* Set 8M byte page and mark it valid */
559 ++ li r11, MD_PS8MEG | MD_SVALID
560 + mtspr SPRN_MD_TWC, r11
561 + rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
562 + ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
563 +@@ -676,8 +685,8 @@ _ENTRY(dtlb_miss_exit_3)
564 + #ifndef CONFIG_PIN_TLB_TEXT
565 + ITLBMissLinear:
566 + mtcr r12
567 +- /* Set 8M byte page and mark it valid,accessed */
568 +- li r11, MI_PS8MEG | MI_SVALID | M_APG2
569 ++ /* Set 8M byte page and mark it valid */
570 ++ li r11, MI_PS8MEG | MI_SVALID
571 + mtspr SPRN_MI_TWC, r11
572 + rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
573 + ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
574 +@@ -960,7 +969,7 @@ initial_mmu:
575 + ori r8, r8, MI_EVALID /* Mark it valid */
576 + mtspr SPRN_MI_EPN, r8
577 + li r8, MI_PS8MEG /* Set 8M byte page */
578 +- ori r8, r8, MI_SVALID | M_APG2 /* Make it valid, APG 2 */
579 ++ ori r8, r8, MI_SVALID /* Make it valid */
580 + mtspr SPRN_MI_TWC, r8
581 + li r8, MI_BOOTINIT /* Create RPN for address 0 */
582 + mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
583 +@@ -987,7 +996,7 @@ initial_mmu:
584 + ori r8, r8, MD_EVALID /* Mark it valid */
585 + mtspr SPRN_MD_EPN, r8
586 + li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
587 +- ori r8, r8, MD_SVALID | M_APG2 /* Make it valid and accessed */
588 ++ ori r8, r8, MD_SVALID /* Make it valid */
589 + mtspr SPRN_MD_TWC, r8
590 + mr r8, r9 /* Create paddr for TLB */
591 + ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
592 +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
593 +index b8d61e019d06..f7b1203bdaee 100644
594 +--- a/arch/powerpc/kernel/module_64.c
595 ++++ b/arch/powerpc/kernel/module_64.c
596 +@@ -685,7 +685,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
597 +
598 + case R_PPC64_REL32:
599 + /* 32 bits relative (used by relative exception tables) */
600 +- *(u32 *)location = value - (unsigned long)location;
601 ++ /* Convert value to relative */
602 ++ value -= (unsigned long)location;
603 ++ if (value + 0x80000000 > 0xffffffff) {
604 ++ pr_err("%s: REL32 %li out of range!\n",
605 ++ me->name, (long int)value);
606 ++ return -ENOEXEC;
607 ++ }
608 ++ *(u32 *)location = value;
609 + break;
610 +
611 + case R_PPC64_TOCSAVE:
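
The new bounds check uses a standard idiom: adding 2^31 shifts the representable signed 32-bit window [-2^31, 2^31 - 1] onto [0, 0xffffffff], so a single unsigned comparison rejects overflow in either direction. A stand-alone sketch of that test (rel32_fits is a name invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* A 32-bit relative relocation holds offsets in [-2^31, 2^31 - 1]. */
static int rel32_fits(uint64_t value)
{
        return value + 0x80000000ull <= 0xffffffffull;
}

int main(void)
{
        printf("%d\n", rel32_fits((uint64_t)-0x80000000ll)); /* 1: lowest offset */
        printf("%d\n", rel32_fits(0x7fffffffull));           /* 1: highest offset */
        printf("%d\n", rel32_fits(0x80000000ull));           /* 0: rejected */
        return 0;
}
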
612 +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
613 +index 0e17dcb48720..6bfcb5a506af 100644
614 +--- a/arch/powerpc/kernel/traps.c
615 ++++ b/arch/powerpc/kernel/traps.c
616 +@@ -736,12 +736,17 @@ void machine_check_exception(struct pt_regs *regs)
617 + if (check_io_access(regs))
618 + goto bail;
619 +
620 +- die("Machine check", regs, SIGBUS);
621 +-
622 + /* Must die if the interrupt is not recoverable */
623 + if (!(regs->msr & MSR_RI))
624 + nmi_panic(regs, "Unrecoverable Machine check");
625 +
626 ++ if (!nested)
627 ++ nmi_exit();
628 ++
629 ++ die("Machine check", regs, SIGBUS);
630 ++
631 ++ return;
632 ++
633 + bail:
634 + if (!nested)
635 + nmi_exit();
636 +diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
637 +index cf77d755246d..5d53684c2ebd 100644
638 +--- a/arch/powerpc/mm/8xx_mmu.c
639 ++++ b/arch/powerpc/mm/8xx_mmu.c
640 +@@ -79,7 +79,7 @@ void __init MMU_init_hw(void)
641 + for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
642 + mtspr(SPRN_MD_CTR, ctr | (i << 8));
643 + mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
644 +- mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2);
645 ++ mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
646 + mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
647 + addr += LARGE_PAGE_SIZE_8M;
648 + mem -= LARGE_PAGE_SIZE_8M;
649 +diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
650 +index 876e2a3c79f2..bdf33b989f98 100644
651 +--- a/arch/powerpc/mm/dump_linuxpagetables.c
652 ++++ b/arch/powerpc/mm/dump_linuxpagetables.c
653 +@@ -418,12 +418,13 @@ static void walk_pagetables(struct pg_state *st)
654 + unsigned int i;
655 + unsigned long addr;
656 +
657 ++ addr = st->start_address;
658 ++
659 + /*
660 + * Traverse the linux pagetable structure and dump pages that are in
661 + * the hash pagetable.
662 + */
663 +- for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
664 +- addr = KERN_VIRT_START + i * PGDIR_SIZE;
665 ++ for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
666 + if (!pgd_none(*pgd) && !pgd_huge(*pgd))
667 + /* pgd exists */
668 + walk_pud(st, pgd, addr);
669 +@@ -472,9 +473,14 @@ static int ptdump_show(struct seq_file *m, void *v)
670 + {
671 + struct pg_state st = {
672 + .seq = m,
673 +- .start_address = KERN_VIRT_START,
674 + .marker = address_markers,
675 + };
676 ++
677 ++ if (radix_enabled())
678 ++ st.start_address = PAGE_OFFSET;
679 ++ else
680 ++ st.start_address = KERN_VIRT_START;
681 ++
682 + /* Traverse kernel page tables */
683 + walk_pagetables(&st);
684 + note_page(&st, 0, 0, 0);
685 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
686 +index 8a9a49c13865..a84c410e5090 100644
687 +--- a/arch/powerpc/mm/hugetlbpage.c
688 ++++ b/arch/powerpc/mm/hugetlbpage.c
689 +@@ -19,6 +19,7 @@
690 + #include <linux/moduleparam.h>
691 + #include <linux/swap.h>
692 + #include <linux/swapops.h>
693 ++#include <linux/kmemleak.h>
694 + #include <asm/pgtable.h>
695 + #include <asm/pgalloc.h>
696 + #include <asm/tlb.h>
697 +@@ -112,6 +113,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
698 + for (i = i - 1 ; i >= 0; i--, hpdp--)
699 + *hpdp = __hugepd(0);
700 + kmem_cache_free(cachep, new);
701 ++ } else {
702 ++ kmemleak_ignore(new);
703 + }
704 + spin_unlock(ptl);
705 + return 0;
706 +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
707 +index 205fe557ca10..4f213ba33491 100644
708 +--- a/arch/powerpc/mm/slice.c
709 ++++ b/arch/powerpc/mm/slice.c
710 +@@ -61,6 +61,13 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
711 +
712 + #endif
713 +
714 ++static inline bool slice_addr_is_low(unsigned long addr)
715 ++{
716 ++ u64 tmp = (u64)addr;
717 ++
718 ++ return tmp < SLICE_LOW_TOP;
719 ++}
720 ++
721 + static void slice_range_to_mask(unsigned long start, unsigned long len,
722 + struct slice_mask *ret)
723 + {
724 +@@ -70,7 +77,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
725 + if (SLICE_NUM_HIGH)
726 + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
727 +
728 +- if (start < SLICE_LOW_TOP) {
729 ++ if (slice_addr_is_low(start)) {
730 + unsigned long mend = min(end,
731 + (unsigned long)(SLICE_LOW_TOP - 1));
732 +
733 +@@ -78,7 +85,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
734 + - (1u << GET_LOW_SLICE_INDEX(start));
735 + }
736 +
737 +- if ((start + len) > SLICE_LOW_TOP) {
738 ++ if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
739 + unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
740 + unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
741 + unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
742 +@@ -133,7 +140,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
743 + if (!slice_low_has_vma(mm, i))
744 + ret->low_slices |= 1u << i;
745 +
746 +- if (high_limit <= SLICE_LOW_TOP)
747 ++ if (slice_addr_is_low(high_limit - 1))
748 + return;
749 +
750 + for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
751 +@@ -182,7 +189,7 @@ static bool slice_check_range_fits(struct mm_struct *mm,
752 + unsigned long end = start + len - 1;
753 + u64 low_slices = 0;
754 +
755 +- if (start < SLICE_LOW_TOP) {
756 ++ if (slice_addr_is_low(start)) {
757 + unsigned long mend = min(end,
758 + (unsigned long)(SLICE_LOW_TOP - 1));
759 +
760 +@@ -192,7 +199,7 @@ static bool slice_check_range_fits(struct mm_struct *mm,
761 + if ((low_slices & available->low_slices) != low_slices)
762 + return false;
763 +
764 +- if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
765 ++ if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
766 + unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
767 + unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
768 + unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
769 +@@ -303,7 +310,7 @@ static bool slice_scan_available(unsigned long addr,
770 + int end, unsigned long *boundary_addr)
771 + {
772 + unsigned long slice;
773 +- if (addr < SLICE_LOW_TOP) {
774 ++ if (slice_addr_is_low(addr)) {
775 + slice = GET_LOW_SLICE_INDEX(addr);
776 + *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
777 + return !!(available->low_slices & (1u << slice));
778 +@@ -706,7 +713,7 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
779 +
780 + VM_BUG_ON(radix_enabled());
781 +
782 +- if (addr < SLICE_LOW_TOP) {
783 ++ if (slice_addr_is_low(addr)) {
784 + psizes = mm->context.low_slices_psize;
785 + index = GET_LOW_SLICE_INDEX(addr);
786 + } else {
787 +diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
788 +index 15fe5f0c8665..ae5d568e267f 100644
789 +--- a/arch/powerpc/mm/tlb_nohash.c
790 ++++ b/arch/powerpc/mm/tlb_nohash.c
791 +@@ -503,6 +503,9 @@ static void setup_page_sizes(void)
792 + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
793 + struct mmu_psize_def *def = &mmu_psize_defs[psize];
794 +
795 ++ if (!def->shift)
796 ++ continue;
797 ++
798 + if (tlb1ps & (1U << (def->shift - 10))) {
799 + def->flags |= MMU_PAGE_SIZE_DIRECT;
800 +
801 +diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
802 +index b99283df8584..265669002da0 100644
803 +--- a/arch/powerpc/platforms/powernv/memtrace.c
804 ++++ b/arch/powerpc/platforms/powernv/memtrace.c
805 +@@ -119,17 +119,15 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
806 + walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
807 + change_memblock_state);
808 +
809 +- lock_device_hotplug();
810 +- remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
811 +- unlock_device_hotplug();
812 +
813 + return true;
814 + }
815 +
816 + static u64 memtrace_alloc_node(u32 nid, u64 size)
817 + {
818 +- u64 start_pfn, end_pfn, nr_pages;
819 ++ u64 start_pfn, end_pfn, nr_pages, pfn;
820 + u64 base_pfn;
821 ++ u64 bytes = memory_block_size_bytes();
822 +
823 + if (!node_spanned_pages(nid))
824 + return 0;
825 +@@ -142,8 +140,21 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
826 + end_pfn = round_down(end_pfn - nr_pages, nr_pages);
827 +
828 + for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
829 +- if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true)
830 ++ if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
831 ++ /*
832 ++ * Remove memory in memory block size chunks so that
833 ++ * iomem resources are always split to the same size and
834 ++ * we never try to remove memory that spans two iomem
835 ++ * resources.
836 ++ */
837 ++ lock_device_hotplug();
838 ++ end_pfn = base_pfn + nr_pages;
839 ++ for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT) {
840 ++ remove_memory(nid, pfn << PAGE_SHIFT, bytes);
841 ++ }
842 ++ unlock_device_hotplug();
843 + return base_pfn << PAGE_SHIFT;
844 ++ }
845 + }
846 +
847 + return 0;
848 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
849 +index 3a17107594c8..eb786f90f2d3 100644
850 +--- a/arch/x86/include/asm/mce.h
851 ++++ b/arch/x86/include/asm/mce.h
852 +@@ -216,6 +216,8 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s
853 +
854 + int mce_available(struct cpuinfo_x86 *c);
855 + bool mce_is_memory_error(struct mce *m);
856 ++bool mce_is_correctable(struct mce *m);
857 ++int mce_usable_address(struct mce *m);
858 +
859 + DECLARE_PER_CPU(unsigned, mce_exception_count);
860 + DECLARE_PER_CPU(unsigned, mce_poll_count);
861 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
862 +index 8c50754c09c1..c51b9d116be1 100644
863 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
864 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
865 +@@ -489,7 +489,7 @@ static void mce_report_event(struct pt_regs *regs)
866 + * be somewhat complicated (e.g. segment offset would require an instruction
867 + * parser). So only support physical addresses up to page granularity for now.
868 + */
869 +-static int mce_usable_address(struct mce *m)
870 ++int mce_usable_address(struct mce *m)
871 + {
872 + if (!(m->status & MCI_STATUS_ADDRV))
873 + return 0;
874 +@@ -509,6 +509,7 @@ static int mce_usable_address(struct mce *m)
875 +
876 + return 1;
877 + }
878 ++EXPORT_SYMBOL_GPL(mce_usable_address);
879 +
880 + bool mce_is_memory_error(struct mce *m)
881 + {
882 +@@ -538,7 +539,7 @@ bool mce_is_memory_error(struct mce *m)
883 + }
884 + EXPORT_SYMBOL_GPL(mce_is_memory_error);
885 +
886 +-static bool mce_is_correctable(struct mce *m)
887 ++bool mce_is_correctable(struct mce *m)
888 + {
889 + if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
890 + return false;
891 +@@ -548,6 +549,7 @@ static bool mce_is_correctable(struct mce *m)
892 +
893 + return true;
894 + }
895 ++EXPORT_SYMBOL_GPL(mce_is_correctable);
896 +
897 + static bool cec_add_mce(struct mce *m)
898 + {
899 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
900 +index 031082c96db8..12a67046fefb 100644
901 +--- a/arch/x86/kernel/cpu/mshyperv.c
902 ++++ b/arch/x86/kernel/cpu/mshyperv.c
903 +@@ -20,6 +20,7 @@
904 + #include <linux/interrupt.h>
905 + #include <linux/irq.h>
906 + #include <linux/kexec.h>
907 ++#include <linux/i8253.h>
908 + #include <asm/processor.h>
909 + #include <asm/hypervisor.h>
910 + #include <asm/hyperv-tlfs.h>
911 +@@ -285,6 +286,16 @@ static void __init ms_hyperv_init_platform(void)
912 + if (efi_enabled(EFI_BOOT))
913 + x86_platform.get_nmi_reason = hv_get_nmi_reason;
914 +
915 ++ /*
916 ++ * Hyper-V VMs have a PIT emulation quirk such that zeroing the
917 ++ * counter register during PIT shutdown restarts the PIT. So it
918 ++ * continues to interrupt @18.2 HZ. Setting i8253_clear_counter
919 ++ * to false tells pit_shutdown() not to zero the counter so that
920 ++ * the PIT really is shutdown. Generation 2 VMs don't have a PIT,
921 ++ * and setting this value has no effect.
922 ++ */
923 ++ i8253_clear_counter_on_shutdown = false;
924 ++
925 + #if IS_ENABLED(CONFIG_HYPERV)
926 + /*
927 + * Setup the hook to get control post apic initialization.
928 +diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
929 +index 8e005329648b..d805202c63cd 100644
930 +--- a/arch/x86/kernel/cpu/vmware.c
931 ++++ b/arch/x86/kernel/cpu/vmware.c
932 +@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
933 + }
934 + early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
935 +
936 +-static unsigned long long vmware_sched_clock(void)
937 ++static unsigned long long notrace vmware_sched_clock(void)
938 + {
939 + unsigned long long ns;
940 +
941 +diff --git a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h
942 +index b94a108de1dc..ae00d22bce02 100644
943 +--- a/arch/x86/um/shared/sysdep/ptrace_32.h
944 ++++ b/arch/x86/um/shared/sysdep/ptrace_32.h
945 +@@ -10,20 +10,10 @@
946 +
947 + static inline void update_debugregs(int seq) {}
948 +
949 +-/* syscall emulation path in ptrace */
950 +-
951 +-#ifndef PTRACE_SYSEMU
952 +-#define PTRACE_SYSEMU 31
953 +-#endif
954 +-
955 + void set_using_sysemu(int value);
956 + int get_using_sysemu(void);
957 + extern int sysemu_supported;
958 +
959 +-#ifndef PTRACE_SYSEMU_SINGLESTEP
960 +-#define PTRACE_SYSEMU_SINGLESTEP 32
961 +-#endif
962 +-
963 + #define UPT_SYSCALL_ARG1(r) UPT_BX(r)
964 + #define UPT_SYSCALL_ARG2(r) UPT_CX(r)
965 + #define UPT_SYSCALL_ARG3(r) UPT_DX(r)
966 +diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
967 +index 53e4178711e6..8c20a7965bda 100644
968 +--- a/arch/xtensa/boot/Makefile
969 ++++ b/arch/xtensa/boot/Makefile
970 +@@ -34,7 +34,7 @@ boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) \
971 + $(addprefix $(obj)/,$(host-progs))
972 + $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
973 +
974 +-OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
975 ++OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
976 +
977 + vmlinux.bin: vmlinux FORCE
978 + $(call if_changed,objcopy)
979 +diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
980 +index 5b0027d4ecc0..a39cd81b741a 100644
981 +--- a/arch/xtensa/include/asm/processor.h
982 ++++ b/arch/xtensa/include/asm/processor.h
983 +@@ -24,7 +24,11 @@
984 + # error Linux requires the Xtensa Windowed Registers Option.
985 + #endif
986 +
987 +-#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
988 ++/* Xtensa ABI requires stack alignment to be at least 16 */
989 ++
990 ++#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
991 ++
992 ++#define ARCH_SLAB_MINALIGN STACK_ALIGN
993 +
994 + /*
995 + * User space process size: 1 GB.
996 +diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
997 +index 9c4e9433e536..3ceb76c7a4ae 100644
998 +--- a/arch/xtensa/kernel/head.S
999 ++++ b/arch/xtensa/kernel/head.S
1000 +@@ -88,9 +88,12 @@ _SetupMMU:
1001 + initialize_mmu
1002 + #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
1003 + rsr a2, excsave1
1004 +- movi a3, 0x08000000
1005 ++ movi a3, XCHAL_KSEG_PADDR
1006 ++ bltu a2, a3, 1f
1007 ++ sub a2, a2, a3
1008 ++ movi a3, XCHAL_KSEG_SIZE
1009 + bgeu a2, a3, 1f
1010 +- movi a3, 0xd0000000
1011 ++ movi a3, XCHAL_KSEG_CACHED_VADDR
1012 + add a2, a2, a3
1013 + wsr a2, excsave1
1014 + 1:
1015 +diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
1016 +index 70b731edc7b8..c430c96ea723 100644
1017 +--- a/arch/xtensa/kernel/vmlinux.lds.S
1018 ++++ b/arch/xtensa/kernel/vmlinux.lds.S
1019 +@@ -131,6 +131,7 @@ SECTIONS
1020 + .fixup : { *(.fixup) }
1021 +
1022 + EXCEPTION_TABLE(16)
1023 ++ NOTES
1024 + /* Data section */
1025 +
1026 + _sdata = .;
1027 +diff --git a/block/blk-core.c b/block/blk-core.c
1028 +index f9d2e1b66e05..41b7396c8658 100644
1029 +--- a/block/blk-core.c
1030 ++++ b/block/blk-core.c
1031 +@@ -793,9 +793,8 @@ void blk_cleanup_queue(struct request_queue *q)
1032 + * dispatch may still be in-progress since we dispatch requests
1033 + * from more than one contexts.
1034 + *
1035 +- * No need to quiesce queue if it isn't initialized yet since
1036 +- * blk_freeze_queue() should be enough for cases of passthrough
1037 +- * request.
1038 ++ * We rely on the driver to deal with the race in case queue
1039 ++ * initialization isn't done.
1040 + */
1041 + if (q->mq_ops && blk_queue_init_done(q))
1042 + blk_mq_quiesce_queue(q);
1043 +diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
1044 +index 0e89b5457cab..ceeb2eaf28cf 100644
1045 +--- a/crypto/crypto_user.c
1046 ++++ b/crypto/crypto_user.c
1047 +@@ -83,7 +83,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
1048 + {
1049 + struct crypto_report_cipher rcipher;
1050 +
1051 +- strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
1052 ++ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
1053 +
1054 + rcipher.blocksize = alg->cra_blocksize;
1055 + rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
1056 +@@ -102,7 +102,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
1057 + {
1058 + struct crypto_report_comp rcomp;
1059 +
1060 +- strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
1061 ++ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
1062 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
1063 + sizeof(struct crypto_report_comp), &rcomp))
1064 + goto nla_put_failure;
1065 +@@ -116,7 +116,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
1066 + {
1067 + struct crypto_report_acomp racomp;
1068 +
1069 +- strlcpy(racomp.type, "acomp", sizeof(racomp.type));
1070 ++ strncpy(racomp.type, "acomp", sizeof(racomp.type));
1071 +
1072 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
1073 + sizeof(struct crypto_report_acomp), &racomp))
1074 +@@ -131,7 +131,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
1075 + {
1076 + struct crypto_report_akcipher rakcipher;
1077 +
1078 +- strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
1079 ++ strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
1080 +
1081 + if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
1082 + sizeof(struct crypto_report_akcipher), &rakcipher))
1083 +@@ -146,7 +146,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
1084 + {
1085 + struct crypto_report_kpp rkpp;
1086 +
1087 +- strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
1088 ++ strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
1089 +
1090 + if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
1091 + sizeof(struct crypto_report_kpp), &rkpp))
1092 +@@ -160,10 +160,10 @@ nla_put_failure:
1093 + static int crypto_report_one(struct crypto_alg *alg,
1094 + struct crypto_user_alg *ualg, struct sk_buff *skb)
1095 + {
1096 +- strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
1097 +- strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
1098 ++ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
1099 ++ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
1100 + sizeof(ualg->cru_driver_name));
1101 +- strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
1102 ++ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
1103 + sizeof(ualg->cru_module_name));
1104 +
1105 + ualg->cru_type = 0;
1106 +@@ -176,7 +176,7 @@ static int crypto_report_one(struct crypto_alg *alg,
1107 + if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
1108 + struct crypto_report_larval rl;
1109 +
1110 +- strlcpy(rl.type, "larval", sizeof(rl.type));
1111 ++ strncpy(rl.type, "larval", sizeof(rl.type));
1112 + if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
1113 + sizeof(struct crypto_report_larval), &rl))
1114 + goto nla_put_failure;
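
Note the direction of the strlcpy() -> strncpy() conversions above: these fixed-size report buffers are copied to user space in full over netlink, and strlcpy() leaves every byte past the terminator untouched, so strncpy()'s zero-padding is what closes the infoleak. A user-space sketch of the difference (the 0xAA fill stands in for stale stack contents):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char report[16];
        size_t i;

        memset(report, 0xAA, sizeof(report));  /* pretend this is stale stack data */
        strncpy(report, "cipher", sizeof(report));

        /* strncpy() zeroed bytes 6..15, so copying the whole buffer
         * elsewhere discloses nothing beyond the string itself.
         * An strlcpy()-style copy would have left 0xAA in the tail. */
        for (i = 0; i < sizeof(report); i++)
                printf("%02x ", (unsigned char)report[i]);
        printf("\n");
        return 0;
}
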
1115 +diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
1116 +index 78f9de260d5f..e9fb0bf3c8d2 100644
1117 +--- a/drivers/acpi/acpica/dsopcode.c
1118 ++++ b/drivers/acpi/acpica/dsopcode.c
1119 +@@ -417,10 +417,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
1120 + ACPI_FORMAT_UINT64(obj_desc->region.address),
1121 + obj_desc->region.length));
1122 +
1123 +- status = acpi_ut_add_address_range(obj_desc->region.space_id,
1124 +- obj_desc->region.address,
1125 +- obj_desc->region.length, node);
1126 +-
1127 + /* Now the address and length are valid for this opregion */
1128 +
1129 + obj_desc->region.flags |= AOPOBJ_DATA_VALID;
1130 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
1131 +index c0db96e8a81a..8eb123d47d54 100644
1132 +--- a/drivers/acpi/nfit/core.c
1133 ++++ b/drivers/acpi/nfit/core.c
1134 +@@ -2835,9 +2835,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
1135 + return rc;
1136 +
1137 + if (ars_status_process_records(acpi_desc))
1138 +- return -ENOMEM;
1139 ++ dev_err(acpi_desc->dev, "Failed to process ARS records\n");
1140 +
1141 +- return 0;
1142 ++ return rc;
1143 + }
1144 +
1145 + static int ars_register(struct acpi_nfit_desc *acpi_desc,
1146 +diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
1147 +index e9626bf6ca29..d6c1b10f6c25 100644
1148 +--- a/drivers/acpi/nfit/mce.c
1149 ++++ b/drivers/acpi/nfit/mce.c
1150 +@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
1151 + struct acpi_nfit_desc *acpi_desc;
1152 + struct nfit_spa *nfit_spa;
1153 +
1154 +- /* We only care about memory errors */
1155 +- if (!mce_is_memory_error(mce))
1156 ++ /* We only care about uncorrectable memory errors */
1157 ++ if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
1158 ++ return NOTIFY_DONE;
1159 ++
1160 ++ /* Verify the address reported in the MCE is valid. */
1161 ++ if (!mce_usable_address(mce))
1162 + return NOTIFY_DONE;
1163 +
1164 + /*
1165 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1166 +index 321a9579556d..a9a8440a4945 100644
1167 +--- a/drivers/ata/libata-core.c
1168 ++++ b/drivers/ata/libata-core.c
1169 +@@ -4552,7 +4552,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1170 + /* These specific Samsung models/firmware-revs do not handle LPM well */
1171 + { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
1172 + { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
1173 +- { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
1174 ++ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
1175 +
1176 + /* devices that don't properly handle queued TRIM commands */
1177 + { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
1178 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
1179 +index af7cb8e618fe..363b9102ebb0 100644
1180 +--- a/drivers/block/zram/zram_drv.c
1181 ++++ b/drivers/block/zram/zram_drv.c
1182 +@@ -1637,6 +1637,11 @@ static const struct attribute_group zram_disk_attr_group = {
1183 + .attrs = zram_disk_attrs,
1184 + };
1185 +
1186 ++static const struct attribute_group *zram_disk_attr_groups[] = {
1187 ++ &zram_disk_attr_group,
1188 ++ NULL,
1189 ++};
1190 ++
1191 + /*
1192 + * Allocate and initialize new zram device. the function returns
1193 + * '>= 0' device_id upon success, and negative value otherwise.
1194 +@@ -1717,24 +1722,15 @@ static int zram_add(void)
1195 +
1196 + zram->disk->queue->backing_dev_info->capabilities |=
1197 + (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
1198 ++ disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
1199 + add_disk(zram->disk);
1200 +
1201 +- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1202 +- &zram_disk_attr_group);
1203 +- if (ret < 0) {
1204 +- pr_err("Error creating sysfs group for device %d\n",
1205 +- device_id);
1206 +- goto out_free_disk;
1207 +- }
1208 + strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1209 +
1210 + zram_debugfs_register(zram);
1211 + pr_info("Added device: %s\n", zram->disk->disk_name);
1212 + return device_id;
1213 +
1214 +-out_free_disk:
1215 +- del_gendisk(zram->disk);
1216 +- put_disk(zram->disk);
1217 + out_free_queue:
1218 + blk_cleanup_queue(queue);
1219 + out_free_idr:
1220 +@@ -1763,16 +1759,6 @@ static int zram_remove(struct zram *zram)
1221 + mutex_unlock(&bdev->bd_mutex);
1222 +
1223 + zram_debugfs_unregister(zram);
1224 +- /*
1225 +- * Remove sysfs first, so no one will perform a disksize
1226 +- * store while we destroy the devices. This also helps during
1227 +- * hot_remove -- zram_reset_device() is the last holder of
1228 +- * ->init_lock, no later/concurrent disksize_store() or any
1229 +- * other sysfs handlers are possible.
1230 +- */
1231 +- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1232 +- &zram_disk_attr_group);
1233 +-
1234 + /* Make sure all the pending I/O are finished */
1235 + fsync_bdev(bdev);
1236 + zram_reset_device(zram);
1237 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1238 +index 66acbd063562..894103abe8a8 100644
1239 +--- a/drivers/cdrom/cdrom.c
1240 ++++ b/drivers/cdrom/cdrom.c
1241 +@@ -2441,7 +2441,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
1242 + return -ENOSYS;
1243 +
1244 + if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
1245 +- if ((int)arg >= cdi->capacity)
1246 ++ if (arg >= cdi->capacity)
1247 + return -EINVAL;
1248 + }
1249 +
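
Dropping the (int) cast matters because arg arrives as an unsigned long: truncated to int it can wrap to a small or negative value that slips past the capacity check, while the plain unsigned comparison rejects it. A stand-alone sketch (the capacity value is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long arg = 0xfffffffful;  /* huge ioctl argument from user space */
        int capacity = 8;

        /* Truncating cast: (int)arg is -1 here, so the bogus check passes. */
        printf("cast check rejects:     %d\n", (int)arg >= capacity);
        /* Unsigned comparison: the huge value is correctly refused. */
        printf("unsigned check rejects: %d\n", arg >= (unsigned long)capacity);
        return 0;
}
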
1250 +diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
1251 +index 72b6091eb7b9..dc7fbc796cb6 100644
1252 +--- a/drivers/clk/at91/clk-pll.c
1253 ++++ b/drivers/clk/at91/clk-pll.c
1254 +@@ -133,6 +133,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
1255 + {
1256 + struct clk_pll *pll = to_clk_pll(hw);
1257 +
1258 ++ if (!pll->div || !pll->mul)
1259 ++ return 0;
1260 ++
1261 + return (parent_rate / pll->div) * (pll->mul + 1);
1262 + }
1263 +
1264 +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
1265 +index d44e0eea31ec..0934d3724495 100644
1266 +--- a/drivers/clk/clk-s2mps11.c
1267 ++++ b/drivers/clk/clk-s2mps11.c
1268 +@@ -245,6 +245,36 @@ static const struct platform_device_id s2mps11_clk_id[] = {
1269 + };
1270 + MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
1271 +
1272 ++#ifdef CONFIG_OF
1273 ++/*
1274 ++ * Device is instantiated through parent MFD device and device matching is done
1275 ++ * through platform_device_id.
1276 ++ *
1277 ++ * However if device's DT node contains proper clock compatible and driver is
1278 ++ * built as a module, then the *module* matching will be done through DT aliases.
1279 ++ * This requires an of_device_id table. At the same time this will not change the
1280 ++ * actual *device* matching so do not add .of_match_table.
1281 ++ */
1282 ++static const struct of_device_id s2mps11_dt_match[] = {
1283 ++ {
1284 ++ .compatible = "samsung,s2mps11-clk",
1285 ++ .data = (void *)S2MPS11X,
1286 ++ }, {
1287 ++ .compatible = "samsung,s2mps13-clk",
1288 ++ .data = (void *)S2MPS13X,
1289 ++ }, {
1290 ++ .compatible = "samsung,s2mps14-clk",
1291 ++ .data = (void *)S2MPS14X,
1292 ++ }, {
1293 ++ .compatible = "samsung,s5m8767-clk",
1294 ++ .data = (void *)S5M8767X,
1295 ++ }, {
1296 ++ /* Sentinel */
1297 ++ },
1298 ++};
1299 ++MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
1300 ++#endif
1301 ++
1302 + static struct platform_driver s2mps11_clk_driver = {
1303 + .driver = {
1304 + .name = "s2mps11-clk",
1305 +diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
1306 +index 2a5015c736ce..43e82fa64422 100644
1307 +--- a/drivers/clk/hisilicon/reset.c
1308 ++++ b/drivers/clk/hisilicon/reset.c
1309 +@@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
1310 + return NULL;
1311 +
1312 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1313 +- rstc->membase = devm_ioremap(&pdev->dev,
1314 +- res->start, resource_size(res));
1315 +- if (!rstc->membase)
1316 ++ rstc->membase = devm_ioremap_resource(&pdev->dev, res);
1317 ++ if (IS_ERR(rstc->membase))
1318 + return NULL;
1319 +
1320 + spin_lock_init(&rstc->lock);
1321 +diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
1322 +index bd4dbc696b88..cfd26fd7e404 100644
1323 +--- a/drivers/clk/meson/axg.c
1324 ++++ b/drivers/clk/meson/axg.c
1325 +@@ -320,6 +320,7 @@ static struct clk_regmap axg_fclk_div2 = {
1326 + .ops = &clk_regmap_gate_ops,
1327 + .parent_names = (const char *[]){ "fclk_div2_div" },
1328 + .num_parents = 1,
1329 ++ .flags = CLK_IS_CRITICAL,
1330 + },
1331 + };
1332 +
1333 +@@ -344,6 +345,18 @@ static struct clk_regmap axg_fclk_div3 = {
1334 + .ops = &clk_regmap_gate_ops,
1335 + .parent_names = (const char *[]){ "fclk_div3_div" },
1336 + .num_parents = 1,
1337 ++ /*
1338 ++ * FIXME:
1339 ++ * This clock, as fdiv2, is used by the SCPI FW and is required
1340 ++ * by the platform to operate correctly.
1341 ++ * Until the following conditions are met, we need this clock to
1342 ++ * be marked as critical:
1343 ++ * a) The SCPI generic driver claims and enables all the clocks
1344 ++ * it needs
1345 ++ * b) CCF has a clock hand-off mechanism to make sure the
1346 ++ * clock stays on until the proper driver comes along
1347 ++ */
1348 ++ .flags = CLK_IS_CRITICAL,
1349 + },
1350 + };
1351 +
1352 +diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
1353 +index 177fffb9ebef..902c63209785 100644
1354 +--- a/drivers/clk/meson/gxbb.c
1355 ++++ b/drivers/clk/meson/gxbb.c
1356 +@@ -523,6 +523,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
1357 + .ops = &clk_regmap_gate_ops,
1358 + .parent_names = (const char *[]){ "fclk_div3_div" },
1359 + .num_parents = 1,
1360 ++ /*
1361 ++ * FIXME:
1362 ++ * This clock, as fdiv2, is used by the SCPI FW and is required
1363 ++ * by the platform to operate correctly.
1364 ++ * Until the following conditions are met, we need this clock to
1365 ++ * be marked as critical:
1366 ++ * a) The SCPI generic driver claims and enables all the clocks
1367 ++ * it needs
1368 ++ * b) CCF has a clock hand-off mechanism to make sure the
1369 ++ * clock stays on until the proper driver comes along
1370 ++ */
1371 ++ .flags = CLK_IS_CRITICAL,
1372 + },
1373 + };
1374 +
1375 +diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
1376 +index e8075359366b..ebce5260068b 100644
1377 +--- a/drivers/clk/rockchip/clk-ddr.c
1378 ++++ b/drivers/clk/rockchip/clk-ddr.c
1379 +@@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
1380 + static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
1381 + {
1382 + struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
1383 +- int num_parents = clk_hw_get_num_parents(hw);
1384 + u32 val;
1385 +
1386 + val = clk_readl(ddrclk->reg_base +
1387 + ddrclk->mux_offset) >> ddrclk->mux_shift;
1388 + val &= GENMASK(ddrclk->mux_width - 1, 0);
1389 +
1390 +- if (val >= num_parents)
1391 +- return -EINVAL;
1392 +-
1393 + return val;
1394 + }
1395 +
1396 +diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
1397 +index 252366a5231f..2c5426607790 100644
1398 +--- a/drivers/clk/rockchip/clk-rk3328.c
1399 ++++ b/drivers/clk/rockchip/clk-rk3328.c
1400 +@@ -813,22 +813,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
1401 + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
1402 + RK3328_SDMMC_CON0, 1),
1403 + MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
1404 +- RK3328_SDMMC_CON1, 1),
1405 ++ RK3328_SDMMC_CON1, 0),
1406 +
1407 + MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
1408 + RK3328_SDIO_CON0, 1),
1409 + MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
1410 +- RK3328_SDIO_CON1, 1),
1411 ++ RK3328_SDIO_CON1, 0),
1412 +
1413 + MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
1414 + RK3328_EMMC_CON0, 1),
1415 + MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
1416 +- RK3328_EMMC_CON1, 1),
1417 ++ RK3328_EMMC_CON1, 0),
1418 +
1419 + MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext",
1420 + RK3328_SDMMC_EXT_CON0, 1),
1421 + MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext",
1422 +- RK3328_SDMMC_EXT_CON1, 1),
1423 ++ RK3328_SDMMC_EXT_CON1, 0),
1424 + };
1425 +
1426 + static const char *const rk3328_critical_clocks[] __initconst = {
1427 +diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
1428 +index bdbfe78fe133..0f7a0ffd3f70 100644
1429 +--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
1430 ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
1431 +@@ -224,7 +224,7 @@ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
1432 + psi_ahb1_ahb2_parents,
1433 + 0x510,
1434 + 0, 5, /* M */
1435 +- 16, 2, /* P */
1436 ++ 8, 2, /* P */
1437 + 24, 2, /* mux */
1438 + 0);
1439 +
1440 +@@ -233,19 +233,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
1441 + "pll-periph0" };
1442 + static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
1443 + 0, 5, /* M */
1444 +- 16, 2, /* P */
1445 ++ 8, 2, /* P */
1446 + 24, 2, /* mux */
1447 + 0);
1448 +
1449 + static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
1450 + 0, 5, /* M */
1451 +- 16, 2, /* P */
1452 ++ 8, 2, /* P */
1453 + 24, 2, /* mux */
1454 + 0);
1455 +
1456 + static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
1457 + 0, 5, /* M */
1458 +- 16, 2, /* P */
1459 ++ 8, 2, /* P */
1460 + 24, 2, /* mux */
1461 + 0);
1462 +
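All four hunks move the P (post-divider) field from bit 16 down to bit 8, matching the H6 register layout. Assuming the usual sunxi-ng MP semantics, the rate is parent / (M + 1) / 2^P, so reading P from the wrong bits misreports every rate derived from these clocks. A sketch of that computation with the corrected field positions:

	/* sketch: sunxi-ng "MP" rate recalculation (semantics assumed) */
	static unsigned long mp_recalc_rate(unsigned long parent, unsigned int reg)
	{
		unsigned int m = (reg >> 0) & 0x1f;	/* factor M, bits [4:0] */
		unsigned int p = (reg >> 8) & 0x3;	/* factor P, bits [9:8] */

		return (parent / (m + 1)) >> p;
	}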
1463 +diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
1464 +index 9c38895542f4..d4350bb10b83 100644
1465 +--- a/drivers/clocksource/i8253.c
1466 ++++ b/drivers/clocksource/i8253.c
1467 +@@ -20,6 +20,13 @@
1468 + DEFINE_RAW_SPINLOCK(i8253_lock);
1469 + EXPORT_SYMBOL(i8253_lock);
1470 +
1471 ++/*
1472 ++ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
1473 ++ * restarts the PIT, negating the shutdown. On platforms with the quirk,
1474 ++ * platform specific code can set this to false.
1475 ++ */
1476 ++bool i8253_clear_counter_on_shutdown __ro_after_init = true;
1477 ++
1478 + #ifdef CONFIG_CLKSRC_I8253
1479 + /*
1480 + * Since the PIT overflows every tick, it's not very useful
1481 +@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
1482 + raw_spin_lock(&i8253_lock);
1483 +
1484 + outb_p(0x30, PIT_MODE);
1485 +- outb_p(0, PIT_CH0);
1486 +- outb_p(0, PIT_CH0);
1487 ++
1488 ++ if (i8253_clear_counter_on_shutdown) {
1489 ++ outb_p(0, PIT_CH0);
1490 ++ outb_p(0, PIT_CH0);
1491 ++ }
1492 +
1493 + raw_spin_unlock(&i8253_lock);
1494 + return 0;
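The new flag defaults to the historical behaviour; platforms where writing a zero counter un-stops the PIT can clear it from early setup code, before the clockevent is registered. A hedged sketch of such an override:

	/* platform quirk hook (sketch): run before the PIT clockevent registers */
	extern bool i8253_clear_counter_on_shutdown;	/* declaration assumed visible, e.g. via linux/i8253.h */

	static void __init fixup_pit_quirk(void)
	{
		/* on this platform, zeroing PIT_CH0 would restart the timer */
		i8253_clear_counter_on_shutdown = false;
	}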
1495 +diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
1496 +index 8830fa601e45..0c0d2312f4a8 100644
1497 +--- a/drivers/firmware/efi/libstub/fdt.c
1498 ++++ b/drivers/firmware/efi/libstub/fdt.c
1499 +@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
1500 + return efi_status;
1501 + }
1502 + }
1503 ++
1504 ++ /* shrink the FDT back to its minimum size */
1505 ++ fdt_pack(fdt);
1506 ++
1507 + return EFI_SUCCESS;
1508 +
1509 + fdt_set_fail:
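update_fdt() works on a copy that was opened with extra slack so properties can be added; fdt_pack() trims the unused tail so the kernel is not handed (and does not reserve) the padding. The usual libfdt shape of that pattern, sketched:

	/* sketch: the common libfdt grow-edit-pack sequence */
	int err;

	err = fdt_open_into(orig_fdt, new_fdt, new_size);	/* copy with slack */
	if (err)
		return err;
	/* ... fdt_setprop() / fdt_add_subnode() edits ... */
	err = fdt_pack(new_fdt);				/* drop unused slack */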
1510 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
1511 +index a1c78f90eadf..b1a86f99011a 100644
1512 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
1513 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
1514 +@@ -574,7 +574,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
1515 + /* skip over VMID 0, since it is the system VM */
1516 + for (j = 1; j < id_mgr->num_ids; ++j) {
1517 + amdgpu_vmid_reset(adev, i, j);
1518 +- amdgpu_sync_create(&id_mgr->ids[i].active);
1519 ++ amdgpu_sync_create(&id_mgr->ids[j].active);
1520 + list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
1521 + }
1522 + }
1523 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
1524 +index 2bd56760c744..b1cd8e9649b9 100644
1525 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
1526 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
1527 +@@ -62,6 +62,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
1528 + amdgpu_sync_create(&(*job)->sync);
1529 + amdgpu_sync_create(&(*job)->sched_sync);
1530 + (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
1531 ++ (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
1532 +
1533 + return 0;
1534 + }
1535 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1536 +index f55f72a37ca8..c29d519fa381 100644
1537 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1538 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1539 +@@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
1540 + case CHIP_PITCAIRN:
1541 + case CHIP_VERDE:
1542 + case CHIP_OLAND:
1543 ++ case CHIP_HAINAN:
1544 + return AMDGPU_FW_LOAD_DIRECT;
1545 + #endif
1546 + #ifdef CONFIG_DRM_AMDGPU_CIK
1547 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1548 +index c31fff32a321..eb0ae9726cf7 100644
1549 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1550 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1551 +@@ -631,7 +631,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
1552 + }
1553 +
1554 + gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1555 +- vm_flush_needed &= !!ring->funcs->emit_vm_flush;
1556 ++ vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
1557 ++ job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1558 + pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1559 + ring->funcs->emit_wreg;
1560 +
1561 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
1562 +index 644b2187507b..a6348bbb6fc7 100644
1563 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
1564 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
1565 +@@ -1045,9 +1045,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1566 + */
1567 + update_flags->bits.bpp_change = 1;
1568 +
1569 +- if (u->gamma && dce_use_lut(u->plane_info->format))
1570 +- update_flags->bits.gamma_change = 1;
1571 +-
1572 + if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1573 + sizeof(union dc_tiling_info)) != 0) {
1574 + update_flags->bits.swizzle_change = 1;
1575 +@@ -1064,7 +1061,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1576 + if (update_flags->bits.rotation_change
1577 + || update_flags->bits.stereo_format_change
1578 + || update_flags->bits.pixel_format_change
1579 +- || update_flags->bits.gamma_change
1580 + || update_flags->bits.bpp_change
1581 + || update_flags->bits.bandwidth_change
1582 + || update_flags->bits.output_tf_change)
1583 +@@ -1154,13 +1150,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
1584 + if (u->coeff_reduction_factor)
1585 + update_flags->bits.coeff_reduction_change = 1;
1586 +
1587 ++ if (u->gamma) {
1588 ++ enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
1589 ++
1590 ++ if (u->plane_info)
1591 ++ format = u->plane_info->format;
1592 ++ else if (u->surface)
1593 ++ format = u->surface->format;
1594 ++
1595 ++ if (dce_use_lut(format))
1596 ++ update_flags->bits.gamma_change = 1;
1597 ++ }
1598 ++
1599 + if (update_flags->bits.in_transfer_func_change) {
1600 + type = UPDATE_TYPE_MED;
1601 + elevate_update_type(&overall_type, type);
1602 + }
1603 +
1604 + if (update_flags->bits.input_csc_change
1605 +- || update_flags->bits.coeff_reduction_change) {
1606 ++ || update_flags->bits.coeff_reduction_change
1607 ++ || update_flags->bits.gamma_change) {
1608 + type = UPDATE_TYPE_FULL;
1609 + elevate_update_type(&overall_type, type);
1610 + }
1611 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1612 +index eee0dfad6962..7d9fea6877bc 100644
1613 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1614 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1615 +@@ -968,10 +968,14 @@ static void build_evenly_distributed_points(
1616 + struct dividers dividers)
1617 + {
1618 + struct gamma_pixel *p = points;
1619 +- struct gamma_pixel *p_last = p + numberof_points - 1;
1620 ++ struct gamma_pixel *p_last;
1621 +
1622 + uint32_t i = 0;
1623 +
1624 ++ // This function should not get called with 0 as a parameter
1625 ++ ASSERT(numberof_points > 0);
1626 ++ p_last = p + numberof_points - 1;
1627 ++
1628 + do {
1629 + struct fixed31_32 value = dc_fixpt_from_fraction(i,
1630 + numberof_points - 1);
1631 +@@ -982,7 +986,7 @@ static void build_evenly_distributed_points(
1632 +
1633 + ++p;
1634 + ++i;
1635 +- } while (i != numberof_points);
1636 ++ } while (i < numberof_points);
1637 +
1638 + p->r = dc_fixpt_div(p_last->r, dividers.divider1);
1639 + p->g = dc_fixpt_div(p_last->g, dividers.divider1);
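Two failure modes are closed here: with numberof_points == 0, "p + numberof_points - 1" underflows before the loop even starts, and the old "i != numberof_points" exit test could only be met after i wrapped through the whole uint32_t range. A self-contained demo of the wrap half (with a safety cap so it terminates):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t n = 0, i = 0, iters = 0;

		do {
			++i;
			++iters;
		} while (i != n && iters < 5);	/* cap added for the demo */

		/* without the cap, i would have to wrap past UINT32_MAX to hit 0 */
		printf("still looping after %u iterations\n", iters);
		return 0;
	}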
1640 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1641 +index 617557bd8c24..b813e77d8e93 100644
1642 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1643 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1644 +@@ -1222,14 +1222,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1645 +
1646 + static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1647 + {
1648 +- if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1649 ++ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1650 ++ smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
1651 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1652 ++ }
1653 + return 0;
1654 + }
1655 +
1656 + static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1657 + {
1658 + if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1659 ++ smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1660 + return smum_send_msg_to_smc_with_parameter(
1661 + hwmgr,
1662 + PPSMC_MSG_UVDPowerON,
1663 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
1664 +index 2d4ec8ac3a08..0b3ea7e9b805 100644
1665 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
1666 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
1667 +@@ -2303,11 +2303,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
1668 + case DRAM_LOG_BUFF_SIZE:
1669 + return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
1670 + }
1671 ++ break;
1672 + case SMU_Discrete_DpmTable:
1673 + switch (member) {
1674 + case LowSclkInterruptThreshold:
1675 + return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
1676 + }
1677 ++ break;
1678 + }
1679 + pr_debug("can't get the offset of type %x member %x\n", type, member);
1680 + return 0;
1681 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
1682 +index 53df9405f43a..bb616a530d3c 100644
1683 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
1684 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
1685 +@@ -2372,6 +2372,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
1686 + case DRAM_LOG_BUFF_SIZE:
1687 + return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
1688 + }
1689 ++ break;
1690 + case SMU_Discrete_DpmTable:
1691 + switch (member) {
1692 + case UvdBootLevel:
1693 +@@ -2383,6 +2384,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
1694 + case LowSclkInterruptThreshold:
1695 + return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
1696 + }
1697 ++ break;
1698 + }
1699 + pr_warn("can't get the offset of type %x member %x\n", type, member);
1700 + return 0;
1701 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
1702 +index 415f691c3fa9..c15e15e657b8 100644
1703 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
1704 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
1705 +@@ -2246,11 +2246,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
1706 + case DRAM_LOG_BUFF_SIZE:
1707 + return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
1708 + }
1709 ++ break;
1710 + case SMU_Discrete_DpmTable:
1711 + switch (member) {
1712 + case LowSclkInterruptThreshold:
1713 + return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
1714 + }
1715 ++ break;
1716 + }
1717 + pr_warn("can't get the offset of type %x member %x\n", type, member);
1718 + return 0;
1719 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
1720 +index 782b19fc2e70..a5b7a4484700 100644
1721 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
1722 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
1723 +@@ -2667,6 +2667,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
1724 + case DRAM_LOG_BUFF_SIZE:
1725 + return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
1726 + }
1727 ++ break;
1728 + case SMU_Discrete_DpmTable:
1729 + switch (member) {
1730 + case UvdBootLevel:
1731 +@@ -2678,6 +2679,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
1732 + case LowSclkInterruptThreshold:
1733 + return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
1734 + }
1735 ++ break;
1736 + }
1737 + pr_warn("can't get the offset of type %x member %x\n", type, member);
1738 + return 0;
1739 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
1740 +index 2de48959ac93..52834334bd53 100644
1741 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
1742 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
1743 +@@ -2267,6 +2267,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
1744 + case DRAM_LOG_BUFF_SIZE:
1745 + return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
1746 + }
1747 ++ break;
1748 + case SMU_Discrete_DpmTable:
1749 + switch (member) {
1750 + case UvdBootLevel:
1751 +@@ -2278,6 +2279,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
1752 + case LowSclkInterruptThreshold:
1753 + return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
1754 + }
1755 ++ break;
1756 + }
1757 + pr_warn("can't get the offset of type %x member %x\n", type, member);
1758 + return 0;
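This and the four preceding smumgr hunks fix the same C pitfall: an outer switch whose cases end in an inner switch with no break, so an inner-case miss falls through into the next outer case and can return an offset from the wrong table. A minimal reproduction:

	#include <stdio.h>

	static int get_offset(int type, int member)
	{
		switch (type) {
		case 0:
			switch (member) {
			case 1: return 100;
			}
			break;	/* without this, (0, 2) falls into the case below */
		case 1:
			switch (member) {
			case 2: return 200;
			}
			break;
		}
		return -1;	/* not found */
	}

	int main(void)
	{
		/* with the breaks this prints -1; without them it prints 200 */
		printf("%d\n", get_offset(0, 2));
		return 0;
	}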
1759 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1760 +index 658830620ca3..9c166621e920 100644
1761 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1762 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1763 +@@ -1274,6 +1274,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1764 + mutex_lock(&mgr->lock);
1765 + mstb = mgr->mst_primary;
1766 +
1767 ++ if (!mstb)
1768 ++ goto out;
1769 ++
1770 + for (i = 0; i < lct - 1; i++) {
1771 + int shift = (i % 2) ? 0 : 4;
1772 + int port_num = (rad[i / 2] >> shift) & 0xf;
1773 +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
1774 +index fe9c6c731e87..ee4a5e1221f1 100644
1775 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
1776 ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
1777 +@@ -30,6 +30,12 @@ struct drm_dmi_panel_orientation_data {
1778 + int orientation;
1779 + };
1780 +
1781 ++static const struct drm_dmi_panel_orientation_data acer_s1003 = {
1782 ++ .width = 800,
1783 ++ .height = 1280,
1784 ++ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
1785 ++};
1786 ++
1787 + static const struct drm_dmi_panel_orientation_data asus_t100ha = {
1788 + .width = 800,
1789 + .height = 1280,
1790 +@@ -67,7 +73,13 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
1791 + };
1792 +
1793 + static const struct dmi_system_id orientation_data[] = {
1794 +- { /* Asus T100HA */
1795 ++ { /* Acer One 10 (S1003) */
1796 ++ .matches = {
1797 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
1798 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
1799 ++ },
1800 ++ .driver_data = (void *)&acer_s1003,
1801 ++ }, { /* Asus T100HA */
1802 + .matches = {
1803 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1804 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
1805 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
1806 +index 50d6b88cb7aa..5944f319c78b 100644
1807 +--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
1808 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
1809 +@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
1810 + * If the GPU managed to complete this job's fence, the timeout is
1811 + * spurious. Bail out.
1812 + */
1813 +- if (fence_completed(gpu, submit->out_fence->seqno))
1814 ++ if (dma_fence_is_signaled(submit->out_fence))
1815 + return;
1816 +
1817 + /*
1818 +diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
1819 +index b92595c477ef..8bd29075ae4e 100644
1820 +--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
1821 ++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
1822 +@@ -122,6 +122,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
1823 + hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
1824 + if (IS_ERR(hi_fbdev->fb)) {
1825 + ret = PTR_ERR(hi_fbdev->fb);
1826 ++ hi_fbdev->fb = NULL;
1827 + DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
1828 + goto out_release_fbi;
1829 + }
1830 +diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
1831 +index 97e62647418a..5040bcd430d2 100644
1832 +--- a/drivers/gpu/drm/i915/gvt/gtt.h
1833 ++++ b/drivers/gpu/drm/i915/gvt/gtt.h
1834 +@@ -35,7 +35,6 @@
1835 + #define _GVT_GTT_H_
1836 +
1837 + #define I915_GTT_PAGE_SHIFT 12
1838 +-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
1839 +
1840 + struct intel_vgpu_mm;
1841 +
1842 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1843 +index 17c5097721e8..4b77325d135a 100644
1844 +--- a/drivers/gpu/drm/i915/i915_gem.c
1845 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1846 +@@ -1112,11 +1112,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
1847 + offset = offset_in_page(args->offset);
1848 + for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1849 + struct page *page = i915_gem_object_get_page(obj, idx);
1850 +- int length;
1851 +-
1852 +- length = remain;
1853 +- if (offset + length > PAGE_SIZE)
1854 +- length = PAGE_SIZE - offset;
1855 ++ unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1856 +
1857 + ret = shmem_pread(page, offset, length, user_data,
1858 + page_to_phys(page) & obj_do_bit17_swizzling,
1859 +@@ -1562,11 +1558,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1860 + offset = offset_in_page(args->offset);
1861 + for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1862 + struct page *page = i915_gem_object_get_page(obj, idx);
1863 +- int length;
1864 +-
1865 +- length = remain;
1866 +- if (offset + length > PAGE_SIZE)
1867 +- length = PAGE_SIZE - offset;
1868 ++ unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1869 +
1870 + ret = shmem_pwrite(page, offset, length, user_data,
1871 + page_to_phys(page) & obj_do_bit17_swizzling,
1872 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1873 +index 22df17c8ca9b..b43bc767ec3d 100644
1874 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1875 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1876 +@@ -449,7 +449,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
1877 + * any non-page-aligned or non-canonical addresses.
1878 + */
1879 + if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
1880 +- entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
1881 ++ entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
1882 + return -EINVAL;
1883 +
1884 + /* pad_to_size was once a reserved field, so sanitize it */
1885 +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
1886 +index aec4f73574f4..69f53faab644 100644
1887 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
1888 ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
1889 +@@ -49,6 +49,8 @@
1890 + #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
1891 + #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
1892 +
1893 ++#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
1894 ++
1895 + #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
1896 +
1897 + #define I915_FENCE_REG_NONE -1
1898 +@@ -625,20 +627,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
1899 + u64 start, u64 end, unsigned int flags);
1900 +
1901 + /* Flags used by pin/bind&friends. */
1902 +-#define PIN_NONBLOCK BIT(0)
1903 +-#define PIN_MAPPABLE BIT(1)
1904 +-#define PIN_ZONE_4G BIT(2)
1905 +-#define PIN_NONFAULT BIT(3)
1906 +-#define PIN_NOEVICT BIT(4)
1907 +-
1908 +-#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
1909 +-#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
1910 +-#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
1911 +-#define PIN_UPDATE BIT(8)
1912 +-
1913 +-#define PIN_HIGH BIT(9)
1914 +-#define PIN_OFFSET_BIAS BIT(10)
1915 +-#define PIN_OFFSET_FIXED BIT(11)
1916 ++#define PIN_NONBLOCK BIT_ULL(0)
1917 ++#define PIN_MAPPABLE BIT_ULL(1)
1918 ++#define PIN_ZONE_4G BIT_ULL(2)
1919 ++#define PIN_NONFAULT BIT_ULL(3)
1920 ++#define PIN_NOEVICT BIT_ULL(4)
1921 ++
1922 ++#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
1923 ++#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
1924 ++#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
1925 ++#define PIN_UPDATE BIT_ULL(8)
1926 ++
1927 ++#define PIN_HIGH BIT_ULL(9)
1928 ++#define PIN_OFFSET_BIAS BIT_ULL(10)
1929 ++#define PIN_OFFSET_FIXED BIT_ULL(11)
1930 + #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
1931 +
1932 + #endif
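BIT(n) is built on unsigned long, which is 32 bits on 32-bit kernels, while these pin flags travel in u64 values together with offset bits such as PIN_OFFSET_MASK; BIT_ULL(n) keeps the whole flag word 64-bit on every architecture. The two macros, essentially as defined in include/linux/bits.h:

	#define BIT(nr)		(1UL << (nr))	/* width of unsigned long */
	#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64 bits wide */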
1933 +diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
1934 +index 3ea566f99450..391ad4123953 100644
1935 +--- a/drivers/gpu/drm/i915/intel_audio.c
1936 ++++ b/drivers/gpu/drm/i915/intel_audio.c
1937 +@@ -134,6 +134,9 @@ static const struct {
1938 + /* HDMI N/CTS table */
1939 + #define TMDS_297M 297000
1940 + #define TMDS_296M 296703
1941 ++#define TMDS_594M 594000
1942 ++#define TMDS_593M 593407
1943 ++
1944 + static const struct {
1945 + int sample_rate;
1946 + int clock;
1947 +@@ -154,6 +157,20 @@ static const struct {
1948 + { 176400, TMDS_297M, 18816, 247500 },
1949 + { 192000, TMDS_296M, 23296, 281250 },
1950 + { 192000, TMDS_297M, 20480, 247500 },
1951 ++ { 44100, TMDS_593M, 8918, 937500 },
1952 ++ { 44100, TMDS_594M, 9408, 990000 },
1953 ++ { 48000, TMDS_593M, 5824, 562500 },
1954 ++ { 48000, TMDS_594M, 6144, 594000 },
1955 ++ { 32000, TMDS_593M, 5824, 843750 },
1956 ++ { 32000, TMDS_594M, 3072, 445500 },
1957 ++ { 88200, TMDS_593M, 17836, 937500 },
1958 ++ { 88200, TMDS_594M, 18816, 990000 },
1959 ++ { 96000, TMDS_593M, 11648, 562500 },
1960 ++ { 96000, TMDS_594M, 12288, 594000 },
1961 ++ { 176400, TMDS_593M, 35672, 937500 },
1962 ++ { 176400, TMDS_594M, 37632, 990000 },
1963 ++ { 192000, TMDS_593M, 23296, 562500 },
1964 ++ { 192000, TMDS_594M, 24576, 594000 },
1965 + };
1966 +
1967 + /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
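These entries extend the N/CTS lookup to the 594 MHz (and 594/1.001 MHz) TMDS clocks used by HDMI 2.0 modes. The HDMI audio clock regeneration relation is 128 x fs = f_TMDS x N / CTS, so each row can be checked by hand; for example, at fs = 48 kHz on the 594 MHz clock: 594,000,000 x 6144 / 594,000 = 6,144,000 = 128 x 48,000.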
1968 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1969 +index 00486c744f24..b1e4f460f403 100644
1970 +--- a/drivers/gpu/drm/i915/intel_display.c
1971 ++++ b/drivers/gpu/drm/i915/intel_display.c
1972 +@@ -12509,17 +12509,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
1973 + intel_check_cpu_fifo_underruns(dev_priv);
1974 + intel_check_pch_fifo_underruns(dev_priv);
1975 +
1976 +- if (!new_crtc_state->active) {
1977 +- /*
1978 +- * Make sure we don't call initial_watermarks
1979 +- * for ILK-style watermark updates.
1980 +- *
1981 +- * No clue what this is supposed to achieve.
1982 +- */
1983 +- if (INTEL_GEN(dev_priv) >= 9)
1984 +- dev_priv->display.initial_watermarks(intel_state,
1985 +- to_intel_crtc_state(new_crtc_state));
1986 +- }
1987 ++ /* FIXME unify this for all platforms */
1988 ++ if (!new_crtc_state->active &&
1989 ++ !HAS_GMCH_DISPLAY(dev_priv) &&
1990 ++ dev_priv->display.initial_watermarks)
1991 ++ dev_priv->display.initial_watermarks(intel_state,
1992 ++ to_intel_crtc_state(new_crtc_state));
1993 + }
1994 + }
1995 +
1996 +@@ -14383,7 +14378,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
1997 + fb->height < SKL_MIN_YUV_420_SRC_H ||
1998 + (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
1999 + DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
2000 +- return -EINVAL;
2001 ++ goto err;
2002 + }
2003 +
2004 + for (i = 0; i < fb->format->num_planes; i++) {
2005 +@@ -15216,13 +15211,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
2006 + I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
2007 + }
2008 +
2009 +- /* restore vblank interrupts to correct state */
2010 +- drm_crtc_vblank_reset(&crtc->base);
2011 + if (crtc->active) {
2012 + struct intel_plane *plane;
2013 +
2014 +- drm_crtc_vblank_on(&crtc->base);
2015 +-
2016 + /* Disable everything but the primary plane */
2017 + for_each_intel_plane_on_crtc(dev, crtc, plane) {
2018 + const struct intel_plane_state *plane_state =
2019 +@@ -15549,7 +15540,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
2020 + struct drm_modeset_acquire_ctx *ctx)
2021 + {
2022 + struct drm_i915_private *dev_priv = to_i915(dev);
2023 +- enum pipe pipe;
2024 + struct intel_crtc *crtc;
2025 + struct intel_encoder *encoder;
2026 + int i;
2027 +@@ -15560,15 +15550,23 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
2028 + /* HW state is read out, now we need to sanitize this mess. */
2029 + get_encoder_power_domains(dev_priv);
2030 +
2031 +- intel_sanitize_plane_mapping(dev_priv);
2032 ++ /*
2033 ++ * intel_sanitize_plane_mapping() may need to do vblank
2034 ++ * waits, so we need vblank interrupts restored beforehand.
2035 ++ */
2036 ++ for_each_intel_crtc(&dev_priv->drm, crtc) {
2037 ++ drm_crtc_vblank_reset(&crtc->base);
2038 +
2039 +- for_each_intel_encoder(dev, encoder) {
2040 +- intel_sanitize_encoder(encoder);
2041 ++ if (crtc->active)
2042 ++ drm_crtc_vblank_on(&crtc->base);
2043 + }
2044 +
2045 +- for_each_pipe(dev_priv, pipe) {
2046 +- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
2047 ++ intel_sanitize_plane_mapping(dev_priv);
2048 ++
2049 ++ for_each_intel_encoder(dev, encoder)
2050 ++ intel_sanitize_encoder(encoder);
2051 +
2052 ++ for_each_intel_crtc(&dev_priv->drm, crtc) {
2053 + intel_sanitize_crtc(crtc, ctx);
2054 + intel_dump_pipe_config(crtc, crtc->config,
2055 + "[setup_hw_state]");
2056 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2057 +index 8e465095fe06..5d6517d37236 100644
2058 +--- a/drivers/gpu/drm/i915/intel_dp.c
2059 ++++ b/drivers/gpu/drm/i915/intel_dp.c
2060 +@@ -387,6 +387,22 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
2061 + return true;
2062 + }
2063 +
2064 ++static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
2065 ++ int link_rate,
2066 ++ uint8_t lane_count)
2067 ++{
2068 ++ const struct drm_display_mode *fixed_mode =
2069 ++ intel_dp->attached_connector->panel.fixed_mode;
2070 ++ int mode_rate, max_rate;
2071 ++
2072 ++ mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
2073 ++ max_rate = intel_dp_max_data_rate(link_rate, lane_count);
2074 ++ if (mode_rate > max_rate)
2075 ++ return false;
2076 ++
2077 ++ return true;
2078 ++}
2079 ++
2080 + int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
2081 + int link_rate, uint8_t lane_count)
2082 + {
2083 +@@ -396,9 +412,23 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
2084 + intel_dp->num_common_rates,
2085 + link_rate);
2086 + if (index > 0) {
2087 ++ if (intel_dp_is_edp(intel_dp) &&
2088 ++ !intel_dp_can_link_train_fallback_for_edp(intel_dp,
2089 ++ intel_dp->common_rates[index - 1],
2090 ++ lane_count)) {
2091 ++ DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
2092 ++ return 0;
2093 ++ }
2094 + intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
2095 + intel_dp->max_link_lane_count = lane_count;
2096 + } else if (lane_count > 1) {
2097 ++ if (intel_dp_is_edp(intel_dp) &&
2098 ++ !intel_dp_can_link_train_fallback_for_edp(intel_dp,
2099 ++ intel_dp_max_common_rate(intel_dp),
2100 ++ lane_count >> 1)) {
2101 ++ DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
2102 ++ return 0;
2103 ++ }
2104 + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
2105 + intel_dp->max_link_lane_count = lane_count >> 1;
2106 + } else {
2107 +@@ -4842,19 +4872,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
2108 + */
2109 + status = connector_status_disconnected;
2110 + goto out;
2111 +- } else {
2112 +- /*
2113 +- * If display is now connected check links status,
2114 +- * there has been known issues of link loss triggering
2115 +- * long pulse.
2116 +- *
2117 +- * Some sinks (eg. ASUS PB287Q) seem to perform some
2118 +- * weird HPD ping pong during modesets. So we can apparently
2119 +- * end up with HPD going low during a modeset, and then
2120 +- * going back up soon after. And once that happens we must
2121 +- * retrain the link to get a picture. That's in case no
2122 +- * userspace component reacted to intermittent HPD dip.
2123 +- */
2124 ++ }
2125 ++
2126 ++ /*
2127 ++ * Some external monitors do not signal loss of link synchronization
2128 ++ * with an IRQ_HPD, so force a link status check.
2129 ++ */
2130 ++ if (!intel_dp_is_edp(intel_dp)) {
2131 + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
2132 +
2133 + intel_dp_retrain_link(encoder, ctx);
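The new helper keeps link-training fallback from selecting parameters that can no longer carry an eDP panel's fixed mode: intel_dp_link_required() computes the data rate the mode needs at the 6-bpc floor (18 bpp), intel_dp_max_data_rate() what the proposed rate/lane-count pair can deliver, and the fallback is rejected when mode_rate > max_rate, retrying the same parameters instead of pruning the panel's only mode. For a rough feel (hedged; using i915's kHz-based accounting and 8b/10b framing): a 148,500 kHz mode needs 148500 x 18 / 8 = 334,125 units, while a single HBR lane supplies only 270000 x 8 / 10 = 216,000, so that fallback would be refused.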
2134 +diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
2135 +index 3fcaa98b9055..667397541f10 100644
2136 +--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
2137 ++++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
2138 +@@ -335,22 +335,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2139 + return;
2140 +
2141 + failure_handling:
2142 +- /* Dont fallback and prune modes if its eDP */
2143 +- if (!intel_dp_is_edp(intel_dp)) {
2144 +- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
2145 +- intel_connector->base.base.id,
2146 +- intel_connector->base.name,
2147 +- intel_dp->link_rate, intel_dp->lane_count);
2148 +- if (!intel_dp_get_link_train_fallback_values(intel_dp,
2149 +- intel_dp->link_rate,
2150 +- intel_dp->lane_count))
2151 +- /* Schedule a Hotplug Uevent to userspace to start modeset */
2152 +- schedule_work(&intel_connector->modeset_retry_work);
2153 +- } else {
2154 +- DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
2155 +- intel_connector->base.base.id,
2156 +- intel_connector->base.name,
2157 +- intel_dp->link_rate, intel_dp->lane_count);
2158 +- }
2159 ++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
2160 ++ intel_connector->base.base.id,
2161 ++ intel_connector->base.name,
2162 ++ intel_dp->link_rate, intel_dp->lane_count);
2163 ++ if (!intel_dp_get_link_train_fallback_values(intel_dp,
2164 ++ intel_dp->link_rate,
2165 ++ intel_dp->lane_count))
2166 ++ /* Schedule a Hotplug Uevent to userspace to start modeset */
2167 ++ schedule_work(&intel_connector->modeset_retry_work);
2168 + return;
2169 + }
2170 +diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
2171 +index 5890500a3a8b..80566cc347ee 100644
2172 +--- a/drivers/gpu/drm/i915/intel_dp_mst.c
2173 ++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
2174 +@@ -38,11 +38,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
2175 + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
2176 + struct intel_digital_port *intel_dig_port = intel_mst->primary;
2177 + struct intel_dp *intel_dp = &intel_dig_port->dp;
2178 +- struct intel_connector *connector =
2179 +- to_intel_connector(conn_state->connector);
2180 ++ struct drm_connector *connector = conn_state->connector;
2181 ++ void *port = to_intel_connector(connector)->port;
2182 + struct drm_atomic_state *state = pipe_config->base.state;
2183 + int bpp;
2184 +- int lane_count, slots;
2185 ++ int lane_count, slots = 0;
2186 + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2187 + int mst_pbn;
2188 + bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
2189 +@@ -70,17 +70,23 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
2190 +
2191 + pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
2192 +
2193 +- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
2194 ++ if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
2195 + pipe_config->has_audio = true;
2196 +
2197 + mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
2198 + pipe_config->pbn = mst_pbn;
2199 +
2200 +- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
2201 +- connector->port, mst_pbn);
2202 +- if (slots < 0) {
2203 +- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
2204 +- return false;
2205 ++ /* Zombie connectors can't have VCPI slots */
2206 ++ if (READ_ONCE(connector->registered)) {
2207 ++ slots = drm_dp_atomic_find_vcpi_slots(state,
2208 ++ &intel_dp->mst_mgr,
2209 ++ port,
2210 ++ mst_pbn);
2211 ++ if (slots < 0) {
2212 ++ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
2213 ++ slots);
2214 ++ return false;
2215 ++ }
2216 + }
2217 +
2218 + intel_link_compute_m_n(bpp, lane_count,
2219 +@@ -307,9 +313,8 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
2220 + struct edid *edid;
2221 + int ret;
2222 +
2223 +- if (!intel_dp) {
2224 ++ if (!READ_ONCE(connector->registered))
2225 + return intel_connector_update_modes(connector, NULL);
2226 +- }
2227 +
2228 + edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
2229 + ret = intel_connector_update_modes(connector, edid);
2230 +@@ -324,9 +329,10 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
2231 + struct intel_connector *intel_connector = to_intel_connector(connector);
2232 + struct intel_dp *intel_dp = intel_connector->mst_port;
2233 +
2234 +- if (!intel_dp)
2235 ++ if (!READ_ONCE(connector->registered))
2236 + return connector_status_disconnected;
2237 +- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
2238 ++ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
2239 ++ intel_connector->port);
2240 + }
2241 +
2242 + static void
2243 +@@ -366,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
2244 + int bpp = 24; /* MST uses fixed bpp */
2245 + int max_rate, mode_rate, max_lanes, max_link_clock;
2246 +
2247 +- if (!intel_dp)
2248 ++ if (!READ_ONCE(connector->registered))
2249 + return MODE_ERROR;
2250 +
2251 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
2252 +@@ -398,7 +404,7 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
2253 + struct intel_dp *intel_dp = intel_connector->mst_port;
2254 + struct intel_crtc *crtc = to_intel_crtc(state->crtc);
2255 +
2256 +- if (!intel_dp)
2257 ++ if (!READ_ONCE(connector->registered))
2258 + return NULL;
2259 + return &intel_dp->mst_encoders[crtc->pipe]->base.base;
2260 + }
2261 +@@ -458,6 +464,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
2262 + if (!intel_connector)
2263 + return NULL;
2264 +
2265 ++ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
2266 ++ intel_connector->mst_port = intel_dp;
2267 ++ intel_connector->port = port;
2268 ++
2269 + connector = &intel_connector->base;
2270 + ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
2271 + DRM_MODE_CONNECTOR_DisplayPort);
2272 +@@ -468,10 +478,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
2273 +
2274 + drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
2275 +
2276 +- intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
2277 +- intel_connector->mst_port = intel_dp;
2278 +- intel_connector->port = port;
2279 +-
2280 + for_each_pipe(dev_priv, pipe) {
2281 + struct drm_encoder *enc =
2282 + &intel_dp->mst_encoders[pipe]->base.base;
2283 +@@ -510,7 +516,6 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
2284 + static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2285 + struct drm_connector *connector)
2286 + {
2287 +- struct intel_connector *intel_connector = to_intel_connector(connector);
2288 + struct drm_i915_private *dev_priv = to_i915(connector->dev);
2289 +
2290 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
2291 +@@ -519,10 +524,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2292 + if (dev_priv->fbdev)
2293 + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
2294 + connector);
2295 +- /* prevent race with the check in ->detect */
2296 +- drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL);
2297 +- intel_connector->mst_port = NULL;
2298 +- drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
2299 +
2300 + drm_connector_unreference(connector);
2301 + }
2302 +diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
2303 +index cdf19553ffac..5d5336fbe7b0 100644
2304 +--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
2305 ++++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
2306 +@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
2307 + lpe_audio_platdev_destroy(dev_priv);
2308 +
2309 + irq_free_desc(dev_priv->lpe_audio.irq);
2310 +-}
2311 +
2312 ++ dev_priv->lpe_audio.irq = -1;
2313 ++ dev_priv->lpe_audio.platdev = NULL;
2314 ++}
2315 +
2316 + /**
2317 + * intel_lpe_audio_notify() - notify lpe audio event
2318 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
2319 +index 7c4c8fb1dae4..0328ee704ee5 100644
2320 +--- a/drivers/gpu/drm/i915/intel_lrc.c
2321 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
2322 +@@ -425,7 +425,8 @@ static u64 execlists_update_context(struct i915_request *rq)
2323 +
2324 + reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
2325 +
2326 +- /* True 32b PPGTT with dynamic page allocation: update PDP
2327 ++ /*
2328 ++ * True 32b PPGTT with dynamic page allocation: update PDP
2329 + * registers and point the unallocated PDPs to scratch page.
2330 + * PML4 is allocated during ppgtt init, so this is not needed
2331 + * in 48-bit mode.
2332 +@@ -433,6 +434,17 @@ static u64 execlists_update_context(struct i915_request *rq)
2333 + if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
2334 + execlists_update_context_pdps(ppgtt, reg_state);
2335 +
2336 ++ /*
2337 ++ * Make sure the context image is complete before we submit it to HW.
2338 ++ *
2339 ++ * Ostensibly, writes (including the WCB) should be flushed prior to
2340 ++ * an uncached write such as our mmio register access, but the empirical
2341 ++ * evidence (esp. on Braswell) suggests that the WC write into memory
2342 ++ * may not be visible to the HW prior to the completion of the UC
2343 ++ * register write and that we may begin execution from the context
2344 ++ * before its image is complete leading to invalid PD chasing.
2345 ++ */
2346 ++ wmb();
2347 + return ce->lrc_desc;
2348 + }
2349 +
2350 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2351 +index 8f19349a6055..72007d634359 100644
2352 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2353 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2354 +@@ -91,6 +91,7 @@ static int
2355 + gen4_render_ring_flush(struct i915_request *rq, u32 mode)
2356 + {
2357 + u32 cmd, *cs;
2358 ++ int i;
2359 +
2360 + /*
2361 + * read/write caches:
2362 +@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
2363 + cmd |= MI_INVALIDATE_ISP;
2364 + }
2365 +
2366 +- cs = intel_ring_begin(rq, 2);
2367 ++ i = 2;
2368 ++ if (mode & EMIT_INVALIDATE)
2369 ++ i += 20;
2370 ++
2371 ++ cs = intel_ring_begin(rq, i);
2372 + if (IS_ERR(cs))
2373 + return PTR_ERR(cs);
2374 +
2375 + *cs++ = cmd;
2376 +- *cs++ = MI_NOOP;
2377 ++
2378 ++ /*
2379 ++ * A random delay to let the CS invalidate take effect? Without this
2380 ++ * delay, the GPU relocation path fails as the CS does not see
2381 ++ * the updated contents. Just as important, if we apply the flushes
2382 ++ * to the EMIT_FLUSH branch (i.e. immediately after the relocation
2383 ++ * write and before the invalidate on the next batch), the relocations
2384 ++ * still fail. This implies that it is a delay following invalidation
2385 ++ * that is required to reset the caches as opposed to a delay to
2386 ++ * ensure the memory is written.
2387 ++ */
2388 ++ if (mode & EMIT_INVALIDATE) {
2389 ++ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
2390 ++ *cs++ = i915_ggtt_offset(rq->engine->scratch) |
2391 ++ PIPE_CONTROL_GLOBAL_GTT;
2392 ++ *cs++ = 0;
2393 ++ *cs++ = 0;
2394 ++
2395 ++ for (i = 0; i < 12; i++)
2396 ++ *cs++ = MI_FLUSH;
2397 ++
2398 ++ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
2399 ++ *cs++ = i915_ggtt_offset(rq->engine->scratch) |
2400 ++ PIPE_CONTROL_GLOBAL_GTT;
2401 ++ *cs++ = 0;
2402 ++ *cs++ = 0;
2403 ++ }
2404 ++
2405 ++ *cs++ = cmd;
2406 ++
2407 + intel_ring_advance(rq, cs);
2408 +
2409 + return 0;
2410 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
2411 +index 17d0506d058c..48960c1d92bc 100644
2412 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
2413 ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
2414 +@@ -477,8 +477,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
2415 + struct device_node *child, *node;
2416 + int ret;
2417 +
2418 +- node = of_find_compatible_node(dev->of_node, NULL,
2419 +- "qcom,gpu-pwrlevels");
2420 ++ node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
2421 + if (!node) {
2422 + dev_err(dev, "Could not find the GPU powerlevels\n");
2423 + return -ENXIO;
2424 +@@ -499,6 +498,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
2425 + dev_pm_opp_add(dev, val, 0);
2426 + }
2427 +
2428 ++ of_node_put(node);
2429 ++
2430 + return 0;
2431 + }
2432 +
2433 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2434 +index c3c8c84da113..c1e4aab9932e 100644
2435 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
2436 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2437 +@@ -818,22 +818,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
2438 + {
2439 + struct nv50_head *head = nv50_head(connector_state->crtc);
2440 + struct nv50_mstc *mstc = nv50_mstc(connector);
2441 +- if (mstc->port) {
2442 +- struct nv50_mstm *mstm = mstc->mstm;
2443 +- return &mstm->msto[head->base.index]->encoder;
2444 +- }
2445 +- return NULL;
2446 ++
2447 ++ return &mstc->mstm->msto[head->base.index]->encoder;
2448 + }
2449 +
2450 + static struct drm_encoder *
2451 + nv50_mstc_best_encoder(struct drm_connector *connector)
2452 + {
2453 + struct nv50_mstc *mstc = nv50_mstc(connector);
2454 +- if (mstc->port) {
2455 +- struct nv50_mstm *mstm = mstc->mstm;
2456 +- return &mstm->msto[0]->encoder;
2457 +- }
2458 +- return NULL;
2459 ++
2460 ++ return &mstc->mstm->msto[0]->encoder;
2461 + }
2462 +
2463 + static enum drm_mode_status
2464 +diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
2465 +index 408b955e5c39..6dd72bc32897 100644
2466 +--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
2467 ++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
2468 +@@ -116,7 +116,7 @@ nv40_backlight_init(struct drm_connector *connector)
2469 + &nv40_bl_ops, &props);
2470 +
2471 + if (IS_ERR(bd)) {
2472 +- if (bl_connector.id > 0)
2473 ++ if (bl_connector.id >= 0)
2474 + ida_simple_remove(&bl_ida, bl_connector.id);
2475 + return PTR_ERR(bd);
2476 + }
2477 +@@ -249,7 +249,7 @@ nv50_backlight_init(struct drm_connector *connector)
2478 + nv_encoder, ops, &props);
2479 +
2480 + if (IS_ERR(bd)) {
2481 +- if (bl_connector.id > 0)
2482 ++ if (bl_connector.id >= 0)
2483 + ida_simple_remove(&bl_ida, bl_connector.id);
2484 + return PTR_ERR(bd);
2485 + }
2486 +diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2487 +index f92fe205550b..e884183c018a 100644
2488 +--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2489 ++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2490 +@@ -285,6 +285,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
2491 + }
2492 +
2493 + txn->last_pat->next_pa = 0;
2494 ++ /* ensure that the written descriptors are visible to DMM */
2495 ++ wmb();
2496 ++
2497 ++ /*
2498 ++ * NOTE: the wmb() above should be enough, but there seems to be a bug
2499 ++ * in OMAP's memory barrier implementation, which in some rare cases may
2500 ++ * cause the writes not to be observable after wmb().
2501 ++ */
2502 ++
2503 ++ /* read back to ensure the data is in RAM */
2504 ++ readl(&txn->last_pat->next_pa);
2505 +
2506 + /* write to PAT_DESCR to clear out any pending transaction */
2507 + dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
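The added readl() is the standard companion to wmb() when a plain CPU store must be visible to a DMA engine before that engine is started: reading the freshly written location back drains any write buffering the barrier alone did not. In general form (sketch; dmm_write() is this driver's register accessor):

	desc->next_pa = 0;		/* plain store to coherent DMA memory */
	wmb();				/* order the store before the kick */
	readl(&desc->next_pa);		/* read back: flushes CPU write buffers */
	dmm_write(dmm, 0x0, PAT_DESCR);	/* only now start the engine */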
2508 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
2509 +index f0bc7cc0e913..fb46df56f0c4 100644
2510 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
2511 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
2512 +@@ -516,12 +516,22 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
2513 +
2514 + dev->mode_config.min_width = 0;
2515 + dev->mode_config.min_height = 0;
2516 +- dev->mode_config.max_width = 4095;
2517 +- dev->mode_config.max_height = 2047;
2518 + dev->mode_config.normalize_zpos = true;
2519 + dev->mode_config.funcs = &rcar_du_mode_config_funcs;
2520 + dev->mode_config.helper_private = &rcar_du_mode_config_helper;
2521 +
2522 ++ if (rcdu->info->gen < 3) {
2523 ++ dev->mode_config.max_width = 4095;
2524 ++ dev->mode_config.max_height = 2047;
2525 ++ } else {
2526 ++ /*
2527 ++ * The Gen3 DU uses the VSP1 for memory access, and is limited
2528 ++ * to frame sizes of 8190x8190.
2529 ++ */
2530 ++ dev->mode_config.max_width = 8190;
2531 ++ dev->mode_config.max_height = 8190;
2532 ++ }
2533 ++
2534 + rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
2535 +
2536 + ret = rcar_du_properties_init(rcdu);
2537 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
2538 +index f814d37b1db2..05368fa4f956 100644
2539 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
2540 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
2541 +@@ -442,6 +442,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
2542 + return 0;
2543 + }
2544 +
2545 ++static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
2546 ++{
2547 ++ rockchip_drm_platform_remove(pdev);
2548 ++}
2549 ++
2550 + static const struct of_device_id rockchip_drm_dt_ids[] = {
2551 + { .compatible = "rockchip,display-subsystem", },
2552 + { /* sentinel */ },
2553 +@@ -451,6 +456,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
2554 + static struct platform_driver rockchip_drm_platform_driver = {
2555 + .probe = rockchip_drm_platform_probe,
2556 + .remove = rockchip_drm_platform_remove,
2557 ++ .shutdown = rockchip_drm_platform_shutdown,
2558 + .driver = {
2559 + .name = "rockchip-drm",
2560 + .of_match_table = rockchip_drm_dt_ids,
2561 +diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
2562 +index e88c01961948..52918185578a 100644
2563 +--- a/drivers/hwmon/hwmon.c
2564 ++++ b/drivers/hwmon/hwmon.c
2565 +@@ -631,8 +631,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
2566 + if (info[i]->config[j] & HWMON_T_INPUT) {
2567 + err = hwmon_thermal_add_sensor(dev,
2568 + hwdev, j);
2569 +- if (err)
2570 +- goto free_device;
2571 ++ if (err) {
2572 ++ device_unregister(hdev);
2573 ++ goto ida_remove;
2574 ++ }
2575 + }
2576 + }
2577 + }
2578 +@@ -640,8 +642,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
2579 +
2580 + return hdev;
2581 +
2582 +-free_device:
2583 +- device_unregister(hdev);
2584 + free_hwmon:
2585 + kfree(hwdev);
2586 + ida_remove:
2587 +diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
2588 +index 2566b4d8b342..73856c2a8ac0 100644
2589 +--- a/drivers/input/touchscreen/wm97xx-core.c
2590 ++++ b/drivers/input/touchscreen/wm97xx-core.c
2591 +@@ -929,7 +929,8 @@ static int __init wm97xx_init(void)
2592 +
2593 + static void __exit wm97xx_exit(void)
2594 + {
2595 +- driver_unregister(&wm97xx_driver);
2596 ++ if (IS_BUILTIN(CONFIG_AC97_BUS))
2597 ++ driver_unregister(&wm97xx_driver);
2598 + platform_driver_unregister(&wm97xx_mfd_driver);
2599 + }
2600 +
2601 +diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
2602 +index 805bd9c65940..8b450fc53202 100644
2603 +--- a/drivers/media/i2c/tvp5150.c
2604 ++++ b/drivers/media/i2c/tvp5150.c
2605 +@@ -901,9 +901,6 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
2606 +
2607 + /* tvp5150 has some special limits */
2608 + rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
2609 +- rect.width = clamp_t(unsigned int, rect.width,
2610 +- TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
2611 +- TVP5150_H_MAX - rect.left);
2612 + rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
2613 +
2614 + /* Calculate height based on current standard */
2615 +@@ -917,9 +914,16 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
2616 + else
2617 + hmax = TVP5150_V_MAX_OTHERS;
2618 +
2619 +- rect.height = clamp_t(unsigned int, rect.height,
2620 ++ /*
2621 ++ * alignments:
2622 ++ * - width = 2 due to UYVY colorspace
2623 ++ * - height, image = no special alignment
2624 ++ */
2625 ++ v4l_bound_align_image(&rect.width,
2626 ++ TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
2627 ++ TVP5150_H_MAX - rect.left, 1, &rect.height,
2628 + hmax - TVP5150_MAX_CROP_TOP - rect.top,
2629 +- hmax - rect.top);
2630 ++ hmax - rect.top, 0, 0);
2631 +
2632 + tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
2633 + tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
2634 +diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
2635 +index 70aec9bb7e95..07bf20c6c6fc 100644
2636 +--- a/drivers/media/pci/cx23885/altera-ci.c
2637 ++++ b/drivers/media/pci/cx23885/altera-ci.c
2638 +@@ -665,6 +665,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
2639 + }
2640 +
2641 + temp_int = append_internal(inter);
2642 ++ if (!temp_int) {
2643 ++ ret = -ENOMEM;
2644 ++ goto err;
2645 ++ }
2646 + inter->filts_used = 1;
2647 + inter->dev = config->dev;
2648 + inter->fpga_rw = config->fpga_rw;
2649 +@@ -699,6 +703,7 @@ err:
2650 + __func__, ret);
2651 +
2652 + kfree(pid_filt);
2653 ++ kfree(inter);
2654 +
2655 + return ret;
2656 + }
2657 +@@ -733,6 +738,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
2658 + }
2659 +
2660 + temp_int = append_internal(inter);
2661 ++ if (!temp_int) {
2662 ++ ret = -ENOMEM;
2663 ++ goto err;
2664 ++ }
2665 + inter->cis_used = 1;
2666 + inter->dev = config->dev;
2667 + inter->fpga_rw = config->fpga_rw;
2668 +@@ -801,6 +810,7 @@ err:
2669 + ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
2670 +
2671 + kfree(state);
2672 ++ kfree(inter);
2673 +
2674 + return ret;
2675 + }
2676 +diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
2677 +index c7631e117dd3..1ae15d4ec5ed 100644
2678 +--- a/drivers/media/platform/coda/coda-common.c
2679 ++++ b/drivers/media/platform/coda/coda-common.c
2680 +@@ -1719,7 +1719,8 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
2681 + break;
2682 + case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
2683 + /* TODO: switch between baseline and constrained baseline */
2684 +- ctx->params.h264_profile_idc = 66;
2685 ++ if (ctx->inst_type == CODA_INST_ENCODER)
2686 ++ ctx->params.h264_profile_idc = 66;
2687 + break;
2688 + case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
2689 + /* nothing to do, this is set by the encoder */
2690 +diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
2691 +index 57b02c4b3f63..6072ea9a84f2 100644
2692 +--- a/drivers/mtd/devices/Kconfig
2693 ++++ b/drivers/mtd/devices/Kconfig
2694 +@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
2695 + config MTD_DOCG3
2696 + tristate "M-Systems Disk-On-Chip G3"
2697 + select BCH
2698 +- select BCH_CONST_PARAMS
2699 ++ select BCH_CONST_PARAMS if !MTD_NAND_BCH
2700 + select BITREVERSE
2701 + ---help---
2702 + This provides an MTD device driver for the M-Systems DiskOnChip
2703 +diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
2704 +index d7e10b36a0b9..0f0126901ac5 100644
2705 +--- a/drivers/mtd/spi-nor/cadence-quadspi.c
2706 ++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
2707 +@@ -1000,7 +1000,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
2708 + err_unmap:
2709 + dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM);
2710 +
2711 +- return 0;
2712 ++ return ret;
2713 + }
2714 +
2715 + static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
2716 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2717 +index 2b01180be834..29661d45c6d0 100644
2718 +--- a/drivers/net/bonding/bond_main.c
2719 ++++ b/drivers/net/bonding/bond_main.c
2720 +@@ -3118,13 +3118,13 @@ static int bond_slave_netdev_event(unsigned long event,
2721 + case NETDEV_CHANGE:
2722 + /* For 802.3ad mode only:
2723 + * Getting invalid Speed/Duplex values here will put slave
2724 +- * in weird state. So mark it as link-down for the time
2725 ++ * in weird state. So mark it as link-fail for the time
2726 + * being and let link-monitoring (miimon) set it right when
2727 + * correct speeds/duplex are available.
2728 + */
2729 + if (bond_update_speed_duplex(slave) &&
2730 + BOND_MODE(bond) == BOND_MODE_8023AD)
2731 +- slave->link = BOND_LINK_DOWN;
2732 ++ slave->link = BOND_LINK_FAIL;
2733 +
2734 + if (BOND_MODE(bond) == BOND_MODE_8023AD)
2735 + bond_3ad_adapter_speed_duplex_changed(slave);
2736 +diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
2737 +index 27d9b4bba535..2411ed3c7303 100644
2738 +--- a/drivers/of/of_numa.c
2739 ++++ b/drivers/of/of_numa.c
2740 +@@ -115,9 +115,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
2741 + distance = of_read_number(matrix, 1);
2742 + matrix++;
2743 +
2744 ++ if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
2745 ++ (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
2746 ++ pr_err("Invalid distance[node%d -> node%d] = %d\n",
2747 ++ nodea, nodeb, distance);
2748 ++ return -EINVAL;
2749 ++ }
2750 ++
2751 + numa_set_distance(nodea, nodeb, distance);
2752 +- pr_debug("distance[node%d -> node%d] = %d\n",
2753 +- nodea, nodeb, distance);
2754 +
2755 + /* Set default distance of node B->A same as A->B */
2756 + if (nodeb > nodea)
2757 +diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
2758 +index e79f2a181ad2..b9ec4a16db1f 100644
2759 +--- a/drivers/rtc/hctosys.c
2760 ++++ b/drivers/rtc/hctosys.c
2761 +@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
2762 + tv64.tv_sec = rtc_tm_to_time64(&tm);
2763 +
2764 + #if BITS_PER_LONG == 32
2765 +- if (tv64.tv_sec > INT_MAX)
2766 ++ if (tv64.tv_sec > INT_MAX) {
2767 ++ err = -ERANGE;
2768 + goto err_read;
2769 ++ }
2770 + #endif
2771 +
2772 + err = do_settimeofday64(&tv64);
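Before this change the overflow path jumped to err_read with err still holding the 0 from the successful RTC read, so on 32-bit kernels a post-2038 hardware clock made rtc_hctosys() report success without ever setting the system time. The limit is simply INT_MAX = 2^31 - 1 = 2,147,483,647 seconds past the 1970 epoch, i.e. 2038-01-19 03:14:07 UTC; the fix reports -ERANGE instead.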
2773 +diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
2774 +index 7a3744006419..c492af5bcd95 100644
2775 +--- a/drivers/scsi/qla2xxx/qla_gs.c
2776 ++++ b/drivers/scsi/qla2xxx/qla_gs.c
2777 +@@ -4410,9 +4410,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
2778 + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2779 + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
2780 +
2781 +- rspsz = sizeof(struct ct_sns_gpnft_rsp) +
2782 +- ((vha->hw->max_fibre_devices - 1) *
2783 +- sizeof(struct ct_sns_gpn_ft_data));
2784 ++ rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
2785 ++ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
2786 ++ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
2787 +
2788 + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
2789 + /* CT_IU preamble */
2790 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
2791 +index 75d34def2361..f77e470152d0 100644
2792 +--- a/drivers/scsi/qla2xxx/qla_init.c
2793 ++++ b/drivers/scsi/qla2xxx/qla_init.c
2794 +@@ -1742,25 +1742,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2795 + cid.b.rsvd_1 = 0;
2796 +
2797 + ql_dbg(ql_dbg_disc, vha, 0x20ec,
2798 +- "%s %d %8phC LoopID 0x%x in use post gnl\n",
2799 ++ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2800 + __func__, __LINE__, ea->fcport->port_name,
2801 +- ea->fcport->loop_id);
2802 ++ ea->fcport->loop_id, cid.b24);
2803 +
2804 +- if (IS_SW_RESV_ADDR(cid)) {
2805 +- set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2806 +- ea->fcport->loop_id = FC_NO_LOOP_ID;
2807 +- } else {
2808 +- qla2x00_clear_loop_id(ea->fcport);
2809 +- }
2810 ++ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2811 ++ ea->fcport->loop_id = FC_NO_LOOP_ID;
2812 + qla24xx_post_gnl_work(vha, ea->fcport);
2813 + break;
2814 + case MBS_PORT_ID_USED:
2815 +- ql_dbg(ql_dbg_disc, vha, 0x20ed,
2816 +- "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
2817 +- __func__, __LINE__, ea->fcport->port_name,
2818 +- ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
2819 +- ea->fcport->d_id.b.al_pa);
2820 +-
2821 + lid = ea->iop[1] & 0xffff;
2822 + qlt_find_sess_invalidate_other(vha,
2823 + wwn_to_u64(ea->fcport->port_name),
2824 +@@ -4501,6 +4491,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2825 + fcport->loop_id = FC_NO_LOOP_ID;
2826 + qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2827 + fcport->supported_classes = FC_COS_UNSPECIFIED;
2828 ++ fcport->fp_speed = PORT_SPEED_UNKNOWN;
2829 +
2830 + fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
2831 + sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
2832 +@@ -6515,7 +6506,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
2833 + * The next call disables the board
2834 + * completely.
2835 + */
2836 +- ha->isp_ops->reset_adapter(vha);
2837 ++ qla2x00_abort_isp_cleanup(vha);
2838 + vha->flags.online = 0;
2839 + clear_bit(ISP_ABORT_RETRY,
2840 + &vha->dpc_flags);
2841 +@@ -6972,7 +6963,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
2842 + }
2843 + icb->firmware_options_2 &= cpu_to_le32(
2844 + ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
2845 +- vha->flags.process_response_queue = 0;
2846 + if (ha->zio_mode != QLA_ZIO_DISABLED) {
2847 + ha->zio_mode = QLA_ZIO_MODE_6;
2848 +
2849 +@@ -6983,7 +6973,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
2850 + icb->firmware_options_2 |= cpu_to_le32(
2851 + (uint32_t)ha->zio_mode);
2852 + icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
2853 +- vha->flags.process_response_queue = 1;
2854 + }
2855 +
2856 + if (rval) {
2857 +diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
2858 +index 667055cbe155..3e94f15ce1cf 100644
2859 +--- a/drivers/scsi/qla2xxx/qla_iocb.c
2860 ++++ b/drivers/scsi/qla2xxx/qla_iocb.c
2861 +@@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp)
2862 +
2863 + /* Set chip new ring index. */
2864 + WRT_REG_DWORD(req->req_q_in, req->ring_index);
2865 +- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
2866 +-
2867 +- /* Manage unprocessed RIO/ZIO commands in response queue. */
2868 +- if (vha->flags.process_response_queue &&
2869 +- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2870 +- qla24xx_process_response_queue(vha, rsp);
2871 +
2872 + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2873 + return QLA_SUCCESS;
2874 +@@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
2875 +
2876 + /* Set chip new ring index. */
2877 + WRT_REG_DWORD(req->req_q_in, req->ring_index);
2878 +- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
2879 +-
2880 +- /* Manage unprocessed RIO/ZIO commands in response queue. */
2881 +- if (vha->flags.process_response_queue &&
2882 +- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2883 +- qla24xx_process_response_queue(vha, rsp);
2884 +
2885 + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2886 +
2887 +@@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
2888 + /* Set chip new ring index. */
2889 + WRT_REG_DWORD(req->req_q_in, req->ring_index);
2890 +
2891 +- /* Manage unprocessed RIO/ZIO commands in response queue. */
2892 +- if (vha->flags.process_response_queue &&
2893 +- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2894 +- qla24xx_process_response_queue(vha, rsp);
2895 +-
2896 + spin_unlock_irqrestore(&qpair->qp_lock, flags);
2897 + return QLA_SUCCESS;
2898 +
2899 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
2900 +index f0ec13d48bf3..f301621e39d7 100644
2901 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
2902 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
2903 +@@ -3716,10 +3716,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2904 + mcp->mb[0] = MBC_PORT_PARAMS;
2905 + mcp->mb[1] = loop_id;
2906 + mcp->mb[2] = BIT_0;
2907 +- if (IS_CNA_CAPABLE(vha->hw))
2908 +- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2909 +- else
2910 +- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2911 ++ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2912 + mcp->mb[9] = vha->vp_idx;
2913 + mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2914 + mcp->in_mb = MBX_3|MBX_1|MBX_0;
2915 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
2916 +index c5a963c2c86e..d0dc425f33f5 100644
2917 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
2918 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
2919 +@@ -604,7 +604,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
2920 + {
2921 + int rval;
2922 +
2923 +- if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
2924 ++ if (ha->flags.fw_started) {
2925 + rval = ha->isp_ops->abort_command(sp);
2926 + if (!rval && !qla_nvme_wait_on_command(sp))
2927 + ql_log(ql_log_warn, NULL, 0x2112,
2928 +@@ -657,9 +657,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
2929 + __func__, fcport);
2930 +
2931 + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
2932 +- init_completion(&fcport->nvme_del_done);
2933 +- nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
2934 +- wait_for_completion(&fcport->nvme_del_done);
2935 + }
2936 +
2937 + if (vha->nvme_local_port) {
2938 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2939 +index 6dc1b1bd8069..27fbd437f412 100644
2940 +--- a/drivers/scsi/qla2xxx/qla_target.c
2941 ++++ b/drivers/scsi/qla2xxx/qla_target.c
2942 +@@ -1257,7 +1257,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
2943 + qla24xx_chk_fcp_state(sess);
2944 +
2945 + ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
2946 +- "Scheduling sess %p for deletion\n", sess);
2947 ++ "Scheduling sess %p for deletion %8phC\n",
2948 ++ sess, sess->port_name);
2949 +
2950 + INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
2951 + WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
2952 +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
2953 +index 7732e9336d43..edfcb98aa4ef 100644
2954 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
2955 ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
2956 +@@ -718,10 +718,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
2957 + cmd->sg_cnt = 0;
2958 + cmd->offset = 0;
2959 + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
2960 +- if (cmd->trc_flags & TRC_XMIT_STATUS) {
2961 +- pr_crit("Multiple calls for status = %p.\n", cmd);
2962 +- dump_stack();
2963 +- }
2964 + cmd->trc_flags |= TRC_XMIT_STATUS;
2965 +
2966 + if (se_cmd->data_direction == DMA_FROM_DEVICE) {
2967 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2968 +index 41e9ac9fc138..4fd92b1802cd 100644
2969 +--- a/drivers/scsi/scsi_lib.c
2970 ++++ b/drivers/scsi/scsi_lib.c
2971 +@@ -696,6 +696,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
2972 + */
2973 + scsi_mq_uninit_cmd(cmd);
2974 +
2975 ++ /*
2976 ++ * queue is still alive, so grab the ref for preventing it
2977 ++ * from being cleaned up during running queue.
2978 ++ */
2979 ++ percpu_ref_get(&q->q_usage_counter);
2980 ++
2981 + __blk_mq_end_request(req, error);
2982 +
2983 + if (scsi_target(sdev)->single_lun ||
2984 +@@ -703,6 +709,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
2985 + kblockd_schedule_work(&sdev->requeue_work);
2986 + else
2987 + blk_mq_run_hw_queues(q, true);
2988 ++
2989 ++ percpu_ref_put(&q->q_usage_counter);
2990 + } else {
2991 + unsigned long flags;
2992 +
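
The scsi_lib change is an instance of pinning an object across a window in
which our own actions may release it. Condensed from the hunk above (not the
full scsi_end_request()):

        /* __blk_mq_end_request() can drop the last request reference, after
         * which nothing stops blk_cleanup_queue() from freeing the queue;
         * pin q->q_usage_counter so blk_mq_run_hw_queues() sees a live queue. */
        percpu_ref_get(&q->q_usage_counter);
        __blk_mq_end_request(req, error);
        blk_mq_run_hw_queues(q, true);
        percpu_ref_put(&q->q_usage_counter);
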
2993 +diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
2994 +index 3efc47e82973..bd040c29c4bf 100644
2995 +--- a/drivers/soc/ti/knav_qmss.h
2996 ++++ b/drivers/soc/ti/knav_qmss.h
2997 +@@ -329,8 +329,8 @@ struct knav_range_ops {
2998 + };
2999 +
3000 + struct knav_irq_info {
3001 +- int irq;
3002 +- u32 cpu_map;
3003 ++ int irq;
3004 ++ struct cpumask *cpu_mask;
3005 + };
3006 +
3007 + struct knav_range_info {
3008 +diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
3009 +index 316e82e46f6c..2f7fb2dcc1d6 100644
3010 +--- a/drivers/soc/ti/knav_qmss_acc.c
3011 ++++ b/drivers/soc/ti/knav_qmss_acc.c
3012 +@@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
3013 + {
3014 + struct knav_device *kdev = range->kdev;
3015 + struct knav_acc_channel *acc;
3016 +- unsigned long cpu_map;
3017 ++ struct cpumask *cpu_mask;
3018 + int ret = 0, irq;
3019 + u32 old, new;
3020 +
3021 + if (range->flags & RANGE_MULTI_QUEUE) {
3022 + acc = range->acc;
3023 + irq = range->irqs[0].irq;
3024 +- cpu_map = range->irqs[0].cpu_map;
3025 ++ cpu_mask = range->irqs[0].cpu_mask;
3026 + } else {
3027 + acc = range->acc + queue;
3028 + irq = range->irqs[queue].irq;
3029 +- cpu_map = range->irqs[queue].cpu_map;
3030 ++ cpu_mask = range->irqs[queue].cpu_mask;
3031 + }
3032 +
3033 + old = acc->open_mask;
3034 +@@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
3035 + acc->name, acc->name);
3036 + ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
3037 + range);
3038 +- if (!ret && cpu_map) {
3039 +- ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
3040 ++ if (!ret && cpu_mask) {
3041 ++ ret = irq_set_affinity_hint(irq, cpu_mask);
3042 + if (ret) {
3043 + dev_warn(range->kdev->dev,
3044 + "Failed to set IRQ affinity\n");
3045 +diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
3046 +index 6755f2af5619..ef36acc0e708 100644
3047 +--- a/drivers/soc/ti/knav_qmss_queue.c
3048 ++++ b/drivers/soc/ti/knav_qmss_queue.c
3049 +@@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range,
3050 + struct knav_queue_inst *inst)
3051 + {
3052 + unsigned queue = inst->id - range->queue_base;
3053 +- unsigned long cpu_map;
3054 + int ret = 0, irq;
3055 +
3056 + if (range->flags & RANGE_HAS_IRQ) {
3057 + irq = range->irqs[queue].irq;
3058 +- cpu_map = range->irqs[queue].cpu_map;
3059 + ret = request_irq(irq, knav_queue_int_handler, 0,
3060 + inst->irq_name, inst);
3061 + if (ret)
3062 + return ret;
3063 + disable_irq(irq);
3064 +- if (cpu_map) {
3065 +- ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
3066 ++ if (range->irqs[queue].cpu_mask) {
3067 ++ ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
3068 + if (ret) {
3069 + dev_warn(range->kdev->dev,
3070 + "Failed to set IRQ affinity\n");
3071 +@@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev,
3072 +
3073 + range->num_irqs++;
3074 +
3075 +- if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
3076 +- range->irqs[i].cpu_map =
3077 +- (oirq.args[2] & 0x0000ff00) >> 8;
3078 ++ if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
3079 ++ unsigned long mask;
3080 ++ int bit;
3081 ++
3082 ++ range->irqs[i].cpu_mask = devm_kzalloc(dev,
3083 ++ cpumask_size(), GFP_KERNEL);
3084 ++ if (!range->irqs[i].cpu_mask)
3085 ++ return -ENOMEM;
3086 ++
3087 ++ mask = (oirq.args[2] & 0x0000ff00) >> 8;
3088 ++ for_each_set_bit(bit, &mask, BITS_PER_LONG)
3089 ++ cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
3090 ++ }
3091 + }
3092 +
3093 + range->num_irqs = min(range->num_irqs, range->num_queues);
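
The knav conversion fixes more than style: irq_set_affinity_hint() stores the
mask pointer it is given and re-reads it later (it backs
/proc/irq/*/affinity_hint), so the old to_cpumask(&cpu_map) handed it the
address of an on-stack unsigned long — a dangling pointer once the function
returned, and too small to be a struct cpumask on large-NR_CPUS builds. A
sketch of the persistent-allocation approach used above:

        /* Turn the 8-bit CPU map from the DT interrupt specifier into a
         * heap-allocated cpumask that may outlive this function. */
        static struct cpumask *knav_dt_cpumask(struct device *dev, u32 spec)
        {
                unsigned long bits = (spec & 0x0000ff00) >> 8;
                struct cpumask *mask;
                int bit;

                mask = devm_kzalloc(dev, cpumask_size(), GFP_KERNEL);
                if (!mask)
                        return NULL;

                for_each_set_bit(bit, &bits, BITS_PER_LONG)
                        cpumask_set_cpu(bit, mask);
                return mask;
        }
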
3094 +diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c
3095 +index 25b9fcd5e3a4..ce3351832fb1 100644
3096 +--- a/drivers/staging/iio/adc/ad7606.c
3097 ++++ b/drivers/staging/iio/adc/ad7606.c
3098 +@@ -26,9 +26,12 @@
3099 +
3100 + #include "ad7606.h"
3101 +
3102 +-/* Scales are computed as 2.5/2**16 and 5/2**16 respectively */
3103 ++/*
3104 ++ * Scales are computed as 5000/32768 and 10000/32768 respectively,
3105 ++ * so that when applied to the raw values they provide mV values
3106 ++ */
3107 + static const unsigned int scale_avail[2][2] = {
3108 +- {0, 38147}, {0, 76294}
3109 ++ {0, 152588}, {0, 305176}
3110 + };
3111 +
3112 + static int ad7606_reset(struct ad7606_state *st)
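
The corrected ad7606 scale table is plain unit conversion; the arithmetic,
for the record:

        /* AD7606: 16-bit bipolar, so each half-range spans 2^15 = 32768 codes.
         *
         *   +/-5 V  range:  5000 mV / 32768 = 0.152588 mV/LSB -> {0, 152588}
         *   +/-10 V range: 10000 mV / 32768 = 0.305176 mV/LSB -> {0, 305176}
         *
         * encoded as IIO {integer, micro} pairs, which is what scale_avail[]
         * holds. The old {0, 38147}/{0, 76294} values came from 2.5/2^16 and
         * 5/2^16 per the old comment, matching neither the true input range
         * nor millivolt units.
         */
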
3113 +diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
3114 +index cf342eb58e10..ad7e28ab9a4f 100644
3115 +--- a/drivers/staging/most/video/video.c
3116 ++++ b/drivers/staging/most/video/video.c
3117 +@@ -530,7 +530,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
3118 + return 0;
3119 + }
3120 +
3121 +-static struct core_component comp_info = {
3122 ++static struct core_component comp = {
3123 + .name = "video",
3124 + .probe_channel = comp_probe_channel,
3125 + .disconnect_channel = comp_disconnect_channel,
3126 +@@ -565,7 +565,7 @@ static void __exit comp_exit(void)
3127 + }
3128 + spin_unlock_irq(&list_lock);
3129 +
3130 +- most_deregister_component(&comp_info);
3131 ++ most_deregister_component(&comp);
3132 + BUG_ON(!list_empty(&video_devices));
3133 + }
3134 +
3135 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
3136 +index 6ab982309e6a..441778100887 100644
3137 +--- a/drivers/thermal/thermal_core.c
3138 ++++ b/drivers/thermal/thermal_core.c
3139 +@@ -1102,8 +1102,9 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
3140 + mutex_unlock(&thermal_list_lock);
3141 +
3142 + ida_simple_remove(&thermal_cdev_ida, cdev->id);
3143 +- device_unregister(&cdev->device);
3144 ++ device_del(&cdev->device);
3145 + thermal_cooling_device_destroy_sysfs(cdev);
3146 ++ put_device(&cdev->device);
3147 + }
3148 + EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
3149 +
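
Why the thermal teardown is split: device_unregister() is exactly
device_del() followed by put_device(). Separating the two lets the driver
destroy its sysfs attributes while it still holds the final reference:

        device_del(&cdev->device);                  /* remove from visibility */
        thermal_cooling_device_destroy_sysfs(cdev); /* cdev still guaranteed live */
        put_device(&cdev->device);                  /* now drop the last reference */
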
3150 +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
3151 +index 243c96025053..47b41159a8bc 100644
3152 +--- a/drivers/tty/serial/sc16is7xx.c
3153 ++++ b/drivers/tty/serial/sc16is7xx.c
3154 +@@ -657,7 +657,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
3155 + uart_write_wakeup(port);
3156 + }
3157 +
3158 +-static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
3159 ++static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
3160 + {
3161 + struct uart_port *port = &s->p[portno].port;
3162 +
3163 +@@ -666,7 +666,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
3164 +
3165 + iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
3166 + if (iir & SC16IS7XX_IIR_NO_INT_BIT)
3167 +- break;
3168 ++ return false;
3169 +
3170 + iir &= SC16IS7XX_IIR_ID_MASK;
3171 +
3172 +@@ -688,16 +688,23 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
3173 + port->line, iir);
3174 + break;
3175 + }
3176 +- } while (1);
3177 ++ } while (0);
3178 ++ return true;
3179 + }
3180 +
3181 + static void sc16is7xx_ist(struct kthread_work *ws)
3182 + {
3183 + struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
3184 +- int i;
3185 +
3186 +- for (i = 0; i < s->devtype->nr_uart; ++i)
3187 +- sc16is7xx_port_irq(s, i);
3188 ++ while (1) {
3189 ++ bool keep_polling = false;
3190 ++ int i;
3191 ++
3192 ++ for (i = 0; i < s->devtype->nr_uart; ++i)
3193 ++ keep_polling |= sc16is7xx_port_irq(s, i);
3194 ++ if (!keep_polling)
3195 ++ break;
3196 ++ }
3197 + }
3198 +
3199 + static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
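
The sc16is7xx rework converts a drain-one-port loop into a sweep over all
ports that repeats until an entire pass finds no pending interrupt — which is
what a shared, level-triggered line requires. The shape of it, with
illustrative names (struct chip and port_has_work() stand in for the driver's
sc16is7xx_port and sc16is7xx_port_irq()):

        static void drain_shared_irq(struct chip *s, int nports)
        {
                bool keep_polling;
                int i;

                do {
                        keep_polling = false;
                        for (i = 0; i < nports; i++)
                                keep_polling |= port_has_work(s, i);
                } while (keep_polling);
        }
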
3200 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
3201 +index 3c55600a8236..671dc6a25e6c 100644
3202 +--- a/drivers/tty/serial/sh-sci.c
3203 ++++ b/drivers/tty/serial/sh-sci.c
3204 +@@ -3045,6 +3045,7 @@ static struct uart_driver sci_uart_driver = {
3205 + static int sci_remove(struct platform_device *dev)
3206 + {
3207 + struct sci_port *port = platform_get_drvdata(dev);
3208 ++ unsigned int type = port->port.type; /* uart_remove_... clears it */
3209 +
3210 + sci_ports_in_use &= ~BIT(port->port.line);
3211 + uart_remove_one_port(&sci_uart_driver, &port->port);
3212 +@@ -3055,8 +3056,7 @@ static int sci_remove(struct platform_device *dev)
3213 + sysfs_remove_file(&dev->dev.kobj,
3214 + &dev_attr_rx_fifo_trigger.attr);
3215 + }
3216 +- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
3217 +- port->port.type == PORT_HSCIF) {
3218 ++ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
3219 + sysfs_remove_file(&dev->dev.kobj,
3220 + &dev_attr_rx_fifo_timeout.attr);
3221 + }
3222 +diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
3223 +index 3e827a3d48d5..b7dc2196f9d7 100644
3224 +--- a/drivers/tty/tty_baudrate.c
3225 ++++ b/drivers/tty/tty_baudrate.c
3226 +@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
3227 + else
3228 + cbaud += 15;
3229 + }
3230 +- return baud_table[cbaud];
3231 ++ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
3232 + }
3233 + EXPORT_SYMBOL(tty_termios_baud_rate);
3234 +
3235 +@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
3236 + else
3237 + cbaud += 15;
3238 + }
3239 +- return baud_table[cbaud];
3240 ++ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
3241 + #else
3242 + return tty_termios_baud_rate(termios);
3243 + #endif
3244 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3245 +index 31d06f59c4e4..da45120d9453 100644
3246 +--- a/drivers/tty/tty_io.c
3247 ++++ b/drivers/tty/tty_io.c
3248 +@@ -408,7 +408,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
3249 + mutex_lock(&tty_mutex);
3250 + /* Search through the tty devices to look for a match */
3251 + list_for_each_entry(p, &tty_drivers, tty_drivers) {
3252 +- if (strncmp(name, p->name, len) != 0)
3253 ++ if (!len || strncmp(name, p->name, len) != 0)
3254 + continue;
3255 + stp = str;
3256 + if (*stp == ',')
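
The tty_find_polling_driver() guard exists because of a C library corner
case: strncmp() with a length of zero compares no characters and returns 0,
so an empty name would "match" every registered driver:

        strncmp("ttyS", "", 0) == 0   /* vacuous match: zero bytes compared */

Rejecting len == 0 before the comparison keeps the search from returning an
arbitrary driver.
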
3257 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
3258 +index 17fcd3b2e686..fe7914dffd8f 100644
3259 +--- a/drivers/vhost/scsi.c
3260 ++++ b/drivers/vhost/scsi.c
3261 +@@ -964,7 +964,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
3262 + prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
3263 + }
3264 + /*
3265 +- * Set prot_iter to data_iter, and advance past any
3266 ++ * Set prot_iter to data_iter and truncate it to
3267 ++ * prot_bytes, and advance data_iter past any
3268 + * preceeding prot_bytes that may be present.
3269 + *
3270 + * Also fix up the exp_data_len to reflect only the
3271 +@@ -973,6 +974,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
3272 + if (prot_bytes) {
3273 + exp_data_len -= prot_bytes;
3274 + prot_iter = data_iter;
3275 ++ iov_iter_truncate(&prot_iter, prot_bytes);
3276 + iov_iter_advance(&data_iter, prot_bytes);
3277 + }
3278 + tag = vhost64_to_cpu(vq, v_req_pi.tag);
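
The vhost/scsi hunk splits one iterator into a bounded protection-information
view and a data view; without the truncate, copies through prot_iter could
run past prot_bytes into the data region. Condensed:

        /* Fork the iterator at the current position, clamp the PI copy,
         * then step the data view past the PI bytes. */
        prot_iter = data_iter;                     /* same starting position */
        iov_iter_truncate(&prot_iter, prot_bytes); /* PI: first prot_bytes only */
        iov_iter_advance(&data_iter, prot_bytes);  /* data: everything after */
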
3279 +diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
3280 +index 2541a0e0de76..3ad46255f990 100644
3281 +--- a/drivers/video/fbdev/aty/mach64_accel.c
3282 ++++ b/drivers/video/fbdev/aty/mach64_accel.c
3283 +@@ -127,7 +127,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
3284 +
3285 + /* set host attributes */
3286 + wait_for_fifo(13, par);
3287 +- aty_st_le32(HOST_CNTL, 0, par);
3288 ++ aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
3289 +
3290 + /* set pattern attributes */
3291 + aty_st_le32(PAT_REG0, 0, par);
3292 +@@ -233,7 +233,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
3293 + rotation = rotation24bpp(dx, direction);
3294 + }
3295 +
3296 +- wait_for_fifo(4, par);
3297 ++ wait_for_fifo(5, par);
3298 ++ aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
3299 + aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);
3300 + aty_st_le32(SRC_Y_X, (sx << 16) | sy, par);
3301 + aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par);
3302 +@@ -269,7 +270,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
3303 + rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT);
3304 + }
3305 +
3306 +- wait_for_fifo(3, par);
3307 ++ wait_for_fifo(4, par);
3308 ++ aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
3309 + aty_st_le32(DP_FRGD_CLR, color, par);
3310 + aty_st_le32(DP_SRC,
3311 + BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE,
3312 +@@ -284,7 +286,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
3313 + {
3314 + struct atyfb_par *par = (struct atyfb_par *) info->par;
3315 + u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width;
3316 +- u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix;
3317 ++ u32 pix_width, rotation = 0, src, mix;
3318 +
3319 + if (par->asleep)
3320 + return;
3321 +@@ -296,8 +298,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
3322 + return;
3323 + }
3324 +
3325 +- pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
3326 +- host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
3327 ++ pix_width = par->crtc.dp_pix_width;
3328 +
3329 + switch (image->depth) {
3330 + case 1:
3331 +@@ -345,7 +346,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
3332 + * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit
3333 + * this hwaccelerated triple has an issue with not aligned data
3334 + */
3335 +- if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
3336 ++ if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
3337 + pix_width |= DP_HOST_TRIPLE_EN;
3338 + }
3339 +
3340 +@@ -370,19 +371,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
3341 + mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D;
3342 + }
3343 +
3344 +- wait_for_fifo(6, par);
3345 +- aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par);
3346 ++ wait_for_fifo(5, par);
3347 + aty_st_le32(DP_PIX_WIDTH, pix_width, par);
3348 + aty_st_le32(DP_MIX, mix, par);
3349 + aty_st_le32(DP_SRC, src, par);
3350 +- aty_st_le32(HOST_CNTL, host_cntl, par);
3351 ++ aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
3352 + aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par);
3353 +
3354 + draw_rect(dx, dy, width, image->height, par);
3355 + src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
3356 +
3357 + /* manual triple each pixel */
3358 +- if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
3359 ++ if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
3360 + int inbit, outbit, mult24, byte_id_in_dword, width;
3361 + u8 *pbitmapin = (u8*)image->data, *pbitmapout;
3362 + u32 hostdword;
3363 +@@ -415,7 +415,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
3364 + }
3365 + }
3366 + wait_for_fifo(1, par);
3367 +- aty_st_le32(HOST_DATA0, hostdword, par);
3368 ++ aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par);
3369 + }
3370 + } else {
3371 + u32 *pbitmap, dwords = (src_bytes + 3) / 4;
3372 +@@ -424,8 +424,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
3373 + aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
3374 + }
3375 + }
3376 +-
3377 +- /* restore pix_width */
3378 +- wait_for_fifo(1, par);
3379 +- aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
3380 + }
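
The mach64 hunks all obey one invariant: wait_for_fifo(n, par) reserves n
slots in the engine's command FIFO, and every aty_st_le32() register write
that follows consumes one. Adding a DP_PIX_WIDTH write is why copyarea and
fillrect go 4 -> 5 and 3 -> 4, and dropping the DP_WRITE_MASK write is why
imageblit goes 6 -> 5; an off-by-one either way stalls or overruns the FIFO:

        wait_for_fifo(5, par);  /* reserve one slot per register write below */
        aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); /* new: 1 of 5 */
        aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);                /*      2 of 5 */
        /* ...the remaining writes in the function consume slots 3-5 */
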
3381 +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
3382 +index 03c9e325bfbc..3a2f37ad1f89 100644
3383 +--- a/fs/9p/vfs_file.c
3384 ++++ b/fs/9p/vfs_file.c
3385 +@@ -204,6 +204,14 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
3386 + break;
3387 + if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
3388 + break;
3389 ++ /*
3390 ++ * p9_client_lock_dotl overwrites flock.client_id with the
3391 ++ * server message, free and reuse the client name
3392 ++ */
3393 ++ if (flock.client_id != fid->clnt->name) {
3394 ++ kfree(flock.client_id);
3395 ++ flock.client_id = fid->clnt->name;
3396 ++ }
3397 + }
3398 +
3399 + /* map 9p status to VFS status */
3400 +@@ -235,6 +243,8 @@ out_unlock:
3401 + locks_lock_file_wait(filp, fl);
3402 + fl->fl_type = fl_type;
3403 + }
3404 ++ if (flock.client_id != fid->clnt->name)
3405 ++ kfree(flock.client_id);
3406 + out:
3407 + return res;
3408 + }
3409 +@@ -269,7 +279,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
3410 +
3411 + res = p9_client_getlock_dotl(fid, &glock);
3412 + if (res < 0)
3413 +- return res;
3414 ++ goto out;
3415 + /* map 9p lock type to os lock type */
3416 + switch (glock.type) {
3417 + case P9_LOCK_TYPE_RDLCK:
3418 +@@ -290,7 +300,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
3419 + fl->fl_end = glock.start + glock.length - 1;
3420 + fl->fl_pid = -glock.proc_id;
3421 + }
3422 +- kfree(glock.client_id);
3423 ++out:
3424 ++ if (glock.client_id != fid->clnt->name)
3425 ++ kfree(glock.client_id);
3426 + return res;
3427 + }
3428 +
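
The 9p changes encode a simple ownership rule for flock.client_id /
glock.client_id: the field starts out borrowing fid->clnt->name, but each
p9_client_*lock_dotl() call replaces it with a kmalloc'd string parsed from
the server's reply. Hence the test applied on every retry and exit path:

        if (flock.client_id != fid->clnt->name)
                kfree(flock.client_id);   /* ours: server-parsed copy */
        /* else: still the borrowed client name -- never freed here */
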
3429 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3430 +index 891b1aab3480..2012eaf80da5 100644
3431 +--- a/fs/btrfs/disk-io.c
3432 ++++ b/fs/btrfs/disk-io.c
3433 +@@ -4404,13 +4404,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
3434 + unpin = pinned_extents;
3435 + again:
3436 + while (1) {
3437 ++ /*
3438 ++ * The btrfs_finish_extent_commit() may get the same range as
3439 ++ * ours between find_first_extent_bit and clear_extent_dirty.
3440 ++ * Hence, hold the unused_bg_unpin_mutex to avoid double unpin
3441 ++ * the same extent range.
3442 ++ */
3443 ++ mutex_lock(&fs_info->unused_bg_unpin_mutex);
3444 + ret = find_first_extent_bit(unpin, 0, &start, &end,
3445 + EXTENT_DIRTY, NULL);
3446 +- if (ret)
3447 ++ if (ret) {
3448 ++ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
3449 + break;
3450 ++ }
3451 +
3452 + clear_extent_dirty(unpin, start, end);
3453 + btrfs_error_unpin_extent_range(fs_info, start, end);
3454 ++ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
3455 + cond_resched();
3456 + }
3457 +
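
The race the new btrfs mutex closes, shown as an interleaving (per the
comment added above):

        cleanup thread                        btrfs_finish_extent_commit()
        find_first_extent_bit() -> [A, B]
                                              find_first_extent_bit() -> [A, B]
        clear_extent_dirty(A, B)
                                              clear_extent_dirty(A, B)
        unpin range [A, B]
                                              unpin range [A, B]  <- double unpin

Holding fs_info->unused_bg_unpin_mutex across the find+clear pair makes the
lookup and the clear atomic with respect to the committer, so each dirty
range is unpinned exactly once.
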
3458 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3459 +index dc0f9d089b19..3e6c1baddda3 100644
3460 +--- a/fs/btrfs/inode.c
3461 ++++ b/fs/btrfs/inode.c
3462 +@@ -1537,12 +1537,11 @@ out_check:
3463 + }
3464 + btrfs_release_path(path);
3465 +
3466 +- if (cur_offset <= end && cow_start == (u64)-1) {
3467 ++ if (cur_offset <= end && cow_start == (u64)-1)
3468 + cow_start = cur_offset;
3469 +- cur_offset = end;
3470 +- }
3471 +
3472 + if (cow_start != (u64)-1) {
3473 ++ cur_offset = end;
3474 + ret = cow_file_range(inode, locked_page, cow_start, end, end,
3475 + page_started, nr_written, 1, NULL);
3476 + if (ret)
3477 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3478 +index c972920701a3..ec021bd947ba 100644
3479 +--- a/fs/btrfs/ioctl.c
3480 ++++ b/fs/btrfs/ioctl.c
3481 +@@ -3499,6 +3499,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3482 + const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3483 +
3484 + len = round_down(i_size_read(src), sz) - loff;
3485 ++ if (len == 0)
3486 ++ return 0;
3487 + olen = len;
3488 + }
3489 + }
3490 +@@ -4291,9 +4293,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3491 + goto out_unlock;
3492 + if (len == 0)
3493 + olen = len = src->i_size - off;
3494 +- /* if we extend to eof, continue to block boundary */
3495 +- if (off + len == src->i_size)
3496 ++ /*
3497 ++ * If we extend to eof, continue to block boundary if and only if the
3498 ++ * destination end offset matches the destination file's size, otherwise
3499 ++ * we would be corrupting data by placing the eof block into the middle
3500 ++ * of a file.
3501 ++ */
3502 ++ if (off + len == src->i_size) {
3503 ++ if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
3504 ++ goto out_unlock;
3505 + len = ALIGN(src->i_size, bs) - off;
3506 ++ }
3507 +
3508 + if (len == 0) {
3509 + ret = 0;
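
A concrete case the new btrfs_clone_files() check rejects, assuming a 4 KiB
block size (the numbers are mine, for illustration):

        src i_size = 10 KiB       -> eof block spans [8 KiB, 12 KiB), with
                                     only [8 KiB, 10 KiB) holding valid data
        off = 8 KiB, len = 2 KiB  -> off + len == src i_size, so the old code
                                     rounded len up to the 4 KiB boundary
        destoff = 0, dest i_size = 100 KiB

The old rounding cloned the whole eof block to [0 KiB, 4 KiB) of the
destination, overwriting valid data at [2 KiB, 4 KiB) with whatever lies past
the source's eof. The clone is now refused unless destoff + len reaches the
destination's own i_size, where a partial tail block is legitimately the new
eof block.
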
3510 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
3511 +index a866be999216..4b1eda26480b 100644
3512 +--- a/fs/ceph/inode.c
3513 ++++ b/fs/ceph/inode.c
3514 +@@ -1135,8 +1135,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
3515 + if (IS_ERR(realdn)) {
3516 + pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
3517 + PTR_ERR(realdn), dn, in, ceph_vinop(in));
3518 +- dput(dn);
3519 +- dn = realdn; /* note realdn contains the error */
3520 ++ dn = realdn;
3521 ++ /*
3522 ++ * Caller should release 'dn' in the case of error.
3523 ++ * If 'req->r_dentry' is passed to this function,
3524 ++ * caller should leave 'req->r_dentry' untouched.
3525 ++ */
3526 + goto out;
3527 + } else if (realdn) {
3528 + dout("dn %p (%d) spliced with %p (%d) "
3529 +diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
3530 +index 78ffc2699993..a5c54af861f7 100644
3531 +--- a/fs/configfs/symlink.c
3532 ++++ b/fs/configfs/symlink.c
3533 +@@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
3534 +
3535 + /* back up enough to print this bus id with '/' */
3536 + length -= cur;
3537 +- strncpy(buffer + length,config_item_name(p),cur);
3538 ++ memcpy(buffer + length, config_item_name(p), cur);
3539 + *(buffer + --length) = '/';
3540 + }
3541 + }
3542 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3543 +index 2276137d0083..fc05c7f7bbcf 100644
3544 +--- a/fs/ext4/inode.c
3545 ++++ b/fs/ext4/inode.c
3546 +@@ -5763,9 +5763,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
3547 + {
3548 + int err = 0;
3549 +
3550 +- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3551 ++ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
3552 ++ put_bh(iloc->bh);
3553 + return -EIO;
3554 +-
3555 ++ }
3556 + if (IS_I_VERSION(inode))
3557 + inode_inc_iversion(inode);
3558 +
3559 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3560 +index 377d516c475f..ffa25753e929 100644
3561 +--- a/fs/ext4/namei.c
3562 ++++ b/fs/ext4/namei.c
3563 +@@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
3564 + if (!is_dx_block && type == INDEX) {
3565 + ext4_error_inode(inode, func, line, block,
3566 + "directory leaf block found instead of index block");
3567 ++ brelse(bh);
3568 + return ERR_PTR(-EFSCORRUPTED);
3569 + }
3570 + if (!ext4_has_metadata_csum(inode->i_sb) ||
3571 +@@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
3572 + list_del_init(&EXT4_I(inode)->i_orphan);
3573 + mutex_unlock(&sbi->s_orphan_lock);
3574 + }
3575 +- }
3576 ++ } else
3577 ++ brelse(iloc.bh);
3578 ++
3579 + jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
3580 + jbd_debug(4, "orphan inode %lu will point to %d\n",
3581 + inode->i_ino, NEXT_ORPHAN(inode));
3582 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
3583 +index ebbc663d0798..a5efee34415f 100644
3584 +--- a/fs/ext4/resize.c
3585 ++++ b/fs/ext4/resize.c
3586 +@@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
3587 +
3588 + BUFFER_TRACE(bh, "get_write_access");
3589 + err = ext4_journal_get_write_access(handle, bh);
3590 +- if (err)
3591 ++ if (err) {
3592 ++ brelse(bh);
3593 + return err;
3594 ++ }
3595 + ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
3596 + first_cluster, first_cluster - start, count2);
3597 + ext4_set_bits(bh->b_data, first_cluster - start, count2);
3598 +
3599 + err = ext4_handle_dirty_metadata(handle, NULL, bh);
3600 ++ brelse(bh);
3601 + if (unlikely(err))
3602 + return err;
3603 +- brelse(bh);
3604 + }
3605 +
3606 + return 0;
3607 +@@ -605,7 +607,6 @@ handle_bb:
3608 + bh = bclean(handle, sb, block);
3609 + if (IS_ERR(bh)) {
3610 + err = PTR_ERR(bh);
3611 +- bh = NULL;
3612 + goto out;
3613 + }
3614 + overhead = ext4_group_overhead_blocks(sb, group);
3615 +@@ -618,9 +619,9 @@ handle_bb:
3616 + ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
3617 + sb->s_blocksize * 8, bh->b_data);
3618 + err = ext4_handle_dirty_metadata(handle, NULL, bh);
3619 ++ brelse(bh);
3620 + if (err)
3621 + goto out;
3622 +- brelse(bh);
3623 +
3624 + handle_ib:
3625 + if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
3626 +@@ -635,18 +636,16 @@ handle_ib:
3627 + bh = bclean(handle, sb, block);
3628 + if (IS_ERR(bh)) {
3629 + err = PTR_ERR(bh);
3630 +- bh = NULL;
3631 + goto out;
3632 + }
3633 +
3634 + ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
3635 + sb->s_blocksize * 8, bh->b_data);
3636 + err = ext4_handle_dirty_metadata(handle, NULL, bh);
3637 ++ brelse(bh);
3638 + if (err)
3639 + goto out;
3640 +- brelse(bh);
3641 + }
3642 +- bh = NULL;
3643 +
3644 + /* Mark group tables in block bitmap */
3645 + for (j = 0; j < GROUP_TABLE_COUNT; j++) {
3646 +@@ -685,7 +684,6 @@ handle_ib:
3647 + }
3648 +
3649 + out:
3650 +- brelse(bh);
3651 + err2 = ext4_journal_stop(handle);
3652 + if (err2 && !err)
3653 + err = err2;
3654 +@@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
3655 + err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
3656 + if (unlikely(err)) {
3657 + ext4_std_error(sb, err);
3658 ++ iloc.bh = NULL;
3659 + goto exit_inode;
3660 + }
3661 + brelse(dind);
3662 +@@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
3663 + sizeof(struct buffer_head *),
3664 + GFP_NOFS);
3665 + if (!n_group_desc) {
3666 ++ brelse(gdb_bh);
3667 + err = -ENOMEM;
3668 + ext4_warning(sb, "not enough memory for %lu groups",
3669 + gdb_num + 1);
3670 +@@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
3671 + kvfree(o_group_desc);
3672 + BUFFER_TRACE(gdb_bh, "get_write_access");
3673 + err = ext4_journal_get_write_access(handle, gdb_bh);
3674 +- if (unlikely(err))
3675 +- brelse(gdb_bh);
3676 + return err;
3677 + }
3678 +
3679 +@@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
3680 + backup_block, backup_block -
3681 + ext4_group_first_block_no(sb, group));
3682 + BUFFER_TRACE(bh, "get_write_access");
3683 +- if ((err = ext4_journal_get_write_access(handle, bh)))
3684 ++ if ((err = ext4_journal_get_write_access(handle, bh))) {
3685 ++ brelse(bh);
3686 + break;
3687 ++ }
3688 + lock_buffer(bh);
3689 + memcpy(bh->b_data, data, size);
3690 + if (rest)
3691 +@@ -2023,7 +2023,7 @@ retry:
3692 +
3693 + err = ext4_alloc_flex_bg_array(sb, n_group + 1);
3694 + if (err)
3695 +- return err;
3696 ++ goto out;
3697 +
3698 + err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
3699 + if (err)
3700 +@@ -2059,6 +2059,10 @@ retry:
3701 + n_blocks_count_retry = 0;
3702 + free_flex_gd(flex_gd);
3703 + flex_gd = NULL;
3704 ++ if (resize_inode) {
3705 ++ iput(resize_inode);
3706 ++ resize_inode = NULL;
3707 ++ }
3708 + goto retry;
3709 + }
3710 +
3711 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3712 +index 8d91d50ccf42..8b8c351fa9c5 100644
3713 +--- a/fs/ext4/super.c
3714 ++++ b/fs/ext4/super.c
3715 +@@ -4053,6 +4053,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3716 + sbi->s_groups_count = blocks_count;
3717 + sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
3718 + (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
3719 ++ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
3720 ++ le32_to_cpu(es->s_inodes_count)) {
3721 ++ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
3722 ++ le32_to_cpu(es->s_inodes_count),
3723 ++ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
3724 ++ ret = -EINVAL;
3725 ++ goto failed_mount;
3726 ++ }
3727 + db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
3728 + EXT4_DESC_PER_BLOCK(sb);
3729 + if (ext4_has_feature_meta_bg(sb)) {
3730 +@@ -4072,14 +4080,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3731 + ret = -ENOMEM;
3732 + goto failed_mount;
3733 + }
3734 +- if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
3735 +- le32_to_cpu(es->s_inodes_count)) {
3736 +- ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
3737 +- le32_to_cpu(es->s_inodes_count),
3738 +- ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
3739 +- ret = -EINVAL;
3740 +- goto failed_mount;
3741 +- }
3742 +
3743 + bgl_lock_init(sbi->s_blockgroup_lock);
3744 +
3745 +@@ -4488,6 +4488,7 @@ failed_mount6:
3746 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
3747 + percpu_counter_destroy(&sbi->s_dirs_counter);
3748 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
3749 ++ percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
3750 + failed_mount5:
3751 + ext4_ext_release(sb);
3752 + ext4_release_system_zone(sb);
3753 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
3754 +index f36fc5d5b257..4380c8630539 100644
3755 +--- a/fs/ext4/xattr.c
3756 ++++ b/fs/ext4/xattr.c
3757 +@@ -1388,6 +1388,12 @@ retry:
3758 + bh = ext4_getblk(handle, ea_inode, block, 0);
3759 + if (IS_ERR(bh))
3760 + return PTR_ERR(bh);
3761 ++ if (!bh) {
3762 ++ WARN_ON_ONCE(1);
3763 ++ EXT4_ERROR_INODE(ea_inode,
3764 ++ "ext4_getblk() return bh = NULL");
3765 ++ return -EFSCORRUPTED;
3766 ++ }
3767 + ret = ext4_journal_get_write_access(handle, bh);
3768 + if (ret)
3769 + goto out;
3770 +@@ -2276,8 +2282,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
3771 + if (!bh)
3772 + return ERR_PTR(-EIO);
3773 + error = ext4_xattr_check_block(inode, bh);
3774 +- if (error)
3775 ++ if (error) {
3776 ++ brelse(bh);
3777 + return ERR_PTR(error);
3778 ++ }
3779 + return bh;
3780 + }
3781 +
3782 +@@ -2397,6 +2405,8 @@ retry_inode:
3783 + error = ext4_xattr_block_set(handle, inode, &i, &bs);
3784 + } else if (error == -ENOSPC) {
3785 + if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
3786 ++ brelse(bs.bh);
3787 ++ bs.bh = NULL;
3788 + error = ext4_xattr_block_find(inode, &i, &bs);
3789 + if (error)
3790 + goto cleanup;
3791 +@@ -2617,6 +2627,8 @@ out:
3792 + kfree(buffer);
3793 + if (is)
3794 + brelse(is->iloc.bh);
3795 ++ if (bs)
3796 ++ brelse(bs->bh);
3797 + kfree(is);
3798 + kfree(bs);
3799 +
3800 +@@ -2696,7 +2708,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
3801 + struct ext4_inode *raw_inode, handle_t *handle)
3802 + {
3803 + struct ext4_xattr_ibody_header *header;
3804 +- struct buffer_head *bh;
3805 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3806 + static unsigned int mnt_count;
3807 + size_t min_offs;
3808 +@@ -2737,13 +2748,17 @@ retry:
3809 + * EA block can hold new_extra_isize bytes.
3810 + */
3811 + if (EXT4_I(inode)->i_file_acl) {
3812 ++ struct buffer_head *bh;
3813 ++
3814 + bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
3815 + error = -EIO;
3816 + if (!bh)
3817 + goto cleanup;
3818 + error = ext4_xattr_check_block(inode, bh);
3819 +- if (error)
3820 ++ if (error) {
3821 ++ brelse(bh);
3822 + goto cleanup;
3823 ++ }
3824 + base = BHDR(bh);
3825 + end = bh->b_data + bh->b_size;
3826 + min_offs = end - base;
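
Nearly every ext4 hunk above is the same leak class: a buffer_head reference
taken by sb_bread()/ext4_getblk() must be dropped with brelse() on every exit
path, error paths included. A hedged template of the corrected shape
(validate() and consume() are stand-ins, not ext4 functions):

        static int use_metadata_block(struct super_block *sb, sector_t blk)
        {
                struct buffer_head *bh;
                int err;

                bh = sb_bread(sb, blk);         /* takes a reference */
                if (!bh)
                        return -EIO;

                err = validate(bh);             /* hypothetical check */
                if (err)
                        goto out;               /* early exit still needs the put */

                err = consume(bh);              /* hypothetical use */
        out:
                brelse(bh);                     /* exactly one put per get */
                return err;
        }
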
3827 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3828 +index 4a9ace7280b9..97f15787cfeb 100644
3829 +--- a/fs/fuse/dev.c
3830 ++++ b/fs/fuse/dev.c
3831 +@@ -391,12 +391,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3832 + if (test_bit(FR_BACKGROUND, &req->flags)) {
3833 + spin_lock(&fc->lock);
3834 + clear_bit(FR_BACKGROUND, &req->flags);
3835 +- if (fc->num_background == fc->max_background)
3836 ++ if (fc->num_background == fc->max_background) {
3837 + fc->blocked = 0;
3838 +-
3839 +- /* Wake up next waiter, if any */
3840 +- if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
3841 + wake_up(&fc->blocked_waitq);
3842 ++ } else if (!fc->blocked) {
3843 ++ /*
3844 ++ * Wake up next waiter, if any. It's okay to use
3845 ++ * waitqueue_active(), as we've already synced up
3846 ++ * fc->blocked with waiters with the wake_up() call
3847 ++ * above.
3848 ++ */
3849 ++ if (waitqueue_active(&fc->blocked_waitq))
3850 ++ wake_up(&fc->blocked_waitq);
3851 ++ }
3852 +
3853 + if (fc->num_background == fc->congestion_threshold && fc->sb) {
3854 + clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
3855 +@@ -1311,12 +1318,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
3856 + goto out_end;
3857 + }
3858 + list_move_tail(&req->list, &fpq->processing);
3859 +- spin_unlock(&fpq->lock);
3860 ++ __fuse_get_request(req);
3861 + set_bit(FR_SENT, &req->flags);
3862 ++ spin_unlock(&fpq->lock);
3863 + /* matches barrier in request_wait_answer() */
3864 + smp_mb__after_atomic();
3865 + if (test_bit(FR_INTERRUPTED, &req->flags))
3866 + queue_interrupt(fiq, req);
3867 ++ fuse_put_request(fc, req);
3868 +
3869 + return reqsize;
3870 +
3871 +@@ -1715,8 +1724,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
3872 + req->in.args[1].size = total_len;
3873 +
3874 + err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
3875 +- if (err)
3876 ++ if (err) {
3877 + fuse_retrieve_end(fc, req);
3878 ++ fuse_put_request(fc, req);
3879 ++ }
3880 +
3881 + return err;
3882 + }
3883 +@@ -1875,16 +1886,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
3884 +
3885 + /* Is it an interrupt reply? */
3886 + if (req->intr_unique == oh.unique) {
3887 ++ __fuse_get_request(req);
3888 + spin_unlock(&fpq->lock);
3889 +
3890 + err = -EINVAL;
3891 +- if (nbytes != sizeof(struct fuse_out_header))
3892 ++ if (nbytes != sizeof(struct fuse_out_header)) {
3893 ++ fuse_put_request(fc, req);
3894 + goto err_finish;
3895 ++ }
3896 +
3897 + if (oh.error == -ENOSYS)
3898 + fc->no_interrupt = 1;
3899 + else if (oh.error == -EAGAIN)
3900 + queue_interrupt(&fc->iq, req);
3901 ++ fuse_put_request(fc, req);
3902 +
3903 + fuse_copy_finish(cs);
3904 + return nbytes;
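
The fuse/dev.c changes are all one idiom: take a private reference on the
request before dropping fpq->lock, because the instant the lock is released
the request can be answered and freed by another thread. Condensed from
fuse_dev_do_read() above:

        __fuse_get_request(req);                   /* pin under fpq->lock */
        set_bit(FR_SENT, &req->flags);             /* still serialized */
        spin_unlock(&fpq->lock);
        smp_mb__after_atomic();                    /* pairs with request_wait_answer() */
        if (test_bit(FR_INTERRUPTED, &req->flags))
                queue_interrupt(fiq, req);         /* safe: we hold a ref */
        fuse_put_request(fc, req);                 /* drop the pin */

The fuse/file.c hunk just below is the same disease in miniature: io->blocking
must be read into a local before fuse_aio_complete() runs, since completing
the io may free it.
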
3905 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3906 +index aa23749a943b..2162771ce7d5 100644
3907 +--- a/fs/fuse/file.c
3908 ++++ b/fs/fuse/file.c
3909 +@@ -2912,10 +2912,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3910 + }
3911 +
3912 + if (io->async) {
3913 ++ bool blocking = io->blocking;
3914 ++
3915 + fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
3916 +
3917 + /* we have a non-extending, async request, so return */
3918 +- if (!io->blocking)
3919 ++ if (!blocking)
3920 + return -EIOCBQUEUED;
3921 +
3922 + wait_for_completion(&wait);
3923 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
3924 +index fd5bea55fd60..9c418249734d 100644
3925 +--- a/fs/gfs2/bmap.c
3926 ++++ b/fs/gfs2/bmap.c
3927 +@@ -1652,10 +1652,16 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
3928 + if (ret < 0)
3929 + goto out;
3930 +
3931 +- /* issue read-ahead on metadata */
3932 +- if (mp.mp_aheight > 1) {
3933 +- for (; ret > 1; ret--) {
3934 +- metapointer_range(&mp, mp.mp_aheight - ret,
3935 ++ /* On the first pass, issue read-ahead on metadata. */
3936 ++ if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
3937 ++ unsigned int height = mp.mp_aheight - 1;
3938 ++
3939 ++ /* No read-ahead for data blocks. */
3940 ++ if (mp.mp_aheight - 1 == strip_h)
3941 ++ height--;
3942 ++
3943 ++ for (; height >= mp.mp_aheight - ret; height--) {
3944 ++ metapointer_range(&mp, height,
3945 + start_list, start_aligned,
3946 + end_list, end_aligned,
3947 + &start, &end);
3948 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
3949 +index b86249ebde11..1d62526738c4 100644
3950 +--- a/fs/gfs2/rgrp.c
3951 ++++ b/fs/gfs2/rgrp.c
3952 +@@ -714,6 +714,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
3953 +
3954 + if (gl) {
3955 + glock_clear_object(gl, rgd);
3956 ++ gfs2_rgrp_brelse(rgd);
3957 + gfs2_glock_put(gl);
3958 + }
3959 +
3960 +@@ -1136,7 +1137,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
3961 + * @rgd: the struct gfs2_rgrpd describing the RG to read in
3962 + *
3963 + * Read in all of a Resource Group's header and bitmap blocks.
3964 +- * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
3965 ++ * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
3966 + *
3967 + * Returns: errno
3968 + */
3969 +diff --git a/fs/namespace.c b/fs/namespace.c
3970 +index bd2f4c68506a..e65254003cad 100644
3971 +--- a/fs/namespace.c
3972 ++++ b/fs/namespace.c
3973 +@@ -780,9 +780,6 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
3974 +
3975 + hlist_for_each_entry(mp, chain, m_hash) {
3976 + if (mp->m_dentry == dentry) {
3977 +- /* might be worth a WARN_ON() */
3978 +- if (d_unlinked(dentry))
3979 +- return ERR_PTR(-ENOENT);
3980 + mp->m_count++;
3981 + return mp;
3982 + }
3983 +@@ -796,6 +793,9 @@ static struct mountpoint *get_mountpoint(struct dentry *dentry)
3984 + int ret;
3985 +
3986 + if (d_mountpoint(dentry)) {
3987 ++ /* might be worth a WARN_ON() */
3988 ++ if (d_unlinked(dentry))
3989 ++ return ERR_PTR(-ENOENT);
3990 + mountpoint:
3991 + read_seqlock_excl(&mount_lock);
3992 + mp = lookup_mountpoint(dentry);
3993 +@@ -1625,8 +1625,13 @@ static int do_umount(struct mount *mnt, int flags)
3994 +
3995 + namespace_lock();
3996 + lock_mount_hash();
3997 +- event++;
3998 +
3999 ++ /* Recheck MNT_LOCKED with the locks held */
4000 ++ retval = -EINVAL;
4001 ++ if (mnt->mnt.mnt_flags & MNT_LOCKED)
4002 ++ goto out;
4003 ++
4004 ++ event++;
4005 + if (flags & MNT_DETACH) {
4006 + if (!list_empty(&mnt->mnt_list))
4007 + umount_tree(mnt, UMOUNT_PROPAGATE);
4008 +@@ -1640,6 +1645,7 @@ static int do_umount(struct mount *mnt, int flags)
4009 + retval = 0;
4010 + }
4011 + }
4012 ++out:
4013 + unlock_mount_hash();
4014 + namespace_unlock();
4015 + return retval;
4016 +@@ -1730,7 +1736,7 @@ int ksys_umount(char __user *name, int flags)
4017 + goto dput_and_out;
4018 + if (!check_mnt(mnt))
4019 + goto dput_and_out;
4020 +- if (mnt->mnt.mnt_flags & MNT_LOCKED)
4021 ++ if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
4022 + goto dput_and_out;
4023 + retval = -EPERM;
4024 + if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
4025 +@@ -1813,8 +1819,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
4026 + for (s = r; s; s = next_mnt(s, r)) {
4027 + if (!(flag & CL_COPY_UNBINDABLE) &&
4028 + IS_MNT_UNBINDABLE(s)) {
4029 +- s = skip_mnt_tree(s);
4030 +- continue;
4031 ++ if (s->mnt.mnt_flags & MNT_LOCKED) {
4032 ++ /* Both unbindable and locked. */
4033 ++ q = ERR_PTR(-EPERM);
4034 ++ goto out;
4035 ++ } else {
4036 ++ s = skip_mnt_tree(s);
4037 ++ continue;
4038 ++ }
4039 + }
4040 + if (!(flag & CL_COPY_MNT_NS_FILE) &&
4041 + is_mnt_ns_file(s->mnt.mnt_root)) {
4042 +@@ -1867,7 +1879,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
4043 + {
4044 + namespace_lock();
4045 + lock_mount_hash();
4046 +- umount_tree(real_mount(mnt), UMOUNT_SYNC);
4047 ++ umount_tree(real_mount(mnt), 0);
4048 + unlock_mount_hash();
4049 + namespace_unlock();
4050 + }
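
The MNT_LOCKED handling in do_umount() is the classic optimistic double
check: test cheaply without the heavy locks to fail fast, then re-test once
namespace_lock()/lock_mount_hash() are held, since the flag can be set
concurrently between the two points. In outline:

        if (mnt->mnt.mnt_flags & MNT_LOCKED)    /* racy fast path, no locks */
                return -EINVAL;

        namespace_lock();
        lock_mount_hash();
        if (mnt->mnt.mnt_flags & MNT_LOCKED) {  /* authoritative re-check */
                retval = -EINVAL;
                goto out;
        }
        /* ...proceed with the umount under the locks */
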
4051 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
4052 +index 3c18c12a5c4c..b8615a4f5316 100644
4053 +--- a/fs/nfs/nfs4state.c
4054 ++++ b/fs/nfs/nfs4state.c
4055 +@@ -2553,11 +2553,12 @@ static void nfs4_state_manager(struct nfs_client *clp)
4056 + nfs4_clear_state_manager_bit(clp);
4057 + /* Did we race with an attempt to give us more work? */
4058 + if (clp->cl_state == 0)
4059 +- break;
4060 ++ return;
4061 + if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
4062 +- break;
4063 ++ return;
4064 + } while (refcount_read(&clp->cl_count) > 1);
4065 +- return;
4066 ++ goto out_drain;
4067 ++
4068 + out_error:
4069 + if (strlen(section))
4070 + section_sep = ": ";
4071 +@@ -2565,6 +2566,7 @@ out_error:
4072 + " with error %d\n", section_sep, section,
4073 + clp->cl_hostname, -status);
4074 + ssleep(1);
4075 ++out_drain:
4076 + nfs4_end_drain_session(clp);
4077 + nfs4_clear_state_manager_bit(clp);
4078 + }
4079 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
4080 +index 0dded931f119..7c78d10a58a0 100644
4081 +--- a/fs/nfsd/nfs4proc.c
4082 ++++ b/fs/nfsd/nfs4proc.c
4083 +@@ -1048,6 +1048,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4084 + {
4085 + __be32 status;
4086 +
4087 ++ if (!cstate->save_fh.fh_dentry)
4088 ++ return nfserr_nofilehandle;
4089 ++
4090 + status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
4091 + src_stateid, RD_STATE, src, NULL);
4092 + if (status) {
4093 +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
4094 +index 302cd7caa4a7..7578bd507c70 100644
4095 +--- a/fs/ocfs2/aops.c
4096 ++++ b/fs/ocfs2/aops.c
4097 +@@ -2412,8 +2412,16 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
4098 + /* this io's submitter should not have unlocked this before we could */
4099 + BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
4100 +
4101 +- if (bytes > 0 && private)
4102 +- ret = ocfs2_dio_end_io_write(inode, private, offset, bytes);
4103 ++ if (bytes <= 0)
4104 ++ mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
4105 ++ (long long)bytes);
4106 ++ if (private) {
4107 ++ if (bytes > 0)
4108 ++ ret = ocfs2_dio_end_io_write(inode, private, offset,
4109 ++ bytes);
4110 ++ else
4111 ++ ocfs2_dio_free_write_ctx(inode, private);
4112 ++ }
4113 +
4114 + ocfs2_iocb_clear_rw_locked(iocb);
4115 +
4116 +diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
4117 +index 308ea0eb35fd..a396096a5099 100644
4118 +--- a/fs/ocfs2/cluster/masklog.h
4119 ++++ b/fs/ocfs2/cluster/masklog.h
4120 +@@ -178,6 +178,15 @@ do { \
4121 + ##__VA_ARGS__); \
4122 + } while (0)
4123 +
4124 ++#define mlog_ratelimited(mask, fmt, ...) \
4125 ++do { \
4126 ++ static DEFINE_RATELIMIT_STATE(_rs, \
4127 ++ DEFAULT_RATELIMIT_INTERVAL, \
4128 ++ DEFAULT_RATELIMIT_BURST); \
4129 ++ if (__ratelimit(&_rs)) \
4130 ++ mlog(mask, fmt, ##__VA_ARGS__); \
4131 ++} while (0)
4132 ++
4133 + #define mlog_errno(st) ({ \
4134 + int _st = (st); \
4135 + if (_st != -ERESTARTSYS && _st != -EINTR && \
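
Usage of the new macro, as the aops.c hunk above applies it: each expansion
declares its own static ratelimit state, so distinct call sites throttle
independently at the defaults (at most DEFAULT_RATELIMIT_BURST = 10 messages
per DEFAULT_RATELIMIT_INTERVAL = 5 seconds):

        mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
                         (long long)bytes);
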
4136 +diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
4137 +index b048d4fa3959..c121abbdfc7d 100644
4138 +--- a/fs/ocfs2/dir.c
4139 ++++ b/fs/ocfs2/dir.c
4140 +@@ -1897,8 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
4141 + /* On error, skip the f_pos to the
4142 + next block. */
4143 + ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
4144 +- brelse(bh);
4145 +- continue;
4146 ++ break;
4147 + }
4148 + if (le64_to_cpu(de->inode)) {
4149 + unsigned char d_type = DT_UNKNOWN;
4150 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
4151 +index da9b3ccfde23..f1dffd70a1c0 100644
4152 +--- a/fs/overlayfs/dir.c
4153 ++++ b/fs/overlayfs/dir.c
4154 +@@ -461,6 +461,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
4155 + if (IS_ERR(upper))
4156 + goto out_unlock;
4157 +
4158 ++ err = -ESTALE;
4159 ++ if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
4160 ++ goto out_dput;
4161 ++
4162 + newdentry = ovl_create_temp(workdir, cattr);
4163 + err = PTR_ERR(newdentry);
4164 + if (IS_ERR(newdentry))
4165 +@@ -661,6 +665,11 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
4166 + if (err)
4167 + goto out_drop_write;
4168 +
4169 ++ err = ovl_copy_up(new->d_parent);
4170 ++ if (err)
4171 ++ goto out_drop_write;
4172 ++
4173 ++
4174 + err = ovl_nlink_start(old, &locked);
4175 + if (err)
4176 + goto out_drop_write;
4177 +diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
4178 +index c2229f02389b..1531f81037b9 100644
4179 +--- a/fs/overlayfs/namei.c
4180 ++++ b/fs/overlayfs/namei.c
4181 +@@ -441,8 +441,10 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
4182 +
4183 + fh = ovl_encode_real_fh(real, is_upper);
4184 + err = PTR_ERR(fh);
4185 +- if (IS_ERR(fh))
4186 ++ if (IS_ERR(fh)) {
4187 ++ fh = NULL;
4188 + goto fail;
4189 ++ }
4190 +
4191 + err = ovl_verify_fh(dentry, name, fh);
4192 + if (set && err == -ENODATA)
4193 +diff --git a/fs/udf/super.c b/fs/udf/super.c
4194 +index 74b13347cd94..e557d1317d0e 100644
4195 +--- a/fs/udf/super.c
4196 ++++ b/fs/udf/super.c
4197 +@@ -613,14 +613,11 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
4198 + struct udf_options uopt;
4199 + struct udf_sb_info *sbi = UDF_SB(sb);
4200 + int error = 0;
4201 +- struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
4202 ++
4203 ++ if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
4204 ++ return -EACCES;
4205 +
4206 + sync_filesystem(sb);
4207 +- if (lvidiu) {
4208 +- int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
4209 +- if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
4210 +- return -EACCES;
4211 +- }
4212 +
4213 + uopt.flags = sbi->s_flags;
4214 + uopt.uid = sbi->s_uid;
4215 +@@ -1317,6 +1314,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
4216 + ret = -EACCES;
4217 + goto out_bh;
4218 + }
4219 ++ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
4220 + ret = udf_load_vat(sb, i, type1_idx);
4221 + if (ret < 0)
4222 + goto out_bh;
4223 +@@ -2215,10 +2213,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
4224 + UDF_MAX_READ_VERSION);
4225 + ret = -EINVAL;
4226 + goto error_out;
4227 +- } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
4228 +- !sb_rdonly(sb)) {
4229 +- ret = -EACCES;
4230 +- goto error_out;
4231 ++ } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
4232 ++ if (!sb_rdonly(sb)) {
4233 ++ ret = -EACCES;
4234 ++ goto error_out;
4235 ++ }
4236 ++ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
4237 + }
4238 +
4239 + sbi->s_udfrev = minUDFWriteRev;
4240 +@@ -2236,10 +2236,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
4241 + }
4242 +
4243 + if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
4244 +- UDF_PART_FLAG_READ_ONLY &&
4245 +- !sb_rdonly(sb)) {
4246 +- ret = -EACCES;
4247 +- goto error_out;
4248 ++ UDF_PART_FLAG_READ_ONLY) {
4249 ++ if (!sb_rdonly(sb)) {
4250 ++ ret = -EACCES;
4251 ++ goto error_out;
4252 ++ }
4253 ++ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
4254 + }
4255 +
4256 + if (udf_find_fileset(sb, &fileset, &rootdir)) {
4257 +diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
4258 +index 9dd3e1b9619e..f8e0d200271d 100644
4259 +--- a/fs/udf/udf_sb.h
4260 ++++ b/fs/udf/udf_sb.h
4261 +@@ -30,6 +30,8 @@
4262 + #define UDF_FLAG_LASTBLOCK_SET 16
4263 + #define UDF_FLAG_BLOCKSIZE_SET 17
4264 + #define UDF_FLAG_INCONSISTENT 18
4265 ++#define UDF_FLAG_RW_INCOMPAT 19 /* Set when we find RW incompatible
4266 ++ * feature */
4267 +
4268 + #define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001
4269 + #define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
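
The UDF rework replaces scattered "is this writable?" probes with one sticky
bit: wherever mount detects a condition the driver cannot write back (a
too-new minUDFWriteRev, a read-only partition, a virtual-partition VAT), it
either refuses a read-write mount outright or sets UDF_FLAG_RW_INCOMPAT, and
remount then only needs the flag:

        /* refuse any ro -> rw remount on a marked superblock */
        if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
                return -EACCES;
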
4270 +diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
4271 +index 49c93b9308d7..68bb09c29ce8 100644
4272 +--- a/include/linux/ceph/libceph.h
4273 ++++ b/include/linux/ceph/libceph.h
4274 +@@ -81,7 +81,13 @@ struct ceph_options {
4275 +
4276 + #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
4277 + #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
4278 +-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
4279 ++
4280 ++/*
4281 ++ * Handle the largest possible rbd object in one message.
4282 ++ * There is no limit on the size of cephfs objects, but it has to obey
4283 ++ * rsize and wsize mount options anyway.
4284 ++ */
4285 ++#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
4286 +
4287 + #define CEPH_AUTH_NAME_DEFAULT "guest"
4288 +
4289 +diff --git a/include/linux/i8253.h b/include/linux/i8253.h
4290 +index e6bb36a97519..8336b2f6f834 100644
4291 +--- a/include/linux/i8253.h
4292 ++++ b/include/linux/i8253.h
4293 +@@ -21,6 +21,7 @@
4294 + #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
4295 +
4296 + extern raw_spinlock_t i8253_lock;
4297 ++extern bool i8253_clear_counter_on_shutdown;
4298 + extern struct clock_event_device i8253_clockevent;
4299 + extern void clockevent_i8253_init(bool oneshot);
4300 +
4301 +diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
4302 +index abe975c87b90..78b86dea2f29 100644
4303 +--- a/include/linux/mtd/nand.h
4304 ++++ b/include/linux/mtd/nand.h
4305 +@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
4306 + */
4307 + static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
4308 + {
4309 +- return (u64)nand->memorg.luns_per_target *
4310 +- nand->memorg.eraseblocks_per_lun *
4311 +- nand->memorg.pages_per_eraseblock;
4312 ++ return nand->memorg.ntargets * nand->memorg.luns_per_target *
4313 ++ nand->memorg.eraseblocks_per_lun;
4314 + }
4315 +
4316 + /**
4317 +diff --git a/include/linux/nmi.h b/include/linux/nmi.h
4318 +index b8d868d23e79..50d143995338 100644
4319 +--- a/include/linux/nmi.h
4320 ++++ b/include/linux/nmi.h
4321 +@@ -113,6 +113,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
4322 + void watchdog_nmi_stop(void);
4323 + void watchdog_nmi_start(void);
4324 + int watchdog_nmi_probe(void);
4325 ++int watchdog_nmi_enable(unsigned int cpu);
4326 ++void watchdog_nmi_disable(unsigned int cpu);
4327 +
4328 + /**
4329 + * touch_nmi_watchdog - restart NMI watchdog timeout.
4330 +diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
4331 +index fd18c974a619..f6e798d42069 100644
4332 +--- a/include/xen/xen-ops.h
4333 ++++ b/include/xen/xen-ops.h
4334 +@@ -41,7 +41,7 @@ int xen_setup_shutdown_event(void);
4335 +
4336 + extern unsigned long *xen_contiguous_bitmap;
4337 +
4338 +-#ifdef CONFIG_XEN_PV
4339 ++#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4340 + int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
4341 + unsigned int address_bits,
4342 + dma_addr_t *dma_handle);
4343 +diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
4344 +index 6ad4a9fcbd6f..7921ae4fca8d 100644
4345 +--- a/kernel/debug/kdb/kdb_bt.c
4346 ++++ b/kernel/debug/kdb/kdb_bt.c
4347 +@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
4348 + kdb_printf("no process for cpu %ld\n", cpu);
4349 + return 0;
4350 + }
4351 +- sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
4352 ++ sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
4353 + kdb_parse(buf);
4354 + return 0;
4355 + }
4356 + kdb_printf("btc: cpu status: ");
4357 + kdb_parse("cpu\n");
4358 + for_each_online_cpu(cpu) {
4359 +- sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
4360 ++ sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
4361 + kdb_parse(buf);
4362 + touch_nmi_watchdog();
4363 + }
4364 +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
4365 +index 2ddfce8f1e8f..f338d23b112b 100644
4366 +--- a/kernel/debug/kdb/kdb_main.c
4367 ++++ b/kernel/debug/kdb/kdb_main.c
4368 +@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
4369 + if (reason == KDB_REASON_DEBUG) {
4370 + /* special case below */
4371 + } else {
4372 +- kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
4373 ++ kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
4374 + kdb_current, kdb_current ? kdb_current->pid : 0);
4375 + #if defined(CONFIG_SMP)
4376 + kdb_printf("on processor %d ", raw_smp_processor_id());
4377 +@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
4378 + */
4379 + switch (db_result) {
4380 + case KDB_DB_BPT:
4381 +- kdb_printf("\nEntering kdb (0x%p, pid %d) ",
4382 ++ kdb_printf("\nEntering kdb (0x%px, pid %d) ",
4383 + kdb_current, kdb_current->pid);
4384 + #if defined(CONFIG_SMP)
4385 + kdb_printf("on processor %d ", raw_smp_processor_id());
4386 +@@ -2048,7 +2048,7 @@ static int kdb_lsmod(int argc, const char **argv)
4387 + if (mod->state == MODULE_STATE_UNFORMED)
4388 + continue;
4389 +
4390 +- kdb_printf("%-20s%8u 0x%p ", mod->name,
4391 ++ kdb_printf("%-20s%8u 0x%px ", mod->name,
4392 + mod->core_layout.size, (void *)mod);
4393 + #ifdef CONFIG_MODULE_UNLOAD
4394 + kdb_printf("%4d ", module_refcount(mod));
4395 +@@ -2059,7 +2059,7 @@ static int kdb_lsmod(int argc, const char **argv)
4396 + kdb_printf(" (Loading)");
4397 + else
4398 + kdb_printf(" (Live)");
4399 +- kdb_printf(" 0x%p", mod->core_layout.base);
4400 ++ kdb_printf(" 0x%px", mod->core_layout.base);
4401 +
4402 + #ifdef CONFIG_MODULE_UNLOAD
4403 + {
4404 +@@ -2341,7 +2341,7 @@ void kdb_ps1(const struct task_struct *p)
4405 + return;
4406 +
4407 + cpu = kdb_process_cpu(p);
4408 +- kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
4409 ++ kdb_printf("0x%px %8d %8d %d %4d %c 0x%px %c%s\n",
4410 + (void *)p, p->pid, p->parent->pid,
4411 + kdb_task_has_cpu(p), kdb_process_cpu(p),
4412 + kdb_task_state_char(p),
4413 +@@ -2354,7 +2354,7 @@ void kdb_ps1(const struct task_struct *p)
4414 + } else {
4415 + if (KDB_TSK(cpu) != p)
4416 + kdb_printf(" Error: does not match running "
4417 +- "process table (0x%p)\n", KDB_TSK(cpu));
4418 ++ "process table (0x%px)\n", KDB_TSK(cpu));
4419 + }
4420 + }
4421 + }
4422 +@@ -2692,7 +2692,7 @@ int kdb_register_flags(char *cmd,
4423 + for_each_kdbcmd(kp, i) {
4424 + if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
4425 + kdb_printf("Duplicate kdb command registered: "
4426 +- "%s, func %p help %s\n", cmd, func, help);
4427 ++ "%s, func %px help %s\n", cmd, func, help);
4428 + return 1;
4429 + }
4430 + }
4431 +diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
4432 +index 990b3cc526c8..987eb73284d2 100644
4433 +--- a/kernel/debug/kdb/kdb_support.c
4434 ++++ b/kernel/debug/kdb/kdb_support.c
4435 +@@ -40,7 +40,7 @@
4436 + int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
4437 + {
4438 + if (KDB_DEBUG(AR))
4439 +- kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
4440 ++ kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
4441 + symtab);
4442 + memset(symtab, 0, sizeof(*symtab));
4443 + symtab->sym_start = kallsyms_lookup_name(symname);
4444 +@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
4445 + char *knt1 = NULL;
4446 +
4447 + if (KDB_DEBUG(AR))
4448 +- kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
4449 ++ kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
4450 + memset(symtab, 0, sizeof(*symtab));
4451 +
4452 + if (addr < 4096)
4453 +@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
4454 + symtab->mod_name = "kernel";
4455 + if (KDB_DEBUG(AR))
4456 + kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
4457 +- "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
4458 ++ "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
4459 + symtab->sym_start, symtab->mod_name, symtab->sym_name,
4460 + symtab->sym_name);
4461 +
4462 +@@ -887,13 +887,13 @@ void debug_kusage(void)
4463 + __func__, dah_first);
4464 + if (dah_first) {
4465 + h_used = (struct debug_alloc_header *)debug_alloc_pool;
4466 +- kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
4467 ++ kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
4468 + h_used->size);
4469 + }
4470 + do {
4471 + h_used = (struct debug_alloc_header *)
4472 + ((char *)h_free + dah_overhead + h_free->size);
4473 +- kdb_printf("%s: h_used %p size %d caller %p\n",
4474 ++ kdb_printf("%s: h_used %px size %d caller %px\n",
4475 + __func__, h_used, h_used->size, h_used->caller);
4476 + h_free = (struct debug_alloc_header *)
4477 + (debug_alloc_pool + h_free->next);
4478 +@@ -902,7 +902,7 @@ void debug_kusage(void)
4479 + ((char *)h_free + dah_overhead + h_free->size);
4480 + if ((char *)h_used - debug_alloc_pool !=
4481 + sizeof(debug_alloc_pool_aligned))
4482 +- kdb_printf("%s: h_used %p size %d caller %p\n",
4483 ++ kdb_printf("%s: h_used %px size %d caller %px\n",
4484 + __func__, h_used, h_used->size, h_used->caller);
4485 + out:
4486 + spin_unlock(&dap_lock);
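
All of the kdb conversions above are the same fix: since v4.15 plain %p hashes pointer values before printing to avoid leaking kernel addresses, while %px prints the raw value. kdb needs raw values because its output is fed back into commands such as btt. A userspace sketch of that round trip (standard printf specifiers stand in for the kernel's %p/%px):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int object = 42;
        char buf[64];
        uintptr_t parsed = 0;

        /* kdb builds a command string containing the address... */
        snprintf(buf, sizeof(buf), "btt 0x%" PRIxPTR, (uintptr_t)&object);
        /* ...and kdb_parse() reads it back; this only works if the
         * printed value is the real pointer (%px), not a hash (%p) */
        sscanf(buf, "btt 0x%" SCNxPTR, &parsed);
        printf("round-trip ok: %d\n", *(int *)parsed == 42);
        return 0;
}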
4487 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4488 +index 6b71860f3998..4a8f3780aae5 100644
4489 +--- a/kernel/trace/trace_kprobe.c
4490 ++++ b/kernel/trace/trace_kprobe.c
4491 +@@ -71,9 +71,23 @@ static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
4492 + return strncmp(mod->name, name, len) == 0 && name[len] == ':';
4493 + }
4494 +
4495 +-static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
4496 ++static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
4497 + {
4498 +- return !!strchr(trace_kprobe_symbol(tk), ':');
4499 ++ char *p;
4500 ++ bool ret;
4501 ++
4502 ++ if (!tk->symbol)
4503 ++ return false;
4504 ++ p = strchr(tk->symbol, ':');
4505 ++ if (!p)
4506 ++ return true;
4507 ++ *p = '\0';
4508 ++ mutex_lock(&module_mutex);
4509 ++ ret = !!find_module(tk->symbol);
4510 ++ mutex_unlock(&module_mutex);
4511 ++ *p = ':';
4512 ++
4513 ++ return ret;
4514 + }
4515 +
4516 + static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
4517 +@@ -520,19 +534,13 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
4518 + else
4519 + ret = register_kprobe(&tk->rp.kp);
4520 +
4521 +- if (ret == 0)
4522 ++ if (ret == 0) {
4523 + tk->tp.flags |= TP_FLAG_REGISTERED;
4524 +- else {
4525 +- if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
4526 +- pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
4527 +- ret = 0;
4528 +- } else if (ret == -EILSEQ) {
4529 +- pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
4530 +- tk->rp.kp.addr);
4531 +- ret = -EINVAL;
4532 +- }
4533 ++ } else if (ret == -EILSEQ) {
4534 ++ pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
4535 ++ tk->rp.kp.addr);
4536 ++ ret = -EINVAL;
4537 + }
4538 +-
4539 + return ret;
4540 + }
4541 +
4542 +@@ -595,6 +603,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
4543 +
4544 + /* Register k*probe */
4545 + ret = __register_trace_kprobe(tk);
4546 ++ if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
4547 ++ pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
4548 ++ ret = 0;
4549 ++ }
4550 ++
4551 + if (ret < 0)
4552 + unregister_kprobe_event(tk);
4553 + else
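
The rework above stops treating -ENOENT as retriable merely because the symbol contains a ':'; it now checks whether the named module is genuinely absent, and only then defers the probe. The helper temporarily truncates the symbol at the colon, looks the module name up, and restores the byte. A userspace sketch of that split/lookup/restore pattern with a stubbed-out find_module():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* stand-in for find_module(); any lookup keyed by name works here */
static bool find_module_stub(const char *name)
{
        return strcmp(name, "loaded_mod") == 0;
}

/* mirrors trace_kprobe_module_exist(): "mod:symbol" -> does mod exist? */
static bool module_exist(char *symbol)
{
        char *p = strchr(symbol, ':');
        bool ret;

        if (!p)
                return true;  /* no module prefix: nothing to wait for */
        *p = '\0';            /* temporarily terminate at the colon */
        ret = find_module_stub(symbol);
        *p = ':';             /* restore the original string */
        return ret;
}

int main(void)
{
        char sym1[] = "loaded_mod:some_func";
        char sym2[] = "absent_mod:some_func";

        printf("%d %d\n", module_exist(sym1), module_exist(sym2)); /* 1 0 */
        return 0;
}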
4554 +diff --git a/lib/ubsan.c b/lib/ubsan.c
4555 +index 59fee96c29a0..e4162f59a81c 100644
4556 +--- a/lib/ubsan.c
4557 ++++ b/lib/ubsan.c
4558 +@@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
4559 + EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
4560 +
4561 +
4562 +-void __noreturn
4563 +-__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
4564 ++void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
4565 + {
4566 + unsigned long flags;
4567 +
4568 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4569 +index 5b38fbef9441..bf15bd78846b 100644
4570 +--- a/mm/hugetlb.c
4571 ++++ b/mm/hugetlb.c
4572 +@@ -3240,7 +3240,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
4573 + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4574 + struct vm_area_struct *vma)
4575 + {
4576 +- pte_t *src_pte, *dst_pte, entry;
4577 ++ pte_t *src_pte, *dst_pte, entry, dst_entry;
4578 + struct page *ptepage;
4579 + unsigned long addr;
4580 + int cow;
4581 +@@ -3268,15 +3268,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4582 + break;
4583 + }
4584 +
4585 +- /* If the pagetables are shared don't copy or take references */
4586 +- if (dst_pte == src_pte)
4587 ++ /*
4588 ++ * If the pagetables are shared don't copy or take references.
4589 ++ * dst_pte == src_pte is the common case of src/dest sharing.
4590 ++ *
4591 ++ * However, src could have unshared its tables while dst
4592 ++ * shares with another vma: a non-none dst_pte implies sharing.
4593 ++ * Check here before taking page table lock, and once again
4594 ++ * after taking the lock below.
4595 ++ */
4596 ++ dst_entry = huge_ptep_get(dst_pte);
4597 ++ if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
4598 + continue;
4599 +
4600 + dst_ptl = huge_pte_lock(h, dst, dst_pte);
4601 + src_ptl = huge_pte_lockptr(h, src, src_pte);
4602 + spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4603 + entry = huge_ptep_get(src_pte);
4604 +- if (huge_pte_none(entry)) { /* skip none entry */
4605 ++ dst_entry = huge_ptep_get(dst_pte);
4606 ++ if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
4607 ++ /*
4608 ++ * Skip if src entry none. Also, skip in the
4609 ++ * unlikely case dst entry !none as this implies
4610 ++ * sharing with another vma.
4611 ++ */
4612 + ;
4613 + } else if (unlikely(is_hugetlb_entry_migration(entry) ||
4614 + is_hugetlb_entry_hwpoisoned(entry))) {
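
The new checks skip the copy whenever the destination PTE is already populated, because a populated dst entry means the destination shares its page table with another mapping. Note the shape of the fix: one cheap check before taking the page table locks, and an authoritative re-check after. A minimal sketch of that check/lock/re-check idiom (a plain long plays the PTE, 0 plays huge_pte_none()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

static bool populated(const long *pte) { return *pte != 0; }

static void copy_entry(long *src_pte, long *dst_pte)
{
        /* unlocked pre-check: identical tables or an already-populated
         * destination both mean "shared, don't copy" */
        if (src_pte == dst_pte || populated(dst_pte))
                return;

        pthread_mutex_lock(&ptl);
        /* authoritative re-check under the lock: another thread may
         * have populated the destination in the meantime */
        if (!populated(src_pte) || populated(dst_pte))
                goto unlock;
        *dst_pte = *src_pte;
unlock:
        pthread_mutex_unlock(&ptl);
}

int main(void)
{
        long src = 7, dst = 0;

        copy_entry(&src, &dst);
        printf("dst=%ld\n", dst);  /* 7 */
        return 0;
}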
4615 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4616 +index 785252397e35..03fd2d08c361 100644
4617 +--- a/mm/memory_hotplug.c
4618 ++++ b/mm/memory_hotplug.c
4619 +@@ -587,6 +587,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
4620 + for (i = 0; i < sections_to_remove; i++) {
4621 + unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
4622 +
4623 ++ cond_resched();
4624 + ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
4625 + altmap);
4626 + map_offset = 0;
4627 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4628 +index 01f1a14facc4..73fd00d2df8c 100644
4629 +--- a/mm/mempolicy.c
4630 ++++ b/mm/mempolicy.c
4631 +@@ -2046,8 +2046,36 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
4632 + nmask = policy_nodemask(gfp, pol);
4633 + if (!nmask || node_isset(hpage_node, *nmask)) {
4634 + mpol_cond_put(pol);
4635 +- page = __alloc_pages_node(hpage_node,
4636 +- gfp | __GFP_THISNODE, order);
4637 ++ /*
4638 ++ * We cannot invoke reclaim if __GFP_THISNODE
4639 ++ * is set. Invoking reclaim with
4640 ++ * __GFP_THISNODE set, would cause THP
4641 ++ * allocations to trigger heavy swapping
4642 ++ * despite there may be tons of free memory
4643 ++ * (including potentially plenty of THP
4644 ++ * already available in the buddy) on all the
4645 ++ * other NUMA nodes.
4646 ++ *
4647 ++ * At most we could invoke compaction when
4648 ++ * __GFP_THISNODE is set (but we would need to
4649 ++ * refrain from invoking reclaim even if
4650 ++ * compaction returned COMPACT_SKIPPED because
4651 ++ * there wasn't enough memory for compaction
4652 ++ * to succeed). For now just avoid
4653 ++ * __GFP_THISNODE instead of limiting the
4654 ++ * allocation path to a strict and single
4655 ++ * compaction invocation.
4656 ++ *
4657 ++ * Presumably, if direct reclaim was enabled by
4658 ++ * the caller, the app prefers THP regardless
4659 ++ * of the node it comes from, so this would be
4660 ++ * more desirable behavior than only
4661 ++ * providing THP originated from the local
4662 ++ * node in such case.
4663 ++ */
4664 ++ if (!(gfp & __GFP_DIRECT_RECLAIM))
4665 ++ gfp |= __GFP_THISNODE;
4666 ++ page = __alloc_pages_node(hpage_node, gfp, order);
4667 + goto out;
4668 + }
4669 + }
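
Stripped of the commentary, the fix is a single flag decision: restrict the THP allocation to the preferred node (__GFP_THISNODE) only when the caller did not allow direct reclaim, so staying node-local can never be bought with heavy swapping. A sketch of just that decision, with illustrative flag bits rather than the real gfp_t values:

#include <stdio.h>

/* illustrative flag bits; the real gfp_t values differ */
#define GFP_DIRECT_RECLAIM (1u << 0)
#define GFP_THISNODE       (1u << 1)

static unsigned int thp_gfp(unsigned int gfp)
{
        /* node-local only when reclaim is forbidden: a miss then
         * falls back to other nodes instead of swapping */
        if (!(gfp & GFP_DIRECT_RECLAIM))
                gfp |= GFP_THISNODE;
        return gfp;
}

int main(void)
{
        printf("light: %#x\n", thp_gfp(0));                  /* THISNODE set */
        printf("heavy: %#x\n", thp_gfp(GFP_DIRECT_RECLAIM)); /* left alone  */
        return 0;
}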
4670 +diff --git a/mm/swapfile.c b/mm/swapfile.c
4671 +index 18185ae4f223..f8b846b5108c 100644
4672 +--- a/mm/swapfile.c
4673 ++++ b/mm/swapfile.c
4674 +@@ -2837,7 +2837,7 @@ static struct swap_info_struct *alloc_swap_info(void)
4675 + unsigned int type;
4676 + int i;
4677 +
4678 +- p = kzalloc(sizeof(*p), GFP_KERNEL);
4679 ++ p = kvzalloc(sizeof(*p), GFP_KERNEL);
4680 + if (!p)
4681 + return ERR_PTR(-ENOMEM);
4682 +
4683 +@@ -2848,7 +2848,7 @@ static struct swap_info_struct *alloc_swap_info(void)
4684 + }
4685 + if (type >= MAX_SWAPFILES) {
4686 + spin_unlock(&swap_lock);
4687 +- kfree(p);
4688 ++ kvfree(p);
4689 + return ERR_PTR(-EPERM);
4690 + }
4691 + if (type >= nr_swapfiles) {
4692 +@@ -2862,7 +2862,7 @@ static struct swap_info_struct *alloc_swap_info(void)
4693 + smp_wmb();
4694 + nr_swapfiles++;
4695 + } else {
4696 +- kfree(p);
4697 ++ kvfree(p);
4698 + p = swap_info[type];
4699 + /*
4700 + * Do not memset this entry: a racing procfs swap_next()
4701 +diff --git a/net/9p/protocol.c b/net/9p/protocol.c
4702 +index 931ea00c4fed..ce7c221ca18b 100644
4703 +--- a/net/9p/protocol.c
4704 ++++ b/net/9p/protocol.c
4705 +@@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
4706 + void p9stat_free(struct p9_wstat *stbuf)
4707 + {
4708 + kfree(stbuf->name);
4709 ++ stbuf->name = NULL;
4710 + kfree(stbuf->uid);
4711 ++ stbuf->uid = NULL;
4712 + kfree(stbuf->gid);
4713 ++ stbuf->gid = NULL;
4714 + kfree(stbuf->muid);
4715 ++ stbuf->muid = NULL;
4716 + kfree(stbuf->extension);
4717 ++ stbuf->extension = NULL;
4718 + }
4719 + EXPORT_SYMBOL(p9stat_free);
4720 +
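
p9stat_free() can be reached more than once for the same p9_wstat on 9p error paths, so each kfree() is now paired with nulling the pointer; kfree(NULL) is a no-op, which turns a would-be double free into a harmless second call. The same idiom in userspace (free(NULL) is likewise a no-op):

#include <stdlib.h>
#include <string.h>

struct wstat { char *name; char *uid; };

/* safe to call any number of times: every pointer is cleared
 * right after it is freed, and free(NULL) does nothing */
static void wstat_free(struct wstat *st)
{
        free(st->name); st->name = NULL;
        free(st->uid);  st->uid  = NULL;
}

int main(void)
{
        struct wstat st = { strdup("a"), strdup("b") };

        wstat_free(&st);
        wstat_free(&st);  /* the double call is now harmless */
        return 0;
}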
4721 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
4722 +index 3d5280425027..2d3723606fb0 100644
4723 +--- a/net/netfilter/nf_conntrack_core.c
4724 ++++ b/net/netfilter/nf_conntrack_core.c
4725 +@@ -929,19 +929,22 @@ static unsigned int early_drop_list(struct net *net,
4726 + return drops;
4727 + }
4728 +
4729 +-static noinline int early_drop(struct net *net, unsigned int _hash)
4730 ++static noinline int early_drop(struct net *net, unsigned int hash)
4731 + {
4732 +- unsigned int i;
4733 ++ unsigned int i, bucket;
4734 +
4735 + for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
4736 + struct hlist_nulls_head *ct_hash;
4737 +- unsigned int hash, hsize, drops;
4738 ++ unsigned int hsize, drops;
4739 +
4740 + rcu_read_lock();
4741 + nf_conntrack_get_ht(&ct_hash, &hsize);
4742 +- hash = reciprocal_scale(_hash++, hsize);
4743 ++ if (!i)
4744 ++ bucket = reciprocal_scale(hash, hsize);
4745 ++ else
4746 ++ bucket = (bucket + 1) % hsize;
4747 +
4748 +- drops = early_drop_list(net, &ct_hash[hash]);
4749 ++ drops = early_drop_list(net, &ct_hash[bucket]);
4750 + rcu_read_unlock();
4751 +
4752 + if (drops) {
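
The old eviction loop rescaled an incremented hash on every iteration, which can revisit some buckets and never reach others; the fix derives the starting bucket once and then walks consecutive buckets modulo the table size, so NF_CT_EVICTION_RANGE distinct buckets get scanned. A sketch of the corrected walk (plain modulo stands in for reciprocal_scale()):

#include <stdio.h>

#define EVICTION_RANGE 8

/* walk EVICTION_RANGE consecutive buckets starting from a scaled
 * hash, wrapping at the table size, visiting each at most once */
static void scan(unsigned int hash, unsigned int hsize)
{
        unsigned int i, bucket = 0;

        for (i = 0; i < EVICTION_RANGE; i++) {
                if (!i)
                        bucket = hash % hsize;  /* reciprocal_scale() upstream */
                else
                        bucket = (bucket + 1) % hsize;
                printf("bucket %u\n", bucket);
        }
}

int main(void)
{
        scan(12345, 16);
        return 0;
}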
4753 +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
4754 +index 30afbd236656..b53cc0960b5d 100644
4755 +--- a/net/sunrpc/xdr.c
4756 ++++ b/net/sunrpc/xdr.c
4757 +@@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
4758 + WARN_ON_ONCE(xdr->iov);
4759 + return;
4760 + }
4761 +- if (fraglen) {
4762 ++ if (fraglen)
4763 + xdr->end = head->iov_base + head->iov_len;
4764 +- xdr->page_ptr--;
4765 +- }
4766 + /* (otherwise assume xdr->end is already set) */
4767 ++ xdr->page_ptr--;
4768 + head->iov_len = len;
4769 + buf->len = len;
4770 + xdr->p = head->iov_base + head->iov_len;
4771 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4772 +index 4680a217d0fa..46ec7be75d4b 100644
4773 +--- a/security/selinux/hooks.c
4774 ++++ b/security/selinux/hooks.c
4775 +@@ -5306,6 +5306,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
4776 + addr_buf = address;
4777 +
4778 + while (walk_size < addrlen) {
4779 ++ if (walk_size + sizeof(sa_family_t) > addrlen)
4780 ++ return -EINVAL;
4781 ++
4782 + addr = addr_buf;
4783 + switch (addr->sa_family) {
4784 + case AF_UNSPEC:
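
The added check guarantees that at least sizeof(sa_family_t) bytes of the caller-supplied buffer remain before addr->sa_family is dereferenced, closing an out-of-bounds read on a truncated address list. A generic userspace sketch of validating a packed buffer of records the same way (the fixed record size is an assumption for the sketch; the real code derives it from the family):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef unsigned short sa_family_t;
struct sockaddr_min { sa_family_t sa_family; };

/* walk a buffer of packed sockaddrs, refusing to read a family
 * field that would extend past the caller-supplied length */
static int walk(const char *buf, size_t buflen)
{
        size_t walked = 0;

        while (walked < buflen) {
                if (walked + sizeof(sa_family_t) > buflen)
                        return -1;  /* truncated record */
                sa_family_t fam;
                memcpy(&fam, buf + walked, sizeof(fam));
                printf("family %u\n", fam);
                walked += sizeof(struct sockaddr_min);
        }
        return 0;
}

int main(void)
{
        char buf[3] = { 0 };  /* 3 bytes: the second read falls short */

        printf("rc=%d\n", walk(buf, sizeof(buf)));  /* rc=-1 */
        return 0;
}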
4785 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
4786 +index 02580f3ded1a..0b88ec9381e7 100644
4787 +--- a/tools/perf/util/pmu.c
4788 ++++ b/tools/perf/util/pmu.c
4789 +@@ -779,7 +779,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
4790 +
4791 + if (!is_arm_pmu_core(name)) {
4792 + pname = pe->pmu ? pe->pmu : "cpu";
4793 +- if (strncmp(pname, name, strlen(pname)))
4794 ++ if (strcmp(pname, name))
4795 + continue;
4796 + }
4797 +
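
strncmp(pname, name, strlen(pname)) is a prefix match, so an alias belonging to one PMU could also attach to any other PMU whose name merely starts with the same string; the exact strcmp() closes that hole. A short demonstration with illustrative PMU names:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *pname = "cpu";      /* PMU name the alias belongs to */
        const char *name  = "cpu_core"; /* PMU actually being probed */

        /* old test: prefix match, wrongly accepts cpu_core */
        printf("strncmp: %s\n",
               strncmp(pname, name, strlen(pname)) ? "skip" : "match");
        /* new test: exact match only */
        printf("strcmp:  %s\n", strcmp(pname, name) ? "skip" : "match");
        return 0;
}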
4798 +diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
4799 +index 2bda81c7bf23..df1d7d4b1c89 100644
4800 +--- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c
4801 ++++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
4802 +@@ -98,7 +98,7 @@ void texasr(void *in)
4803 +
4804 + int test_tmspr()
4805 + {
4806 +- pthread_t thread;
4807 ++ pthread_t *thread;
4808 + int thread_num;
4809 + unsigned long i;
4810 +
4811 +@@ -107,21 +107,28 @@ int test_tmspr()
4812 + /* To cause some context switching */
4813 + thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN);
4814 +
4815 ++ thread = malloc(thread_num * sizeof(pthread_t));
4816 ++ if (thread == NULL)
4817 ++ return EXIT_FAILURE;
4818 ++
4819 + /* Test TFIAR and TFHAR */
4820 +- for (i = 0 ; i < thread_num ; i += 2){
4821 +- if (pthread_create(&thread, NULL, (void*)tfiar_tfhar, (void *)i))
4822 ++ for (i = 0; i < thread_num; i += 2) {
4823 ++ if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,
4824 ++ (void *)i))
4825 + return EXIT_FAILURE;
4826 + }
4827 +- if (pthread_join(thread, NULL) != 0)
4828 +- return EXIT_FAILURE;
4829 +-
4830 + /* Test TEXASR */
4831 +- for (i = 0 ; i < thread_num ; i++){
4832 +- if (pthread_create(&thread, NULL, (void*)texasr, (void *)i))
4833 ++ for (i = 1; i < thread_num; i += 2) {
4834 ++ if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))
4835 + return EXIT_FAILURE;
4836 + }
4837 +- if (pthread_join(thread, NULL) != 0)
4838 +- return EXIT_FAILURE;
4839 ++
4840 ++ for (i = 0; i < thread_num; i++) {
4841 ++ if (pthread_join(thread[i], NULL) != 0)
4842 ++ return EXIT_FAILURE;
4843 ++ }
4844 ++
4845 ++ free(thread);
4846 +
4847 + if (passed)
4848 + return 0;
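
The selftest previously reused a single pthread_t and joined only the most recently created thread, so most workers were never waited on; it now allocates one handle per thread, creates both groups, then joins everything and frees the array. The create-all/join-all skeleton, reduced to its essentials:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *worker(void *arg) { return arg; }

int main(void)
{
        int n = 4;  /* the test uses 10 * number of online CPUs */
        pthread_t *thread = malloc(n * sizeof(pthread_t));

        if (!thread)
                return EXIT_FAILURE;

        for (int i = 0; i < n; i++)
                if (pthread_create(&thread[i], NULL, worker, NULL))
                        return EXIT_FAILURE;

        /* join every thread, not just the last one created */
        for (int i = 0; i < n; i++)
                if (pthread_join(thread[i], NULL))
                        return EXIT_FAILURE;

        free(thread);
        puts("all joined");
        return 0;
}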