From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.11 commit in: /
Date: Wed, 28 Apr 2021 12:05:48
Message-Id: 1619611527.96af2504c6ae2a1e698861b8847b14b7ace48889.alicef@gentoo
1 commit: 96af2504c6ae2a1e698861b8847b14b7ace48889
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 28 12:05:13 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 28 12:05:27 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=96af2504
7
8 Linux patch 5.11.17
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
11
12 0000_README | 4 +
13 1016_linux-5.11.17.patch | 2076 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2080 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e06ab59..c4f4eb4 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -107,6 +107,10 @@ Patch: 1015_linux-5.11.16.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.11.16
23
24 +Patch: 1016_linux-5.11.17.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.11.17
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1016_linux-5.11.17.patch b/1016_linux-5.11.17.patch
33 new file mode 100644
34 index 0000000..86f76ba
35 --- /dev/null
36 +++ b/1016_linux-5.11.17.patch
37 @@ -0,0 +1,2076 @@
38 +diff --git a/Makefile b/Makefile
39 +index 124d8e2007765..d8367e1932324 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 11
46 +-SUBLEVEL = 16
47 ++SUBLEVEL = 17
48 + EXTRAVERSION =
49 + NAME = 💕 Valentine's Day Edition 💕
50 +
51 +diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
52 +index 9dcae1f2bc99f..c5b9da0d7e6ce 100644
53 +--- a/arch/arm/boot/dts/omap3.dtsi
54 ++++ b/arch/arm/boot/dts/omap3.dtsi
55 +@@ -24,6 +24,9 @@
56 + i2c0 = &i2c1;
57 + i2c1 = &i2c2;
58 + i2c2 = &i2c3;
59 ++ mmc0 = &mmc1;
60 ++ mmc1 = &mmc2;
61 ++ mmc2 = &mmc3;
62 + serial0 = &uart1;
63 + serial1 = &uart2;
64 + serial2 = &uart3;
65 +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
66 +index a1f621b388fe7..358df6d926aff 100644
67 +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
68 ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
69 +@@ -10,5 +10,5 @@
70 + };
71 +
72 + &mmc0 {
73 +- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
74 ++ broken-cd; /* card detect is broken on *some* boards */
75 + };
76 +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
77 +index 66aac2881ba84..85645b2b0c7ab 100644
78 +--- a/arch/arm64/kernel/probes/kprobes.c
79 ++++ b/arch/arm64/kernel/probes/kprobes.c
80 +@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
81 + if (!instruction_pointer(regs))
82 + BUG();
83 +
84 +- if (kcb->kprobe_status == KPROBE_REENTER)
85 ++ if (kcb->kprobe_status == KPROBE_REENTER) {
86 + restore_previous_kprobe(kcb);
87 +- else
88 ++ } else {
89 ++ kprobes_restore_local_irqflag(kcb, regs);
90 + reset_current_kprobe();
91 ++ }
92 +
93 + break;
94 + case KPROBE_HIT_ACTIVE:
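For context on the kprobes hunk above: when a fault hits while a kprobe is being single-stepped (the non-reenter path), the handler previously reset the kprobe state without restoring the IRQ flags that were saved when single-stepping began, leaving interrupts masked. A minimal standalone C model of the invariant the fix enforces; the struct and helpers here are simplified stand-ins, not the kernel's kprobes API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-CPU kprobe control block. */
struct kcb_model {
	unsigned long saved_irqflag;
	bool stepping;
};

static unsigned long irq_save_model(void)
{
	return 0x80;			/* pretend interrupts were enabled */
}

static void irq_restore_model(unsigned long flags)
{
	printf("irqflags restored to 0x%lx\n", flags);
}

static void setup_singlestep(struct kcb_model *kcb)
{
	kcb->saved_irqflag = irq_save_model();	/* IRQs masked for the step */
	kcb->stepping = true;
}

static void fault_handler(struct kcb_model *kcb)
{
	/* The bug: resetting state here without the restore left IRQs
	 * masked. The fix restores the saved flags on this path too. */
	if (kcb->stepping) {
		irq_restore_model(kcb->saved_irqflag);
		kcb->stepping = false;
	}
}

int main(void)
{
	struct kcb_model kcb = { 0 };

	setup_singlestep(&kcb);
	fault_handler(&kcb);	/* every exit path rebalances the flags */
	return 0;
}
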
95 +diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
96 +index 89dd2fcf38fa1..3b16d081b4d7f 100644
97 +--- a/arch/csky/Kconfig
98 ++++ b/arch/csky/Kconfig
99 +@@ -292,7 +292,7 @@ config FORCE_MAX_ZONEORDER
100 + int "Maximum zone order"
101 + default "11"
102 +
103 +-config RAM_BASE
104 ++config DRAM_BASE
105 + hex "DRAM start addr (the same with memory-section in dts)"
106 + default 0x0
107 +
108 +diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
109 +index 9b98bf31d57ce..16878240ef9ac 100644
110 +--- a/arch/csky/include/asm/page.h
111 ++++ b/arch/csky/include/asm/page.h
112 +@@ -28,7 +28,7 @@
113 + #define SSEG_SIZE 0x20000000
114 + #define LOWMEM_LIMIT (SSEG_SIZE * 2)
115 +
116 +-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
117 ++#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
118 +
119 + #ifndef __ASSEMBLY__
120 +
121 +diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
122 +index c7311131156e8..ba3edb8a04b16 100644
123 +--- a/arch/ia64/mm/discontig.c
124 ++++ b/arch/ia64/mm/discontig.c
125 +@@ -94,7 +94,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
126 + * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
127 + * called yet. Note that node 0 will also count all non-existent cpus.
128 + */
129 +-static int __meminit early_nr_cpus_node(int node)
130 ++static int early_nr_cpus_node(int node)
131 + {
132 + int cpu, n = 0;
133 +
134 +@@ -109,7 +109,7 @@ static int __meminit early_nr_cpus_node(int node)
135 + * compute_pernodesize - compute size of pernode data
136 + * @node: the node id.
137 + */
138 +-static unsigned long __meminit compute_pernodesize(int node)
139 ++static unsigned long compute_pernodesize(int node)
140 + {
141 + unsigned long pernodesize = 0, cpus;
142 +
143 +@@ -366,7 +366,7 @@ static void __init reserve_pernode_space(void)
144 + }
145 + }
146 +
147 +-static void __meminit scatter_node_data(void)
148 ++static void scatter_node_data(void)
149 + {
150 + pg_data_t **dst;
151 + int node;
152 +diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
153 +index 7f5912af2a52e..21b1071e0a34a 100644
154 +--- a/arch/m68k/include/asm/page_mm.h
155 ++++ b/arch/m68k/include/asm/page_mm.h
156 +@@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
157 + ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
158 + })
159 + #else
160 +-#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
161 ++#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
162 + #include <asm-generic/memory_model.h>
163 + #endif
164 +
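The m68k hunk above fixes a units bug: ARCH_PFN_OFFSET must be a page frame number, but m68k_memory[0].addr is a byte address, so pfn/page conversions in flat-memory configurations were off by a factor of PAGE_SIZE. A small sketch of the arithmetic (the PAGE_SHIFT and base values are assumed for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, assumed for the example */

int main(void)
{
	unsigned long ram_base = 0x08000000;	/* hypothetical RAM start */

	/* memory_model.h expects a frame number, not a byte address */
	unsigned long pfn_offset = ram_base >> PAGE_SHIFT;

	printf("byte base 0x%lx -> ARCH_PFN_OFFSET %lu\n",
	       ram_base, pfn_offset);
	return 0;
}
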
165 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
166 +index f1ba197b10c0e..f0a215cf010c7 100644
167 +--- a/arch/s390/kernel/entry.S
168 ++++ b/arch/s390/kernel/entry.S
169 +@@ -976,6 +976,7 @@ ENDPROC(ext_int_handler)
170 + * Load idle PSW.
171 + */
172 + ENTRY(psw_idle)
173 ++ stg %r14,(__SF_GPRS+8*8)(%r15)
174 + stg %r3,__SF_EMPTY(%r15)
175 + larl %r1,.Lpsw_idle_exit
176 + stg %r1,__SF_EMPTY+8(%r15)
177 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
178 +index bfd42e0853ed6..6c88f245b33ac 100644
179 +--- a/arch/x86/events/intel/core.c
180 ++++ b/arch/x86/events/intel/core.c
181 +@@ -4400,7 +4400,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
182 + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
183 + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
184 + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
185 +- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
186 ++ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
187 + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
188 + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
189 + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
190 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
191 +index 7bdb1821215db..3112186a4f4b2 100644
192 +--- a/arch/x86/events/intel/uncore_snbep.c
193 ++++ b/arch/x86/events/intel/uncore_snbep.c
194 +@@ -1159,7 +1159,6 @@ enum {
195 + SNBEP_PCI_QPI_PORT0_FILTER,
196 + SNBEP_PCI_QPI_PORT1_FILTER,
197 + BDX_PCI_QPI_PORT2_FILTER,
198 +- HSWEP_PCI_PCU_3,
199 + };
200 +
201 + static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
202 +@@ -2816,22 +2815,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
203 + NULL,
204 + };
205 +
206 +-void hswep_uncore_cpu_init(void)
207 ++#define HSWEP_PCU_DID 0x2fc0
208 ++#define HSWEP_PCU_CAPID4_OFFET 0x94
209 ++#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
210 ++
211 ++static bool hswep_has_limit_sbox(unsigned int device)
212 + {
213 +- int pkg = boot_cpu_data.logical_proc_id;
214 ++ struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
215 ++ u32 capid4;
216 ++
217 ++ if (!dev)
218 ++ return false;
219 ++
220 ++ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
221 ++ if (!hswep_get_chop(capid4))
222 ++ return true;
223 +
224 ++ return false;
225 ++}
226 ++
227 ++void hswep_uncore_cpu_init(void)
228 ++{
229 + if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
230 + hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
231 +
232 + /* Detect 6-8 core systems with only two SBOXes */
233 +- if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
234 +- u32 capid4;
235 +-
236 +- pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
237 +- 0x94, &capid4);
238 +- if (((capid4 >> 6) & 0x3) == 0)
239 +- hswep_uncore_sbox.num_boxes = 2;
240 +- }
241 ++ if (hswep_has_limit_sbox(HSWEP_PCU_DID))
242 ++ hswep_uncore_sbox.num_boxes = 2;
243 +
244 + uncore_msr_uncores = hswep_msr_uncores;
245 + }
246 +@@ -3094,11 +3104,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
247 + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
248 + SNBEP_PCI_QPI_PORT1_FILTER),
249 + },
250 +- { /* PCU.3 (for Capability registers) */
251 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
252 +- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
253 +- HSWEP_PCI_PCU_3),
254 +- },
255 + { /* end: all zeroes */ }
256 + };
257 +
258 +@@ -3190,27 +3195,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
259 + EVENT_CONSTRAINT_END
260 + };
261 +
262 ++#define BDX_PCU_DID 0x6fc0
263 ++
264 + void bdx_uncore_cpu_init(void)
265 + {
266 +- int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
267 +-
268 + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
269 + bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
270 + uncore_msr_uncores = bdx_msr_uncores;
271 +
272 +- /* BDX-DE doesn't have SBOX */
273 +- if (boot_cpu_data.x86_model == 86) {
274 +- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
275 + /* Detect systems with no SBOXes */
276 +- } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
277 +- struct pci_dev *pdev;
278 +- u32 capid4;
279 +-
280 +- pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
281 +- pci_read_config_dword(pdev, 0x94, &capid4);
282 +- if (((capid4 >> 6) & 0x3) == 0)
283 +- bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
284 +- }
285 ++ if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
286 ++ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
287 ++
288 + hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
289 + }
290 +
291 +@@ -3431,11 +3427,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
292 + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
293 + BDX_PCI_QPI_PORT2_FILTER),
294 + },
295 +- { /* PCU.3 (for Capability registers) */
296 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
297 +- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
298 +- HSWEP_PCI_PCU_3),
299 +- },
300 + { /* end: all zeroes */ }
301 + };
302 +
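The uncore change above drops the extra "PCU.3" entries from the PCI device tables and instead looks the PCU device up directly to read the CAPID4 capability register; bits [7:6] (the "chop" field) being zero identifies the cut-down dies that expose only two SBOX units. A standalone sketch of just the bitfield test (the register value is a made-up example):

#include <stdint.h>
#include <stdio.h>

#define GET_CHOP(capid4)	(((capid4) >> 6) & 0x3)

int main(void)
{
	uint32_t capid4 = 0x00000000;	/* example CAPID4 readout */

	/* chop == 0 -> smaller die, limit the driver to two SBOXes */
	printf("limit SBOX count: %s\n",
	       GET_CHOP(capid4) == 0 ? "yes" : "no");
	return 0;
}
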
303 +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
304 +index a8f3af257e26c..b1deacbeb2669 100644
305 +--- a/arch/x86/kernel/crash.c
306 ++++ b/arch/x86/kernel/crash.c
307 +@@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
308 + struct crash_memmap_data cmd;
309 + struct crash_mem *cmem;
310 +
311 +- cmem = vzalloc(sizeof(struct crash_mem));
312 ++ cmem = vzalloc(struct_size(cmem, ranges, 1));
313 + if (!cmem)
314 + return -ENOMEM;
315 +
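The crash.c fix sizes the allocation with struct_size() so the flexible ranges[] array actually gets room for one element; plain sizeof(struct crash_mem) covered only the header, and the kernel helper additionally saturates on multiplication overflow. A compilable userspace analogue of the sizing (the struct layout is a simplified stand-in):

#include <stdio.h>
#include <stdlib.h>

struct crash_mem_model {
	unsigned int max_nr_ranges;
	unsigned int nr_ranges;
	struct { unsigned long long start, end; } ranges[]; /* flexible array */
};

int main(void)
{
	/* Equivalent of struct_size(cmem, ranges, 1): header plus space
	 * for one ranges[] element, which plain sizeof() would omit. */
	size_t sz = sizeof(struct crash_mem_model) +
		    1 * sizeof(((struct crash_mem_model *)0)->ranges[0]);
	struct crash_mem_model *cmem = calloc(1, sz);

	if (!cmem)
		return 1;
	cmem->max_nr_ranges = 1;
	printf("allocated %zu bytes (header + 1 range)\n", sz);
	free(cmem);
	return 0;
}
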
316 +diff --git a/block/ioctl.c b/block/ioctl.c
317 +index ff241e663c018..8ba1ed8defd0b 100644
318 +--- a/block/ioctl.c
319 ++++ b/block/ioctl.c
320 +@@ -89,6 +89,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
321 + return -EINVAL;
322 + if (!capable(CAP_SYS_ADMIN))
323 + return -EACCES;
324 ++ if (bdev->bd_part_count)
325 ++ return -EBUSY;
326 +
327 + /*
328 + * Reopen the device to revalidate the driver state and force a
329 +diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
330 +index 71827d9b0aa19..b7260749e8eee 100644
331 +--- a/drivers/dma/tegra20-apb-dma.c
332 ++++ b/drivers/dma/tegra20-apb-dma.c
333 +@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
334 + goto end;
335 + }
336 + if (!tdc->busy) {
337 +- err = pm_runtime_get_sync(tdc->tdma->dev);
338 ++ err = pm_runtime_resume_and_get(tdc->tdma->dev);
339 + if (err < 0) {
340 + dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
341 + goto end;
342 +@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
343 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
344 + int err;
345 +
346 +- err = pm_runtime_get_sync(tdc->tdma->dev);
347 ++ err = pm_runtime_resume_and_get(tdc->tdma->dev);
348 + if (err < 0) {
349 + dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
350 + return;
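The tegra20-apb-dma change swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(): the former increments the device usage counter even when the resume fails, so every error path must also drop the reference, while the latter rebalances the counter on failure itself. A standalone model of the difference (the counter and resume result are mocked):

#include <stdio.h>

static int usage_count;

static int resume_hw(void) { return -5; /* pretend resume failed (-EIO) */ }

static int get_sync_model(void)
{
	usage_count++;			/* counted even on failure */
	return resume_hw();
}

static int resume_and_get_model(void)
{
	int ret = get_sync_model();

	if (ret < 0)
		usage_count--;		/* failure path rebalances the count */
	return ret;
}

int main(void)
{
	if (resume_and_get_model() < 0)
		printf("resume failed, usage_count balanced at %d\n",
		       usage_count);
	return 0;
}
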
351 +diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
352 +index 55df63dead8d3..70b29bd079c9f 100644
353 +--- a/drivers/dma/xilinx/xilinx_dpdma.c
354 ++++ b/drivers/dma/xilinx/xilinx_dpdma.c
355 +@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
356 + struct xilinx_dpdma_tx_desc *desc;
357 + struct virt_dma_desc *vdesc;
358 + u32 reg, channels;
359 ++ bool first_frame;
360 +
361 + lockdep_assert_held(&chan->lock);
362 +
363 +@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
364 + chan->running = true;
365 + }
366 +
367 +- if (chan->video_group)
368 +- channels = xilinx_dpdma_chan_video_group_ready(chan);
369 +- else
370 +- channels = BIT(chan->id);
371 +-
372 +- if (!channels)
373 +- return;
374 +-
375 + vdesc = vchan_next_desc(&chan->vchan);
376 + if (!vdesc)
377 + return;
378 +@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
379 + FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
380 + upper_32_bits(sw_desc->dma_addr)));
381 +
382 +- if (chan->first_frame)
383 ++ first_frame = chan->first_frame;
384 ++ chan->first_frame = false;
385 ++
386 ++ if (chan->video_group) {
387 ++ channels = xilinx_dpdma_chan_video_group_ready(chan);
388 ++ /*
389 ++ * Trigger the transfer only when all channels in the group are
390 ++ * ready.
391 ++ */
392 ++ if (!channels)
393 ++ return;
394 ++ } else {
395 ++ channels = BIT(chan->id);
396 ++ }
397 ++
398 ++ if (first_frame)
399 + reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
400 + else
401 + reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
402 +
403 +- chan->first_frame = false;
404 +-
405 + dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
406 + }
407 +
408 +@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
409 + */
410 + static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
411 + {
412 +- struct xilinx_dpdma_tx_desc *active = chan->desc.active;
413 ++ struct xilinx_dpdma_tx_desc *active;
414 + unsigned long flags;
415 +
416 + spin_lock_irqsave(&chan->lock, flags);
417 +
418 + xilinx_dpdma_debugfs_desc_done_irq(chan);
419 +
420 ++ active = chan->desc.active;
421 + if (active)
422 + vchan_cyclic_callback(&active->vdesc);
423 + else
424 +diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
425 +index 41952bb818ad5..56152263ab38f 100644
426 +--- a/drivers/gpio/gpio-omap.c
427 ++++ b/drivers/gpio/gpio-omap.c
428 +@@ -29,6 +29,7 @@
429 + #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
430 +
431 + struct gpio_regs {
432 ++ u32 sysconfig;
433 + u32 irqenable1;
434 + u32 irqenable2;
435 + u32 wake_en;
436 +@@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
437 + const struct omap_gpio_reg_offs *regs = p->regs;
438 + void __iomem *base = p->base;
439 +
440 ++ p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
441 + p->context.ctrl = readl_relaxed(base + regs->ctrl);
442 + p->context.oe = readl_relaxed(base + regs->direction);
443 + p->context.wake_en = readl_relaxed(base + regs->wkup_en);
444 +@@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
445 + const struct omap_gpio_reg_offs *regs = bank->regs;
446 + void __iomem *base = bank->base;
447 +
448 ++ writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
449 + writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
450 + writel_relaxed(bank->context.ctrl, base + regs->ctrl);
451 + writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
452 +@@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
453 +
454 + bank->saved_datain = readl_relaxed(base + bank->regs->datain);
455 +
456 ++ /* Save sysconfig; its runtime value can differ from the init value */
457 ++ if (bank->loses_context)
458 ++ bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
459 ++
460 + if (!bank->enabled_non_wakeup_gpios)
461 + goto update_gpio_context_count;
462 +
463 +@@ -1279,6 +1286,7 @@ out_unlock:
464 +
465 + static const struct omap_gpio_reg_offs omap2_gpio_regs = {
466 + .revision = OMAP24XX_GPIO_REVISION,
467 ++ .sysconfig = OMAP24XX_GPIO_SYSCONFIG,
468 + .direction = OMAP24XX_GPIO_OE,
469 + .datain = OMAP24XX_GPIO_DATAIN,
470 + .dataout = OMAP24XX_GPIO_DATAOUT,
471 +@@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
472 +
473 + static const struct omap_gpio_reg_offs omap4_gpio_regs = {
474 + .revision = OMAP4_GPIO_REVISION,
475 ++ .sysconfig = OMAP4_GPIO_SYSCONFIG,
476 + .direction = OMAP4_GPIO_OE,
477 + .datain = OMAP4_GPIO_DATAIN,
478 + .dataout = OMAP4_GPIO_DATAOUT,
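The gpio-omap hunks add SYSCONFIG to the saved register context: its runtime value (e.g. idle-mode bits) can differ from the reset default, so it must be captured before the bank's power domain loses context and written back first on restore. A simplified save/restore model (the register offset matches the OMAP24XX define added elsewhere in this patch; everything else is mocked):

#include <stdint.h>
#include <stdio.h>

#define GPIO_SYSCONFIG	0x0010	/* OMAP24XX_GPIO_SYSCONFIG */

struct bank_model {
	uint32_t regs[0x100 / 4];	/* mock register file */
	uint32_t saved_sysconfig;
};

static void idle_save(struct bank_model *b)
{
	/* capture the *runtime* value, not the init-time one */
	b->saved_sysconfig = b->regs[GPIO_SYSCONFIG / 4];
}

static void context_restore(struct bank_model *b)
{
	b->regs[GPIO_SYSCONFIG / 4] = b->saved_sysconfig;
}

int main(void)
{
	struct bank_model b = { { 0 }, 0 };

	b.regs[GPIO_SYSCONFIG / 4] = 0x19;	/* e.g. smart-idle configured */
	idle_save(&b);
	b.regs[GPIO_SYSCONFIG / 4] = 0;		/* power domain lost context */
	context_restore(&b);
	printf("SYSCONFIG after restore: 0x%x\n",
	       (unsigned)b.regs[GPIO_SYSCONFIG / 4]);
	return 0;
}
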
479 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
480 +index b24cb44739132..8090c1e7a3bac 100644
481 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
482 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
483 +@@ -3298,7 +3298,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
484 + struct amdgpu_bo *root;
485 + uint64_t value, flags;
486 + struct amdgpu_vm *vm;
487 +- long r;
488 ++ int r;
489 +
490 + spin_lock(&adev->vm_manager.pasid_lock);
491 + vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
492 +@@ -3347,6 +3347,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
493 + value = 0;
494 + }
495 +
496 ++ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
497 ++ if (r) {
498 ++ pr_debug("failed %d to reserve fence slot\n", r);
499 ++ goto error_unlock;
500 ++ }
501 ++
502 + r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
503 + addr, flags, value, NULL, NULL,
504 + NULL);
505 +@@ -3358,7 +3364,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
506 + error_unlock:
507 + amdgpu_bo_unreserve(root);
508 + if (r < 0)
509 +- DRM_ERROR("Can't handle page fault (%ld)\n", r);
510 ++ DRM_ERROR("Can't handle page fault (%d)\n", r);
511 +
512 + error_unref:
513 + amdgpu_bo_unref(&root);
514 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
515 +index e7d6da05011ff..4f24663d81696 100644
516 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
517 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
518 +@@ -3280,7 +3280,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
519 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
520 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
521 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
522 +- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
523 ++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
524 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
525 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
526 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
527 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
528 +index ad4afbc37d516..54fd48ee5f275 100644
529 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
530 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
531 +@@ -3962,13 +3962,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
532 + if (modifier == DRM_FORMAT_MOD_LINEAR)
533 + return true;
534 +
535 +- /*
536 +- * The arbitrary tiling support for multiplane formats has not been hooked
537 +- * up.
538 +- */
539 +- if (info->num_planes > 1)
540 +- return false;
541 +-
542 + /*
543 + * For D swizzle the canonical modifier depends on the bpp, so check
544 + * it here.
545 +@@ -3987,6 +3980,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
546 + /* Per radeonsi comments 16/64 bpp are more complicated. */
547 + if (info->cpp[0] != 4)
548 + return false;
549 ++ /* We support multi-planar formats, but not when combined with
550 ++ * additional DCC metadata planes. */
551 ++ if (info->num_planes > 1)
552 ++ return false;
553 + }
554 +
555 + return true;
556 +diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
557 +index 3feaece13ade0..6b665931147df 100644
558 +--- a/drivers/hid/hid-alps.c
559 ++++ b/drivers/hid/hid-alps.c
560 +@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
561 +
562 + if (input_register_device(data->input2)) {
563 + input_free_device(input2);
564 ++ ret = -ENOENT;
565 + goto exit;
566 + }
567 + }
568 +diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
569 +index 1dfe184ebf5a1..2ab22b9259418 100644
570 +--- a/drivers/hid/hid-asus.c
571 ++++ b/drivers/hid/hid-asus.c
572 +@@ -1221,6 +1221,9 @@ static const struct hid_device_id asus_devices[] = {
573 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
574 + USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
575 + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
576 ++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
577 ++ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
578 ++ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
579 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
580 + USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
581 + QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
582 +diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
583 +index 21e15627a4614..477baa30889cc 100644
584 +--- a/drivers/hid/hid-cp2112.c
585 ++++ b/drivers/hid/hid-cp2112.c
586 +@@ -161,6 +161,7 @@ struct cp2112_device {
587 + atomic_t read_avail;
588 + atomic_t xfer_avail;
589 + struct gpio_chip gc;
590 ++ struct irq_chip irq;
591 + u8 *in_out_buffer;
592 + struct mutex lock;
593 +
594 +@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
595 + return 0;
596 + }
597 +
598 +-static struct irq_chip cp2112_gpio_irqchip = {
599 +- .name = "cp2112-gpio",
600 +- .irq_startup = cp2112_gpio_irq_startup,
601 +- .irq_shutdown = cp2112_gpio_irq_shutdown,
602 +- .irq_ack = cp2112_gpio_irq_ack,
603 +- .irq_mask = cp2112_gpio_irq_mask,
604 +- .irq_unmask = cp2112_gpio_irq_unmask,
605 +- .irq_set_type = cp2112_gpio_irq_type,
606 +-};
607 +-
608 + static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
609 + int pin)
610 + {
611 +@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
612 + dev->gc.can_sleep = 1;
613 + dev->gc.parent = &hdev->dev;
614 +
615 ++ dev->irq.name = "cp2112-gpio";
616 ++ dev->irq.irq_startup = cp2112_gpio_irq_startup;
617 ++ dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
618 ++ dev->irq.irq_ack = cp2112_gpio_irq_ack;
619 ++ dev->irq.irq_mask = cp2112_gpio_irq_mask;
620 ++ dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
621 ++ dev->irq.irq_set_type = cp2112_gpio_irq_type;
622 ++ dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
623 ++
624 + girq = &dev->gc.irq;
625 +- girq->chip = &cp2112_gpio_irqchip;
626 ++ girq->chip = &dev->irq;
627 + /* The event comes from the outside so no parent handler */
628 + girq->parent_handler = NULL;
629 + girq->num_parents = 0;
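The cp2112 change moves the irq_chip from a shared static definition into the per-device structure (and sets IRQCHIP_MASK_ON_SUSPEND): a single static chip is aliased across every probed device, so modifying it or attaching per-instance state is unsafe once a second device appears. A small model of the per-instance copy (the types are simplified stand-ins):

#include <stdio.h>

struct irq_chip_model {
	const char *name;
	unsigned int flags;
};

struct dev_model {
	struct irq_chip_model irq;	/* owned per device, not shared */
};

static void probe(struct dev_model *d, unsigned int flags)
{
	d->irq.name = "cp2112-gpio";
	d->irq.flags = flags;		/* no cross-device aliasing */
}

int main(void)
{
	struct dev_model a, b;

	probe(&a, 0x1);
	probe(&b, 0x2);
	printf("a.flags=%u b.flags=%u\n", a.irq.flags, b.irq.flags);
	return 0;
}
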
630 +diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
631 +index 85a054f1ce389..2a176f77b32e9 100644
632 +--- a/drivers/hid/hid-google-hammer.c
633 ++++ b/drivers/hid/hid-google-hammer.c
634 +@@ -526,6 +526,8 @@ static void hammer_remove(struct hid_device *hdev)
635 + }
636 +
637 + static const struct hid_device_id hammer_devices[] = {
638 ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
639 ++ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
640 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
641 + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
642 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
643 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
644 +index b60279aaed438..09d0499865160 100644
645 +--- a/drivers/hid/hid-ids.h
646 ++++ b/drivers/hid/hid-ids.h
647 +@@ -191,6 +191,7 @@
648 + #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
649 + #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
650 + #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
651 ++#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
652 + #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
653 +
654 + #define USB_VENDOR_ID_ATEN 0x0557
655 +@@ -488,6 +489,7 @@
656 + #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
657 + #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
658 + #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
659 ++#define USB_DEVICE_ID_GOOGLE_DON 0x5050
660 +
661 + #define USB_VENDOR_ID_GOTOP 0x08f2
662 + #define USB_DEVICE_ID_SUPER_Q2 0x007f
663 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
664 +index 6cda5935fc09c..2d70dc4bea654 100644
665 +--- a/drivers/hid/wacom_wac.c
666 ++++ b/drivers/hid/wacom_wac.c
667 +@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
668 + !wacom_wac->shared->is_touch_on) {
669 + if (!wacom_wac->shared->touch_down)
670 + return;
671 +- prox = 0;
672 ++ prox = false;
673 + }
674 +
675 + wacom_wac->hid_data.num_received++;
676 +diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
677 +index b248966837b4c..7aad40b2aa736 100644
678 +--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
679 ++++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
680 +@@ -412,7 +412,7 @@
681 + | CN6XXX_INTR_M0UNWI_ERR \
682 + | CN6XXX_INTR_M1UPB0_ERR \
683 + | CN6XXX_INTR_M1UPWI_ERR \
684 +- | CN6XXX_INTR_M1UPB0_ERR \
685 ++ | CN6XXX_INTR_M1UNB0_ERR \
686 + | CN6XXX_INTR_M1UNWI_ERR \
687 + | CN6XXX_INTR_INSTR_DB_OF_ERR \
688 + | CN6XXX_INTR_SLIST_DB_OF_ERR \
689 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
690 +index f35b0b83fe85a..040edc6fc5609 100644
691 +--- a/drivers/net/geneve.c
692 ++++ b/drivers/net/geneve.c
693 +@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
694 + __be16 sport;
695 + int err;
696 +
697 ++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
698 ++ return -EINVAL;
699 ++
700 + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
701 + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
702 + geneve->cfg.info.key.tp_dst, sport);
703 +@@ -985,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
704 + __be16 sport;
705 + int err;
706 +
707 ++ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
708 ++ return -EINVAL;
709 ++
710 + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
711 + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
712 + geneve->cfg.info.key.tp_dst, sport);
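The geneve fix validates that the bytes of the IP header are actually present in the packet (pulling them into the linear area if needed) before the transmit path parses them, returning -EINVAL for runt frames instead of reading past the buffer. A standalone analogue of the length check (the struct is a cut-down stand-in for struct iphdr, not the skb API):

#include <stddef.h>
#include <stdio.h>

struct iphdr_model {
	unsigned char	version_ihl;
	unsigned char	tos;
	unsigned short	tot_len;
};

static int parse_ip(const unsigned char *pkt, size_t len)
{
	if (len < sizeof(struct iphdr_model))
		return -22;		/* -EINVAL: header not fully present */

	/* only now is it safe to dereference header fields */
	return (pkt[0] >> 4) == 4 ? 0 : -22;
}

int main(void)
{
	unsigned char runt[2] = { 0x45, 0x00 };

	printf("runt packet -> %d\n", parse_ip(runt, sizeof(runt)));
	return 0;
}
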
713 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
714 +index d18642a8144cf..4909405803d57 100644
715 +--- a/drivers/net/usb/hso.c
716 ++++ b/drivers/net/usb/hso.c
717 +@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
718 + cancel_work_sync(&serial_table[i]->async_put_intf);
719 + cancel_work_sync(&serial_table[i]->async_get_intf);
720 + hso_serial_tty_unregister(serial);
721 +- kref_put(&serial_table[i]->ref, hso_serial_ref_free);
722 ++ kref_put(&serial->parent->ref, hso_serial_ref_free);
723 + }
724 + }
725 +
726 +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
727 +index 6f10e0998f1ce..94d19158efc18 100644
728 +--- a/drivers/net/xen-netback/xenbus.c
729 ++++ b/drivers/net/xen-netback/xenbus.c
730 +@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
731 + xenvif_carrier_on(be->vif);
732 +
733 + unregister_hotplug_status_watch(be);
734 +- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
735 +- hotplug_status_changed,
736 +- "%s/%s", dev->nodename, "hotplug-status");
737 +- if (!err)
738 ++ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
739 ++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
740 ++ NULL, hotplug_status_changed,
741 ++ "%s/%s", dev->nodename,
742 ++ "hotplug-status");
743 ++ if (err)
744 ++ goto err;
745 + be->have_hotplug_status_watch = 1;
746 ++ }
747 +
748 + netif_tx_wake_all_queues(be->vif->dev);
749 +
750 +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
751 +index 9fc4433fece4f..20b477cd5a30a 100644
752 +--- a/drivers/pinctrl/core.c
753 ++++ b/drivers/pinctrl/core.c
754 +@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
755 + unsigned i, pin;
756 + #ifdef CONFIG_GPIOLIB
757 + struct pinctrl_gpio_range *range;
758 +- unsigned int gpio_num;
759 + struct gpio_chip *chip;
760 ++ int gpio_num;
761 + #endif
762 +
763 + seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
764 +@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
765 + seq_printf(s, "pin %d (%s) ", pin, desc->name);
766 +
767 + #ifdef CONFIG_GPIOLIB
768 +- gpio_num = 0;
769 ++ gpio_num = -1;
770 + list_for_each_entry(range, &pctldev->gpio_ranges, node) {
771 + if ((pin >= range->pin_base) &&
772 + (pin < (range->pin_base + range->npins))) {
773 +@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
774 + break;
775 + }
776 + }
777 +- chip = gpio_to_chip(gpio_num);
778 +- if (chip && chip->gpiodev && chip->gpiodev->base)
779 +- seq_printf(s, "%u:%s ", gpio_num -
780 +- chip->gpiodev->base, chip->label);
781 ++ if (gpio_num >= 0)
782 ++ chip = gpio_to_chip(gpio_num);
783 ++ else
784 ++ chip = NULL;
785 ++ if (chip)
786 ++ seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
787 + else
788 + seq_puts(s, "0:? ");
789 + #endif
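The pinctrl debugfs fix switches gpio_num to a signed int with -1 as the "no mapping" sentinel: GPIO 0 is a perfectly valid number, so initializing an unsigned counter to 0 made pins without a GPIO range indistinguishable from pins mapped to GPIO 0 (and triggered a bogus gpio_to_chip(0) lookup). The sentinel pattern in miniature:

#include <stdio.h>

/* -1 means "pin has no GPIO mapping"; 0 is a valid GPIO number. */
static int find_gpio(int pin)
{
	return pin == 7 ? 0 : -1;	/* pretend pin 7 maps to GPIO 0 */
}

int main(void)
{
	int gpio = find_gpio(7);

	if (gpio >= 0)
		printf("pin 7 -> GPIO %d\n", gpio);
	else
		printf("pin 7 has no GPIO mapping\n");
	return 0;
}
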
790 +diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
791 +index 7fdf4257df1ed..ad4b446d588e6 100644
792 +--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
793 ++++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
794 +@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
795 + static const struct intel_community lbg_communities[] = {
796 + LBG_COMMUNITY(0, 0, 71),
797 + LBG_COMMUNITY(1, 72, 132),
798 +- LBG_COMMUNITY(3, 133, 144),
799 +- LBG_COMMUNITY(4, 145, 180),
800 +- LBG_COMMUNITY(5, 181, 246),
801 ++ LBG_COMMUNITY(3, 133, 143),
802 ++ LBG_COMMUNITY(4, 144, 178),
803 ++ LBG_COMMUNITY(5, 179, 246),
804 + };
805 +
806 + static const struct intel_pinctrl_soc_data lbg_soc_data = {
807 +diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
808 +index 1fd29f93ff6d6..5bdfb1565c14d 100644
809 +--- a/drivers/soc/qcom/qcom-geni-se.c
810 ++++ b/drivers/soc/qcom/qcom-geni-se.c
811 +@@ -756,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
812 + int i, err;
813 + const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
814 +
815 ++ if (has_acpi_companion(se->dev))
816 ++ return 0;
817 ++
818 + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
819 + if (!icc_names[i])
820 + continue;
821 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
822 +index e79359326411a..bc035ba6e0105 100644
823 +--- a/drivers/usb/class/cdc-acm.c
824 ++++ b/drivers/usb/class/cdc-acm.c
825 +@@ -1637,12 +1637,13 @@ static int acm_resume(struct usb_interface *intf)
826 + struct urb *urb;
827 + int rv = 0;
828 +
829 +- acm_unpoison_urbs(acm);
830 + spin_lock_irq(&acm->write_lock);
831 +
832 + if (--acm->susp_count)
833 + goto out;
834 +
835 ++ acm_unpoison_urbs(acm);
836 ++
837 + if (tty_port_initialized(&acm->port)) {
838 + rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
839 +
840 +diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
841 +index d300f799efcd1..aa656f57bf5b7 100644
842 +--- a/drivers/vdpa/mlx5/core/mr.c
843 ++++ b/drivers/vdpa/mlx5/core/mr.c
844 +@@ -273,8 +273,10 @@ done:
845 + mr->log_size = log_entity_size;
846 + mr->nsg = nsg;
847 + mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
848 +- if (!mr->nent)
849 ++ if (!mr->nent) {
850 ++ err = -ENOMEM;
851 + goto err_map;
852 ++ }
853 +
854 + err = create_direct_mr(mvdev, mr);
855 + if (err)
856 +diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
857 +index e0a27e3362935..bfa4c6ef554e5 100644
858 +--- a/drivers/vhost/vdpa.c
859 ++++ b/drivers/vhost/vdpa.c
860 +@@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
861 + const struct vdpa_config_ops *ops = vdpa->config;
862 + int r = 0;
863 +
864 ++ mutex_lock(&dev->mutex);
865 ++
866 + r = vhost_dev_check_owner(dev);
867 + if (r)
868 +- return r;
869 ++ goto unlock;
870 +
871 + switch (msg->type) {
872 + case VHOST_IOTLB_UPDATE:
873 +@@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
874 + r = -EINVAL;
875 + break;
876 + }
877 ++unlock:
878 ++ mutex_unlock(&dev->mutex);
879 +
880 + return r;
881 + }
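The vhost-vdpa fix serializes IOTLB message processing by taking dev->mutex for the whole function and leaving through a single unlock label, so the ownership check and the update cannot race and no early return leaks the lock. The goto-unlock shape as a compilable sketch (the mutex use is real pthreads, the message handling is mocked; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static int check_owner(void) { return 0; }	/* mock ownership check */

static int process_msg(int type)
{
	int r;

	pthread_mutex_lock(&dev_mutex);
	r = check_owner();
	if (r)
		goto unlock;	/* a bare return here would leak the lock */

	r = (type == 1) ? 0 : -22;	/* mock VHOST_IOTLB_* dispatch */
unlock:
	pthread_mutex_unlock(&dev_mutex);
	return r;
}

int main(void)
{
	printf("msg -> %d\n", process_msg(1));
	return 0;
}
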
882 +diff --git a/fs/coda/file.c b/fs/coda/file.c
883 +index 128d63df5bfb6..ef5ca22bfb3ea 100644
884 +--- a/fs/coda/file.c
885 ++++ b/fs/coda/file.c
886 +@@ -175,10 +175,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
887 + ret = call_mmap(vma->vm_file, vma);
888 +
889 + if (ret) {
890 +- /* if call_mmap fails, our caller will put coda_file so we
891 +- * should drop the reference to the host_file that we got.
892 ++ /* if call_mmap fails, our caller will put host_file so we
893 ++ * should drop the reference to the coda_file that we got.
894 + */
895 +- fput(host_file);
896 ++ fput(coda_file);
897 + kfree(cvm_ops);
898 + } else {
899 + /* here we add redirects for the open/close vm_operations */
900 +diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
901 +index 077d3ad343f68..7bf6ac142ff04 100644
902 +--- a/fs/overlayfs/file.c
903 ++++ b/fs/overlayfs/file.c
904 +@@ -430,20 +430,11 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
905 + if (WARN_ON(file != vma->vm_file))
906 + return -EIO;
907 +
908 +- vma->vm_file = get_file(realfile);
909 ++ vma_set_file(vma, realfile);
910 +
911 + old_cred = ovl_override_creds(file_inode(file)->i_sb);
912 + ret = call_mmap(vma->vm_file, vma);
913 + revert_creds(old_cred);
914 +-
915 +- if (ret) {
916 +- /* Drop reference count from new vm_file value */
917 +- fput(realfile);
918 +- } else {
919 +- /* Drop reference count from previous vm_file value */
920 +- fput(file);
921 +- }
922 +-
923 + ovl_file_accessed(file);
924 +
925 + return ret;
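The overlayfs hunk replaces the hand-rolled vm_file swap and its error-path fput() pair with vma_set_file(), which manages both references internally. A model of what that helper centralises: take a reference on the new file, install it, and drop the old one exactly once. Simplified refcounting, not the real fs/ helpers:

#include <stdio.h>

struct file_model { int refcount; const char *name; };

static void get_file_model(struct file_model *f) { f->refcount++; }
static void fput_model(struct file_model *f)     { f->refcount--; }

struct vma_model { struct file_model *vm_file; };

/* Swap the mapped file: reference the new one, drop the old one.
 * Callers no longer hand-roll error-path fput()s. */
static void vma_set_file_model(struct vma_model *vma, struct file_model *file)
{
	struct file_model *old = vma->vm_file;

	get_file_model(file);
	vma->vm_file = file;
	fput_model(old);
}

int main(void)
{
	struct file_model overlay = { 1, "overlay" }, real = { 1, "real" };
	struct vma_model vma = { &overlay };

	vma_set_file_model(&vma, &real);
	printf("%s refs=%d, %s refs=%d\n",
	       overlay.name, overlay.refcount, real.name, real.refcount);
	return 0;
}
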
926 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
927 +index 88b581b75d5be..b14c045320fbf 100644
928 +--- a/include/linux/bpf.h
929 ++++ b/include/linux/bpf.h
930 +@@ -1288,6 +1288,11 @@ static inline bool bpf_allow_ptr_leaks(void)
931 + return perfmon_capable();
932 + }
933 +
934 ++static inline bool bpf_allow_uninit_stack(void)
935 ++{
936 ++ return perfmon_capable();
937 ++}
938 ++
939 + static inline bool bpf_allow_ptr_to_map_access(void)
940 + {
941 + return perfmon_capable();
942 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
943 +index e941fe1484e57..57c11e5bec6cf 100644
944 +--- a/include/linux/bpf_verifier.h
945 ++++ b/include/linux/bpf_verifier.h
946 +@@ -195,7 +195,7 @@ struct bpf_func_state {
947 + * 0 = main function, 1 = first callee.
948 + */
949 + u32 frameno;
950 +- /* subprog number == index within subprog_stack_depth
951 ++ /* subprog number == index within subprog_info
952 + * zero == main subprog
953 + */
954 + u32 subprogno;
955 +@@ -401,6 +401,7 @@ struct bpf_verifier_env {
956 + u32 used_map_cnt; /* number of used maps */
957 + u32 id_gen; /* used to generate unique reg IDs */
958 + bool allow_ptr_leaks;
959 ++ bool allow_uninit_stack;
960 + bool allow_ptr_to_map_access;
961 + bool bpf_capable;
962 + bool bypass_spec_v1;
963 +diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
964 +index 8b30b14b47d3f..f377817ce75c1 100644
965 +--- a/include/linux/platform_data/gpio-omap.h
966 ++++ b/include/linux/platform_data/gpio-omap.h
967 +@@ -85,6 +85,7 @@
968 + * omap2+ specific GPIO registers
969 + */
970 + #define OMAP24XX_GPIO_REVISION 0x0000
971 ++#define OMAP24XX_GPIO_SYSCONFIG 0x0010
972 + #define OMAP24XX_GPIO_IRQSTATUS1 0x0018
973 + #define OMAP24XX_GPIO_IRQSTATUS2 0x0028
974 + #define OMAP24XX_GPIO_IRQENABLE2 0x002c
975 +@@ -108,6 +109,7 @@
976 + #define OMAP24XX_GPIO_SETDATAOUT 0x0094
977 +
978 + #define OMAP4_GPIO_REVISION 0x0000
979 ++#define OMAP4_GPIO_SYSCONFIG 0x0010
980 + #define OMAP4_GPIO_EOI 0x0020
981 + #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
982 + #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
983 +@@ -148,6 +150,7 @@
984 + #ifndef __ASSEMBLER__
985 + struct omap_gpio_reg_offs {
986 + u16 revision;
987 ++ u16 sysconfig;
988 + u16 direction;
989 + u16 datain;
990 + u16 dataout;
991 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
992 +index c198d19fa1c89..d3a2f0cef76d1 100644
993 +--- a/kernel/bpf/verifier.c
994 ++++ b/kernel/bpf/verifier.c
995 +@@ -2271,12 +2271,14 @@ static void save_register_state(struct bpf_func_state *state,
996 + state->stack[spi].slot_type[i] = STACK_SPILL;
997 + }
998 +
999 +-/* check_stack_read/write functions track spill/fill of registers,
1000 ++/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
1001 + * stack boundary and alignment are checked in check_mem_access()
1002 + */
1003 +-static int check_stack_write(struct bpf_verifier_env *env,
1004 +- struct bpf_func_state *state, /* func where register points to */
1005 +- int off, int size, int value_regno, int insn_idx)
1006 ++static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
1007 ++ /* stack frame we're writing to */
1008 ++ struct bpf_func_state *state,
1009 ++ int off, int size, int value_regno,
1010 ++ int insn_idx)
1011 + {
1012 + struct bpf_func_state *cur; /* state of the current function */
1013 + int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1014 +@@ -2402,9 +2404,175 @@ static int check_stack_write(struct bpf_verifier_env *env,
1015 + return 0;
1016 + }
1017 +
1018 +-static int check_stack_read(struct bpf_verifier_env *env,
1019 +- struct bpf_func_state *reg_state /* func where register points to */,
1020 +- int off, int size, int value_regno)
1021 ++/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
1022 ++ * known to contain a variable offset.
1023 ++ * This function checks whether the write is permitted and conservatively
1024 ++ * tracks the effects of the write, considering that each stack slot in the
1025 ++ * dynamic range is potentially written to.
1026 ++ *
1027 ++ * 'off' includes 'regno->off'.
1028 ++ * 'value_regno' can be -1, meaning that an unknown value is being written to
1029 ++ * the stack.
1030 ++ *
1031 ++ * Spilled pointers in range are not marked as written because we don't know
1032 ++ * what's going to be actually written. This means that read propagation for
1033 ++ * future reads cannot be terminated by this write.
1034 ++ *
1035 ++ * For privileged programs, uninitialized stack slots are considered
1036 ++ * initialized by this write (even though we don't know exactly what offsets
1037 ++ * are going to be written to). The idea is that we don't want the verifier to
1038 ++ * reject future reads that access slots written to through variable offsets.
1039 ++ */
1040 ++static int check_stack_write_var_off(struct bpf_verifier_env *env,
1041 ++ /* func where register points to */
1042 ++ struct bpf_func_state *state,
1043 ++ int ptr_regno, int off, int size,
1044 ++ int value_regno, int insn_idx)
1045 ++{
1046 ++ struct bpf_func_state *cur; /* state of the current function */
1047 ++ int min_off, max_off;
1048 ++ int i, err;
1049 ++ struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
1050 ++ bool writing_zero = false;
1051 ++ /* set if the fact that we're writing a zero is used to let any
1052 ++ * stack slots remain STACK_ZERO
1053 ++ */
1054 ++ bool zero_used = false;
1055 ++
1056 ++ cur = env->cur_state->frame[env->cur_state->curframe];
1057 ++ ptr_reg = &cur->regs[ptr_regno];
1058 ++ min_off = ptr_reg->smin_value + off;
1059 ++ max_off = ptr_reg->smax_value + off + size;
1060 ++ if (value_regno >= 0)
1061 ++ value_reg = &cur->regs[value_regno];
1062 ++ if (value_reg && register_is_null(value_reg))
1063 ++ writing_zero = true;
1064 ++
1065 ++ err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
1066 ++ state->acquired_refs, true);
1067 ++ if (err)
1068 ++ return err;
1069 ++
1070 ++
1071 ++ /* Variable offset writes destroy any spilled pointers in range. */
1072 ++ for (i = min_off; i < max_off; i++) {
1073 ++ u8 new_type, *stype;
1074 ++ int slot, spi;
1075 ++
1076 ++ slot = -i - 1;
1077 ++ spi = slot / BPF_REG_SIZE;
1078 ++ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
1079 ++
1080 ++ if (!env->allow_ptr_leaks
1081 ++ && *stype != NOT_INIT
1082 ++ && *stype != SCALAR_VALUE) {
1083 ++ /* Reject the write if there are spilled pointers in
1084 ++ * range. If we didn't reject here, the ptr status
1085 ++ * would be erased below (even though not all slots are
1086 ++ * actually overwritten), possibly opening the door to
1087 ++ * leaks.
1088 ++ */
1089 ++ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
1090 ++ insn_idx, i);
1091 ++ return -EINVAL;
1092 ++ }
1093 ++
1094 ++ /* Erase all spilled pointers. */
1095 ++ state->stack[spi].spilled_ptr.type = NOT_INIT;
1096 ++
1097 ++ /* Update the slot type. */
1098 ++ new_type = STACK_MISC;
1099 ++ if (writing_zero && *stype == STACK_ZERO) {
1100 ++ new_type = STACK_ZERO;
1101 ++ zero_used = true;
1102 ++ }
1103 ++ /* If the slot is STACK_INVALID, we check whether it's OK to
1104 ++ * pretend that it will be initialized by this write. The slot
1105 ++ * might not actually be written to, and so if we mark it as
1106 ++ * initialized future reads might leak uninitialized memory.
1107 ++ * For privileged programs, we will accept such reads to slots
1108 ++ * that may or may not be written because, if we rejected
1109 ++ * them, the error would be too confusing.
1110 ++ */
1111 ++ if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
1112 ++ verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
1113 ++ insn_idx, i);
1114 ++ return -EINVAL;
1115 ++ }
1116 ++ *stype = new_type;
1117 ++ }
1118 ++ if (zero_used) {
1119 ++ /* backtracking doesn't work for STACK_ZERO yet. */
1120 ++ err = mark_chain_precision(env, value_regno);
1121 ++ if (err)
1122 ++ return err;
1123 ++ }
1124 ++ return 0;
1125 ++}
1126 ++
1127 ++/* When register 'dst_regno' is assigned some values from stack[min_off,
1128 ++ * max_off), we set the register's type according to the types of the
1129 ++ * respective stack slots. If all the stack values are known to be zeros, then
1130 ++ * so is the destination reg. Otherwise, the register is considered to be
1131 ++ * SCALAR. This function does not deal with register filling; the caller must
1132 ++ * ensure that all spilled registers in the stack range have been marked as
1133 ++ * read.
1134 ++ */
1135 ++static void mark_reg_stack_read(struct bpf_verifier_env *env,
1136 ++ /* func where src register points to */
1137 ++ struct bpf_func_state *ptr_state,
1138 ++ int min_off, int max_off, int dst_regno)
1139 ++{
1140 ++ struct bpf_verifier_state *vstate = env->cur_state;
1141 ++ struct bpf_func_state *state = vstate->frame[vstate->curframe];
1142 ++ int i, slot, spi;
1143 ++ u8 *stype;
1144 ++ int zeros = 0;
1145 ++
1146 ++ for (i = min_off; i < max_off; i++) {
1147 ++ slot = -i - 1;
1148 ++ spi = slot / BPF_REG_SIZE;
1149 ++ stype = ptr_state->stack[spi].slot_type;
1150 ++ if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
1151 ++ break;
1152 ++ zeros++;
1153 ++ }
1154 ++ if (zeros == max_off - min_off) {
1155 ++ /* any access_size read into register is zero extended,
1156 ++ * so the whole register == const_zero
1157 ++ */
1158 ++ __mark_reg_const_zero(&state->regs[dst_regno]);
1159 ++ /* backtracking doesn't support STACK_ZERO yet,
1160 ++ * so mark it precise here, so that later
1161 ++ * backtracking can stop here.
1162 ++ * Backtracking may not need this if this register
1163 ++ * doesn't participate in pointer adjustment.
1164 ++ * Forward propagation of precise flag is not
1165 ++ * necessary either. This mark is only to stop
1166 ++ * backtracking. Any register that contributed
1167 ++ * to const 0 was marked precise before spill.
1168 ++ */
1169 ++ state->regs[dst_regno].precise = true;
1170 ++ } else {
1171 ++ /* have read misc data from the stack */
1172 ++ mark_reg_unknown(env, state->regs, dst_regno);
1173 ++ }
1174 ++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
1175 ++}
1176 ++
1177 ++/* Read the stack at 'off' and put the results into the register indicated by
1178 ++ * 'dst_regno'. It handles reg filling if the addressed stack slot is a
1179 ++ * spilled reg.
1180 ++ *
1181 ++ * 'dst_regno' can be -1, meaning that the read value is not going to a
1182 ++ * register.
1183 ++ *
1184 ++ * The access is assumed to be within the current stack bounds.
1185 ++ */
1186 ++static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
1187 ++ /* func where src register points to */
1188 ++ struct bpf_func_state *reg_state,
1189 ++ int off, int size, int dst_regno)
1190 + {
1191 + struct bpf_verifier_state *vstate = env->cur_state;
1192 + struct bpf_func_state *state = vstate->frame[vstate->curframe];
1193 +@@ -2412,11 +2580,6 @@ static int check_stack_read(struct bpf_verifier_env *env,
1194 + struct bpf_reg_state *reg;
1195 + u8 *stype;
1196 +
1197 +- if (reg_state->allocated_stack <= slot) {
1198 +- verbose(env, "invalid read from stack off %d+0 size %d\n",
1199 +- off, size);
1200 +- return -EACCES;
1201 +- }
1202 + stype = reg_state->stack[spi].slot_type;
1203 + reg = &reg_state->stack[spi].spilled_ptr;
1204 +
1205 +@@ -2427,9 +2590,9 @@ static int check_stack_read(struct bpf_verifier_env *env,
1206 + verbose(env, "invalid size of register fill\n");
1207 + return -EACCES;
1208 + }
1209 +- if (value_regno >= 0) {
1210 +- mark_reg_unknown(env, state->regs, value_regno);
1211 +- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1212 ++ if (dst_regno >= 0) {
1213 ++ mark_reg_unknown(env, state->regs, dst_regno);
1214 ++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
1215 + }
1216 + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
1217 + return 0;
1218 +@@ -2441,16 +2604,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
1219 + }
1220 + }
1221 +
1222 +- if (value_regno >= 0) {
1223 ++ if (dst_regno >= 0) {
1224 + /* restore register state from stack */
1225 +- state->regs[value_regno] = *reg;
1226 ++ state->regs[dst_regno] = *reg;
1227 + /* mark reg as written since spilled pointer state likely
1228 + * has its liveness marks cleared by is_state_visited()
1229 + * which resets stack/reg liveness for state transitions
1230 + */
1231 +- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1232 ++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
1233 + } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
1234 +- /* If value_regno==-1, the caller is asking us whether
1235 ++ /* If dst_regno==-1, the caller is asking us whether
1236 + * it is acceptable to use this value as a SCALAR_VALUE
1237 + * (e.g. for XADD).
1238 + * We must not allow unprivileged callers to do that
1239 +@@ -2462,70 +2625,167 @@ static int check_stack_read(struct bpf_verifier_env *env,
1240 + }
1241 + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
1242 + } else {
1243 +- int zeros = 0;
1244 ++ u8 type;
1245 +
1246 + for (i = 0; i < size; i++) {
1247 +- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1248 ++ type = stype[(slot - i) % BPF_REG_SIZE];
1249 ++ if (type == STACK_MISC)
1250 + continue;
1251 +- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1252 +- zeros++;
1253 ++ if (type == STACK_ZERO)
1254 + continue;
1255 +- }
1256 + verbose(env, "invalid read from stack off %d+%d size %d\n",
1257 + off, i, size);
1258 + return -EACCES;
1259 + }
1260 + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
1261 +- if (value_regno >= 0) {
1262 +- if (zeros == size) {
1263 +- /* any size read into register is zero extended,
1264 +- * so the whole register == const_zero
1265 +- */
1266 +- __mark_reg_const_zero(&state->regs[value_regno]);
1267 +- /* backtracking doesn't support STACK_ZERO yet,
1268 +- * so mark it precise here, so that later
1269 +- * backtracking can stop here.
1270 +- * Backtracking may not need this if this register
1271 +- * doesn't participate in pointer adjustment.
1272 +- * Forward propagation of precise flag is not
1273 +- * necessary either. This mark is only to stop
1274 +- * backtracking. Any register that contributed
1275 +- * to const 0 was marked precise before spill.
1276 +- */
1277 +- state->regs[value_regno].precise = true;
1278 +- } else {
1279 +- /* have read misc data from the stack */
1280 +- mark_reg_unknown(env, state->regs, value_regno);
1281 +- }
1282 +- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1283 +- }
1284 ++ if (dst_regno >= 0)
1285 ++ mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
1286 + }
1287 + return 0;
1288 + }
1289 +
1290 +-static int check_stack_access(struct bpf_verifier_env *env,
1291 +- const struct bpf_reg_state *reg,
1292 +- int off, int size)
1293 ++enum stack_access_src {
1294 ++ ACCESS_DIRECT = 1, /* the access is performed by an instruction */
1295 ++ ACCESS_HELPER = 2, /* the access is performed by a helper */
1296 ++};
1297 ++
1298 ++static int check_stack_range_initialized(struct bpf_verifier_env *env,
1299 ++ int regno, int off, int access_size,
1300 ++ bool zero_size_allowed,
1301 ++ enum stack_access_src type,
1302 ++ struct bpf_call_arg_meta *meta);
1303 ++
1304 ++static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
1305 ++{
1306 ++ return cur_regs(env) + regno;
1307 ++}
1308 ++
1309 ++/* Read the stack at 'ptr_regno + off' and put the result into the register
1310 ++ * 'dst_regno'.
1311 ++ * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'),
1312 ++ * but not its variable offset.
1313 ++ * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
1314 ++ *
1315 ++ * As opposed to check_stack_read_fixed_off, this function doesn't deal with
1316 ++ * filling registers (i.e. reads of spilled register cannot be detected when
1317 ++ * the offset is not fixed). We conservatively mark 'dst_regno' as containing
1318 ++ * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
1319 ++ * offset; for a fixed offset check_stack_read_fixed_off should be used
1320 ++ * instead.
1321 ++ */
1322 ++static int check_stack_read_var_off(struct bpf_verifier_env *env,
1323 ++ int ptr_regno, int off, int size, int dst_regno)
1324 ++{
1325 ++ /* The state of the source register. */
1326 ++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
1327 ++ struct bpf_func_state *ptr_state = func(env, reg);
1328 ++ int err;
1329 ++ int min_off, max_off;
1330 ++
1331 ++ /* Note that we pass a NULL meta, so raw access will not be permitted.
1332 ++ */
1333 ++ err = check_stack_range_initialized(env, ptr_regno, off, size,
1334 ++ false, ACCESS_DIRECT, NULL);
1335 ++ if (err)
1336 ++ return err;
1337 ++
1338 ++ min_off = reg->smin_value + off;
1339 ++ max_off = reg->smax_value + off;
1340 ++ mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
1341 ++ return 0;
1342 ++}
1343 ++
1344 ++/* check_stack_read dispatches to check_stack_read_fixed_off or
1345 ++ * check_stack_read_var_off.
1346 ++ *
1347 ++ * The caller must ensure that the offset falls within the allocated stack
1348 ++ * bounds.
1349 ++ *
1350 ++ * 'dst_regno' is a register which will receive the value from the stack. It
1351 ++ * can be -1, meaning that the read value is not going to a register.
1352 ++ */
1353 ++static int check_stack_read(struct bpf_verifier_env *env,
1354 ++ int ptr_regno, int off, int size,
1355 ++ int dst_regno)
1356 + {
1357 +- /* Stack accesses must be at a fixed offset, so that we
1358 +- * can determine what type of data were returned. See
1359 +- * check_stack_read().
1360 ++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
1361 ++ struct bpf_func_state *state = func(env, reg);
1362 ++ int err;
1363 ++ /* Some accesses are only permitted with a static offset. */
1364 ++ bool var_off = !tnum_is_const(reg->var_off);
1365 ++
1366 ++ /* The offset is required to be static when reads don't go to a
1367 ++ * register, in order to not leak pointers (see
1368 ++ * check_stack_read_fixed_off).
1369 + */
1370 +- if (!tnum_is_const(reg->var_off)) {
1371 ++ if (dst_regno < 0 && var_off) {
1372 + char tn_buf[48];
1373 +
1374 + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1375 +- verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
1376 ++ verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
1377 + tn_buf, off, size);
1378 + return -EACCES;
1379 + }
1380 ++ /* Variable offset is prohibited for unprivileged mode for simplicity
1381 ++ * since it requires corresponding support in Spectre masking for stack
1382 ++ * ALU. See also retrieve_ptr_limit().
1383 ++ */
1384 ++ if (!env->bypass_spec_v1 && var_off) {
1385 ++ char tn_buf[48];
1386 +
1387 +- if (off >= 0 || off < -MAX_BPF_STACK) {
1388 +- verbose(env, "invalid stack off=%d size=%d\n", off, size);
1389 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1390 ++ verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
1391 ++ ptr_regno, tn_buf);
1392 + return -EACCES;
1393 + }
1394 +
1395 +- return 0;
1396 ++ if (!var_off) {
1397 ++ off += reg->var_off.value;
1398 ++ err = check_stack_read_fixed_off(env, state, off, size,
1399 ++ dst_regno);
1400 ++ } else {
1401 ++ /* Variable offset stack reads need more conservative handling
1402 ++ * than fixed offset ones. Note that dst_regno >= 0 on this
1403 ++ * branch.
1404 ++ */
1405 ++ err = check_stack_read_var_off(env, ptr_regno, off, size,
1406 ++ dst_regno);
1407 ++ }
1408 ++ return err;
1409 ++}
1410 ++
1411 ++
1412 ++/* check_stack_write dispatches to check_stack_write_fixed_off or
1413 ++ * check_stack_write_var_off.
1414 ++ *
1415 ++ * 'ptr_regno' is the register used as a pointer into the stack.
1416 ++ * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
1417 ++ * 'value_regno' is the register whose value we're writing to the stack. It can
1418 ++ * be -1, meaning that we're not writing from a register.
1419 ++ *
1420 ++ * The caller must ensure that the offset falls within the maximum stack size.
1421 ++ */
1422 ++static int check_stack_write(struct bpf_verifier_env *env,
1423 ++ int ptr_regno, int off, int size,
1424 ++ int value_regno, int insn_idx)
1425 ++{
1426 ++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
1427 ++ struct bpf_func_state *state = func(env, reg);
1428 ++ int err;
1429 ++
1430 ++ if (tnum_is_const(reg->var_off)) {
1431 ++ off += reg->var_off.value;
1432 ++ err = check_stack_write_fixed_off(env, state, off, size,
1433 ++ value_regno, insn_idx);
1434 ++ } else {
1435 ++ /* Variable offset stack writes need more conservative handling
1436 ++ * than fixed offset ones.
1437 ++ */
1438 ++ err = check_stack_write_var_off(env, state,
1439 ++ ptr_regno, off, size,
1440 ++ value_regno, insn_idx);
1441 ++ }
1442 ++ return err;
1443 + }
1444 +
1445 + static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
1446 +@@ -2858,11 +3118,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
1447 + return -EACCES;
1448 + }
1449 +
1450 +-static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
1451 +-{
1452 +- return cur_regs(env) + regno;
1453 +-}
1454 +-
1455 + static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1456 + {
1457 + return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
1458 +@@ -2981,8 +3236,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1459 + break;
1460 + case PTR_TO_STACK:
1461 + pointer_desc = "stack ";
1462 +- /* The stack spill tracking logic in check_stack_write()
1463 +- * and check_stack_read() relies on stack accesses being
1464 ++ /* The stack spill tracking logic in check_stack_write_fixed_off()
1465 ++ * and check_stack_read_fixed_off() relies on stack accesses being
1466 + * aligned.
1467 + */
1468 + strict = true;
1469 +@@ -3400,6 +3655,91 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
1470 + return 0;
1471 + }
1472 +
1473 ++/* Check that the stack access at the given offset is within bounds. The
1474 ++ * maximum valid offset is -1.
1475 ++ *
1476 ++ * The minimum valid offset is -MAX_BPF_STACK for writes, and
1477 ++ * -state->allocated_stack for reads.
1478 ++ */
1479 ++static int check_stack_slot_within_bounds(int off,
1480 ++ struct bpf_func_state *state,
1481 ++ enum bpf_access_type t)
1482 ++{
1483 ++ int min_valid_off;
1484 ++
1485 ++ if (t == BPF_WRITE)
1486 ++ min_valid_off = -MAX_BPF_STACK;
1487 ++ else
1488 ++ min_valid_off = -state->allocated_stack;
1489 ++
1490 ++ if (off < min_valid_off || off > -1)
1491 ++ return -EACCES;
1492 ++ return 0;
1493 ++}
1494 ++
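/*
 * A minimal standalone sketch (not kernel code), assuming MAX_BPF_STACK
 * is 512 as in the mainline headers: the helper above accepts offsets in
 * [-MAX_BPF_STACK, -1] for writes, but only already-allocated slots,
 * [-allocated_stack, -1], for reads.
 */
#include <stdio.h>

#define TOY_MAX_BPF_STACK 512

enum toy_access { TOY_READ, TOY_WRITE };

static int toy_slot_within_bounds(int off, int allocated_stack, enum toy_access t)
{
	int min_valid_off = (t == TOY_WRITE) ? -TOY_MAX_BPF_STACK : -allocated_stack;

	return (off < min_valid_off || off > -1) ? -1 : 0;
}

int main(void)
{
	/* 64 bytes allocated: reading fp-128 fails, writing fp-128 is fine */
	printf("%d\n", toy_slot_within_bounds(-128, 64, TOY_READ));	/* -1 */
	printf("%d\n", toy_slot_within_bounds(-128, 64, TOY_WRITE));	/*  0 */
	printf("%d\n", toy_slot_within_bounds(0, 64, TOY_WRITE));	/* -1: fp+0 invalid */
	return 0;
}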
1495 ++/* Check that the stack access at 'regno + off' falls within the maximum stack
1496 ++ * bounds.
1497 ++ *
1498 ++ * 'off' includes 'regno->off', but not its dynamic part (if any).
1499 ++ */
1500 ++static int check_stack_access_within_bounds(
1501 ++ struct bpf_verifier_env *env,
1502 ++ int regno, int off, int access_size,
1503 ++ enum stack_access_src src, enum bpf_access_type type)
1504 ++{
1505 ++ struct bpf_reg_state *regs = cur_regs(env);
1506 ++ struct bpf_reg_state *reg = regs + regno;
1507 ++ struct bpf_func_state *state = func(env, reg);
1508 ++ int min_off, max_off;
1509 ++ int err;
1510 ++ char *err_extra;
1511 ++
1512 ++ if (src == ACCESS_HELPER)
1513 ++ /* We don't know if helpers are reading or writing (or both). */
1514 ++ err_extra = " indirect access to";
1515 ++ else if (type == BPF_READ)
1516 ++ err_extra = " read from";
1517 ++ else
1518 ++ err_extra = " write to";
1519 ++
1520 ++ if (tnum_is_const(reg->var_off)) {
1521 ++ min_off = reg->var_off.value + off;
1522 ++ if (access_size > 0)
1523 ++ max_off = min_off + access_size - 1;
1524 ++ else
1525 ++ max_off = min_off;
1526 ++ } else {
1527 ++ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
1528 ++ reg->smin_value <= -BPF_MAX_VAR_OFF) {
1529 ++ verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
1530 ++ err_extra, regno);
1531 ++ return -EACCES;
1532 ++ }
1533 ++ min_off = reg->smin_value + off;
1534 ++ if (access_size > 0)
1535 ++ max_off = reg->smax_value + off + access_size - 1;
1536 ++ else
1537 ++ max_off = min_off;
1538 ++ }
1539 ++
1540 ++ err = check_stack_slot_within_bounds(min_off, state, type);
1541 ++ if (!err)
1542 ++ err = check_stack_slot_within_bounds(max_off, state, type);
1543 ++
1544 ++ if (err) {
1545 ++ if (tnum_is_const(reg->var_off)) {
1546 ++ verbose(env, "invalid%s stack R%d off=%d size=%d\n",
1547 ++ err_extra, regno, off, access_size);
1548 ++ } else {
1549 ++ char tn_buf[48];
1550 ++
1551 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1552 ++ verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
1553 ++ err_extra, regno, tn_buf, access_size);
1554 ++ }
1555 ++ }
1556 ++ return err;
1557 ++}
1558 +
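/*
 * A worked standalone example (not kernel code) of the variable-offset
 * branch above: both extremes of the access are validated. With
 * smin = -8, smax = -4, off = -16 and a 4-byte access, the verifier must
 * check min_off = -24 and max_off = -4 + -16 + 4 - 1 = -17.
 */
#include <stdio.h>

int main(void)
{
	int smin = -8, smax = -4, off = -16, access_size = 4;
	int min_off = smin + off;
	int max_off = smax + off + access_size - 1;

	printf("check [%d, %d]\n", min_off, max_off);	/* check [-24, -17] */
	return 0;
}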
1559 + /* check whether memory at (regno + off) is accessible for t = (read | write)
1560 + * if t==write, value_regno is a register which value is stored into memory
1561 +@@ -3515,8 +3855,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1562 + }
1563 +
1564 + } else if (reg->type == PTR_TO_STACK) {
1565 +- off += reg->var_off.value;
1566 +- err = check_stack_access(env, reg, off, size);
1567 ++ /* Basic bounds checks. */
1568 ++ err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
1569 + if (err)
1570 + return err;
1571 +
1572 +@@ -3525,12 +3865,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1573 + if (err)
1574 + return err;
1575 +
1576 +- if (t == BPF_WRITE)
1577 +- err = check_stack_write(env, state, off, size,
1578 +- value_regno, insn_idx);
1579 +- else
1580 +- err = check_stack_read(env, state, off, size,
1581 ++ if (t == BPF_READ)
1582 ++ err = check_stack_read(env, regno, off, size,
1583 + value_regno);
1584 ++ else
1585 ++ err = check_stack_write(env, regno, off, size,
1586 ++ value_regno, insn_idx);
1587 + } else if (reg_is_pkt_pointer(reg)) {
1588 + if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1589 + verbose(env, "cannot write into packet\n");
1590 +@@ -3652,49 +3992,53 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
1591 + BPF_SIZE(insn->code), BPF_WRITE, -1, true);
1592 + }
1593 +
1594 +-static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
1595 +- int off, int access_size,
1596 +- bool zero_size_allowed)
1597 ++/* When register 'regno' is used to read the stack (either directly or through
1598 ++ * a helper function), make sure that it's within the stack boundary and, depending
1599 ++ * on the access type, that all elements of the stack are initialized.
1600 ++ *
1601 ++ * 'off' includes 'regno->off', but not its dynamic part (if any).
1602 ++ *
1603 ++ * All registers that have been spilled on the stack in the slots within the
1604 ++ * read offsets are marked as read.
1605 ++ */
1606 ++static int check_stack_range_initialized(
1607 ++ struct bpf_verifier_env *env, int regno, int off,
1608 ++ int access_size, bool zero_size_allowed,
1609 ++ enum stack_access_src type, struct bpf_call_arg_meta *meta)
1610 + {
1611 + struct bpf_reg_state *reg = reg_state(env, regno);
1612 ++ struct bpf_func_state *state = func(env, reg);
1613 ++ int err, min_off, max_off, i, j, slot, spi;
1614 ++ char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
1615 ++ enum bpf_access_type bounds_check_type;
1616 ++	/* Some accesses can write anything into the stack; others are
1617 ++ * read-only.
1618 ++ */
1619 ++ bool clobber = false;
1620 +
1621 +- if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1622 +- access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
1623 +- if (tnum_is_const(reg->var_off)) {
1624 +- verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1625 +- regno, off, access_size);
1626 +- } else {
1627 +- char tn_buf[48];
1628 +-
1629 +- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1630 +- verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
1631 +- regno, tn_buf, access_size);
1632 +- }
1633 ++ if (access_size == 0 && !zero_size_allowed) {
1634 ++ verbose(env, "invalid zero-sized read\n");
1635 + return -EACCES;
1636 + }
1637 +- return 0;
1638 +-}
1639 +
1640 +-/* when register 'regno' is passed into function that will read 'access_size'
1641 +- * bytes from that pointer, make sure that it's within stack boundary
1642 +- * and all elements of stack are initialized.
1643 +- * Unlike most pointer bounds-checking functions, this one doesn't take an
1644 +- * 'off' argument, so it has to add in reg->off itself.
1645 +- */
1646 +-static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1647 +- int access_size, bool zero_size_allowed,
1648 +- struct bpf_call_arg_meta *meta)
1649 +-{
1650 +- struct bpf_reg_state *reg = reg_state(env, regno);
1651 +- struct bpf_func_state *state = func(env, reg);
1652 +- int err, min_off, max_off, i, j, slot, spi;
1653 ++ if (type == ACCESS_HELPER) {
1654 ++ /* The bounds checks for writes are more permissive than for
1655 ++ * reads. However, if raw_mode is not set, we'll do extra
1656 ++ * checks below.
1657 ++ */
1658 ++ bounds_check_type = BPF_WRITE;
1659 ++ clobber = true;
1660 ++ } else {
1661 ++ bounds_check_type = BPF_READ;
1662 ++ }
1663 ++ err = check_stack_access_within_bounds(env, regno, off, access_size,
1664 ++ type, bounds_check_type);
1665 ++ if (err)
1666 ++ return err;
1667 ++
1668 +
1669 + if (tnum_is_const(reg->var_off)) {
1670 +- min_off = max_off = reg->var_off.value + reg->off;
1671 +- err = __check_stack_boundary(env, regno, min_off, access_size,
1672 +- zero_size_allowed);
1673 +- if (err)
1674 +- return err;
1675 ++ min_off = max_off = reg->var_off.value + off;
1676 + } else {
1677 + /* Variable offset is prohibited for unprivileged mode for
1678 + * simplicity since it requires corresponding support in
1679 +@@ -3705,8 +4049,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1680 + char tn_buf[48];
1681 +
1682 + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1683 +- verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
1684 +- regno, tn_buf);
1685 ++ verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
1686 ++ regno, err_extra, tn_buf);
1687 + return -EACCES;
1688 + }
1689 + /* Only initialized buffer on stack is allowed to be accessed
1690 +@@ -3718,28 +4062,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1691 + if (meta && meta->raw_mode)
1692 + meta = NULL;
1693 +
1694 +- if (reg->smax_value >= BPF_MAX_VAR_OFF ||
1695 +- reg->smax_value <= -BPF_MAX_VAR_OFF) {
1696 +- verbose(env, "R%d unbounded indirect variable offset stack access\n",
1697 +- regno);
1698 +- return -EACCES;
1699 +- }
1700 +- min_off = reg->smin_value + reg->off;
1701 +- max_off = reg->smax_value + reg->off;
1702 +- err = __check_stack_boundary(env, regno, min_off, access_size,
1703 +- zero_size_allowed);
1704 +- if (err) {
1705 +- verbose(env, "R%d min value is outside of stack bound\n",
1706 +- regno);
1707 +- return err;
1708 +- }
1709 +- err = __check_stack_boundary(env, regno, max_off, access_size,
1710 +- zero_size_allowed);
1711 +- if (err) {
1712 +- verbose(env, "R%d max value is outside of stack bound\n",
1713 +- regno);
1714 +- return err;
1715 +- }
1716 ++ min_off = reg->smin_value + off;
1717 ++ max_off = reg->smax_value + off;
1718 + }
1719 +
1720 + if (meta && meta->raw_mode) {
1721 +@@ -3759,8 +4083,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1722 + if (*stype == STACK_MISC)
1723 + goto mark;
1724 + if (*stype == STACK_ZERO) {
1725 +- /* helper can write anything into the stack */
1726 +- *stype = STACK_MISC;
1727 ++ if (clobber) {
1728 ++ /* helper can write anything into the stack */
1729 ++ *stype = STACK_MISC;
1730 ++ }
1731 + goto mark;
1732 + }
1733 +
1734 +@@ -3771,22 +4097,24 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1735 + if (state->stack[spi].slot_type[0] == STACK_SPILL &&
1736 + (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
1737 + env->allow_ptr_leaks)) {
1738 +- __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
1739 +- for (j = 0; j < BPF_REG_SIZE; j++)
1740 +- state->stack[spi].slot_type[j] = STACK_MISC;
1741 ++ if (clobber) {
1742 ++ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
1743 ++ for (j = 0; j < BPF_REG_SIZE; j++)
1744 ++ state->stack[spi].slot_type[j] = STACK_MISC;
1745 ++ }
1746 + goto mark;
1747 + }
1748 +
1749 + err:
1750 + if (tnum_is_const(reg->var_off)) {
1751 +- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1752 +- min_off, i - min_off, access_size);
1753 ++ verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
1754 ++ err_extra, regno, min_off, i - min_off, access_size);
1755 + } else {
1756 + char tn_buf[48];
1757 +
1758 + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1759 +- verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
1760 +- tn_buf, i - min_off, access_size);
1761 ++ verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
1762 ++ err_extra, regno, tn_buf, i - min_off, access_size);
1763 + }
1764 + return -EACCES;
1765 + mark:
1766 +@@ -3835,8 +4163,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1767 + "rdwr",
1768 + &env->prog->aux->max_rdwr_access);
1769 + case PTR_TO_STACK:
1770 +- return check_stack_boundary(env, regno, access_size,
1771 +- zero_size_allowed, meta);
1772 ++ return check_stack_range_initialized(
1773 ++ env,
1774 ++ regno, reg->off, access_size,
1775 ++ zero_size_allowed, ACCESS_HELPER, meta);
1776 + default: /* scalar_value or invalid ptr */
1777 + /* Allow zero-byte read from NULL, regardless of pointer type */
1778 + if (zero_size_allowed && access_size == 0 &&
1779 +@@ -5399,7 +5729,7 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
1780 + bool off_is_neg = off_reg->smin_value < 0;
1781 + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
1782 + (opcode == BPF_SUB && !off_is_neg);
1783 +- u32 off, max = 0, ptr_limit = 0;
1784 ++ u32 max = 0, ptr_limit = 0;
1785 +
1786 + if (!tnum_is_const(off_reg->var_off) &&
1787 + (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
1788 +@@ -5408,26 +5738,18 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
1789 + switch (ptr_reg->type) {
1790 + case PTR_TO_STACK:
1791 + /* Offset 0 is out-of-bounds, but acceptable start for the
1792 +- * left direction, see BPF_REG_FP.
1793 ++	 * left direction, see BPF_REG_FP. Also, an unknown scalar
1794 ++	 * offset, where we would need to deal with min/max bounds, is
1795 ++	 * currently prohibited for unprivileged programs.
1796 + */
1797 + max = MAX_BPF_STACK + mask_to_left;
1798 +- /* Indirect variable offset stack access is prohibited in
1799 +- * unprivileged mode so it's not handled here.
1800 +- */
1801 +- off = ptr_reg->off + ptr_reg->var_off.value;
1802 +- if (mask_to_left)
1803 +- ptr_limit = MAX_BPF_STACK + off;
1804 +- else
1805 +- ptr_limit = -off - 1;
1806 ++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
1807 + break;
1808 + case PTR_TO_MAP_VALUE:
1809 + max = ptr_reg->map_ptr->value_size;
1810 +- if (mask_to_left) {
1811 +- ptr_limit = ptr_reg->umax_value + ptr_reg->off;
1812 +- } else {
1813 +- off = ptr_reg->smin_value + ptr_reg->off;
1814 +- ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
1815 +- }
1816 ++ ptr_limit = (mask_to_left ?
1817 ++ ptr_reg->smin_value :
1818 ++ ptr_reg->umax_value) + ptr_reg->off;
1819 + break;
1820 + default:
1821 + return REASON_TYPE;
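/*
 * A minimal standalone sketch (not kernel code) of the new PTR_TO_STACK
 * limit formula above: the tentative Spectre masking limit is now taken
 * from the fixed part of the pointer alone, and the commit pass of
 * sanitize_ptr_alu() narrows it further. For a pointer at fp-16 the
 * formula evaluates to 16.
 */
#include <stdio.h>

int main(void)
{
	long long var_off_value = 0;	/* constant part of the pointer's tnum */
	int reg_off = -16;		/* pointer is fp-16 */
	unsigned int ptr_limit = (unsigned int)-(var_off_value + reg_off);

	printf("ptr_limit = %u\n", ptr_limit);	/* ptr_limit = 16 */
	return 0;
}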
1822 +@@ -5482,10 +5804,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
1823 + struct bpf_insn *insn,
1824 + const struct bpf_reg_state *ptr_reg,
1825 + const struct bpf_reg_state *off_reg,
1826 +- struct bpf_reg_state *dst_reg)
1827 ++ struct bpf_reg_state *dst_reg,
1828 ++ struct bpf_insn_aux_data *tmp_aux,
1829 ++ const bool commit_window)
1830 + {
1831 ++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
1832 + struct bpf_verifier_state *vstate = env->cur_state;
1833 +- struct bpf_insn_aux_data *aux = cur_aux(env);
1834 + bool off_is_neg = off_reg->smin_value < 0;
1835 + bool ptr_is_dst_reg = ptr_reg == dst_reg;
1836 + u8 opcode = BPF_OP(insn->code);
1837 +@@ -5504,18 +5828,33 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
1838 + if (vstate->speculative)
1839 + goto do_sim;
1840 +
1841 +- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
1842 +- alu_state |= ptr_is_dst_reg ?
1843 +- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
1844 +-
1845 + err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
1846 + if (err < 0)
1847 + return err;
1848 +
1849 ++ if (commit_window) {
1850 ++ /* In commit phase we narrow the masking window based on
1851 ++ * the observed pointer move after the simulated operation.
1852 ++ */
1853 ++ alu_state = tmp_aux->alu_state;
1854 ++ alu_limit = abs(tmp_aux->alu_limit - alu_limit);
1855 ++ } else {
1856 ++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
1857 ++ alu_state |= ptr_is_dst_reg ?
1858 ++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
1859 ++ }
1860 ++
1861 + err = update_alu_sanitation_state(aux, alu_state, alu_limit);
1862 + if (err < 0)
1863 + return err;
1864 + do_sim:
1865 ++ /* If we're in commit phase, we're done here given we already
1866 ++ * pushed the truncated dst_reg into the speculative verification
1867 ++ * stack.
1868 ++ */
1869 ++ if (commit_window)
1870 ++ return 0;
1871 ++
1872 + /* Simulate and find potential out-of-bounds access under
1873 + * speculative execution from truncation as a result of
1874 + * masking when off was not within expected range. If off
1875 +@@ -5574,6 +5913,72 @@ static int sanitize_err(struct bpf_verifier_env *env,
1876 + return -EACCES;
1877 + }
1878 +
1879 ++/* check that stack access falls within stack limits and that 'reg' doesn't
1880 ++ * have a variable offset.
1881 ++ *
1882 ++ * Variable offset is prohibited for unprivileged mode for simplicity since it
1883 ++ * requires corresponding support in Spectre masking for stack ALU. See also
1884 ++ * retrieve_ptr_limit().
1885 ++ *
1887 ++ * 'off' includes 'reg->off'.
1888 ++ */
1889 ++static int check_stack_access_for_ptr_arithmetic(
1890 ++ struct bpf_verifier_env *env,
1891 ++ int regno,
1892 ++ const struct bpf_reg_state *reg,
1893 ++ int off)
1894 ++{
1895 ++ if (!tnum_is_const(reg->var_off)) {
1896 ++ char tn_buf[48];
1897 ++
1898 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1899 ++ verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
1900 ++ regno, tn_buf, off);
1901 ++ return -EACCES;
1902 ++ }
1903 ++
1904 ++ if (off >= 0 || off < -MAX_BPF_STACK) {
1905 ++ verbose(env, "R%d stack pointer arithmetic goes out of range, "
1906 ++ "prohibited for !root; off=%d\n", regno, off);
1907 ++ return -EACCES;
1908 ++ }
1909 ++
1910 ++ return 0;
1911 ++}
1912 ++
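/*
 * A minimal standalone sketch (not kernel code), again assuming
 * MAX_BPF_STACK = 512: the helper above rejects any resulting fixed
 * stack offset outside [-512, -1], so unprivileged pointer arithmetic
 * can never step above the frame pointer or past the maximum stack.
 */
#include <stdio.h>

#define TOY_MAX_BPF_STACK 512

static int toy_ptr_arith_ok(int off)
{
	return !(off >= 0 || off < -TOY_MAX_BPF_STACK);
}

int main(void)
{
	printf("%d %d %d\n", toy_ptr_arith_ok(-8),	/* 1: in range */
	       toy_ptr_arith_ok(8),			/* 0: above fp */
	       toy_ptr_arith_ok(-600));			/* 0: past the stack */
	return 0;
}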
1913 ++static int sanitize_check_bounds(struct bpf_verifier_env *env,
1914 ++ const struct bpf_insn *insn,
1915 ++ const struct bpf_reg_state *dst_reg)
1916 ++{
1917 ++ u32 dst = insn->dst_reg;
1918 ++
1919 ++ /* For unprivileged we require that resulting offset must be in bounds
1920 ++ * in order to be able to sanitize access later on.
1921 ++ */
1922 ++ if (env->bypass_spec_v1)
1923 ++ return 0;
1924 ++
1925 ++ switch (dst_reg->type) {
1926 ++ case PTR_TO_STACK:
1927 ++ if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
1928 ++ dst_reg->off + dst_reg->var_off.value))
1929 ++ return -EACCES;
1930 ++ break;
1931 ++ case PTR_TO_MAP_VALUE:
1932 ++ if (check_map_access(env, dst, dst_reg->off, 1, false)) {
1933 ++ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
1934 ++ "prohibited for !root\n", dst);
1935 ++ return -EACCES;
1936 ++ }
1937 ++ break;
1938 ++ default:
1939 ++ break;
1940 ++ }
1941 ++
1942 ++ return 0;
1943 ++}
1944 ++
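/*
 * A standalone sketch of the dispatch shape above, with hypothetical
 * simplified types (not the kernel's): a switch on the destination
 * pointer kind, where privileged programs (bypass_spec_v1) skip the
 * bounds requirement entirely.
 */
#include <stdio.h>

enum toy_ptr_kind { TOY_PTR_STACK, TOY_PTR_MAP_VALUE, TOY_PTR_OTHER };

static int toy_sanitize_check_bounds(enum toy_ptr_kind kind, int off,
				     int bypass_spec_v1)
{
	if (bypass_spec_v1)
		return 0;		/* privileged: no Spectre masking needed */

	switch (kind) {
	case TOY_PTR_STACK:
		return (off >= 0 || off < -512) ? -1 : 0;
	case TOY_PTR_MAP_VALUE:
		return (off < 0) ? -1 : 0;	/* placeholder map bound */
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d\n", toy_sanitize_check_bounds(TOY_PTR_STACK, -16, 0));	/* 0 */
	printf("%d\n", toy_sanitize_check_bounds(TOY_PTR_STACK, 16, 0));	/* -1 */
	printf("%d\n", toy_sanitize_check_bounds(TOY_PTR_STACK, 16, 1));	/* 0 */
	return 0;
}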
1945 + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1946 + * Caller should also handle BPF_MOV case separately.
1947 + * If we return -EACCES, caller may want to try again treating pointer as a
1948 +@@ -5592,6 +5997,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1949 + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
1950 + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
1951 + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
1952 ++ struct bpf_insn_aux_data tmp_aux = {};
1953 + u8 opcode = BPF_OP(insn->code);
1954 + u32 dst = insn->dst_reg;
1955 + int ret;
1956 +@@ -5658,12 +6064,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1957 + /* pointer types do not carry 32-bit bounds at the moment. */
1958 + __mark_reg32_unbounded(dst_reg);
1959 +
1960 +- switch (opcode) {
1961 +- case BPF_ADD:
1962 +- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
1963 ++ if (sanitize_needed(opcode)) {
1964 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
1965 ++ &tmp_aux, false);
1966 + if (ret < 0)
1967 + return sanitize_err(env, insn, ret, off_reg, dst_reg);
1968 ++ }
1969 +
1970 ++ switch (opcode) {
1971 ++ case BPF_ADD:
1972 + /* We can take a fixed offset as long as it doesn't overflow
1973 + * the s32 'off' field
1974 + */
1975 +@@ -5714,10 +6123,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1976 + }
1977 + break;
1978 + case BPF_SUB:
1979 +- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
1980 +- if (ret < 0)
1981 +- return sanitize_err(env, insn, ret, off_reg, dst_reg);
1982 +-
1983 + if (dst_reg == off_reg) {
1984 + /* scalar -= pointer. Creates an unknown scalar */
1985 + verbose(env, "R%d tried to subtract pointer from scalar\n",
1986 +@@ -5798,22 +6203,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1987 + __reg_deduce_bounds(dst_reg);
1988 + __reg_bound_offset(dst_reg);
1989 +
1990 +- /* For unprivileged we require that resulting offset must be in bounds
1991 +- * in order to be able to sanitize access later on.
1992 +- */
1993 +- if (!env->bypass_spec_v1) {
1994 +- if (dst_reg->type == PTR_TO_MAP_VALUE &&
1995 +- check_map_access(env, dst, dst_reg->off, 1, false)) {
1996 +- verbose(env, "R%d pointer arithmetic of map value goes out of range, "
1997 +- "prohibited for !root\n", dst);
1998 +- return -EACCES;
1999 +- } else if (dst_reg->type == PTR_TO_STACK &&
2000 +- check_stack_access(env, dst_reg, dst_reg->off +
2001 +- dst_reg->var_off.value, 1)) {
2002 +- verbose(env, "R%d stack pointer arithmetic goes out of range, "
2003 +- "prohibited for !root\n", dst);
2004 +- return -EACCES;
2005 +- }
2006 ++ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
2007 ++ return -EACCES;
2008 ++ if (sanitize_needed(opcode)) {
2009 ++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
2010 ++ &tmp_aux, true);
2011 ++ if (ret < 0)
2012 ++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
2013 + }
2014 +
2015 + return 0;
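/*
 * A worked standalone example (not kernel code) of the two-phase
 * sanitation introduced above: the first pass records a tentative
 * alu_limit in tmp_aux; the commit pass, once bounds are known to hold,
 * narrows it to the distance the pointer actually moved,
 * abs(pre_limit - post_limit).
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int pre_limit = 48;	/* limit computed before simulation */
	unsigned int post_limit = 16;	/* limit recomputed at commit time */
	unsigned int final_limit = abs((int)(pre_limit - post_limit));

	printf("alu_limit narrowed to %u\n", final_limit);	/* 32 */
	return 0;
}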
2016 +@@ -12078,6 +12474,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
2017 + env->strict_alignment = false;
2018 +
2019 + env->allow_ptr_leaks = bpf_allow_ptr_leaks();
2020 ++ env->allow_uninit_stack = bpf_allow_uninit_stack();
2021 + env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
2022 + env->bypass_spec_v1 = bpf_bypass_spec_v1();
2023 + env->bypass_spec_v4 = bpf_bypass_spec_v4();
2024 +diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
2025 +index fe9ca92faa2a7..909b0bf22a1ec 100644
2026 +--- a/kernel/locking/qrwlock.c
2027 ++++ b/kernel/locking/qrwlock.c
2028 +@@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
2029 + */
2030 + void queued_write_lock_slowpath(struct qrwlock *lock)
2031 + {
2032 ++ int cnts;
2033 ++
2034 + /* Put the writer into the wait queue */
2035 + arch_spin_lock(&lock->wait_lock);
2036 +
2037 +@@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
2038 +
2039 + /* When no more readers or writers, set the locked flag */
2040 + do {
2041 +- atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
2042 +- } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
2043 +- _QW_LOCKED) != _QW_WAITING);
2044 ++ cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
2045 ++ } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
2046 + unlock:
2047 + arch_spin_unlock(&lock->wait_lock);
2048 + }
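/*
 * A standalone illustration using C11 atomics rather than the kernel's
 * primitives, with constants mirroring _QW_WAITING/_QW_LOCKED: the
 * qrwlock change swaps a cmpxchg-retry loop for try_cmpxchg, which
 * writes the observed value back into 'expected' on failure, so the
 * loop no longer re-reads the lock word in a separate acquire step.
 */
#include <stdatomic.h>
#include <stdio.h>

#define QW_WAITING 0x100
#define QW_LOCKED  0x0ff

int main(void)
{
	atomic_int cnts = QW_WAITING;
	int expected = QW_WAITING;

	/* spin until the transition WAITING -> LOCKED succeeds */
	while (!atomic_compare_exchange_weak_explicit(&cnts, &expected,
						      QW_LOCKED,
						      memory_order_acquire,
						      memory_order_relaxed)) {
		expected = QW_WAITING;	/* retry only from the WAITING state */
	}
	printf("locked: 0x%x\n", atomic_load(&cnts));	/* locked: 0xff */
	return 0;
}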
2049 +diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
2050 +index e2a0ed5d02f01..c87c4df8703d4 100644
2051 +--- a/security/keys/trusted-keys/trusted_tpm2.c
2052 ++++ b/security/keys/trusted-keys/trusted_tpm2.c
2053 +@@ -79,7 +79,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
2054 + if (i == ARRAY_SIZE(tpm2_hash_map))
2055 + return -EINVAL;
2056 +
2057 +- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
2058 ++ rc = tpm_try_get_ops(chip);
2059 + if (rc)
2060 + return rc;
2061 +
2062 +diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
2063 +index 4d471d9511a54..6fffe56827134 100644
2064 +--- a/tools/arch/ia64/include/asm/barrier.h
2065 ++++ b/tools/arch/ia64/include/asm/barrier.h
2066 +@@ -39,9 +39,6 @@
2067 + * sequential memory pages only.
2068 + */
2069 +
2070 +-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
2071 +-#define ia64_mf() asm volatile ("mf" ::: "memory")
2072 +-
2073 + #define mb() ia64_mf()
2074 + #define rmb() mb()
2075 + #define wmb() mb()
2076 +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
2077 +index 2723082f38170..e7a071a154706 100644
2078 +--- a/tools/perf/util/auxtrace.c
2079 ++++ b/tools/perf/util/auxtrace.c
2080 +@@ -634,7 +634,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
2081 + break;
2082 + }
2083 +
2084 +- if (itr)
2085 ++ if (itr && itr->parse_snapshot_options)
2086 + return itr->parse_snapshot_options(itr, opts, str);
2087 +
2088 + pr_err("No AUX area tracing to snapshot\n");
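/*
 * A standalone illustration (simplified signature, not perf's) of the
 * auxtrace fix above: a method table may leave a hook NULL, so the
 * caller must test the function pointer itself, not just the object,
 * before invoking it.
 */
#include <stdio.h>

struct toy_record {
	int (*parse_snapshot_options)(const char *str);
};

static int toy_parse(struct toy_record *itr, const char *str)
{
	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(str);

	fprintf(stderr, "No AUX area tracing to snapshot\n");
	return -1;
}

int main(void)
{
	struct toy_record no_hook = { .parse_snapshot_options = NULL };

	return toy_parse(&no_hook, "e") == -1 ? 0 : 1;	/* guarded: no crash */
}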
2089 +diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
2090 +index e2537d5acab09..f4d44f75ba152 100644
2091 +--- a/tools/perf/util/map.c
2092 ++++ b/tools/perf/util/map.c
2093 +@@ -836,15 +836,18 @@ out:
2094 + int maps__clone(struct thread *thread, struct maps *parent)
2095 + {
2096 + struct maps *maps = thread->maps;
2097 +- int err = -ENOMEM;
2098 ++ int err;
2099 + struct map *map;
2100 +
2101 + down_read(&parent->lock);
2102 +
2103 + maps__for_each_entry(parent, map) {
2104 + struct map *new = map__clone(map);
2105 +- if (new == NULL)
2106 ++
2107 ++ if (new == NULL) {
2108 ++ err = -ENOMEM;
2109 + goto out_unlock;
2110 ++ }
2111 +
2112 + err = unwind__prepare_access(maps, new, NULL);
2113 + if (err)