commit: cd69f153f5510bc0fb2d5ce2698e8fdc5784e61c
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 28 12:03:18 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Apr 28 12:03:29 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cd69f153

Linux patch 5.10.33

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              | 4 +
 1032_linux-5.10.33.patch | 1964 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1968 insertions(+)

diff --git a/0000_README b/0000_README
index 68ff733..acf61dd 100644
--- a/0000_README
+++ b/0000_README
@@ -171,6 +171,10 @@ Patch: 1031_linux-5.10.32.patch
 From: http://www.kernel.org
 Desc: Linux 5.10.32
 
+Patch: 1032_linux-5.10.33.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.33
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1032_linux-5.10.33.patch b/1032_linux-5.10.33.patch
new file mode 100644
index 0000000..ace1cc0
--- /dev/null
+++ b/1032_linux-5.10.33.patch
@@ -0,0 +1,1964 @@
+diff --git a/Makefile b/Makefile
+index cad90171b4b9b..fd5c8b5c013bf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
+index 9dcae1f2bc99f..c5b9da0d7e6ce 100644
+--- a/arch/arm/boot/dts/omap3.dtsi
++++ b/arch/arm/boot/dts/omap3.dtsi
+@@ -24,6 +24,9 @@
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
++ mmc0 = &mmc1;
++ mmc1 = &mmc2;
++ mmc2 = &mmc3;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+index a1f621b388fe7..358df6d926aff 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+@@ -10,5 +10,5 @@
+ };
+
+ &mmc0 {
+- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
++ broken-cd; /* card detect is broken on *some* boards */
+ };
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index f11a1a1f70261..798c3e78b84bb 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -286,10 +286,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+ if (!instruction_pointer(regs))
+ BUG();
+
+- if (kcb->kprobe_status == KPROBE_REENTER)
++ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+- else
++ } else {
++ kprobes_restore_local_irqflag(kcb, regs);
+ reset_current_kprobe();
++ }
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index 268fad5f51cf4..7bf0a617e94c3 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -292,7 +292,7 @@ config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "11"
+
+-config RAM_BASE
++config DRAM_BASE
+ hex "DRAM start addr (the same with memory-section in dts)"
+ default 0x0
+
+diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
+index 9b98bf31d57ce..16878240ef9ac 100644
+--- a/arch/csky/include/asm/page.h
++++ b/arch/csky/include/asm/page.h
+@@ -28,7 +28,7 @@
+ #define SSEG_SIZE 0x20000000
+ #define LOWMEM_LIMIT (SSEG_SIZE * 2)
+
+-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
++#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
+index dbe829fc52980..4d08134190134 100644
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -94,7 +94,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
+ * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
+ * called yet. Note that node 0 will also count all non-existent cpus.
+ */
+-static int __meminit early_nr_cpus_node(int node)
++static int early_nr_cpus_node(int node)
+ {
+ int cpu, n = 0;
+
+@@ -109,7 +109,7 @@ static int __meminit early_nr_cpus_node(int node)
+ * compute_pernodesize - compute size of pernode data
+ * @node: the node id.
+ */
+-static unsigned long __meminit compute_pernodesize(int node)
++static unsigned long compute_pernodesize(int node)
+ {
+ unsigned long pernodesize = 0, cpus;
+
+@@ -366,7 +366,7 @@ static void __init reserve_pernode_space(void)
+ }
+ }
+
+-static void __meminit scatter_node_data(void)
++static void scatter_node_data(void)
+ {
+ pg_data_t **dst;
+ int node;
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 71203324ff42b..81c458e996d9b 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -994,6 +994,7 @@ ENDPROC(ext_int_handler)
+ * Load idle PSW.
+ */
+ ENTRY(psw_idle)
++ stg %r14,(__SF_GPRS+8*8)(%r15)
+ stg %r3,__SF_EMPTY(%r15)
+ larl %r1,.Lpsw_idle_exit
+ stg %r1,__SF_EMPTY+8(%r15)
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index e7dc13fe5e29f..0b9975200ae35 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4387,7 +4387,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
+- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
++ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 7bdb1821215db..3112186a4f4b2 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -1159,7 +1159,6 @@ enum {
+ SNBEP_PCI_QPI_PORT0_FILTER,
+ SNBEP_PCI_QPI_PORT1_FILTER,
+ BDX_PCI_QPI_PORT2_FILTER,
+- HSWEP_PCI_PCU_3,
+ };
+
+ static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+@@ -2816,22 +2815,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
+ NULL,
+ };
+
+-void hswep_uncore_cpu_init(void)
++#define HSWEP_PCU_DID 0x2fc0
++#define HSWEP_PCU_CAPID4_OFFET 0x94
++#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
++
++static bool hswep_has_limit_sbox(unsigned int device)
+ {
+- int pkg = boot_cpu_data.logical_proc_id;
++ struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
++ u32 capid4;
++
++ if (!dev)
++ return false;
++
++ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
++ if (!hswep_get_chop(capid4))
++ return true;
+
++ return false;
++}
++
++void hswep_uncore_cpu_init(void)
++{
+ if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+ /* Detect 6-8 core systems with only two SBOXes */
+- if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+- u32 capid4;
+-
+- pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
+- 0x94, &capid4);
+- if (((capid4 >> 6) & 0x3) == 0)
+- hswep_uncore_sbox.num_boxes = 2;
+- }
++ if (hswep_has_limit_sbox(HSWEP_PCU_DID))
++ hswep_uncore_sbox.num_boxes = 2;
+
+ uncore_msr_uncores = hswep_msr_uncores;
+ }
+@@ -3094,11 +3104,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
+ },
+- { /* PCU.3 (for Capability registers) */
+- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+- HSWEP_PCI_PCU_3),
+- },
+ { /* end: all zeroes */ }
+ };
+
+@@ -3190,27 +3195,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
+ EVENT_CONSTRAINT_END
+ };
+
++#define BDX_PCU_DID 0x6fc0
++
+ void bdx_uncore_cpu_init(void)
+ {
+- int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
+-
+ if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ uncore_msr_uncores = bdx_msr_uncores;
+
+- /* BDX-DE doesn't have SBOX */
+- if (boot_cpu_data.x86_model == 86) {
+- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+ /* Detect systems with no SBOXes */
+- } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+- struct pci_dev *pdev;
+- u32 capid4;
+-
+- pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
+- pci_read_config_dword(pdev, 0x94, &capid4);
+- if (((capid4 >> 6) & 0x3) == 0)
+- bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+- }
++ if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
++ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
++
+ hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
+ }
+
+@@ -3431,11 +3427,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ BDX_PCI_QPI_PORT2_FILTER),
+ },
+- { /* PCU.3 (for Capability registers) */
+- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
+- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+- HSWEP_PCI_PCU_3),
+- },
+ { /* end: all zeroes */ }
+ };
+
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index a8f3af257e26c..b1deacbeb2669 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
+ struct crash_memmap_data cmd;
+ struct crash_mem *cmem;
+
+- cmem = vzalloc(sizeof(struct crash_mem));
++ cmem = vzalloc(struct_size(cmem, ranges, 1));
+ if (!cmem)
+ return -ENOMEM;
+
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 3be4d0e2a96c3..ed240e170e148 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -98,6 +98,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
++ if (bdev->bd_part_count)
++ return -EBUSY;
+
+ /*
+ * Reopen the device to revalidate the driver state and force a
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 71827d9b0aa19..b7260749e8eee 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
+ goto end;
+ }
+ if (!tdc->busy) {
+- err = pm_runtime_get_sync(tdc->tdma->dev);
++ err = pm_runtime_resume_and_get(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
+ goto end;
+@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ int err;
+
+- err = pm_runtime_get_sync(tdc->tdma->dev);
++ err = pm_runtime_resume_and_get(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+ return;
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 55df63dead8d3..70b29bd079c9f 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ struct xilinx_dpdma_tx_desc *desc;
+ struct virt_dma_desc *vdesc;
+ u32 reg, channels;
++ bool first_frame;
+
+ lockdep_assert_held(&chan->lock);
+
+@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ chan->running = true;
+ }
+
+- if (chan->video_group)
+- channels = xilinx_dpdma_chan_video_group_ready(chan);
+- else
+- channels = BIT(chan->id);
+-
+- if (!channels)
+- return;
+-
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return;
+@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
+ upper_32_bits(sw_desc->dma_addr)));
+
+- if (chan->first_frame)
++ first_frame = chan->first_frame;
++ chan->first_frame = false;
++
++ if (chan->video_group) {
++ channels = xilinx_dpdma_chan_video_group_ready(chan);
++ /*
++ * Trigger the transfer only when all channels in the group are
++ * ready.
++ */
++ if (!channels)
++ return;
++ } else {
++ channels = BIT(chan->id);
++ }
++
++ if (first_frame)
+ reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
+ else
+ reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
+
+- chan->first_frame = false;
+-
+ dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+ }
+
+@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
+ */
+ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
+ {
+- struct xilinx_dpdma_tx_desc *active = chan->desc.active;
++ struct xilinx_dpdma_tx_desc *active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dpdma_debugfs_desc_done_irq(chan);
+
++ active = chan->desc.active;
+ if (active)
+ vchan_cyclic_callback(&active->vdesc);
+ else
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index f7ceb2b11afc5..a7e8ed5191a8e 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -29,6 +29,7 @@
+ #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
+
+ struct gpio_regs {
++ u32 sysconfig;
+ u32 irqenable1;
+ u32 irqenable2;
+ u32 wake_en;
+@@ -1072,6 +1073,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
+ const struct omap_gpio_reg_offs *regs = p->regs;
+ void __iomem *base = p->base;
+
++ p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
+ p->context.ctrl = readl_relaxed(base + regs->ctrl);
+ p->context.oe = readl_relaxed(base + regs->direction);
+ p->context.wake_en = readl_relaxed(base + regs->wkup_en);
+@@ -1091,6 +1093,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
+ const struct omap_gpio_reg_offs *regs = bank->regs;
+ void __iomem *base = bank->base;
+
++ writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
+ writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
+ writel_relaxed(bank->context.ctrl, base + regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
+@@ -1118,6 +1121,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
+
+ bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+
++ /* Save syconfig, it's runtime value can be different from init value */
++ if (bank->loses_context)
++ bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
++
+ if (!bank->enabled_non_wakeup_gpios)
+ goto update_gpio_context_count;
+
+@@ -1282,6 +1289,7 @@ out_unlock:
+
+ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
+ .revision = OMAP24XX_GPIO_REVISION,
++ .sysconfig = OMAP24XX_GPIO_SYSCONFIG,
+ .direction = OMAP24XX_GPIO_OE,
+ .datain = OMAP24XX_GPIO_DATAIN,
+ .dataout = OMAP24XX_GPIO_DATAOUT,
+@@ -1305,6 +1313,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
+
+ static const struct omap_gpio_reg_offs omap4_gpio_regs = {
+ .revision = OMAP4_GPIO_REVISION,
++ .sysconfig = OMAP4_GPIO_SYSCONFIG,
+ .direction = OMAP4_GPIO_OE,
+ .datain = OMAP4_GPIO_DATAIN,
+ .dataout = OMAP4_GPIO_DATAOUT,
+diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
+index 3feaece13ade0..6b665931147df 100644
+--- a/drivers/hid/hid-alps.c
++++ b/drivers/hid/hid-alps.c
+@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+
+ if (input_register_device(data->input2)) {
+ input_free_device(input2);
++ ret = -ENOENT;
+ goto exit;
+ }
+ }
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 21e15627a4614..477baa30889cc 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -161,6 +161,7 @@ struct cp2112_device {
+ atomic_t read_avail;
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
++ struct irq_chip irq;
+ u8 *in_out_buffer;
+ struct mutex lock;
+
+@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+ return 0;
+ }
+
+-static struct irq_chip cp2112_gpio_irqchip = {
+- .name = "cp2112-gpio",
+- .irq_startup = cp2112_gpio_irq_startup,
+- .irq_shutdown = cp2112_gpio_irq_shutdown,
+- .irq_ack = cp2112_gpio_irq_ack,
+- .irq_mask = cp2112_gpio_irq_mask,
+- .irq_unmask = cp2112_gpio_irq_unmask,
+- .irq_set_type = cp2112_gpio_irq_type,
+-};
+-
+ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+ {
+@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ dev->gc.can_sleep = 1;
+ dev->gc.parent = &hdev->dev;
+
++ dev->irq.name = "cp2112-gpio";
++ dev->irq.irq_startup = cp2112_gpio_irq_startup;
++ dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
++ dev->irq.irq_ack = cp2112_gpio_irq_ack;
++ dev->irq.irq_mask = cp2112_gpio_irq_mask;
++ dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
++ dev->irq.irq_set_type = cp2112_gpio_irq_type;
++ dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
++
+ girq = &dev->gc.irq;
+- girq->chip = &cp2112_gpio_irqchip;
++ girq->chip = &dev->irq;
+ /* The event comes from the outside so no parent handler */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 85a054f1ce389..2a176f77b32e9 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -526,6 +526,8 @@ static void hammer_remove(struct hid_device *hdev)
+ }
+
+ static const struct hid_device_id hammer_devices[] = {
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 06813f297dcca..b93ce0d475e09 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -486,6 +486,7 @@
+ #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
+ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
++#define USB_DEVICE_ID_GOOGLE_DON 0x5050
+
+ #define USB_VENDOR_ID_GOTOP 0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2 0x007f
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 6cda5935fc09c..2d70dc4bea654 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ !wacom_wac->shared->is_touch_on) {
+ if (!wacom_wac->shared->touch_down)
+ return;
+- prox = 0;
++ prox = false;
+ }
+
+ wacom_wac->hid_data.num_received++;
+diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+index b248966837b4c..7aad40b2aa736 100644
+--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
++++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+@@ -412,7 +412,7 @@
+ | CN6XXX_INTR_M0UNWI_ERR \
+ | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UPWI_ERR \
+- | CN6XXX_INTR_M1UPB0_ERR \
++ | CN6XXX_INTR_M1UNB0_ERR \
+ | CN6XXX_INTR_M1UNWI_ERR \
+ | CN6XXX_INTR_INSTR_DB_OF_ERR \
+ | CN6XXX_INTR_SLIST_DB_OF_ERR \
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index abd37f26af682..11864ac101b8d 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -890,6 +890,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++ return -EINVAL;
++
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ geneve->cfg.info.key.tp_dst, sport);
+@@ -984,6 +987,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
++ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
++ return -EINVAL;
++
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ geneve->cfg.info.key.tp_dst, sport);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index d18642a8144cf..4909405803d57 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
+ cancel_work_sync(&serial_table[i]->async_put_intf);
+ cancel_work_sync(&serial_table[i]->async_get_intf);
+ hso_serial_tty_unregister(serial);
+- kref_put(&serial_table[i]->ref, hso_serial_ref_free);
++ kref_put(&serial->parent->ref, hso_serial_ref_free);
+ }
+ }
+
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 6f10e0998f1ce..94d19158efc18 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
+ xenvif_carrier_on(be->vif);
+
+ unregister_hotplug_status_watch(be);
+- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+- hotplug_status_changed,
+- "%s/%s", dev->nodename, "hotplug-status");
+- if (!err)
++ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
++ NULL, hotplug_status_changed,
++ "%s/%s", dev->nodename,
++ "hotplug-status");
++ if (err)
++ goto err;
+ be->have_hotplug_status_watch = 1;
++ }
+
+ netif_tx_wake_all_queues(be->vif->dev);
+
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 9fc4433fece4f..20b477cd5a30a 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ unsigned i, pin;
+ #ifdef CONFIG_GPIOLIB
+ struct pinctrl_gpio_range *range;
+- unsigned int gpio_num;
+ struct gpio_chip *chip;
++ int gpio_num;
+ #endif
+
+ seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
+@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ seq_printf(s, "pin %d (%s) ", pin, desc->name);
+
+ #ifdef CONFIG_GPIOLIB
+- gpio_num = 0;
++ gpio_num = -1;
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ if ((pin >= range->pin_base) &&
+ (pin < (range->pin_base + range->npins))) {
+@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ break;
+ }
+ }
+- chip = gpio_to_chip(gpio_num);
+- if (chip && chip->gpiodev && chip->gpiodev->base)
+- seq_printf(s, "%u:%s ", gpio_num -
+- chip->gpiodev->base, chip->label);
++ if (gpio_num >= 0)
++ chip = gpio_to_chip(gpio_num);
++ else
++ chip = NULL;
++ if (chip)
++ seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
+ else
+ seq_puts(s, "0:? ");
+ #endif
+diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+index 7fdf4257df1ed..ad4b446d588e6 100644
+--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
++++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
+ static const struct intel_community lbg_communities[] = {
+ LBG_COMMUNITY(0, 0, 71),
+ LBG_COMMUNITY(1, 72, 132),
+- LBG_COMMUNITY(3, 133, 144),
+- LBG_COMMUNITY(4, 145, 180),
+- LBG_COMMUNITY(5, 181, 246),
++ LBG_COMMUNITY(3, 133, 143),
++ LBG_COMMUNITY(4, 144, 178),
++ LBG_COMMUNITY(5, 179, 246),
+ };
+
+ static const struct intel_pinctrl_soc_data lbg_soc_data = {
+diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
+index be76fddbf524b..0dbca679bd32f 100644
+--- a/drivers/soc/qcom/qcom-geni-se.c
++++ b/drivers/soc/qcom/qcom-geni-se.c
+@@ -741,6 +741,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
++ if (has_acpi_companion(se->dev))
++ return 0;
++
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e79359326411a..bc035ba6e0105 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1637,12 +1637,13 @@ static int acm_resume(struct usb_interface *intf)
+ struct urb *urb;
+ int rv = 0;
+
+- acm_unpoison_urbs(acm);
+ spin_lock_irq(&acm->write_lock);
+
+ if (--acm->susp_count)
+ goto out;
+
++ acm_unpoison_urbs(acm);
++
+ if (tty_port_initialized(&acm->port)) {
+ rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
+
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index d300f799efcd1..aa656f57bf5b7 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -273,8 +273,10 @@ done:
+ mr->log_size = log_entity_size;
+ mr->nsg = nsg;
+ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+- if (!mr->nent)
++ if (!mr->nent) {
++ err = -ENOMEM;
+ goto err_map;
++ }
+
+ err = create_direct_mr(mvdev, mr);
+ if (err)
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index fc5707ada024e..84e5949bc8617 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -749,9 +749,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int r = 0;
+
++ mutex_lock(&dev->mutex);
++
+ r = vhost_dev_check_owner(dev);
+ if (r)
+- return r;
++ goto unlock;
+
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+@@ -772,6 +774,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ r = -EINVAL;
+ break;
+ }
++unlock:
++ mutex_unlock(&dev->mutex);
+
+ return r;
+ }
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index b416bba3a62b5..8ad819132dde3 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1259,6 +1259,11 @@ static inline bool bpf_allow_ptr_leaks(void)
+ return perfmon_capable();
+ }
+
++static inline bool bpf_allow_uninit_stack(void)
++{
++ return perfmon_capable();
++}
++
+ static inline bool bpf_allow_ptr_to_map_access(void)
+ {
+ return perfmon_capable();
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index e83ef6f6bf43a..85bac3191e127 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -187,7 +187,7 @@ struct bpf_func_state {
+ * 0 = main function, 1 = first callee.
+ */
+ u32 frameno;
+- /* subprog number == index within subprog_stack_depth
++ /* subprog number == index within subprog_info
+ * zero == main subprog
+ */
+ u32 subprogno;
+@@ -390,6 +390,7 @@ struct bpf_verifier_env {
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
++ bool allow_uninit_stack;
+ bool allow_ptr_to_map_access;
+ bool bpf_capable;
+ bool bypass_spec_v1;
+diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
+index 8b30b14b47d3f..f377817ce75c1 100644
+--- a/include/linux/platform_data/gpio-omap.h
++++ b/include/linux/platform_data/gpio-omap.h
+@@ -85,6 +85,7 @@
+ * omap2+ specific GPIO registers
+ */
+ #define OMAP24XX_GPIO_REVISION 0x0000
++#define OMAP24XX_GPIO_SYSCONFIG 0x0010
+ #define OMAP24XX_GPIO_IRQSTATUS1 0x0018
+ #define OMAP24XX_GPIO_IRQSTATUS2 0x0028
+ #define OMAP24XX_GPIO_IRQENABLE2 0x002c
+@@ -108,6 +109,7 @@
+ #define OMAP24XX_GPIO_SETDATAOUT 0x0094
+
+ #define OMAP4_GPIO_REVISION 0x0000
++#define OMAP4_GPIO_SYSCONFIG 0x0010
+ #define OMAP4_GPIO_EOI 0x0020
+ #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
+ #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
+@@ -148,6 +150,7 @@
+ #ifndef __ASSEMBLER__
+ struct omap_gpio_reg_offs {
+ u16 revision;
++ u16 sysconfig;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 3370f0d476e97..b9180509917e3 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2268,12 +2268,14 @@ static void save_register_state(struct bpf_func_state *state,
+ state->stack[spi].slot_type[i] = STACK_SPILL;
+ }
+
+-/* check_stack_read/write functions track spill/fill of registers,
++/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
+ * stack boundary and alignment are checked in check_mem_access()
+ */
+-static int check_stack_write(struct bpf_verifier_env *env,
+- struct bpf_func_state *state, /* func where register points to */
+- int off, int size, int value_regno, int insn_idx)
++static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
++ /* stack frame we're writing to */
++ struct bpf_func_state *state,
++ int off, int size, int value_regno,
++ int insn_idx)
+ {
+ struct bpf_func_state *cur; /* state of the current function */
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+@@ -2399,9 +2401,175 @@ static int check_stack_write(struct bpf_verifier_env *env,
+ return 0;
+ }
+
+-static int check_stack_read(struct bpf_verifier_env *env,
+- struct bpf_func_state *reg_state /* func where register points to */,
+- int off, int size, int value_regno)
++/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
++ * known to contain a variable offset.
++ * This function checks whether the write is permitted and conservatively
++ * tracks the effects of the write, considering that each stack slot in the
++ * dynamic range is potentially written to.
++ *
++ * 'off' includes 'regno->off'.
++ * 'value_regno' can be -1, meaning that an unknown value is being written to
++ * the stack.
++ *
++ * Spilled pointers in range are not marked as written because we don't know
++ * what's going to be actually written. This means that read propagation for
++ * future reads cannot be terminated by this write.
++ *
++ * For privileged programs, uninitialized stack slots are considered
++ * initialized by this write (even though we don't know exactly what offsets
++ * are going to be written to). The idea is that we don't want the verifier to
++ * reject future reads that access slots written to through variable offsets.
++ */
++static int check_stack_write_var_off(struct bpf_verifier_env *env,
++ /* func where register points to */
++ struct bpf_func_state *state,
++ int ptr_regno, int off, int size,
++ int value_regno, int insn_idx)
++{
++ struct bpf_func_state *cur; /* state of the current function */
++ int min_off, max_off;
++ int i, err;
++ struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
++ bool writing_zero = false;
++ /* set if the fact that we're writing a zero is used to let any
++ * stack slots remain STACK_ZERO
++ */
++ bool zero_used = false;
++
++ cur = env->cur_state->frame[env->cur_state->curframe];
++ ptr_reg = &cur->regs[ptr_regno];
++ min_off = ptr_reg->smin_value + off;
++ max_off = ptr_reg->smax_value + off + size;
++ if (value_regno >= 0)
++ value_reg = &cur->regs[value_regno];
++ if (value_reg && register_is_null(value_reg))
++ writing_zero = true;
++
++ err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
++ state->acquired_refs, true);
++ if (err)
++ return err;
++
++
++ /* Variable offset writes destroy any spilled pointers in range. */
++ for (i = min_off; i < max_off; i++) {
++ u8 new_type, *stype;
++ int slot, spi;
++
++ slot = -i - 1;
++ spi = slot / BPF_REG_SIZE;
++ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
++
++ if (!env->allow_ptr_leaks
++ && *stype != NOT_INIT
++ && *stype != SCALAR_VALUE) {
++ /* Reject the write if there's are spilled pointers in
++ * range. If we didn't reject here, the ptr status
++ * would be erased below (even though not all slots are
++ * actually overwritten), possibly opening the door to
++ * leaks.
++ */
++ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
++ insn_idx, i);
++ return -EINVAL;
++ }
++
++ /* Erase all spilled pointers. */
++ state->stack[spi].spilled_ptr.type = NOT_INIT;
++
++ /* Update the slot type. */
++ new_type = STACK_MISC;
++ if (writing_zero && *stype == STACK_ZERO) {
++ new_type = STACK_ZERO;
++ zero_used = true;
++ }
++ /* If the slot is STACK_INVALID, we check whether it's OK to
++ * pretend that it will be initialized by this write. The slot
++ * might not actually be written to, and so if we mark it as
++ * initialized future reads might leak uninitialized memory.
++ * For privileged programs, we will accept such reads to slots
++ * that may or may not be written because, if we're reject
++ * them, the error would be too confusing.
++ */
++ if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
++ verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
++ insn_idx, i);
++ return -EINVAL;
++ }
++ *stype = new_type;
++ }
++ if (zero_used) {
++ /* backtracking doesn't work for STACK_ZERO yet. */
++ err = mark_chain_precision(env, value_regno);
++ if (err)
++ return err;
++ }
++ return 0;
++}
++
++/* When register 'dst_regno' is assigned some values from stack[min_off,
++ * max_off), we set the register's type according to the types of the
++ * respective stack slots. If all the stack values are known to be zeros, then
++ * so is the destination reg. Otherwise, the register is considered to be
++ * SCALAR. This function does not deal with register filling; the caller must
++ * ensure that all spilled registers in the stack range have been marked as
++ * read.
++ */
++static void mark_reg_stack_read(struct bpf_verifier_env *env,
++ /* func where src register points to */
++ struct bpf_func_state *ptr_state,
++ int min_off, int max_off, int dst_regno)
++{
++ struct bpf_verifier_state *vstate = env->cur_state;
++ struct bpf_func_state *state = vstate->frame[vstate->curframe];
++ int i, slot, spi;
++ u8 *stype;
++ int zeros = 0;
++
++ for (i = min_off; i < max_off; i++) {
++ slot = -i - 1;
++ spi = slot / BPF_REG_SIZE;
++ stype = ptr_state->stack[spi].slot_type;
++ if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
++ break;
++ zeros++;
++ }
++ if (zeros == max_off - min_off) {
++ /* any access_size read into register is zero extended,
++ * so the whole register == const_zero
++ */
++ __mark_reg_const_zero(&state->regs[dst_regno]);
++ /* backtracking doesn't support STACK_ZERO yet,
++ * so mark it precise here, so that later
++ * backtracking can stop here.
++ * Backtracking may not need this if this register
++ * doesn't participate in pointer adjustment.
++ * Forward propagation of precise flag is not
++ * necessary either. This mark is only to stop
++ * backtracking. Any register that contributed
++ * to const 0 was marked precise before spill.
++ */
++ state->regs[dst_regno].precise = true;
++ } else {
++ /* have read misc data from the stack */
++ mark_reg_unknown(env, state->regs, dst_regno);
++ }
++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
++}
++
++/* Read the stack at 'off' and put the results into the register indicated by
++ * 'dst_regno'. It handles reg filling if the addressed stack slot is a
++ * spilled reg.
++ *
++ * 'dst_regno' can be -1, meaning that the read value is not going to a
++ * register.
++ *
++ * The access is assumed to be within the current stack bounds.
++ */
++static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
++ /* func where src register points to */
++ struct bpf_func_state *reg_state,
++ int off, int size, int dst_regno)
+ {
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+@@ -2409,11 +2577,6 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg;
+ u8 *stype;
+
+- if (reg_state->allocated_stack <= slot) {
+- verbose(env, "invalid read from stack off %d+0 size %d\n",
+- off, size);
+- return -EACCES;
+- }
+ stype = reg_state->stack[spi].slot_type;
+ reg = &reg_state->stack[spi].spilled_ptr;
+
+@@ -2424,9 +2587,9 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ verbose(env, "invalid size of register fill\n");
+ return -EACCES;
+ }
+- if (value_regno >= 0) {
+- mark_reg_unknown(env, state->regs, value_regno);
+- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++ if (dst_regno >= 0) {
++ mark_reg_unknown(env, state->regs, dst_regno);
++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ return 0;
+@@ -2438,16 +2601,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ }
+ }
+
+- if (value_regno >= 0) {
++ if (dst_regno >= 0) {
+ /* restore register state from stack */
+- state->regs[value_regno] = *reg;
++ state->regs[dst_regno] = *reg;
+ /* mark reg as written since spilled pointer state likely
+ * has its liveness marks cleared by is_state_visited()
+ * which resets stack/reg liveness for state transitions
+ */
+- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+ } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
+- /* If value_regno==-1, the caller is asking us whether
++ /* If dst_regno==-1, the caller is asking us whether
+ * it is acceptable to use this value as a SCALAR_VALUE
+ * (e.g. for XADD).
+ * We must not allow unprivileged callers to do that
+@@ -2459,70 +2622,167 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ } else {
+- int zeros = 0;
++ u8 type;
+
+ for (i = 0; i < size; i++) {
+- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
++ type = stype[(slot - i) % BPF_REG_SIZE];
++ if (type == STACK_MISC)
+ continue;
+- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+- zeros++;
++ if (type == STACK_ZERO)
+ continue;
+- }
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ return -EACCES;
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+- if (value_regno >= 0) {
+- if (zeros == size) {
+- /* any size read into register is zero extended,
+- * so the whole register == const_zero
+- */
+- __mark_reg_const_zero(&state->regs[value_regno]);
+- /* backtracking doesn't support STACK_ZERO yet,
+- * so mark it precise here, so that later
+- * backtracking can stop here.
+- * Backtracking may not need this if this register
+- * doesn't participate in pointer adjustment.
+- * Forward propagation of precise flag is not
+- * necessary either. This mark is only to stop
+- * backtracking. Any register that contributed
+- * to const 0 was marked precise before spill.
+- */
+- state->regs[value_regno].precise = true;
+- } else {
+- /* have read misc data from the stack */
+- mark_reg_unknown(env, state->regs, value_regno);
+- }
+- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+- }
++ if (dst_regno >= 0)
++ mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
+ }
+ return 0;
+ }
+
+-static int check_stack_access(struct bpf_verifier_env *env,
+- const struct bpf_reg_state *reg,
+- int off, int size)
++enum stack_access_src {
++ ACCESS_DIRECT = 1, /* the access is performed by an instruction */
++ ACCESS_HELPER = 2, /* the access is performed by a helper */
++};
++
++static int check_stack_range_initialized(struct bpf_verifier_env *env,
++ int regno, int off, int access_size,
++ bool zero_size_allowed,
++ enum stack_access_src type,
++ struct bpf_call_arg_meta *meta);
++
++static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
++{
++ return cur_regs(env) + regno;
++}
++
++/* Read the stack at 'ptr_regno + off' and put the result into the register
++ * 'dst_regno'.
++ * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'),
++ * but not its variable offset.
++ * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
++ *
++ * As opposed to check_stack_read_fixed_off, this function doesn't deal with
++ * filling registers (i.e. reads of spilled register cannot be detected when
++ * the offset is not fixed). We conservatively mark 'dst_regno' as containing
++ * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
++ * offset; for a fixed offset check_stack_read_fixed_off should be used
++ * instead.
++ */
++static int check_stack_read_var_off(struct bpf_verifier_env *env,
++ int ptr_regno, int off, int size, int dst_regno)
++{
++ /* The state of the source register. */
++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++ struct bpf_func_state *ptr_state = func(env, reg);
++ int err;
++ int min_off, max_off;
++
++ /* Note that we pass a NULL meta, so raw access will not be permitted.
++ */
++ err = check_stack_range_initialized(env, ptr_regno, off, size,
++ false, ACCESS_DIRECT, NULL);
++ if (err)
++ return err;
++
++ min_off = reg->smin_value + off;
++ max_off = reg->smax_value + off;
++ mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
++ return 0;
++}
++
++/* check_stack_read dispatches to check_stack_read_fixed_off or
++ * check_stack_read_var_off.
++ *
++ * The caller must ensure that the offset falls within the allocated stack
++ * bounds.
++ *
++ * 'dst_regno' is a register which will receive the value from the stack. It
++ * can be -1, meaning that the read value is not going to a register.
++ */
++static int check_stack_read(struct bpf_verifier_env *env,
++ int ptr_regno, int off, int size,
++ int dst_regno)
+ {
+- /* Stack accesses must be at a fixed offset, so that we
+- * can determine what type of data were returned. See
+- * check_stack_read().
++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++ struct bpf_func_state *state = func(env, reg);
++ int err;
++ /* Some accesses are only permitted with a static offset. */
++ bool var_off = !tnum_is_const(reg->var_off);
++
++ /* The offset is required to be static when reads don't go to a
++ * register, in order to not leak pointers (see
++ * check_stack_read_fixed_off).
+ */
+- if (!tnum_is_const(reg->var_off)) {
++ if (dst_regno < 0 && var_off) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
++ verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
+ tn_buf, off, size);
+ return -EACCES;
+ }
++ /* Variable offset is prohibited for unprivileged mode for simplicity
++ * since it requires corresponding support in Spectre masking for stack
++ * ALU. See also retrieve_ptr_limit().
++ */
++ if (!env->bypass_spec_v1 && var_off) {
++ char tn_buf[48];
+
+- if (off >= 0 || off < -MAX_BPF_STACK) {
+- verbose(env, "invalid stack off=%d size=%d\n", off, size);
++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++ verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
++ ptr_regno, tn_buf);
+ return -EACCES;
+ }
+
+- return 0;
++ if (!var_off) {
++ off += reg->var_off.value;
++ err = check_stack_read_fixed_off(env, state, off, size,
++ dst_regno);
++ } else {
++ /* Variable offset stack reads need more conservative handling
++ * than fixed offset ones. Note that dst_regno >= 0 on this
++ * branch.
++ */
++ err = check_stack_read_var_off(env, ptr_regno, off, size,
++ dst_regno);
++ }
++ return err;
++}
++
++
++/* check_stack_write dispatches to check_stack_write_fixed_off or
++ * check_stack_write_var_off.
++ *
++ * 'ptr_regno' is the register used as a pointer into the stack.
++ * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
++ * 'value_regno' is the register whose value we're writing to the stack. It can
++ * be -1, meaning that we're not writing from a register.
++ *
++ * The caller must ensure that the offset falls within the maximum stack size.
++ */
++static int check_stack_write(struct bpf_verifier_env *env,
++ int ptr_regno, int off, int size,
++ int value_regno, int insn_idx)
++{
++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++ struct bpf_func_state *state = func(env, reg);
++ int err;
++
++ if (tnum_is_const(reg->var_off)) {
++ off += reg->var_off.value;
++ err = check_stack_write_fixed_off(env, state, off, size,
++ value_regno, insn_idx);
++ } else {
++ /* Variable offset stack reads need more conservative handling
++ * than fixed offset ones.
++ */
++ err = check_stack_write_var_off(env, state,
++ ptr_regno, off, size,
++ value_regno, insn_idx);
++ }
++ return err;
+ }
+
+ static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
+@@ -2851,11 +3111,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+ return -EACCES;
+ }
+
+-static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+-{
+- return cur_regs(env) + regno;
+-}
+-
+ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+ {
+ return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
+@@ -2974,8 +3229,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
+ break;
+ case PTR_TO_STACK:
+ pointer_desc = "stack ";
+- /* The stack spill tracking logic in check_stack_write()
+- * and check_stack_read() relies on stack accesses being
++ /* The stack spill tracking logic in check_stack_write_fixed_off()
++ * and check_stack_read_fixed_off() relies on stack accesses being
+ * aligned.
+ */
+ strict = true;
+@@ -3393,6 +3648,91 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env, |
1314 |
+ return 0; |
1315 |
+ } |
1316 |
+ |
1317 |
++/* Check that the stack access at the given offset is within bounds. The |
1318 |
++ * maximum valid offset is -1. |
1319 |
++ * |
1320 |
++ * The minimum valid offset is -MAX_BPF_STACK for writes, and |
1321 |
++ * -state->allocated_stack for reads. |
1322 |
++ */ |
1323 |
++static int check_stack_slot_within_bounds(int off, |
1324 |
++ struct bpf_func_state *state, |
1325 |
++ enum bpf_access_type t) |
1326 |
++{ |
1327 |
++ int min_valid_off; |
1328 |
++ |
1329 |
++ if (t == BPF_WRITE) |
1330 |
++ min_valid_off = -MAX_BPF_STACK; |
1331 |
++ else |
1332 |
++ min_valid_off = -state->allocated_stack; |
1333 |
++ |
1334 |
++ if (off < min_valid_off || off > -1) |
1335 |
++ return -EACCES; |
1336 |
++ return 0; |
1337 |
++} |
++
++/* Check that the stack access at 'regno + off' falls within the maximum stack
++ * bounds.
++ *
++ * 'off' includes `regno->offset`, but not its dynamic part (if any).
++ */
++static int check_stack_access_within_bounds(
++ struct bpf_verifier_env *env,
++ int regno, int off, int access_size,
++ enum stack_access_src src, enum bpf_access_type type)
++{
++ struct bpf_reg_state *regs = cur_regs(env);
++ struct bpf_reg_state *reg = regs + regno;
++ struct bpf_func_state *state = func(env, reg);
++ int min_off, max_off;
++ int err;
++ char *err_extra;
++
++ if (src == ACCESS_HELPER)
++ /* We don't know if helpers are reading or writing (or both). */
++ err_extra = " indirect access to";
++ else if (type == BPF_READ)
++ err_extra = " read from";
++ else
++ err_extra = " write to";
++
++ if (tnum_is_const(reg->var_off)) {
++ min_off = reg->var_off.value + off;
++ if (access_size > 0)
++ max_off = min_off + access_size - 1;
++ else
++ max_off = min_off;
++ } else {
++ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
++ reg->smin_value <= -BPF_MAX_VAR_OFF) {
++ verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
++ err_extra, regno);
++ return -EACCES;
++ }
++ min_off = reg->smin_value + off;
++ if (access_size > 0)
++ max_off = reg->smax_value + off + access_size - 1;
++ else
++ max_off = min_off;
++ }
++
++ err = check_stack_slot_within_bounds(min_off, state, type);
++ if (!err)
++ err = check_stack_slot_within_bounds(max_off, state, type);
++
++ if (err) {
++ if (tnum_is_const(reg->var_off)) {
++ verbose(env, "invalid%s stack R%d off=%d size=%d\n",
++ err_extra, regno, off, access_size);
++ } else {
++ char tn_buf[48];
++
++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++ verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
++ err_extra, regno, tn_buf, access_size);
++ }
++ }
++ return err;
++}
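
For a variable offset, the function derives a [min_off, max_off] window from the register's smin/smax bounds and checks both extremes. A toy calculation with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		int smin = -16, smax = -8;   /* register's possible offsets */
		int off = -4, access_size = 4;

		int min_off = smin + off;                   /* -20 */
		int max_off = smax + off + access_size - 1; /* -9  */

		/* Both extremes are checked; if either slot is out of bounds
		 * the whole access is rejected with -EACCES. */
		printf("window: [%d, %d]\n", min_off, max_off);
		return 0;
	}
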
+
+ /* check whether memory at (regno + off) is accessible for t = (read | write)
+ * if t==write, value_regno is a register which value is stored into memory
+@@ -3505,8 +3845,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ }
+
+ } else if (reg->type == PTR_TO_STACK) {
+- off += reg->var_off.value;
+- err = check_stack_access(env, reg, off, size);
++ /* Basic bounds checks. */
++ err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
+ if (err)
+ return err;
+
+@@ -3515,12 +3855,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ if (err)
+ return err;
+
+- if (t == BPF_WRITE)
+- err = check_stack_write(env, state, off, size,
+- value_regno, insn_idx);
+- else
+- err = check_stack_read(env, state, off, size,
++ if (t == BPF_READ)
++ err = check_stack_read(env, regno, off, size,
+ value_regno);
++ else
++ err = check_stack_write(env, regno, off, size,
++ value_regno, insn_idx);
+ } else if (reg_is_pkt_pointer(reg)) {
+ if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
+ verbose(env, "cannot write into packet\n");
+@@ -3642,49 +3982,53 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+ }
+
+-static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
+- int off, int access_size,
+- bool zero_size_allowed)
++/* When register 'regno' is used to read the stack (either directly or through
++ * a helper function) make sure that it's within stack boundary and, depending
++ * on the access type, that all elements of the stack are initialized.
++ *
++ * 'off' includes 'regno->off', but not its dynamic part (if any).
++ *
++ * All registers that have been spilled on the stack in the slots within the
++ * read offsets are marked as read.
++ */
++static int check_stack_range_initialized(
++ struct bpf_verifier_env *env, int regno, int off,
++ int access_size, bool zero_size_allowed,
++ enum stack_access_src type, struct bpf_call_arg_meta *meta)
+ {
+ struct bpf_reg_state *reg = reg_state(env, regno);
++ struct bpf_func_state *state = func(env, reg);
++ int err, min_off, max_off, i, j, slot, spi;
++ char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
++ enum bpf_access_type bounds_check_type;
++ /* Some accesses can write anything into the stack, others are
++ * read-only.
++ */
++ bool clobber = false;
+
+- if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
+- access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
+- if (tnum_is_const(reg->var_off)) {
+- verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
+- regno, off, access_size);
+- } else {
+- char tn_buf[48];
+-
+- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
+- regno, tn_buf, access_size);
+- }
++ if (access_size == 0 && !zero_size_allowed) {
++ verbose(env, "invalid zero-sized read\n");
+ return -EACCES;
+ }
+- return 0;
+-}
+
+-/* when register 'regno' is passed into function that will read 'access_size'
+- * bytes from that pointer, make sure that it's within stack boundary
+- * and all elements of stack are initialized.
+- * Unlike most pointer bounds-checking functions, this one doesn't take an
+- * 'off' argument, so it has to add in reg->off itself.
+- */
+-static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+- int access_size, bool zero_size_allowed,
+- struct bpf_call_arg_meta *meta)
+-{
+- struct bpf_reg_state *reg = reg_state(env, regno);
+- struct bpf_func_state *state = func(env, reg);
+- int err, min_off, max_off, i, j, slot, spi;
++ if (type == ACCESS_HELPER) {
++ /* The bounds checks for writes are more permissive than for
++ * reads. However, if raw_mode is not set, we'll do extra
++ * checks below.
++ */
++ bounds_check_type = BPF_WRITE;
++ clobber = true;
++ } else {
++ bounds_check_type = BPF_READ;
++ }
++ err = check_stack_access_within_bounds(env, regno, off, access_size,
++ type, bounds_check_type);
++ if (err)
++ return err;
++
+
+ if (tnum_is_const(reg->var_off)) {
+- min_off = max_off = reg->var_off.value + reg->off;
+- err = __check_stack_boundary(env, regno, min_off, access_size,
+- zero_size_allowed);
+- if (err)
+- return err;
++ min_off = max_off = reg->var_off.value + off;
+ } else {
+ /* Variable offset is prohibited for unprivileged mode for
+ * simplicity since it requires corresponding support in
+@@ -3695,8 +4039,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
+- regno, tn_buf);
++ verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
++ regno, err_extra, tn_buf);
+ return -EACCES;
+ }
+ /* Only initialized buffer on stack is allowed to be accessed
+@@ -3708,28 +4052,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ if (meta && meta->raw_mode)
+ meta = NULL;
+
+- if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+- reg->smax_value <= -BPF_MAX_VAR_OFF) {
+- verbose(env, "R%d unbounded indirect variable offset stack access\n",
+- regno);
+- return -EACCES;
+- }
+- min_off = reg->smin_value + reg->off;
+- max_off = reg->smax_value + reg->off;
+- err = __check_stack_boundary(env, regno, min_off, access_size,
+- zero_size_allowed);
+- if (err) {
+- verbose(env, "R%d min value is outside of stack bound\n",
+- regno);
+- return err;
+- }
+- err = __check_stack_boundary(env, regno, max_off, access_size,
+- zero_size_allowed);
+- if (err) {
+- verbose(env, "R%d max value is outside of stack bound\n",
+- regno);
+- return err;
+- }
++ min_off = reg->smin_value + off;
++ max_off = reg->smax_value + off;
+ }
+
+ if (meta && meta->raw_mode) {
+@@ -3749,8 +4073,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ if (*stype == STACK_MISC)
+ goto mark;
+ if (*stype == STACK_ZERO) {
+- /* helper can write anything into the stack */
+- *stype = STACK_MISC;
++ if (clobber) {
++ /* helper can write anything into the stack */
++ *stype = STACK_MISC;
++ }
+ goto mark;
+ }
+
+@@ -3759,23 +4085,26 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ goto mark;
+
+ if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+- state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
+- __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
+- for (j = 0; j < BPF_REG_SIZE; j++)
+- state->stack[spi].slot_type[j] = STACK_MISC;
++ (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
++ env->allow_ptr_leaks)) {
++ if (clobber) {
++ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
++ for (j = 0; j < BPF_REG_SIZE; j++)
++ state->stack[spi].slot_type[j] = STACK_MISC;
++ }
+ goto mark;
+ }
+
+ err:
+ if (tnum_is_const(reg->var_off)) {
+- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+- min_off, i - min_off, access_size);
++ verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
++ err_extra, regno, min_off, i - min_off, access_size);
+ } else {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
+- tn_buf, i - min_off, access_size);
++ verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
++ err_extra, regno, tn_buf, i - min_off, access_size);
+ }
+ return -EACCES;
+ mark:
+@@ -3824,8 +4153,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ "rdwr",
+ &env->prog->aux->max_rdwr_access);
+ case PTR_TO_STACK:
+- return check_stack_boundary(env, regno, access_size,
+- zero_size_allowed, meta);
++ return check_stack_range_initialized(
++ env,
++ regno, reg->off, access_size,
++ zero_size_allowed, ACCESS_HELPER, meta);
+ default: /* scalar_value or invalid ptr */
+ /* Allow zero-byte read from NULL, regardless of pointer type */
+ if (zero_size_allowed && access_size == 0 &&
+@@ -5343,7 +5674,7 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+- u32 off, max = 0, ptr_limit = 0;
++ u32 max = 0, ptr_limit = 0;
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -5352,26 +5683,18 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+- * left direction, see BPF_REG_FP.
++ * left direction, see BPF_REG_FP. Also, unknown scalar
++ * offset where we would need to deal with min/max bounds is
++ * currently prohibited for unprivileged.
+ */
+ max = MAX_BPF_STACK + mask_to_left;
+- /* Indirect variable offset stack access is prohibited in
+- * unprivileged mode so it's not handled here.
+- */
+- off = ptr_reg->off + ptr_reg->var_off.value;
+- if (mask_to_left)
+- ptr_limit = MAX_BPF_STACK + off;
+- else
+- ptr_limit = -off - 1;
++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+ break;
+ case PTR_TO_MAP_VALUE:
+ max = ptr_reg->map_ptr->value_size;
+- if (mask_to_left) {
+- ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+- } else {
+- off = ptr_reg->smin_value + ptr_reg->off;
+- ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+- }
++ ptr_limit = (mask_to_left ?
++ ptr_reg->smin_value :
++ ptr_reg->umax_value) + ptr_reg->off;
+ break;
+ default:
+ return REASON_TYPE;
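
For PTR_TO_STACK the new ptr_limit collapses to the distance from the current pointer back to the frame base. A small standalone illustration with assumed values (these are not taken from any real verifier run):

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_STACK 512

	int main(void)
	{
		int64_t var_off_value = 0, off = -16; /* pointer at fp-16 */
		int mask_to_left = 1;                 /* e.g. BPF_ADD with negative offset */

		uint32_t max = MAX_STACK + mask_to_left;
		uint32_t ptr_limit = -(var_off_value + off); /* 16 bytes back to fp */

		printf("ptr_limit=%u max=%u ok=%d\n",
		       (unsigned)ptr_limit, (unsigned)max, ptr_limit <= max);
		return 0;
	}
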
+@@ -5426,10 +5749,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+- struct bpf_reg_state *dst_reg)
++ struct bpf_reg_state *dst_reg,
++ struct bpf_insn_aux_data *tmp_aux,
++ const bool commit_window)
+ {
++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_verifier_state *vstate = env->cur_state;
+- struct bpf_insn_aux_data *aux = cur_aux(env);
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ u8 opcode = BPF_OP(insn->code);
+@@ -5448,18 +5773,33 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ if (vstate->speculative)
+ goto do_sim;
+
+- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+- alu_state |= ptr_is_dst_reg ?
+- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+-
+ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (err < 0)
+ return err;
+
++ if (commit_window) {
++ /* In commit phase we narrow the masking window based on
++ * the observed pointer move after the simulated operation.
++ */
++ alu_state = tmp_aux->alu_state;
++ alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++ } else {
++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++ alu_state |= ptr_is_dst_reg ?
++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
++ }
++
+ err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+ if (err < 0)
+ return err;
+ do_sim:
++ /* If we're in commit phase, we're done here given we already
++ * pushed the truncated dst_reg into the speculative verification
++ * stack.
++ */
++ if (commit_window)
++ return 0;
++
+ /* Simulate and find potential out-of-bounds access under
+ * speculative execution from truncation as a result of
+ * masking when off was not within expected range. If off
+@@ -5518,6 +5858,72 @@ static int sanitize_err(struct bpf_verifier_env *env,
+ return -EACCES;
+ }
+
++/* check that stack access falls within stack limits and that 'reg' doesn't
++ * have a variable offset.
++ *
++ * Variable offset is prohibited for unprivileged mode for simplicity since it
++ * requires corresponding support in Spectre masking for stack ALU. See also
++ * retrieve_ptr_limit().
++ *
++ *
++ * 'off' includes 'reg->off'.
++ */
++static int check_stack_access_for_ptr_arithmetic(
++ struct bpf_verifier_env *env,
++ int regno,
++ const struct bpf_reg_state *reg,
++ int off)
++{
++ if (!tnum_is_const(reg->var_off)) {
++ char tn_buf[48];
++
++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++ verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
++ regno, tn_buf, off);
++ return -EACCES;
++ }
++
++ if (off >= 0 || off < -MAX_BPF_STACK) {
++ verbose(env, "R%d stack pointer arithmetic goes out of range, "
++ "prohibited for !root; off=%d\n", regno, off);
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static int sanitize_check_bounds(struct bpf_verifier_env *env,
++ const struct bpf_insn *insn,
++ const struct bpf_reg_state *dst_reg)
++{
++ u32 dst = insn->dst_reg;
++
++ /* For unprivileged we require that resulting offset must be in bounds
++ * in order to be able to sanitize access later on.
++ */
++ if (env->bypass_spec_v1)
++ return 0;
++
++ switch (dst_reg->type) {
++ case PTR_TO_STACK:
++ if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
++ dst_reg->off + dst_reg->var_off.value))
++ return -EACCES;
++ break;
++ case PTR_TO_MAP_VALUE:
++ if (check_map_access(env, dst, dst_reg->off, 1, false)) {
++ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
++ "prohibited for !root\n", dst);
++ return -EACCES;
++ }
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
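
sanitize_check_bounds() centralizes the rule that, for unprivileged programs, pointer arithmetic must already land in bounds so that later Spectre masking is sound. A toy version of the stack-offset gate, with illustrative constants:

	#include <stdio.h>

	#define MAX_STACK 512

	static int stack_off_ok(long off)
	{
		return !(off >= 0 || off < -MAX_STACK);
	}

	int main(void)
	{
		int bypass_spec_v1 = 0; /* unprivileged: the checks apply */

		long off = -520; /* fp-520: past the stack limit */
		if (!bypass_spec_v1 && !stack_off_ok(off))
			printf("rejected: off=%ld out of range for !root\n", off);

		off = -64;
		printf("off=%ld ok=%d\n", off, stack_off_ok(off));
		return 0;
	}
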
++
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+ * Caller should also handle BPF_MOV case separately.
+ * If we return -EACCES, caller may want to try again treating pointer as a
+@@ -5536,6 +5942,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
++ struct bpf_insn_aux_data tmp_aux = {};
+ u8 opcode = BPF_OP(insn->code);
+ u32 dst = insn->dst_reg;
+ int ret;
+@@ -5602,12 +6009,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ /* pointer types do not carry 32-bit bounds at the moment. */
+ __mark_reg32_unbounded(dst_reg);
+
+- switch (opcode) {
+- case BPF_ADD:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
++ &tmp_aux, false);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
++ switch (opcode) {
++ case BPF_ADD:
+ /* We can take a fixed offset as long as it doesn't overflow
+ * the s32 'off' field
+ */
+@@ -5658,10 +6068,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ }
+ break;
+ case BPF_SUB:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, off_reg, dst_reg);
+-
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+ verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -5742,22 +6148,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ __reg_deduce_bounds(dst_reg);
+ __reg_bound_offset(dst_reg);
+
+- /* For unprivileged we require that resulting offset must be in bounds
+- * in order to be able to sanitize access later on.
+- */
+- if (!env->bypass_spec_v1) {
+- if (dst_reg->type == PTR_TO_MAP_VALUE &&
+- check_map_access(env, dst, dst_reg->off, 1, false)) {
+- verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+- "prohibited for !root\n", dst);
+- return -EACCES;
+- } else if (dst_reg->type == PTR_TO_STACK &&
+- check_stack_access(env, dst_reg, dst_reg->off +
+- dst_reg->var_off.value, 1)) {
+- verbose(env, "R%d stack pointer arithmetic goes out of range, "
+- "prohibited for !root\n", dst);
+- return -EACCES;
+- }
++ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
++ return -EACCES;
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
++ &tmp_aux, true);
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
+ return 0;
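
The rewritten flow runs sanitize_ptr_alu() twice: once before the ALU op to record a provisional limit in tmp_aux, and once afterwards in the commit window to narrow the limit to the pointer's actual movement. A contrived userspace sketch of that two-phase narrowing; names and numbers are illustrative, not real verifier state:

	#include <stdio.h>
	#include <stdlib.h>

	struct aux { int alu_limit; };

	static void sanitize(struct aux *scratch, struct aux *committed,
			     int limit_now, int commit_window)
	{
		if (commit_window)
			/* narrow to |pre-limit - post-limit|, the actual move */
			committed->alu_limit = abs(scratch->alu_limit - limit_now);
		else
			scratch->alu_limit = limit_now;
	}

	int main(void)
	{
		struct aux scratch = {0}, committed = {0};

		sanitize(&scratch, &committed, 48, 0); /* pre: limit before the op */
		sanitize(&scratch, &committed, 16, 1); /* commit: limit after the op */
		printf("masking window: %d\n", committed.alu_limit); /* 32 */
		return 0;
	}
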
+@@ -11951,6 +12348,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+ env->strict_alignment = false;
+
+ env->allow_ptr_leaks = bpf_allow_ptr_leaks();
++ env->allow_uninit_stack = bpf_allow_uninit_stack();
+ env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
+ env->bypass_spec_v1 = bpf_bypass_spec_v1();
+ env->bypass_spec_v4 = bpf_bypass_spec_v4();
+diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
+index fe9ca92faa2a7..909b0bf22a1ec 100644
+--- a/kernel/locking/qrwlock.c
++++ b/kernel/locking/qrwlock.c
+@@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
+ */
+ void queued_write_lock_slowpath(struct qrwlock *lock)
+ {
++ int cnts;
++
+ /* Put the writer into the wait queue */
+ arch_spin_lock(&lock->wait_lock);
+
+@@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
+
+ /* When no more readers or writers, set the locked flag */
+ do {
+- atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+- } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+- _QW_LOCKED) != _QW_WAITING);
++ cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
++ } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
+ unlock:
+ arch_spin_unlock(&lock->wait_lock);
+ }
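
The qrwlock change swaps a relaxed cmpxchg loop for try_cmpxchg with acquire ordering on success, so the reload comes from the CAS failure itself. The same shape in portable C11 atomics, with simplified stand-in constants and no real lock around it:

	#include <stdatomic.h>
	#include <stdio.h>

	#define QW_WAITING 0x100
	#define QW_LOCKED  0x0ff

	int main(void)
	{
		atomic_int cnts = QW_WAITING;
		int expected;

		do {
			/* relaxed polling is enough; ordering comes from the CAS */
			while ((expected = atomic_load_explicit(&cnts, memory_order_relaxed))
			       != QW_WAITING)
				;
		} while (!atomic_compare_exchange_weak_explicit(&cnts, &expected, QW_LOCKED,
								memory_order_acquire,
								memory_order_relaxed));

		printf("locked: cnts=0x%x\n", atomic_load(&cnts));
		return 0;
	}

On failure, compare_exchange_weak writes the observed value back into 'expected', which is what makes the reload-free retry loop work.
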
+diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
+index 1e000cc2e7b4b..127012f45166e 100644
+--- a/scripts/Makefile.kasan
++++ b/scripts/Makefile.kasan
+@@ -2,6 +2,8 @@
+ CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+ KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+
++cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
++
+ ifdef CONFIG_KASAN_GENERIC
+
+ ifdef CONFIG_KASAN_INLINE
+@@ -12,8 +14,6 @@ endif
+
+ CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
+
+-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+-
+ # -fasan-shadow-offset fails without -fsanitize
+ CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
+ -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
+@@ -36,14 +36,14 @@ endif # CONFIG_KASAN_GENERIC
+ ifdef CONFIG_KASAN_SW_TAGS
+
+ ifdef CONFIG_KASAN_INLINE
+- instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
++ instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
+ else
+- instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
++ instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
+ endif
+
+ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
+- -mllvm -hwasan-instrument-stack=$(CONFIG_KASAN_STACK) \
+- -mllvm -hwasan-use-short-granules=0 \
++ $(call cc-param,hwasan-instrument-stack=$(CONFIG_KASAN_STACK)) \
++ $(call cc-param,hwasan-use-short-granules=0) \
+ $(instrumentation_flags)
+
+ endif # CONFIG_KASAN_SW_TAGS
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index e2a0ed5d02f01..c87c4df8703d4 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -79,7 +79,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
++ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
+diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
+index 4d471d9511a54..6fffe56827134 100644
+--- a/tools/arch/ia64/include/asm/barrier.h
++++ b/tools/arch/ia64/include/asm/barrier.h
+@@ -39,9 +39,6 @@
+ * sequential memory pages only.
+ */
+
+-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
+-#define ia64_mf() asm volatile ("mf" ::: "memory")
+-
+ #define mb() ia64_mf()
+ #define rmb() mb()
+ #define wmb() mb()
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index d8ada6a3c555a..d3c15b53495d6 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -636,7 +636,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
+ break;
+ }
+
+- if (itr)
++ if (itr && itr->parse_snapshot_options)
+ return itr->parse_snapshot_options(itr, opts, str);
+
+ pr_err("No AUX area tracing to snapshot\n");
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index e2537d5acab09..f4d44f75ba152 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -836,15 +836,18 @@ out:
+ int maps__clone(struct thread *thread, struct maps *parent)
+ {
+ struct maps *maps = thread->maps;
+- int err = -ENOMEM;
++ int err;
+ struct map *map;
+
+ down_read(&parent->lock);
+
+ maps__for_each_entry(parent, map) {
+ struct map *new = map__clone(map);
+
+- if (new == NULL)
++ if (new == NULL) {
++ err = -ENOMEM;
+ goto out_unlock;
++ }
+
+ err = unwind__prepare_access(maps, new, NULL);
+ if (err)