From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sun, 12 Jan 2020 14:53:55
Message-Id: 1578840817.dd458323bfd7b0f33dbdfbfb163a3da902f97fd1.mpagano@gentoo
1 commit: dd458323bfd7b0f33dbdfbfb163a3da902f97fd1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Jan 12 14:53:37 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Jan 12 14:53:37 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dd458323
7
8 Linux patch 4.14.164
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1163_linux-4.14.164.patch | 2533 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2537 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index b1304d8..9cdb60b 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -695,6 +695,10 @@ Patch: 1162_linux-4.14.163.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.163
23
24 +Patch: 1163_linux-4.14.164.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.164
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1163_linux-4.14.164.patch b/1163_linux-4.14.164.patch
33 new file mode 100644
34 index 0000000..8b56337
35 --- /dev/null
36 +++ b/1163_linux-4.14.164.patch
37 @@ -0,0 +1,2533 @@
38 +diff --git a/Makefile b/Makefile
39 +index 35a71a78d1d2..f2aa55cea457 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 163
47 ++SUBLEVEL = 164
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
52 +index afb8eb0a0a16..051823b7e5a1 100644
53 +--- a/arch/arm/boot/dts/am437x-gp-evm.dts
54 ++++ b/arch/arm/boot/dts/am437x-gp-evm.dts
55 +@@ -83,7 +83,7 @@
56 + };
57 +
58 + lcd0: display {
59 +- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
60 ++ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
61 + label = "lcd";
62 +
63 + panel-timing {
64 +diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
65 +index 081fa68b6f98..c4279b0b9f12 100644
66 +--- a/arch/arm/boot/dts/am43x-epos-evm.dts
67 ++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
68 +@@ -45,7 +45,7 @@
69 + };
70 +
71 + lcd0: display {
72 +- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
73 ++ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
74 + label = "lcd";
75 +
76 + panel-timing {
77 +diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
78 +index 8b2c65cd61a2..b822952c29f8 100644
79 +--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
80 ++++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
81 +@@ -165,8 +165,8 @@
82 + mdio: mdio@18002000 {
83 + compatible = "brcm,iproc-mdio";
84 + reg = <0x18002000 0x8>;
85 +- #size-cells = <1>;
86 +- #address-cells = <0>;
87 ++ #size-cells = <0>;
88 ++ #address-cells = <1>;
89 + status = "disabled";
90 +
91 + gphy0: ethernet-phy@0 {
92 +diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
93 +index 4745e3c7806b..fdb018e1278f 100644
94 +--- a/arch/arm/boot/dts/bcm283x.dtsi
95 ++++ b/arch/arm/boot/dts/bcm283x.dtsi
96 +@@ -38,7 +38,7 @@
97 +
98 + trips {
99 + cpu-crit {
100 +- temperature = <80000>;
101 ++ temperature = <90000>;
102 + hysteresis = <0>;
103 + type = "critical";
104 + };
105 +diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
106 +index fe488523694c..635b0d549487 100644
107 +--- a/arch/arm/mach-vexpress/spc.c
108 ++++ b/arch/arm/mach-vexpress/spc.c
109 +@@ -555,8 +555,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
110 +
111 + static int __init ve_spc_clk_init(void)
112 + {
113 +- int cpu;
114 ++ int cpu, cluster;
115 + struct clk *clk;
116 ++ bool init_opp_table[MAX_CLUSTERS] = { false };
117 +
118 + if (!info)
119 + return 0; /* Continue only if SPC is initialised */
120 +@@ -582,8 +583,17 @@ static int __init ve_spc_clk_init(void)
121 + continue;
122 + }
123 +
124 ++ cluster = topology_physical_package_id(cpu_dev->id);
125 ++ if (init_opp_table[cluster])
126 ++ continue;
127 ++
128 + if (ve_init_opp_table(cpu_dev))
129 + pr_warn("failed to initialise cpu%d opp table\n", cpu);
130 ++ else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
131 ++ topology_core_cpumask(cpu_dev->id)))
132 ++ pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
133 ++ else
134 ++ init_opp_table[cluster] = true;
135 + }
136 +
137 + platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
138 +diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
139 +index 42faa95ce664..57a7a9d68475 100644
140 +--- a/arch/mips/net/ebpf_jit.c
141 ++++ b/arch/mips/net/ebpf_jit.c
142 +@@ -612,6 +612,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
143 + static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
144 + {
145 + int off, b_off;
146 ++ int tcc_reg;
147 +
148 + ctx->flags |= EBPF_SEEN_TC;
149 + /*
150 +@@ -624,14 +625,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
151 + b_off = b_imm(this_idx + 1, ctx);
152 + emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
153 + /*
154 +- * if (--TCC < 0)
155 ++ * if (TCC-- < 0)
156 + * goto out;
157 + */
158 + /* Delay slot */
159 +- emit_instr(ctx, daddiu, MIPS_R_T5,
160 +- (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
161 ++ tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
162 ++ emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
163 + b_off = b_imm(this_idx + 1, ctx);
164 +- emit_instr(ctx, bltz, MIPS_R_T5, b_off);
165 ++ emit_instr(ctx, bltz, tcc_reg, b_off);
166 + /*
167 + * prog = array->ptrs[index];
168 + * if (prog == NULL)
169 +diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
170 +index f627c37dad9c..ab5c215cf46c 100644
171 +--- a/arch/parisc/include/asm/cmpxchg.h
172 ++++ b/arch/parisc/include/asm/cmpxchg.h
173 +@@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
174 + ** if (((unsigned long)p & 0xf) == 0)
175 + ** return __ldcw(p);
176 + */
177 +-#define xchg(ptr, x) \
178 +- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
179 ++#define xchg(ptr, x) \
180 ++({ \
181 ++ __typeof__(*(ptr)) __ret; \
182 ++ __typeof__(*(ptr)) _x_ = (x); \
183 ++ __ret = (__typeof__(*(ptr))) \
184 ++ __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
185 ++ __ret; \
186 ++})
187 +
188 + /* bug catcher for when unsupported size is used - won't link */
189 + extern void __cmpxchg_called_with_bad_pointer(void);
190 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
191 +index 30bf13b72e5e..3c5abfbbe60e 100644
192 +--- a/arch/powerpc/mm/mem.c
193 ++++ b/arch/powerpc/mm/mem.c
194 +@@ -353,6 +353,14 @@ void __init mem_init(void)
195 + BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
196 +
197 + #ifdef CONFIG_SWIOTLB
198 ++ /*
199 ++ * Some platforms (e.g. 85xx) limit DMA-able memory way below
200 ++ * 4G. We force memblock to bottom-up mode to ensure that the
201 ++ * memory allocated in swiotlb_init() is DMA-able.
202 ++ * As it's the last memblock allocation, no need to reset it
203 ++ * back to to-down.
204 ++ */
205 ++ memblock_set_bottom_up(true);
206 + swiotlb_init(0);
207 + #endif
208 +
209 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
210 +index 6ed99de2ddf5..c1f7b3cb84a9 100644
211 +--- a/arch/x86/events/core.c
212 ++++ b/arch/x86/events/core.c
213 +@@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what)
214 + * LBR and BTS are still mutually exclusive.
215 + */
216 + if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
217 +- return 0;
218 ++ goto out;
219 +
220 + if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
221 + mutex_lock(&pmc_reserve_mutex);
222 +@@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what)
223 + mutex_unlock(&pmc_reserve_mutex);
224 + }
225 +
226 ++out:
227 + atomic_inc(&active_events);
228 + return 0;
229 +
230 +@@ -397,11 +398,15 @@ fail_unlock:
231 +
232 + void x86_del_exclusive(unsigned int what)
233 + {
234 ++ atomic_dec(&active_events);
235 ++
236 ++ /*
237 ++ * See the comment in x86_add_exclusive().
238 ++ */
239 + if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
240 + return;
241 +
242 + atomic_dec(&x86_pmu.lbr_exclusive[what]);
243 +- atomic_dec(&active_events);
244 + }
245 +
246 + int x86_setup_perfctr(struct perf_event *event)
247 +diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
248 +index 5b513ccffde4..cadd7fd290fa 100644
249 +--- a/arch/x86/platform/efi/quirks.c
250 ++++ b/arch/x86/platform/efi/quirks.c
251 +@@ -257,10 +257,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
252 + return;
253 + }
254 +
255 +- /* No need to reserve regions that will never be freed. */
256 +- if (md.attribute & EFI_MEMORY_RUNTIME)
257 +- return;
258 +-
259 + size += addr % EFI_PAGE_SIZE;
260 + size = round_up(size, EFI_PAGE_SIZE);
261 + addr = round_down(addr, EFI_PAGE_SIZE);
262 +@@ -290,6 +286,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
263 + early_memunmap(new, new_size);
264 +
265 + efi_memmap_install(new_phys, num_entries);
266 ++ e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
267 ++ e820__update_table(e820_table);
268 + }
269 +
270 + /*
271 +diff --git a/block/blk-map.c b/block/blk-map.c
272 +index e31be14da8ea..f72a3af689b6 100644
273 +--- a/block/blk-map.c
274 ++++ b/block/blk-map.c
275 +@@ -152,7 +152,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
276 + return 0;
277 +
278 + unmap_rq:
279 +- __blk_rq_unmap_user(bio);
280 ++ blk_rq_unmap_user(bio);
281 + fail:
282 + rq->bio = NULL;
283 + return ret;
284 +diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
285 +index 24c461dea7af..fd8053f9556e 100644
286 +--- a/drivers/firmware/efi/libstub/gop.c
287 ++++ b/drivers/firmware/efi/libstub/gop.c
288 +@@ -85,30 +85,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
289 + }
290 + }
291 +
292 +-static efi_status_t
293 +-__gop_query32(efi_system_table_t *sys_table_arg,
294 +- struct efi_graphics_output_protocol_32 *gop32,
295 +- struct efi_graphics_output_mode_info **info,
296 +- unsigned long *size, u64 *fb_base)
297 +-{
298 +- struct efi_graphics_output_protocol_mode_32 *mode;
299 +- efi_graphics_output_protocol_query_mode query_mode;
300 +- efi_status_t status;
301 +- unsigned long m;
302 +-
303 +- m = gop32->mode;
304 +- mode = (struct efi_graphics_output_protocol_mode_32 *)m;
305 +- query_mode = (void *)(unsigned long)gop32->query_mode;
306 +-
307 +- status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
308 +- info);
309 +- if (status != EFI_SUCCESS)
310 +- return status;
311 +-
312 +- *fb_base = mode->frame_buffer_base;
313 +- return status;
314 +-}
315 +-
316 + static efi_status_t
317 + setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
318 + efi_guid_t *proto, unsigned long size, void **gop_handle)
319 +@@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
320 + u64 fb_base;
321 + struct efi_pixel_bitmask pixel_info;
322 + int pixel_format;
323 +- efi_status_t status = EFI_NOT_FOUND;
324 ++ efi_status_t status;
325 + u32 *handles = (u32 *)(unsigned long)gop_handle;
326 + int i;
327 +
328 +@@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
329 +
330 + nr_gops = size / sizeof(u32);
331 + for (i = 0; i < nr_gops; i++) {
332 ++ struct efi_graphics_output_protocol_mode_32 *mode;
333 + struct efi_graphics_output_mode_info *info = NULL;
334 + efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
335 + bool conout_found = false;
336 +@@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
337 + if (status == EFI_SUCCESS)
338 + conout_found = true;
339 +
340 +- status = __gop_query32(sys_table_arg, gop32, &info, &size,
341 +- &current_fb_base);
342 +- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
343 ++ mode = (void *)(unsigned long)gop32->mode;
344 ++ info = (void *)(unsigned long)mode->info;
345 ++ current_fb_base = mode->frame_buffer_base;
346 ++
347 ++ if ((!first_gop || conout_found) &&
348 + info->pixel_format != PIXEL_BLT_ONLY) {
349 + /*
350 + * Systems that use the UEFI Console Splitter may
351 +@@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
352 +
353 + /* Did we find any GOPs? */
354 + if (!first_gop)
355 +- goto out;
356 ++ return EFI_NOT_FOUND;
357 +
358 + /* EFI framebuffer */
359 + si->orig_video_isVGA = VIDEO_TYPE_EFI;
360 +@@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
361 + si->lfb_size = si->lfb_linelength * si->lfb_height;
362 +
363 + si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
364 +-out:
365 +- return status;
366 +-}
367 +-
368 +-static efi_status_t
369 +-__gop_query64(efi_system_table_t *sys_table_arg,
370 +- struct efi_graphics_output_protocol_64 *gop64,
371 +- struct efi_graphics_output_mode_info **info,
372 +- unsigned long *size, u64 *fb_base)
373 +-{
374 +- struct efi_graphics_output_protocol_mode_64 *mode;
375 +- efi_graphics_output_protocol_query_mode query_mode;
376 +- efi_status_t status;
377 +- unsigned long m;
378 +-
379 +- m = gop64->mode;
380 +- mode = (struct efi_graphics_output_protocol_mode_64 *)m;
381 +- query_mode = (void *)(unsigned long)gop64->query_mode;
382 +-
383 +- status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
384 +- info);
385 +- if (status != EFI_SUCCESS)
386 +- return status;
387 +
388 +- *fb_base = mode->frame_buffer_base;
389 +- return status;
390 ++ return EFI_SUCCESS;
391 + }
392 +
393 + static efi_status_t
394 +@@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
395 + u64 fb_base;
396 + struct efi_pixel_bitmask pixel_info;
397 + int pixel_format;
398 +- efi_status_t status = EFI_NOT_FOUND;
399 ++ efi_status_t status;
400 + u64 *handles = (u64 *)(unsigned long)gop_handle;
401 + int i;
402 +
403 +@@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
404 +
405 + nr_gops = size / sizeof(u64);
406 + for (i = 0; i < nr_gops; i++) {
407 ++ struct efi_graphics_output_protocol_mode_64 *mode;
408 + struct efi_graphics_output_mode_info *info = NULL;
409 + efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
410 + bool conout_found = false;
411 +@@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
412 + if (status == EFI_SUCCESS)
413 + conout_found = true;
414 +
415 +- status = __gop_query64(sys_table_arg, gop64, &info, &size,
416 +- &current_fb_base);
417 +- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
418 ++ mode = (void *)(unsigned long)gop64->mode;
419 ++ info = (void *)(unsigned long)mode->info;
420 ++ current_fb_base = mode->frame_buffer_base;
421 ++
422 ++ if ((!first_gop || conout_found) &&
423 + info->pixel_format != PIXEL_BLT_ONLY) {
424 + /*
425 + * Systems that use the UEFI Console Splitter may
426 +@@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
427 +
428 + /* Did we find any GOPs? */
429 + if (!first_gop)
430 +- goto out;
431 ++ return EFI_NOT_FOUND;
432 +
433 + /* EFI framebuffer */
434 + si->orig_video_isVGA = VIDEO_TYPE_EFI;
435 +@@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
436 + si->lfb_size = si->lfb_linelength * si->lfb_height;
437 +
438 + si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
439 +-out:
440 +- return status;
441 ++
442 ++ return EFI_SUCCESS;
443 + }
444 +
445 + /*
446 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
447 +index ce6dd49fbb98..916b88ee2de4 100644
448 +--- a/drivers/mmc/core/block.c
449 ++++ b/drivers/mmc/core/block.c
450 +@@ -28,6 +28,7 @@
451 + #include <linux/hdreg.h>
452 + #include <linux/kdev_t.h>
453 + #include <linux/blkdev.h>
454 ++#include <linux/cdev.h>
455 + #include <linux/mutex.h>
456 + #include <linux/scatterlist.h>
457 + #include <linux/string_helpers.h>
458 +@@ -87,6 +88,7 @@ static int max_devices;
459 + #define MAX_DEVICES 256
460 +
461 + static DEFINE_IDA(mmc_blk_ida);
462 ++static DEFINE_IDA(mmc_rpmb_ida);
463 +
464 + /*
465 + * There is one mmc_blk_data per slot.
466 +@@ -97,6 +99,7 @@ struct mmc_blk_data {
467 + struct gendisk *disk;
468 + struct mmc_queue queue;
469 + struct list_head part;
470 ++ struct list_head rpmbs;
471 +
472 + unsigned int flags;
473 + #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
474 +@@ -126,6 +129,32 @@ struct mmc_blk_data {
475 + struct dentry *ext_csd_dentry;
476 + };
477 +
478 ++/* Device type for RPMB character devices */
479 ++static dev_t mmc_rpmb_devt;
480 ++
481 ++/* Bus type for RPMB character devices */
482 ++static struct bus_type mmc_rpmb_bus_type = {
483 ++ .name = "mmc_rpmb",
484 ++};
485 ++
486 ++/**
487 ++ * struct mmc_rpmb_data - special RPMB device type for these areas
488 ++ * @dev: the device for the RPMB area
489 ++ * @chrdev: character device for the RPMB area
490 ++ * @id: unique device ID number
491 ++ * @part_index: partition index (0 on first)
492 ++ * @md: parent MMC block device
493 ++ * @node: list item, so we can put this device on a list
494 ++ */
495 ++struct mmc_rpmb_data {
496 ++ struct device dev;
497 ++ struct cdev chrdev;
498 ++ int id;
499 ++ unsigned int part_index;
500 ++ struct mmc_blk_data *md;
501 ++ struct list_head node;
502 ++};
503 ++
504 + static DEFINE_MUTEX(open_lock);
505 +
506 + module_param(perdev_minors, int, 0444);
507 +@@ -309,6 +338,7 @@ struct mmc_blk_ioc_data {
508 + struct mmc_ioc_cmd ic;
509 + unsigned char *buf;
510 + u64 buf_bytes;
511 ++ struct mmc_rpmb_data *rpmb;
512 + };
513 +
514 + static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
515 +@@ -447,14 +477,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
516 + struct mmc_request mrq = {};
517 + struct scatterlist sg;
518 + int err;
519 +- bool is_rpmb = false;
520 ++ unsigned int target_part;
521 + u32 status = 0;
522 +
523 + if (!card || !md || !idata)
524 + return -EINVAL;
525 +
526 +- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
527 +- is_rpmb = true;
528 ++ /*
529 ++ * The RPMB accesses comes in from the character device, so we
530 ++ * need to target these explicitly. Else we just target the
531 ++ * partition type for the block device the ioctl() was issued
532 ++ * on.
533 ++ */
534 ++ if (idata->rpmb) {
535 ++ /* Support multiple RPMB partitions */
536 ++ target_part = idata->rpmb->part_index;
537 ++ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
538 ++ } else {
539 ++ target_part = md->part_type;
540 ++ }
541 +
542 + cmd.opcode = idata->ic.opcode;
543 + cmd.arg = idata->ic.arg;
544 +@@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
545 +
546 + mrq.cmd = &cmd;
547 +
548 +- err = mmc_blk_part_switch(card, md->part_type);
549 ++ err = mmc_blk_part_switch(card, target_part);
550 + if (err)
551 + return err;
552 +
553 +@@ -508,7 +549,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
554 + return err;
555 + }
556 +
557 +- if (is_rpmb) {
558 ++ if (idata->rpmb) {
559 + err = mmc_set_blockcount(card, data.blocks,
560 + idata->ic.write_flag & (1 << 31));
561 + if (err)
562 +@@ -566,7 +607,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
563 +
564 + memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
565 +
566 +- if (is_rpmb) {
567 ++ if (idata->rpmb) {
568 + /*
569 + * Ensure RPMB command has completed by polling CMD13
570 + * "Send Status".
571 +@@ -582,7 +623,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
572 + }
573 +
574 + static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
575 +- struct mmc_ioc_cmd __user *ic_ptr)
576 ++ struct mmc_ioc_cmd __user *ic_ptr,
577 ++ struct mmc_rpmb_data *rpmb)
578 + {
579 + struct mmc_blk_ioc_data *idata;
580 + struct mmc_blk_ioc_data *idatas[1];
581 +@@ -594,6 +636,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
582 + idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
583 + if (IS_ERR(idata))
584 + return PTR_ERR(idata);
585 ++ /* This will be NULL on non-RPMB ioctl():s */
586 ++ idata->rpmb = rpmb;
587 +
588 + card = md->queue.card;
589 + if (IS_ERR(card)) {
590 +@@ -613,7 +657,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
591 + goto cmd_done;
592 + }
593 + idatas[0] = idata;
594 +- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
595 ++ req_to_mmc_queue_req(req)->drv_op =
596 ++ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
597 + req_to_mmc_queue_req(req)->drv_op_data = idatas;
598 + req_to_mmc_queue_req(req)->ioc_count = 1;
599 + blk_execute_rq(mq->queue, NULL, req, 0);
600 +@@ -628,7 +673,8 @@ cmd_done:
601 + }
602 +
603 + static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
604 +- struct mmc_ioc_multi_cmd __user *user)
605 ++ struct mmc_ioc_multi_cmd __user *user,
606 ++ struct mmc_rpmb_data *rpmb)
607 + {
608 + struct mmc_blk_ioc_data **idata = NULL;
609 + struct mmc_ioc_cmd __user *cmds = user->cmds;
610 +@@ -659,6 +705,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
611 + num_of_cmds = i;
612 + goto cmd_err;
613 + }
614 ++ /* This will be NULL on non-RPMB ioctl():s */
615 ++ idata[i]->rpmb = rpmb;
616 + }
617 +
618 + card = md->queue.card;
619 +@@ -679,7 +727,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
620 + err = PTR_ERR(req);
621 + goto cmd_err;
622 + }
623 +- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
624 ++ req_to_mmc_queue_req(req)->drv_op =
625 ++ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
626 + req_to_mmc_queue_req(req)->drv_op_data = idata;
627 + req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
628 + blk_execute_rq(mq->queue, NULL, req, 0);
629 +@@ -727,7 +776,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
630 + if (!md)
631 + return -EINVAL;
632 + ret = mmc_blk_ioctl_cmd(md,
633 +- (struct mmc_ioc_cmd __user *)arg);
634 ++ (struct mmc_ioc_cmd __user *)arg,
635 ++ NULL);
636 + mmc_blk_put(md);
637 + return ret;
638 + case MMC_IOC_MULTI_CMD:
639 +@@ -738,7 +788,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
640 + if (!md)
641 + return -EINVAL;
642 + ret = mmc_blk_ioctl_multi_cmd(md,
643 +- (struct mmc_ioc_multi_cmd __user *)arg);
644 ++ (struct mmc_ioc_multi_cmd __user *)arg,
645 ++ NULL);
646 + mmc_blk_put(md);
647 + return ret;
648 + default:
649 +@@ -1188,18 +1239,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
650 + md->reset_done &= ~type;
651 + }
652 +
653 +-int mmc_access_rpmb(struct mmc_queue *mq)
654 +-{
655 +- struct mmc_blk_data *md = mq->blkdata;
656 +- /*
657 +- * If this is a RPMB partition access, return ture
658 +- */
659 +- if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
660 +- return true;
661 +-
662 +- return false;
663 +-}
664 +-
665 + /*
666 + * The non-block commands come back from the block layer after it queued it and
667 + * processed it with all other requests and then they get issued in this
668 +@@ -1210,17 +1249,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
669 + struct mmc_queue_req *mq_rq;
670 + struct mmc_card *card = mq->card;
671 + struct mmc_blk_data *md = mq->blkdata;
672 +- struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
673 + struct mmc_blk_ioc_data **idata;
674 ++ bool rpmb_ioctl;
675 + u8 **ext_csd;
676 + u32 status;
677 + int ret;
678 + int i;
679 +
680 + mq_rq = req_to_mmc_queue_req(req);
681 ++ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
682 +
683 + switch (mq_rq->drv_op) {
684 + case MMC_DRV_OP_IOCTL:
685 ++ case MMC_DRV_OP_IOCTL_RPMB:
686 + idata = mq_rq->drv_op_data;
687 + for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
688 + ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
689 +@@ -1228,8 +1269,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
690 + break;
691 + }
692 + /* Always switch back to main area after RPMB access */
693 +- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
694 +- mmc_blk_part_switch(card, main_md->part_type);
695 ++ if (rpmb_ioctl)
696 ++ mmc_blk_part_switch(card, 0);
697 + break;
698 + case MMC_DRV_OP_BOOT_WP:
699 + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
700 +@@ -2114,6 +2155,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
701 +
702 + spin_lock_init(&md->lock);
703 + INIT_LIST_HEAD(&md->part);
704 ++ INIT_LIST_HEAD(&md->rpmbs);
705 + md->usage = 1;
706 +
707 + ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
708 +@@ -2232,6 +2274,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
709 + return 0;
710 + }
711 +
712 ++/**
713 ++ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
714 ++ * @filp: the character device file
715 ++ * @cmd: the ioctl() command
716 ++ * @arg: the argument from userspace
717 ++ *
718 ++ * This will essentially just redirect the ioctl()s coming in over to
719 ++ * the main block device spawning the RPMB character device.
720 ++ */
721 ++static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
722 ++ unsigned long arg)
723 ++{
724 ++ struct mmc_rpmb_data *rpmb = filp->private_data;
725 ++ int ret;
726 ++
727 ++ switch (cmd) {
728 ++ case MMC_IOC_CMD:
729 ++ ret = mmc_blk_ioctl_cmd(rpmb->md,
730 ++ (struct mmc_ioc_cmd __user *)arg,
731 ++ rpmb);
732 ++ break;
733 ++ case MMC_IOC_MULTI_CMD:
734 ++ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
735 ++ (struct mmc_ioc_multi_cmd __user *)arg,
736 ++ rpmb);
737 ++ break;
738 ++ default:
739 ++ ret = -EINVAL;
740 ++ break;
741 ++ }
742 ++
743 ++ return ret;
744 ++}
745 ++
746 ++#ifdef CONFIG_COMPAT
747 ++static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
748 ++ unsigned long arg)
749 ++{
750 ++ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
751 ++}
752 ++#endif
753 ++
754 ++static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
755 ++{
756 ++ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
757 ++ struct mmc_rpmb_data, chrdev);
758 ++
759 ++ get_device(&rpmb->dev);
760 ++ filp->private_data = rpmb;
761 ++ mmc_blk_get(rpmb->md->disk);
762 ++
763 ++ return nonseekable_open(inode, filp);
764 ++}
765 ++
766 ++static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
767 ++{
768 ++ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
769 ++ struct mmc_rpmb_data, chrdev);
770 ++
771 ++ put_device(&rpmb->dev);
772 ++ mmc_blk_put(rpmb->md);
773 ++
774 ++ return 0;
775 ++}
776 ++
777 ++static const struct file_operations mmc_rpmb_fileops = {
778 ++ .release = mmc_rpmb_chrdev_release,
779 ++ .open = mmc_rpmb_chrdev_open,
780 ++ .owner = THIS_MODULE,
781 ++ .llseek = no_llseek,
782 ++ .unlocked_ioctl = mmc_rpmb_ioctl,
783 ++#ifdef CONFIG_COMPAT
784 ++ .compat_ioctl = mmc_rpmb_ioctl_compat,
785 ++#endif
786 ++};
787 ++
788 ++static void mmc_blk_rpmb_device_release(struct device *dev)
789 ++{
790 ++ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
791 ++
792 ++ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
793 ++ kfree(rpmb);
794 ++}
795 ++
796 ++static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
797 ++ struct mmc_blk_data *md,
798 ++ unsigned int part_index,
799 ++ sector_t size,
800 ++ const char *subname)
801 ++{
802 ++ int devidx, ret;
803 ++ char rpmb_name[DISK_NAME_LEN];
804 ++ char cap_str[10];
805 ++ struct mmc_rpmb_data *rpmb;
806 ++
807 ++ /* This creates the minor number for the RPMB char device */
808 ++ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
809 ++ if (devidx < 0)
810 ++ return devidx;
811 ++
812 ++ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
813 ++ if (!rpmb) {
814 ++ ida_simple_remove(&mmc_rpmb_ida, devidx);
815 ++ return -ENOMEM;
816 ++ }
817 ++
818 ++ snprintf(rpmb_name, sizeof(rpmb_name),
819 ++ "mmcblk%u%s", card->host->index, subname ? subname : "");
820 ++
821 ++ rpmb->id = devidx;
822 ++ rpmb->part_index = part_index;
823 ++ rpmb->dev.init_name = rpmb_name;
824 ++ rpmb->dev.bus = &mmc_rpmb_bus_type;
825 ++ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
826 ++ rpmb->dev.parent = &card->dev;
827 ++ rpmb->dev.release = mmc_blk_rpmb_device_release;
828 ++ device_initialize(&rpmb->dev);
829 ++ dev_set_drvdata(&rpmb->dev, rpmb);
830 ++ rpmb->md = md;
831 ++
832 ++ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
833 ++ rpmb->chrdev.owner = THIS_MODULE;
834 ++ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
835 ++ if (ret) {
836 ++ pr_err("%s: could not add character device\n", rpmb_name);
837 ++ goto out_put_device;
838 ++ }
839 ++
840 ++ list_add(&rpmb->node, &md->rpmbs);
841 ++
842 ++ string_get_size((u64)size, 512, STRING_UNITS_2,
843 ++ cap_str, sizeof(cap_str));
844 ++
845 ++ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
846 ++ rpmb_name, mmc_card_id(card),
847 ++ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
848 ++ MAJOR(mmc_rpmb_devt), rpmb->id);
849 ++
850 ++ return 0;
851 ++
852 ++out_put_device:
853 ++ put_device(&rpmb->dev);
854 ++ return ret;
855 ++}
856 ++
857 ++static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
858 ++
859 ++{
860 ++ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
861 ++ put_device(&rpmb->dev);
862 ++}
863 ++
864 + /* MMC Physical partitions consist of two boot partitions and
865 + * up to four general purpose partitions.
866 + * For each partition enabled in EXT_CSD a block device will be allocatedi
867 +@@ -2240,13 +2434,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
868 +
869 + static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
870 + {
871 +- int idx, ret = 0;
872 ++ int idx, ret;
873 +
874 + if (!mmc_card_mmc(card))
875 + return 0;
876 +
877 + for (idx = 0; idx < card->nr_parts; idx++) {
878 +- if (card->part[idx].size) {
879 ++ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
880 ++ /*
881 ++ * RPMB partitions does not provide block access, they
882 ++ * are only accessed using ioctl():s. Thus create
883 ++ * special RPMB block devices that do not have a
884 ++ * backing block queue for these.
885 ++ */
886 ++ ret = mmc_blk_alloc_rpmb_part(card, md,
887 ++ card->part[idx].part_cfg,
888 ++ card->part[idx].size >> 9,
889 ++ card->part[idx].name);
890 ++ if (ret)
891 ++ return ret;
892 ++ } else if (card->part[idx].size) {
893 + ret = mmc_blk_alloc_part(card, md,
894 + card->part[idx].part_cfg,
895 + card->part[idx].size >> 9,
896 +@@ -2258,7 +2465,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
897 + }
898 + }
899 +
900 +- return ret;
901 ++ return 0;
902 + }
903 +
904 + static void mmc_blk_remove_req(struct mmc_blk_data *md)
905 +@@ -2295,7 +2502,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
906 + {
907 + struct list_head *pos, *q;
908 + struct mmc_blk_data *part_md;
909 ++ struct mmc_rpmb_data *rpmb;
910 +
911 ++ /* Remove RPMB partitions */
912 ++ list_for_each_safe(pos, q, &md->rpmbs) {
913 ++ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
914 ++ list_del(pos);
915 ++ mmc_blk_remove_rpmb_part(rpmb);
916 ++ }
917 ++ /* Remove block partitions */
918 + list_for_each_safe(pos, q, &md->part) {
919 + part_md = list_entry(pos, struct mmc_blk_data, part);
920 + list_del(pos);
921 +@@ -2649,6 +2864,17 @@ static int __init mmc_blk_init(void)
922 + {
923 + int res;
924 +
925 ++ res = bus_register(&mmc_rpmb_bus_type);
926 ++ if (res < 0) {
927 ++ pr_err("mmcblk: could not register RPMB bus type\n");
928 ++ return res;
929 ++ }
930 ++ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
931 ++ if (res < 0) {
932 ++ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
933 ++ goto out_bus_unreg;
934 ++ }
935 ++
936 + if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
937 + pr_info("mmcblk: using %d minors per device\n", perdev_minors);
938 +
939 +@@ -2656,16 +2882,20 @@ static int __init mmc_blk_init(void)
940 +
941 + res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
942 + if (res)
943 +- goto out;
944 ++ goto out_chrdev_unreg;
945 +
946 + res = mmc_register_driver(&mmc_driver);
947 + if (res)
948 +- goto out2;
949 ++ goto out_blkdev_unreg;
950 +
951 + return 0;
952 +- out2:
953 ++
954 ++out_blkdev_unreg:
955 + unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
956 +- out:
957 ++out_chrdev_unreg:
958 ++ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
959 ++out_bus_unreg:
960 ++ bus_unregister(&mmc_rpmb_bus_type);
961 + return res;
962 + }
963 +
964 +@@ -2673,6 +2903,8 @@ static void __exit mmc_blk_exit(void)
965 + {
966 + mmc_unregister_driver(&mmc_driver);
967 + unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
968 ++ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
969 ++ bus_unregister(&mmc_rpmb_bus_type);
970 + }
971 +
972 + module_init(mmc_blk_init);
973 +diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
974 +index 0a4e77a5ba33..f74f9ef460cc 100644
975 +--- a/drivers/mmc/core/queue.c
976 ++++ b/drivers/mmc/core/queue.c
977 +@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
978 + {
979 + struct mmc_queue *mq = q->queuedata;
980 +
981 +- if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
982 ++ if (mq && mmc_card_removed(mq->card))
983 + return BLKPREP_KILL;
984 +
985 + req->rq_flags |= RQF_DONTPREP;
986 +diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
987 +index 6bfba32ffa66..547b457c4251 100644
988 +--- a/drivers/mmc/core/queue.h
989 ++++ b/drivers/mmc/core/queue.h
990 +@@ -36,12 +36,14 @@ struct mmc_blk_request {
991 + /**
992 + * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
993 + * @MMC_DRV_OP_IOCTL: ioctl operation
994 ++ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
995 + * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
996 + * @MMC_DRV_OP_GET_CARD_STATUS: get card status
997 + * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
998 + */
999 + enum mmc_drv_op {
1000 + MMC_DRV_OP_IOCTL,
1001 ++ MMC_DRV_OP_IOCTL_RPMB,
1002 + MMC_DRV_OP_BOOT_WP,
1003 + MMC_DRV_OP_GET_CARD_STATUS,
1004 + MMC_DRV_OP_GET_EXT_CSD,
1005 +@@ -82,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *);
1006 + extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
1007 + struct mmc_queue_req *);
1008 +
1009 +-extern int mmc_access_rpmb(struct mmc_queue *);
1010 +-
1011 + #endif
1012 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
1013 +index d76d7c7ea819..544b6a9cc01a 100644
1014 +--- a/drivers/net/dsa/mv88e6xxx/global1.c
1015 ++++ b/drivers/net/dsa/mv88e6xxx/global1.c
1016 +@@ -313,6 +313,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
1017 + {
1018 + u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
1019 +
1020 ++ /* Use the default high priority for management frames sent to
1021 ++ * the CPU.
1022 ++ */
1023 ++ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
1024 ++
1025 + return mv88e6390_g1_monitor_write(chip, ptr, port);
1026 + }
1027 +
1028 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
1029 +index 950b914f9251..d82e8956cbd5 100644
1030 +--- a/drivers/net/dsa/mv88e6xxx/global1.h
1031 ++++ b/drivers/net/dsa/mv88e6xxx/global1.h
1032 +@@ -189,6 +189,7 @@
1033 + #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
1034 + #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
1035 + #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
1036 ++#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
1037 + #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
1038 +
1039 + /* Offset 0x1C: Global Control 2 */
1040 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1041 +index 4e091a11daaf..52bce009d096 100644
1042 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1043 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1044 +@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
1045 + for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
1046 + u32 func_config =
1047 + MF_CFG_RD(bp,
1048 +- func_mf_config[BP_PORT(bp) + 2 * i].
1049 ++ func_mf_config[BP_PATH(bp) + 2 * i].
1050 + config);
1051 + func_num +=
1052 + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
1053 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1054 +index dbe8feec456c..b0ada7eac652 100644
1055 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1056 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1057 +@@ -9995,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
1058 + */
1059 + static void bnx2x_parity_recover(struct bnx2x *bp)
1060 + {
1061 +- bool global = false;
1062 + u32 error_recovered, error_unrecovered;
1063 +- bool is_parity;
1064 ++ bool is_parity, global = false;
1065 ++#ifdef CONFIG_BNX2X_SRIOV
1066 ++ int vf_idx;
1067 ++
1068 ++ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
1069 ++ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
1070 +
1071 ++ if (vf)
1072 ++ vf->state = VF_LOST;
1073 ++ }
1074 ++#endif
1075 + DP(NETIF_MSG_HW, "Handling parity\n");
1076 + while (1) {
1077 + switch (bp->recovery_state) {
1078 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1079 +index 53466f6cebab..a887bfa24c88 100644
1080 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1081 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1082 +@@ -139,6 +139,7 @@ struct bnx2x_virtf {
1083 + #define VF_ACQUIRED 1 /* VF acquired, but not initialized */
1084 + #define VF_ENABLED 2 /* VF Enabled */
1085 + #define VF_RESET 3 /* VF FLR'd, pending cleanup */
1086 ++#define VF_LOST 4 /* Recovery while VFs are loaded */
1087 +
1088 + bool flr_clnup_stage; /* true during flr cleanup */
1089 + bool malicious; /* true if FW indicated so, until FLR */
1090 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1091 +index 76a4668c50fe..6d5b81a971e3 100644
1092 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1093 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1094 +@@ -2112,6 +2112,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1095 + {
1096 + int i;
1097 +
1098 ++ if (vf->state == VF_LOST) {
1099 ++ /* Just ack the FW and return if VFs are lost
1100 ++ * in case of parity error. VFs are supposed to be timedout
1101 ++ * on waiting for PF response.
1102 ++ */
1103 ++ DP(BNX2X_MSG_IOV,
1104 ++ "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
1105 ++
1106 ++ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1107 ++ return;
1108 ++ }
1109 ++
1110 + /* check if tlv type is known */
1111 + if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
1112 + /* Lock the per vf op mutex and note the locker's identity.
1113 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1114 +index a62128a444a6..149fd0d5e069 100644
1115 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1116 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1117 +@@ -724,6 +724,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
1118 + /* default */
1119 + break;
1120 + case PHY_INTERFACE_MODE_RGMII:
1121 ++ case PHY_INTERFACE_MODE_RGMII_ID:
1122 ++ case PHY_INTERFACE_MODE_RGMII_RXID:
1123 ++ case PHY_INTERFACE_MODE_RGMII_TXID:
1124 + reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
1125 + break;
1126 + case PHY_INTERFACE_MODE_RMII:
1127 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
1128 +index 62ccbd47c1db..fc1fa0f9f338 100644
1129 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
1130 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
1131 +@@ -53,7 +53,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
1132 + * rate, which then uses the auto-reparenting feature of the
1133 + * clock driver, and enabling/disabling the clock.
1134 + */
1135 +- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
1136 ++ if (phy_interface_mode_is_rgmii(gmac->interface)) {
1137 + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
1138 + clk_prepare_enable(gmac->tx_clk);
1139 + gmac->clk_enabled = 1;
1140 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1141 +index e6d16c48ffef..e89466bd432d 100644
1142 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1143 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1144 +@@ -51,7 +51,7 @@
1145 + #include <linux/of_mdio.h>
1146 + #include "dwmac1000.h"
1147 +
1148 +-#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
1149 ++#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
1150 + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
1151 +
1152 + /* Module parameters */
1153 +@@ -3597,12 +3597,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
1154 + static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1155 + {
1156 + struct stmmac_priv *priv = netdev_priv(dev);
1157 ++ int txfifosz = priv->plat->tx_fifo_size;
1158 ++
1159 ++ if (txfifosz == 0)
1160 ++ txfifosz = priv->dma_cap.tx_fifo_size;
1161 ++
1162 ++ txfifosz /= priv->plat->tx_queues_to_use;
1163 +
1164 + if (netif_running(dev)) {
1165 + netdev_err(priv->dev, "must be stopped to change its MTU\n");
1166 + return -EBUSY;
1167 + }
1168 +
1169 ++ new_mtu = STMMAC_ALIGN(new_mtu);
1170 ++
1171 ++ /* If condition true, FIFO is too small or MTU too large */
1172 ++ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
1173 ++ return -EINVAL;
1174 ++
1175 + dev->mtu = new_mtu;
1176 +
1177 + netdev_update_features(dev);
1178 +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
1179 +index 35905e9ee9ec..25be27826a22 100644
1180 +--- a/drivers/net/gtp.c
1181 ++++ b/drivers/net/gtp.c
1182 +@@ -816,7 +816,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
1183 + lock_sock(sock->sk);
1184 + if (sock->sk->sk_user_data) {
1185 + sk = ERR_PTR(-EBUSY);
1186 +- goto out_sock;
1187 ++ goto out_rel_sock;
1188 + }
1189 +
1190 + sk = sock->sk;
1191 +@@ -829,8 +829,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
1192 +
1193 + setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
1194 +
1195 +-out_sock:
1196 ++out_rel_sock:
1197 + release_sock(sock->sk);
1198 ++out_sock:
1199 + sockfd_put(sock);
1200 + return sk;
1201 + }
1202 +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
1203 +index 0f07b5978fa1..fc794e69e6a1 100644
1204 +--- a/drivers/net/hyperv/hyperv_net.h
1205 ++++ b/drivers/net/hyperv/hyperv_net.h
1206 +@@ -179,7 +179,6 @@ struct rndis_device {
1207 +
1208 + u8 hw_mac_adr[ETH_ALEN];
1209 + u8 rss_key[NETVSC_HASH_KEYLEN];
1210 +- u16 rx_table[ITAB_NUM];
1211 + };
1212 +
1213 +
1214 +@@ -741,6 +740,8 @@ struct net_device_context {
1215 +
1216 + u32 tx_table[VRSS_SEND_TAB_SIZE];
1217 +
1218 ++ u16 rx_table[ITAB_NUM];
1219 ++
1220 + /* Ethtool settings */
1221 + bool udp4_l4_hash;
1222 + bool udp6_l4_hash;
1223 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1224 +index 5a44b9795266..a89de5752a8c 100644
1225 +--- a/drivers/net/hyperv/netvsc_drv.c
1226 ++++ b/drivers/net/hyperv/netvsc_drv.c
1227 +@@ -1528,7 +1528,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1228 + rndis_dev = ndev->extension;
1229 + if (indir) {
1230 + for (i = 0; i < ITAB_NUM; i++)
1231 +- indir[i] = rndis_dev->rx_table[i];
1232 ++ indir[i] = ndc->rx_table[i];
1233 + }
1234 +
1235 + if (key)
1236 +@@ -1558,7 +1558,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1237 + return -EINVAL;
1238 +
1239 + for (i = 0; i < ITAB_NUM; i++)
1240 +- rndis_dev->rx_table[i] = indir[i];
1241 ++ ndc->rx_table[i] = indir[i];
1242 + }
1243 +
1244 + if (!key) {
1245 +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
1246 +index fc1d5e14d83e..b19557c035f2 100644
1247 +--- a/drivers/net/hyperv/rndis_filter.c
1248 ++++ b/drivers/net/hyperv/rndis_filter.c
1249 +@@ -715,6 +715,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
1250 + const u8 *rss_key, u16 flag)
1251 + {
1252 + struct net_device *ndev = rdev->ndev;
1253 ++ struct net_device_context *ndc = netdev_priv(ndev);
1254 + struct rndis_request *request;
1255 + struct rndis_set_request *set;
1256 + struct rndis_set_complete *set_complete;
1257 +@@ -754,7 +755,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
1258 + /* Set indirection table entries */
1259 + itab = (u32 *)(rssp + 1);
1260 + for (i = 0; i < ITAB_NUM; i++)
1261 +- itab[i] = rdev->rx_table[i];
1262 ++ itab[i] = ndc->rx_table[i];
1263 +
1264 + /* Set hask key values */
1265 + keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
1266 +@@ -1204,6 +1205,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1267 + struct netvsc_device_info *device_info)
1268 + {
1269 + struct net_device *net = hv_get_drvdata(dev);
1270 ++ struct net_device_context *ndc = netdev_priv(net);
1271 + struct netvsc_device *net_device;
1272 + struct rndis_device *rndis_device;
1273 + struct ndis_recv_scale_cap rsscap;
1274 +@@ -1286,9 +1288,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1275 + /* We will use the given number of channels if available. */
1276 + net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1277 +
1278 +- for (i = 0; i < ITAB_NUM; i++)
1279 +- rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
1280 ++ if (!netif_is_rxfh_configured(net)) {
1281 ++ for (i = 0; i < ITAB_NUM; i++)
1282 ++ ndc->rx_table[i] = ethtool_rxfh_indir_default(
1283 + i, net_device->num_chn);
1284 ++ }
1285 +
1286 + atomic_set(&net_device->open_chn, 1);
1287 + vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1288 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
1289 +index 8d5f88a538fc..2b977655834c 100644
1290 +--- a/drivers/net/macvlan.c
1291 ++++ b/drivers/net/macvlan.c
1292 +@@ -263,7 +263,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
1293 + struct net_device *src,
1294 + enum macvlan_mode mode)
1295 + {
1296 +- const struct ethhdr *eth = eth_hdr(skb);
1297 ++ const struct ethhdr *eth = skb_eth_hdr(skb);
1298 + const struct macvlan_dev *vlan;
1299 + struct sk_buff *nskb;
1300 + unsigned int i;
1301 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1302 +index 7d1d5b30ecc3..c23f35dba718 100644
1303 +--- a/drivers/net/usb/lan78xx.c
1304 ++++ b/drivers/net/usb/lan78xx.c
1305 +@@ -497,7 +497,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
1306 + }
1307 + } else {
1308 + netdev_warn(dev->net,
1309 +- "Failed to read stat ret = 0x%x", ret);
1310 ++ "Failed to read stat ret = %d", ret);
1311 + }
1312 +
1313 + kfree(stats);
1314 +@@ -2604,11 +2604,6 @@ static int lan78xx_stop(struct net_device *net)
1315 + return 0;
1316 + }
1317 +
1318 +-static int lan78xx_linearize(struct sk_buff *skb)
1319 +-{
1320 +- return skb_linearize(skb);
1321 +-}
1322 +-
1323 + static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
1324 + struct sk_buff *skb, gfp_t flags)
1325 + {
1326 +@@ -2619,8 +2614,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
1327 + return NULL;
1328 + }
1329 +
1330 +- if (lan78xx_linearize(skb) < 0)
1331 ++ if (skb_linearize(skb)) {
1332 ++ dev_kfree_skb_any(skb);
1333 + return NULL;
1334 ++ }
1335 +
1336 + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
1337 +
1338 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1339 +index 153a81ece9fe..5aa7d5091f4d 100644
1340 +--- a/drivers/net/vxlan.c
1341 ++++ b/drivers/net/vxlan.c
1342 +@@ -2216,7 +2216,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1343 + skb_dst_update_pmtu(skb, mtu);
1344 + }
1345 +
1346 +- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1347 ++ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
1348 + ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1349 + err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
1350 + vni, md, flags, udp_sum);
1351 +@@ -2257,7 +2257,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1352 + skb_dst_update_pmtu(skb, mtu);
1353 + }
1354 +
1355 +- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1356 ++ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
1357 + ttl = ttl ? : ip6_dst_hoplimit(ndst);
1358 + skb_scrub_packet(skb, xnet);
1359 + err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
1360 +diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
1361 +index e76af2866a19..b5340af9fa5e 100644
1362 +--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
1363 ++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
1364 +@@ -956,59 +956,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1365 +
1366 + switch (*pos) {
1367 + case WLAN_EID_SUPP_RATES:
1368 ++ if (pos[1] > 32)
1369 ++ return;
1370 + sta_ptr->tdls_cap.rates_len = pos[1];
1371 + for (i = 0; i < pos[1]; i++)
1372 + sta_ptr->tdls_cap.rates[i] = pos[i + 2];
1373 + break;
1374 +
1375 + case WLAN_EID_EXT_SUPP_RATES:
1376 ++ if (pos[1] > 32)
1377 ++ return;
1378 + basic = sta_ptr->tdls_cap.rates_len;
1379 ++ if (pos[1] > 32 - basic)
1380 ++ return;
1381 + for (i = 0; i < pos[1]; i++)
1382 + sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
1383 + sta_ptr->tdls_cap.rates_len += pos[1];
1384 + break;
1385 + case WLAN_EID_HT_CAPABILITY:
1386 +- memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
1387 ++ if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
1388 ++ return;
1389 ++ if (pos[1] != sizeof(struct ieee80211_ht_cap))
1390 ++ return;
1391 ++ /* copy the ie's value into ht_capb*/
1392 ++ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
1393 + sizeof(struct ieee80211_ht_cap));
1394 + sta_ptr->is_11n_enabled = 1;
1395 + break;
1396 + case WLAN_EID_HT_OPERATION:
1397 +- memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
1398 ++ if (pos > end -
1399 ++ sizeof(struct ieee80211_ht_operation) - 2)
1400 ++ return;
1401 ++ if (pos[1] != sizeof(struct ieee80211_ht_operation))
1402 ++ return;
1403 ++ /* copy the ie's value into ht_oper*/
1404 ++ memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
1405 + sizeof(struct ieee80211_ht_operation));
1406 + break;
1407 + case WLAN_EID_BSS_COEX_2040:
1408 ++ if (pos > end - 3)
1409 ++ return;
1410 ++ if (pos[1] != 1)
1411 ++ return;
1412 + sta_ptr->tdls_cap.coex_2040 = pos[2];
1413 + break;
1414 + case WLAN_EID_EXT_CAPABILITY:
1415 ++ if (pos > end - sizeof(struct ieee_types_header))
1416 ++ return;
1417 ++ if (pos[1] < sizeof(struct ieee_types_header))
1418 ++ return;
1419 ++ if (pos[1] > 8)
1420 ++ return;
1421 + memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
1422 + sizeof(struct ieee_types_header) +
1423 + min_t(u8, pos[1], 8));
1424 + break;
1425 + case WLAN_EID_RSN:
1426 ++ if (pos > end - sizeof(struct ieee_types_header))
1427 ++ return;
1428 ++ if (pos[1] < sizeof(struct ieee_types_header))
1429 ++ return;
1430 ++ if (pos[1] > IEEE_MAX_IE_SIZE -
1431 ++ sizeof(struct ieee_types_header))
1432 ++ return;
1433 + memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
1434 + sizeof(struct ieee_types_header) +
1435 + min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
1436 + sizeof(struct ieee_types_header)));
1437 + break;
1438 + case WLAN_EID_QOS_CAPA:
1439 ++ if (pos > end - 3)
1440 ++ return;
1441 ++ if (pos[1] != 1)
1442 ++ return;
1443 + sta_ptr->tdls_cap.qos_info = pos[2];
1444 + break;
1445 + case WLAN_EID_VHT_OPERATION:
1446 +- if (priv->adapter->is_hw_11ac_capable)
1447 +- memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
1448 ++ if (priv->adapter->is_hw_11ac_capable) {
1449 ++ if (pos > end -
1450 ++ sizeof(struct ieee80211_vht_operation) - 2)
1451 ++ return;
1452 ++ if (pos[1] !=
1453 ++ sizeof(struct ieee80211_vht_operation))
1454 ++ return;
1455 ++ /* copy the ie's value into vhtoper*/
1456 ++ memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
1457 + sizeof(struct ieee80211_vht_operation));
1458 ++ }
1459 + break;
1460 + case WLAN_EID_VHT_CAPABILITY:
1461 + if (priv->adapter->is_hw_11ac_capable) {
1462 +- memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
1463 ++ if (pos > end -
1464 ++ sizeof(struct ieee80211_vht_cap) - 2)
1465 ++ return;
1466 ++ if (pos[1] != sizeof(struct ieee80211_vht_cap))
1467 ++ return;
1468 ++ /* copy the ie's value into vhtcap*/
1469 ++ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
1470 + sizeof(struct ieee80211_vht_cap));
1471 + sta_ptr->is_11ac_enabled = 1;
1472 + }
1473 + break;
1474 + case WLAN_EID_AID:
1475 +- if (priv->adapter->is_hw_11ac_capable)
1476 ++ if (priv->adapter->is_hw_11ac_capable) {
1477 ++ if (pos > end - 4)
1478 ++ return;
1479 ++ if (pos[1] != 2)
1480 ++ return;
1481 + sta_ptr->tdls_cap.aid =
1482 + get_unaligned_le16((pos + 2));
1483 ++ }
1484 ++ break;
1485 + default:
1486 + break;
1487 + }
1488 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
1489 +index e3aefdafae89..0941555b84a5 100644
1490 +--- a/drivers/pci/switch/switchtec.c
1491 ++++ b/drivers/pci/switch/switchtec.c
1492 +@@ -23,7 +23,7 @@
1493 + #include <linux/pci.h>
1494 + #include <linux/cdev.h>
1495 + #include <linux/wait.h>
1496 +-
1497 ++#include <linux/io-64-nonatomic-lo-hi.h>
1498 + #include <linux/nospec.h>
1499 +
1500 + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
1501 +@@ -898,7 +898,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
1502 + u32 reg;
1503 +
1504 + s.global = ioread32(&stdev->mmio_sw_event->global_summary);
1505 +- s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
1506 ++ s.part_bitmap = readq(&stdev->mmio_sw_event->part_event_bitmap);
1507 + s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
1508 +
1509 + for (i = 0; i < stdev->partition_count; i++) {
1510 +diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
1511 +index 790a4a73ea2c..40b74648bd31 100644
1512 +--- a/drivers/regulator/rn5t618-regulator.c
1513 ++++ b/drivers/regulator/rn5t618-regulator.c
1514 +@@ -154,6 +154,7 @@ static struct platform_driver rn5t618_regulator_driver = {
1515 +
1516 + module_platform_driver(rn5t618_regulator_driver);
1517 +
1518 ++MODULE_ALIAS("platform:rn5t618-regulator");
1519 + MODULE_AUTHOR("Beniamino Galvani <b.galvani@×××××.com>");
1520 + MODULE_DESCRIPTION("RN5T618 regulator driver");
1521 + MODULE_LICENSE("GPL v2");
1522 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
1523 +index 0d5e2d92e05b..aa651403546f 100644
1524 +--- a/drivers/s390/block/dasd_eckd.c
1525 ++++ b/drivers/s390/block/dasd_eckd.c
1526 +@@ -1133,7 +1133,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
1527 + {
1528 + struct dasd_eckd_private *private = device->private;
1529 + int fcx_in_css, fcx_in_gneq, fcx_in_features;
1530 +- int tpm, mdc;
1531 ++ unsigned int mdc;
1532 ++ int tpm;
1533 +
1534 + if (dasd_nofcx)
1535 + return 0;
1536 +@@ -1147,7 +1148,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
1537 + return 0;
1538 +
1539 + mdc = ccw_device_get_mdc(device->cdev, 0);
1540 +- if (mdc < 0) {
1541 ++ if (mdc == 0) {
1542 + dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1543 + return 0;
1544 + } else {
1545 +@@ -1158,12 +1159,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
1546 + static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1547 + {
1548 + struct dasd_eckd_private *private = device->private;
1549 +- int mdc;
1550 ++ unsigned int mdc;
1551 + u32 fcx_max_data;
1552 +
1553 + if (private->fcx_max_data) {
1554 + mdc = ccw_device_get_mdc(device->cdev, lpm);
1555 +- if ((mdc < 0)) {
1556 ++ if (mdc == 0) {
1557 + dev_warn(&device->cdev->dev,
1558 + "Detecting the maximum data size for zHPF "
1559 + "requests failed (rc=%d) for a new path %x\n",
1560 +@@ -1767,7 +1768,7 @@ out_err2:
1561 + dasd_free_block(device->block);
1562 + device->block = NULL;
1563 + out_err1:
1564 +- kfree(private->conf_data);
1565 ++ dasd_eckd_clear_conf_data(device);
1566 + kfree(device->private);
1567 + device->private = NULL;
1568 + return rc;
1569 +@@ -1776,7 +1777,6 @@ out_err1:
1570 + static void dasd_eckd_uncheck_device(struct dasd_device *device)
1571 + {
1572 + struct dasd_eckd_private *private = device->private;
1573 +- int i;
1574 +
1575 + if (!private)
1576 + return;
1577 +@@ -1786,21 +1786,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
1578 + private->sneq = NULL;
1579 + private->vdsneq = NULL;
1580 + private->gneq = NULL;
1581 +- private->conf_len = 0;
1582 +- for (i = 0; i < 8; i++) {
1583 +- kfree(device->path[i].conf_data);
1584 +- if ((__u8 *)device->path[i].conf_data ==
1585 +- private->conf_data) {
1586 +- private->conf_data = NULL;
1587 +- private->conf_len = 0;
1588 +- }
1589 +- device->path[i].conf_data = NULL;
1590 +- device->path[i].cssid = 0;
1591 +- device->path[i].ssid = 0;
1592 +- device->path[i].chpid = 0;
1593 +- }
1594 +- kfree(private->conf_data);
1595 +- private->conf_data = NULL;
1596 ++ dasd_eckd_clear_conf_data(device);
1597 + }
1598 +
1599 + static struct dasd_ccw_req *
1600 +diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
1601 +index b22922ec32d1..474afec9ab87 100644
1602 +--- a/drivers/s390/cio/device_ops.c
1603 ++++ b/drivers/s390/cio/device_ops.c
1604 +@@ -595,7 +595,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
1605 + * @mask: mask of paths to use
1606 + *
1607 + * Return the number of 64K-bytes blocks all paths at least support
1608 +- * for a transport command. Return values <= 0 indicate failures.
1609 ++ * for a transport command. Return value 0 indicates failure.
1610 + */
1611 + int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
1612 + {
1613 +diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
1614 +index 877937706240..828fbbebc3c4 100644
1615 +--- a/drivers/spi/spi-cavium-thunderx.c
1616 ++++ b/drivers/spi/spi-cavium-thunderx.c
1617 +@@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
1618 +
1619 + error:
1620 + clk_disable_unprepare(p->clk);
1621 ++ pci_release_regions(pdev);
1622 + spi_master_put(master);
1623 + return ret;
1624 + }
1625 +@@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
1626 + return;
1627 +
1628 + clk_disable_unprepare(p->clk);
1629 ++ pci_release_regions(pdev);
1630 + /* Put everything in a known state. */
1631 + writeq(0, p->register_base + OCTEON_SPI_CFG(p));
1632 + }
1633 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1634 +index cfb8f1126cf8..1f525d5f6d2d 100644
1635 +--- a/drivers/usb/core/config.c
1636 ++++ b/drivers/usb/core/config.c
1637 +@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
1638 + [USB_ENDPOINT_XFER_INT] = 1024,
1639 + };
1640 +
1641 +-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
1642 +- int asnum, struct usb_host_interface *ifp, int num_ep,
1643 +- unsigned char *buffer, int size)
1644 ++static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
1645 ++ struct usb_endpoint_descriptor *e2)
1646 ++{
1647 ++ if (e1->bEndpointAddress == e2->bEndpointAddress)
1648 ++ return true;
1649 ++
1650 ++ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
1651 ++ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
1652 ++ return true;
1653 ++ }
1654 ++
1655 ++ return false;
1656 ++}
1657 ++
1658 ++/*
1659 ++ * Check for duplicate endpoint addresses in other interfaces and in the
1660 ++ * altsetting currently being parsed.
1661 ++ */
1662 ++static bool config_endpoint_is_duplicate(struct usb_host_config *config,
1663 ++ int inum, int asnum, struct usb_endpoint_descriptor *d)
1664 ++{
1665 ++ struct usb_endpoint_descriptor *epd;
1666 ++ struct usb_interface_cache *intfc;
1667 ++ struct usb_host_interface *alt;
1668 ++ int i, j, k;
1669 ++
1670 ++ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
1671 ++ intfc = config->intf_cache[i];
1672 ++
1673 ++ for (j = 0; j < intfc->num_altsetting; ++j) {
1674 ++ alt = &intfc->altsetting[j];
1675 ++
1676 ++ if (alt->desc.bInterfaceNumber == inum &&
1677 ++ alt->desc.bAlternateSetting != asnum)
1678 ++ continue;
1679 ++
1680 ++ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
1681 ++ epd = &alt->endpoint[k].desc;
1682 ++
1683 ++ if (endpoint_is_duplicate(epd, d))
1684 ++ return true;
1685 ++ }
1686 ++ }
1687 ++ }
1688 ++
1689 ++ return false;
1690 ++}
1691 ++
1692 ++static int usb_parse_endpoint(struct device *ddev, int cfgno,
1693 ++ struct usb_host_config *config, int inum, int asnum,
1694 ++ struct usb_host_interface *ifp, int num_ep,
1695 ++ unsigned char *buffer, int size)
1696 + {
1697 + unsigned char *buffer0 = buffer;
1698 + struct usb_endpoint_descriptor *d;
1699 +@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
1700 + goto skip_to_next_endpoint_or_interface_descriptor;
1701 +
1702 + /* Check for duplicate endpoint addresses */
1703 +- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
1704 +- if (ifp->endpoint[i].desc.bEndpointAddress ==
1705 +- d->bEndpointAddress) {
1706 +- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
1707 +- cfgno, inum, asnum, d->bEndpointAddress);
1708 +- goto skip_to_next_endpoint_or_interface_descriptor;
1709 +- }
1710 ++ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
1711 ++ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
1712 ++ cfgno, inum, asnum, d->bEndpointAddress);
1713 ++ goto skip_to_next_endpoint_or_interface_descriptor;
1714 + }
1715 +
1716 + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
1717 +@@ -522,8 +568,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
1718 + if (((struct usb_descriptor_header *) buffer)->bDescriptorType
1719 + == USB_DT_INTERFACE)
1720 + break;
1721 +- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
1722 +- num_ep, buffer, size);
1723 ++ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
1724 ++ alt, num_ep, buffer, size);
1725 + if (retval < 0)
1726 + return retval;
1727 + ++n;
1728 +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
1729 +index 7e90f786d923..a0c1d77a7e38 100644
1730 +--- a/drivers/usb/gadget/udc/dummy_hcd.c
1731 ++++ b/drivers/usb/gadget/udc/dummy_hcd.c
1732 +@@ -1325,7 +1325,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
1733 + u32 this_sg;
1734 + bool next_sg;
1735 +
1736 +- to_host = usb_pipein(urb->pipe);
1737 ++ to_host = usb_urb_dir_in(urb);
1738 + rbuf = req->req.buf + req->req.actual;
1739 +
1740 + if (!urb->num_sgs) {
1741 +@@ -1413,7 +1413,7 @@ top:
1742 +
1743 + /* FIXME update emulated data toggle too */
1744 +
1745 +- to_host = usb_pipein(urb->pipe);
1746 ++ to_host = usb_urb_dir_in(urb);
1747 + if (unlikely(len == 0))
1748 + is_short = 1;
1749 + else {
1750 +@@ -1837,7 +1837,7 @@ restart:
1751 +
1752 + /* find the gadget's ep for this request (if configured) */
1753 + address = usb_pipeendpoint (urb->pipe);
1754 +- if (usb_pipein(urb->pipe))
1755 ++ if (usb_urb_dir_in(urb))
1756 + address |= USB_DIR_IN;
1757 + ep = find_endpoint(dum, address);
1758 + if (!ep) {
1759 +@@ -2390,7 +2390,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
1760 + s = "?";
1761 + break;
1762 + } s; }),
1763 +- ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
1764 ++ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
1765 + ({ char *s; \
1766 + switch (usb_pipetype(urb->pipe)) { \
1767 + case PIPE_CONTROL: \
1768 +@@ -2734,7 +2734,7 @@ static struct platform_driver dummy_hcd_driver = {
1769 + };
1770 +
1771 + /*-------------------------------------------------------------------------*/
1772 +-#define MAX_NUM_UDC 2
1773 ++#define MAX_NUM_UDC 32
1774 + static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
1775 + static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
1776 +
1777 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1778 +index 8d349f2e5656..dc9a1139e7e1 100644
1779 +--- a/drivers/usb/serial/option.c
1780 ++++ b/drivers/usb/serial/option.c
1781 +@@ -1175,6 +1175,8 @@ static const struct usb_device_id option_ids[] = {
1782 + .driver_info = NCTRL(0) | RSVD(3) },
1783 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
1784 + .driver_info = NCTRL(0) },
1785 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
1786 ++ .driver_info = NCTRL(0) | RSVD(3) },
1787 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1788 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1789 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
1790 +diff --git a/fs/drop_caches.c b/fs/drop_caches.c
1791 +index d31b6c72b476..dc1a1d5d825b 100644
1792 +--- a/fs/drop_caches.c
1793 ++++ b/fs/drop_caches.c
1794 +@@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
1795 + spin_unlock(&inode->i_lock);
1796 + spin_unlock(&sb->s_inode_list_lock);
1797 +
1798 +- cond_resched();
1799 + invalidate_mapping_pages(inode->i_mapping, 0, -1);
1800 + iput(toput_inode);
1801 + toput_inode = inode;
1802 +
1803 ++ cond_resched();
1804 + spin_lock(&sb->s_inode_list_lock);
1805 + }
1806 + spin_unlock(&sb->s_inode_list_lock);
1807 +diff --git a/fs/inode.c b/fs/inode.c
1808 +index 76f7535fe754..d2a700c5efce 100644
1809 +--- a/fs/inode.c
1810 ++++ b/fs/inode.c
1811 +@@ -656,6 +656,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
1812 + struct inode *inode, *next;
1813 + LIST_HEAD(dispose);
1814 +
1815 ++again:
1816 + spin_lock(&sb->s_inode_list_lock);
1817 + list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
1818 + spin_lock(&inode->i_lock);
1819 +@@ -678,6 +679,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
1820 + inode_lru_list_del(inode);
1821 + spin_unlock(&inode->i_lock);
1822 + list_add(&inode->i_lru, &dispose);
1823 ++ if (need_resched()) {
1824 ++ spin_unlock(&sb->s_inode_list_lock);
1825 ++ cond_resched();
1826 ++ dispose_list(&dispose);
1827 ++ goto again;
1828 ++ }
1829 + }
1830 + spin_unlock(&sb->s_inode_list_lock);
1831 +
1832 +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
1833 +index 506da82ff3f1..a308f7a7e577 100644
1834 +--- a/fs/notify/fsnotify.c
1835 ++++ b/fs/notify/fsnotify.c
1836 +@@ -90,6 +90,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
1837 +
1838 + iput_inode = inode;
1839 +
1840 ++ cond_resched();
1841 + spin_lock(&sb->s_inode_list_lock);
1842 + }
1843 + spin_unlock(&sb->s_inode_list_lock);
1844 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
1845 +index 3fdbdd29702b..30f5da8f4aff 100644
1846 +--- a/fs/quota/dquot.c
1847 ++++ b/fs/quota/dquot.c
1848 +@@ -976,6 +976,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
1849 + * later.
1850 + */
1851 + old_inode = inode;
1852 ++ cond_resched();
1853 + spin_lock(&sb->s_inode_list_lock);
1854 + }
1855 + spin_unlock(&sb->s_inode_list_lock);
1856 +diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
1857 +index 548fd535fd02..d433f5e292c9 100644
1858 +--- a/include/linux/if_ether.h
1859 ++++ b/include/linux/if_ether.h
1860 +@@ -28,6 +28,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
1861 + return (struct ethhdr *)skb_mac_header(skb);
1862 + }
1863 +
1864 ++/* Prefer this version in TX path, instead of
1865 ++ * skb_reset_mac_header() + eth_hdr()
1866 ++ */
1867 ++static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
1868 ++{
1869 ++ return (struct ethhdr *)skb->data;
1870 ++}
1871 ++
1872 + static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
1873 + {
1874 + return (struct ethhdr *)skb_inner_mac_header(skb);
1875 +diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h
1876 +index 4bc6d1a08781..b4d804a9fccb 100644
1877 +--- a/include/uapi/linux/netfilter/xt_sctp.h
1878 ++++ b/include/uapi/linux/netfilter/xt_sctp.h
1879 +@@ -41,19 +41,19 @@ struct xt_sctp_info {
1880 + #define SCTP_CHUNKMAP_SET(chunkmap, type) \
1881 + do { \
1882 + (chunkmap)[type / bytes(__u32)] |= \
1883 +- 1 << (type % bytes(__u32)); \
1884 ++ 1u << (type % bytes(__u32)); \
1885 + } while (0)
1886 +
1887 + #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
1888 + do { \
1889 + (chunkmap)[type / bytes(__u32)] &= \
1890 +- ~(1 << (type % bytes(__u32))); \
1891 ++ ~(1u << (type % bytes(__u32))); \
1892 + } while (0)
1893 +
1894 + #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
1895 + ({ \
1896 + ((chunkmap)[type / bytes (__u32)] & \
1897 +- (1 << (type % bytes (__u32)))) ? 1: 0; \
1898 ++ (1u << (type % bytes (__u32)))) ? 1: 0; \
1899 + })
1900 +
1901 + #define SCTP_CHUNKMAP_RESET(chunkmap) \
1902 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1903 +index a4875ff0bab1..615a2e44d2a0 100644
1904 +--- a/kernel/bpf/verifier.c
1905 ++++ b/kernel/bpf/verifier.c
1906 +@@ -1251,6 +1251,30 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1907 + return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict);
1908 + }
1909 +
1910 ++static int check_ctx_reg(struct bpf_verifier_env *env,
1911 ++ const struct bpf_reg_state *reg, int regno)
1912 ++{
1913 ++ /* Access to ctx or passing it to a helper is only allowed in
1914 ++ * its original, unmodified form.
1915 ++ */
1916 ++
1917 ++ if (reg->off) {
1918 ++ verbose("dereference of modified ctx ptr R%d off=%d disallowed\n",
1919 ++ regno, reg->off);
1920 ++ return -EACCES;
1921 ++ }
1922 ++
1923 ++ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1924 ++ char tn_buf[48];
1925 ++
1926 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1927 ++ verbose("variable ctx access var_off=%s disallowed\n", tn_buf);
1928 ++ return -EACCES;
1929 ++ }
1930 ++
1931 ++ return 0;
1932 ++}
1933 ++
1934 + /* truncate register to smaller size (in bytes)
1935 + * must be called with size < BPF_REG_SIZE
1936 + */
1937 +@@ -1320,22 +1344,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1938 + verbose("R%d leaks addr into ctx\n", value_regno);
1939 + return -EACCES;
1940 + }
1941 +- /* ctx accesses must be at a fixed offset, so that we can
1942 +- * determine what type of data were returned.
1943 +- */
1944 +- if (reg->off) {
1945 +- verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1946 +- regno, reg->off, off - reg->off);
1947 +- return -EACCES;
1948 +- }
1949 +- if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1950 +- char tn_buf[48];
1951 ++ err = check_ctx_reg(env, reg, regno);
1952 ++ if (err < 0)
1953 ++ return err;
1954 +
1955 +- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1956 +- verbose("variable ctx access var_off=%s off=%d size=%d",
1957 +- tn_buf, off, size);
1958 +- return -EACCES;
1959 +- }
1960 + err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1961 + if (!err && t == BPF_READ && value_regno >= 0) {
1962 + /* ctx access returns either a scalar, or a
1963 +@@ -1573,6 +1585,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1964 + expected_type = PTR_TO_CTX;
1965 + if (type != expected_type)
1966 + goto err_type;
1967 ++ err = check_ctx_reg(env, reg, regno);
1968 ++ if (err < 0)
1969 ++ return err;
1970 + } else if (arg_type == ARG_PTR_TO_MEM ||
1971 + arg_type == ARG_PTR_TO_UNINIT_MEM) {
1972 + expected_type = PTR_TO_STACK;
1973 +@@ -3442,6 +3457,7 @@ static bool may_access_skb(enum bpf_prog_type type)
1974 + static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
1975 + {
1976 + struct bpf_reg_state *regs = cur_regs(env);
1977 ++ static const int ctx_reg = BPF_REG_6;
1978 + u8 mode = BPF_MODE(insn->code);
1979 + int i, err;
1980 +
1981 +@@ -3458,11 +3474,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
1982 + }
1983 +
1984 + /* check whether implicit source operand (register R6) is readable */
1985 +- err = check_reg_arg(env, BPF_REG_6, SRC_OP);
1986 ++ err = check_reg_arg(env, ctx_reg, SRC_OP);
1987 + if (err)
1988 + return err;
1989 +
1990 +- if (regs[BPF_REG_6].type != PTR_TO_CTX) {
1991 ++ if (regs[ctx_reg].type != PTR_TO_CTX) {
1992 + verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
1993 + return -EINVAL;
1994 + }
1995 +@@ -3474,6 +3490,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
1996 + return err;
1997 + }
1998 +
1999 ++ err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
2000 ++ if (err < 0)
2001 ++ return err;
2002 ++
2003 + /* reset caller saved regs to unreadable */
2004 + for (i = 0; i < CALLER_SAVED_REGS; i++) {
2005 + mark_reg_not_init(regs, caller_saved[i]);
2006 +diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
2007 +index 9aa0fccd5d43..03595c29c566 100644
2008 +--- a/kernel/locking/spinlock_debug.c
2009 ++++ b/kernel/locking/spinlock_debug.c
2010 +@@ -51,19 +51,19 @@ EXPORT_SYMBOL(__rwlock_init);
2011 +
2012 + static void spin_dump(raw_spinlock_t *lock, const char *msg)
2013 + {
2014 +- struct task_struct *owner = NULL;
2015 ++ struct task_struct *owner = READ_ONCE(lock->owner);
2016 +
2017 +- if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
2018 +- owner = lock->owner;
2019 ++ if (owner == SPINLOCK_OWNER_INIT)
2020 ++ owner = NULL;
2021 + printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
2022 + msg, raw_smp_processor_id(),
2023 + current->comm, task_pid_nr(current));
2024 + printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
2025 + ".owner_cpu: %d\n",
2026 +- lock, lock->magic,
2027 ++ lock, READ_ONCE(lock->magic),
2028 + owner ? owner->comm : "<none>",
2029 + owner ? task_pid_nr(owner) : -1,
2030 +- lock->owner_cpu);
2031 ++ READ_ONCE(lock->owner_cpu));
2032 + dump_stack();
2033 + }
2034 +
2035 +@@ -80,16 +80,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
2036 + static inline void
2037 + debug_spin_lock_before(raw_spinlock_t *lock)
2038 + {
2039 +- SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
2040 +- SPIN_BUG_ON(lock->owner == current, lock, "recursion");
2041 +- SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
2042 ++ SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
2043 ++ SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
2044 ++ SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
2045 + lock, "cpu recursion");
2046 + }
2047 +
2048 + static inline void debug_spin_lock_after(raw_spinlock_t *lock)
2049 + {
2050 +- lock->owner_cpu = raw_smp_processor_id();
2051 +- lock->owner = current;
2052 ++ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
2053 ++ WRITE_ONCE(lock->owner, current);
2054 + }
2055 +
2056 + static inline void debug_spin_unlock(raw_spinlock_t *lock)
2057 +@@ -99,8 +99,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
2058 + SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
2059 + SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
2060 + lock, "wrong CPU");
2061 +- lock->owner = SPINLOCK_OWNER_INIT;
2062 +- lock->owner_cpu = -1;
2063 ++ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
2064 ++ WRITE_ONCE(lock->owner_cpu, -1);
2065 + }
2066 +
2067 + /*
2068 +@@ -183,8 +183,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)
2069 +
2070 + static inline void debug_write_lock_after(rwlock_t *lock)
2071 + {
2072 +- lock->owner_cpu = raw_smp_processor_id();
2073 +- lock->owner = current;
2074 ++ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
2075 ++ WRITE_ONCE(lock->owner, current);
2076 + }
2077 +
2078 + static inline void debug_write_unlock(rwlock_t *lock)
2079 +@@ -193,8 +193,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
2080 + RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
2081 + RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
2082 + lock, "wrong CPU");
2083 +- lock->owner = SPINLOCK_OWNER_INIT;
2084 +- lock->owner_cpu = -1;
2085 ++ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
2086 ++ WRITE_ONCE(lock->owner_cpu, -1);
2087 + }
2088 +
2089 + void do_raw_write_lock(rwlock_t *lock)
2090 +diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
2091 +index 0e7afdf86127..235bed825e3a 100644
2092 +--- a/net/8021q/vlan.h
2093 ++++ b/net/8021q/vlan.h
2094 +@@ -110,6 +110,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
2095 + void vlan_setup(struct net_device *dev);
2096 + int register_vlan_dev(struct net_device *dev);
2097 + void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
2098 ++void vlan_dev_uninit(struct net_device *dev);
2099 + bool vlan_dev_inherit_address(struct net_device *dev,
2100 + struct net_device *real_dev);
2101 +
2102 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2103 +index ac4c93c999b0..ed3717dc2d20 100644
2104 +--- a/net/8021q/vlan_dev.c
2105 ++++ b/net/8021q/vlan_dev.c
2106 +@@ -610,7 +610,8 @@ static int vlan_dev_init(struct net_device *dev)
2107 + return 0;
2108 + }
2109 +
2110 +-static void vlan_dev_uninit(struct net_device *dev)
2111 ++/* Note: this function might be called multiple times for the same device. */
2112 ++void vlan_dev_uninit(struct net_device *dev)
2113 + {
2114 + struct vlan_priority_tci_mapping *pm;
2115 + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
2116 +diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
2117 +index 5e831de3103e..fdf39dd5e755 100644
2118 +--- a/net/8021q/vlan_netlink.c
2119 ++++ b/net/8021q/vlan_netlink.c
2120 +@@ -95,11 +95,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
2121 + struct ifla_vlan_flags *flags;
2122 + struct ifla_vlan_qos_mapping *m;
2123 + struct nlattr *attr;
2124 +- int rem;
2125 ++ int rem, err;
2126 +
2127 + if (data[IFLA_VLAN_FLAGS]) {
2128 + flags = nla_data(data[IFLA_VLAN_FLAGS]);
2129 +- vlan_dev_change_flags(dev, flags->flags, flags->mask);
2130 ++ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
2131 ++ if (err)
2132 ++ return err;
2133 + }
2134 + if (data[IFLA_VLAN_INGRESS_QOS]) {
2135 + nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
2136 +@@ -110,7 +112,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
2137 + if (data[IFLA_VLAN_EGRESS_QOS]) {
2138 + nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
2139 + m = nla_data(attr);
2140 +- vlan_dev_set_egress_priority(dev, m->from, m->to);
2141 ++ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
2142 ++ if (err)
2143 ++ return err;
2144 + }
2145 + }
2146 + return 0;
2147 +@@ -157,10 +161,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
2148 + return -EINVAL;
2149 +
2150 + err = vlan_changelink(dev, tb, data, extack);
2151 +- if (err < 0)
2152 +- return err;
2153 +-
2154 +- return register_vlan_dev(dev);
2155 ++ if (!err)
2156 ++ err = register_vlan_dev(dev);
2157 ++ if (err)
2158 ++ vlan_dev_uninit(dev);
2159 ++ return err;
2160 + }
2161 +
2162 + static inline size_t vlan_qos_map_size(unsigned int n)
2163 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2164 +index 55253ba0681f..d2b1c39c4223 100644
2165 +--- a/net/ipv4/tcp_input.c
2166 ++++ b/net/ipv4/tcp_input.c
2167 +@@ -1750,8 +1750,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
2168 + }
2169 +
2170 + /* Ignore very old stuff early */
2171 +- if (!after(sp[used_sacks].end_seq, prior_snd_una))
2172 ++ if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
2173 ++ if (i == 0)
2174 ++ first_sack_index = -1;
2175 + continue;
2176 ++ }
2177 +
2178 + used_sacks++;
2179 + }
2180 +diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
2181 +index 204a8351efff..c29170e767a8 100644
2182 +--- a/net/llc/llc_station.c
2183 ++++ b/net/llc/llc_station.c
2184 +@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
2185 + return LLC_PDU_IS_CMD(pdu) && /* command PDU */
2186 + LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
2187 + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
2188 +- !pdu->dsap ? 0 : 1; /* NULL DSAP value */
2189 ++ !pdu->dsap; /* NULL DSAP value */
2190 + }
2191 +
2192 + static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
2193 +@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
2194 + return LLC_PDU_IS_CMD(pdu) && /* command PDU */
2195 + LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
2196 + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
2197 +- !pdu->dsap ? 0 : 1; /* NULL DSAP */
2198 ++ !pdu->dsap; /* NULL DSAP */
2199 + }
2200 +
2201 + static int llc_station_ac_send_xid_r(struct sk_buff *skb)
2202 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
2203 +index c781c9a1a697..39a32edaa92c 100644
2204 +--- a/net/netfilter/nf_conntrack_netlink.c
2205 ++++ b/net/netfilter/nf_conntrack_netlink.c
2206 +@@ -3422,6 +3422,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2207 +
2208 + list_for_each_entry(net, net_exit_list, exit_list)
2209 + ctnetlink_net_exit(net);
2210 ++
2211 ++ /* wait for other cpus until they are done with ctnl_notifiers */
2212 ++ synchronize_rcu();
2213 + }
2214 +
2215 + static struct pernet_operations ctnetlink_net_ops = {
2216 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2217 +index 7ef126489d4e..91490446ebb4 100644
2218 +--- a/net/netfilter/nf_tables_api.c
2219 ++++ b/net/netfilter/nf_tables_api.c
2220 +@@ -3917,14 +3917,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
2221 + if (nla[NFTA_SET_ELEM_DATA] == NULL &&
2222 + !(flags & NFT_SET_ELEM_INTERVAL_END))
2223 + return -EINVAL;
2224 +- if (nla[NFTA_SET_ELEM_DATA] != NULL &&
2225 +- flags & NFT_SET_ELEM_INTERVAL_END)
2226 +- return -EINVAL;
2227 + } else {
2228 + if (nla[NFTA_SET_ELEM_DATA] != NULL)
2229 + return -EINVAL;
2230 + }
2231 +
2232 ++ if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
2233 ++ (nla[NFTA_SET_ELEM_DATA] ||
2234 ++ nla[NFTA_SET_ELEM_OBJREF] ||
2235 ++ nla[NFTA_SET_ELEM_TIMEOUT] ||
2236 ++ nla[NFTA_SET_ELEM_EXPIRATION] ||
2237 ++ nla[NFTA_SET_ELEM_USERDATA] ||
2238 ++ nla[NFTA_SET_ELEM_EXPR]))
2239 ++ return -EINVAL;
2240 ++
2241 + timeout = 0;
2242 + if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
2243 + if (!(set->flags & NFT_SET_TIMEOUT))
2244 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
2245 +index 99a2e55b01cf..e31b4288f32c 100644
2246 +--- a/net/rfkill/core.c
2247 ++++ b/net/rfkill/core.c
2248 +@@ -998,10 +998,13 @@ static void rfkill_sync_work(struct work_struct *work)
2249 + int __must_check rfkill_register(struct rfkill *rfkill)
2250 + {
2251 + static unsigned long rfkill_no;
2252 +- struct device *dev = &rfkill->dev;
2253 ++ struct device *dev;
2254 + int error;
2255 +
2256 +- BUG_ON(!rfkill);
2257 ++ if (!rfkill)
2258 ++ return -EINVAL;
2259 ++
2260 ++ dev = &rfkill->dev;
2261 +
2262 + mutex_lock(&rfkill_global_mutex);
2263 +
2264 +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
2265 +index f50eb87cfe79..7a944f508cae 100644
2266 +--- a/net/sched/sch_fq.c
2267 ++++ b/net/sched/sch_fq.c
2268 +@@ -734,7 +734,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
2269 + if (tb[TCA_FQ_QUANTUM]) {
2270 + u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
2271 +
2272 +- if (quantum > 0)
2273 ++ if (quantum > 0 && quantum <= (1 << 20))
2274 + q->quantum = quantum;
2275 + else
2276 + err = -EINVAL;
2277 +diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
2278 +index c60777351de1..ff6bc7cf6cbd 100644
2279 +--- a/net/sched/sch_prio.c
2280 ++++ b/net/sched/sch_prio.c
2281 +@@ -244,8 +244,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2282 + struct prio_sched_data *q = qdisc_priv(sch);
2283 + unsigned long band = arg - 1;
2284 +
2285 +- if (new == NULL)
2286 +- new = &noop_qdisc;
2287 ++ if (!new) {
2288 ++ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
2289 ++ TC_H_MAKE(sch->handle, arg));
2290 ++ if (!new)
2291 ++ new = &noop_qdisc;
2292 ++ else
2293 ++ qdisc_hash_add(new, true);
2294 ++ }
2295 +
2296 + *old = qdisc_replace(sch, new, &q->queues[band]);
2297 + return 0;
2298 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
2299 +index bf0c61adb09c..482bb0a5d4d3 100644
2300 +--- a/net/sctp/sm_sideeffect.c
2301 ++++ b/net/sctp/sm_sideeffect.c
2302 +@@ -1359,8 +1359,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
2303 + /* Generate an INIT ACK chunk. */
2304 + new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
2305 + 0);
2306 +- if (!new_obj)
2307 +- goto nomem;
2308 ++ if (!new_obj) {
2309 ++ error = -ENOMEM;
2310 ++ break;
2311 ++ }
2312 +
2313 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
2314 + SCTP_CHUNK(new_obj));
2315 +@@ -1382,7 +1384,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
2316 + if (!new_obj) {
2317 + if (cmd->obj.chunk)
2318 + sctp_chunk_free(cmd->obj.chunk);
2319 +- goto nomem;
2320 ++ error = -ENOMEM;
2321 ++ break;
2322 + }
2323 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
2324 + SCTP_CHUNK(new_obj));
2325 +@@ -1429,8 +1432,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
2326 +
2327 + /* Generate a SHUTDOWN chunk. */
2328 + new_obj = sctp_make_shutdown(asoc, chunk);
2329 +- if (!new_obj)
2330 +- goto nomem;
2331 ++ if (!new_obj) {
2332 ++ error = -ENOMEM;
2333 ++ break;
2334 ++ }
2335 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
2336 + SCTP_CHUNK(new_obj));
2337 + break;
2338 +@@ -1760,11 +1765,17 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
2339 + break;
2340 + }
2341 +
2342 +- if (error)
2343 ++ if (error) {
2344 ++ cmd = sctp_next_cmd(commands);
2345 ++ while (cmd) {
2346 ++ if (cmd->verb == SCTP_CMD_REPLY)
2347 ++ sctp_chunk_free(cmd->obj.chunk);
2348 ++ cmd = sctp_next_cmd(commands);
2349 ++ }
2350 + break;
2351 ++ }
2352 + }
2353 +
2354 +-out:
2355 + /* If this is in response to a received chunk, wait until
2356 + * we are done with the packet to open the queue so that we don't
2357 + * send multiple packets in response to a single request.
2358 +@@ -1779,8 +1790,5 @@ out:
2359 + sp->data_ready_signalled = 0;
2360 +
2361 + return error;
2362 +-nomem:
2363 +- error = -ENOMEM;
2364 +- goto out;
2365 + }
2366 +
2367 +diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
2368 +index 9149c524d279..8833aacb9c8c 100644
2369 +--- a/samples/bpf/syscall_tp_kern.c
2370 ++++ b/samples/bpf/syscall_tp_kern.c
2371 +@@ -50,13 +50,27 @@ static __always_inline void count(void *map)
2372 + SEC("tracepoint/syscalls/sys_enter_open")
2373 + int trace_enter_open(struct syscalls_enter_open_args *ctx)
2374 + {
2375 +- count((void *)&enter_open_map);
2376 ++ count(&enter_open_map);
2377 ++ return 0;
2378 ++}
2379 ++
2380 ++SEC("tracepoint/syscalls/sys_enter_openat")
2381 ++int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
2382 ++{
2383 ++ count(&enter_open_map);
2384 + return 0;
2385 + }
2386 +
2387 + SEC("tracepoint/syscalls/sys_exit_open")
2388 + int trace_enter_exit(struct syscalls_exit_open_args *ctx)
2389 + {
2390 +- count((void *)&exit_open_map);
2391 ++ count(&exit_open_map);
2392 ++ return 0;
2393 ++}
2394 ++
2395 ++SEC("tracepoint/syscalls/sys_exit_openat")
2396 ++int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
2397 ++{
2398 ++ count(&exit_open_map);
2399 + return 0;
2400 + }
2401 +diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
2402 +index c7d525e5696e..8c7445874662 100644
2403 +--- a/samples/bpf/trace_event_user.c
2404 ++++ b/samples/bpf/trace_event_user.c
2405 +@@ -34,9 +34,9 @@ static void print_ksym(__u64 addr)
2406 + return;
2407 + sym = ksym_search(addr);
2408 + printf("%s;", sym->name);
2409 +- if (!strcmp(sym->name, "sys_read"))
2410 ++ if (!strstr(sym->name, "sys_read"))
2411 + sys_read_seen = true;
2412 +- else if (!strcmp(sym->name, "sys_write"))
2413 ++ else if (!strstr(sym->name, "sys_write"))
2414 + sys_write_seen = true;
2415 + }
2416 +
2417 +diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
2418 +index ed29bad1f03a..96420b620963 100644
2419 +--- a/scripts/kconfig/expr.c
2420 ++++ b/scripts/kconfig/expr.c
2421 +@@ -201,6 +201,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
2422 + {
2423 + int res, old_count;
2424 +
2425 ++ /*
2426 ++ * A NULL expr is taken to be yes, but there's also a different way to
2427 ++ * represent yes. expr_is_yes() checks for either representation.
2428 ++ */
2429 ++ if (!e1 || !e2)
2430 ++ return expr_is_yes(e1) && expr_is_yes(e2);
2431 ++
2432 + if (e1->type != e2->type)
2433 + return 0;
2434 + switch (e1->type) {
2435 +diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
2436 +index fd2731d171dd..0e8008d38161 100644
2437 +--- a/sound/soc/codecs/wm8962.c
2438 ++++ b/sound/soc/codecs/wm8962.c
2439 +@@ -2791,7 +2791,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
2440 +
2441 + if (target % Fref == 0) {
2442 + fll_div->theta = 0;
2443 +- fll_div->lambda = 0;
2444 ++ fll_div->lambda = 1;
2445 + } else {
2446 + gcd_fll = gcd(target, fratio * Fref);
2447 +
2448 +@@ -2861,7 +2861,7 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
2449 + return -EINVAL;
2450 + }
2451 +
2452 +- if (fll_div.theta || fll_div.lambda)
2453 ++ if (fll_div.theta)
2454 + fll1 |= WM8962_FLL_FRAC;
2455 +
2456 + /* Stop the FLL while we reconfigure */
2457 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
2458 +index 2d5cf263515b..72301bcad3bd 100644
2459 +--- a/sound/soc/soc-topology.c
2460 ++++ b/sound/soc/soc-topology.c
2461 +@@ -1921,6 +1921,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
2462 + int count = hdr->count;
2463 + int i;
2464 + bool abi_match;
2465 ++ int ret;
2466 +
2467 + if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
2468 + return 0;
2469 +@@ -1957,7 +1958,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
2470 + }
2471 +
2472 + /* create the FE DAIs and DAI links */
2473 +- soc_tplg_pcm_create(tplg, _pcm);
2474 ++ ret = soc_tplg_pcm_create(tplg, _pcm);
2475 ++ if (ret < 0) {
2476 ++ if (!abi_match)
2477 ++ kfree(_pcm);
2478 ++ return ret;
2479 ++ }
2480 +
2481 + /* offset by version-specific struct size and
2482 + * real priv data size
2483 +diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
2484 +index 8107f060fa84..a0ac01c647f5 100644
2485 +--- a/tools/lib/traceevent/Makefile
2486 ++++ b/tools/lib/traceevent/Makefile
2487 +@@ -115,6 +115,7 @@ EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
2488 +
2489 + LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
2490 + LIB_INSTALL = libtraceevent.a libtraceevent.so*
2491 ++LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL))
2492 +
2493 + INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
2494 +
2495 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
2496 +index 913539aea645..9babb3fef8e2 100644
2497 +--- a/tools/testing/selftests/bpf/test_verifier.c
2498 ++++ b/tools/testing/selftests/bpf/test_verifier.c
2499 +@@ -7281,7 +7281,7 @@ static struct bpf_test tests[] = {
2500 + offsetof(struct __sk_buff, mark)),
2501 + BPF_EXIT_INSN(),
2502 + },
2503 +- .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
2504 ++ .errstr = "dereference of modified ctx ptr",
2505 + .result = REJECT,
2506 + .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2507 + },
2508 +@@ -7944,6 +7944,62 @@ static struct bpf_test tests[] = {
2509 + .errstr = "BPF_XADD stores into R2 packet",
2510 + .prog_type = BPF_PROG_TYPE_XDP,
2511 + },
2512 ++ {
2513 ++ "pass unmodified ctx pointer to helper",
2514 ++ .insns = {
2515 ++ BPF_MOV64_IMM(BPF_REG_2, 0),
2516 ++ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2517 ++ BPF_FUNC_csum_update),
2518 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
2519 ++ BPF_EXIT_INSN(),
2520 ++ },
2521 ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2522 ++ .result = ACCEPT,
2523 ++ },
2524 ++ {
2525 ++ "pass modified ctx pointer to helper, 1",
2526 ++ .insns = {
2527 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
2528 ++ BPF_MOV64_IMM(BPF_REG_2, 0),
2529 ++ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2530 ++ BPF_FUNC_csum_update),
2531 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
2532 ++ BPF_EXIT_INSN(),
2533 ++ },
2534 ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2535 ++ .result = REJECT,
2536 ++ .errstr = "dereference of modified ctx ptr",
2537 ++ },
2538 ++ {
2539 ++ "pass modified ctx pointer to helper, 2",
2540 ++ .insns = {
2541 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
2542 ++ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2543 ++ BPF_FUNC_get_socket_cookie),
2544 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
2545 ++ BPF_EXIT_INSN(),
2546 ++ },
2547 ++ .result_unpriv = REJECT,
2548 ++ .result = REJECT,
2549 ++ .errstr_unpriv = "dereference of modified ctx ptr",
2550 ++ .errstr = "dereference of modified ctx ptr",
2551 ++ },
2552 ++ {
2553 ++ "pass modified ctx pointer to helper, 3",
2554 ++ .insns = {
2555 ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
2556 ++ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
2557 ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
2558 ++ BPF_MOV64_IMM(BPF_REG_2, 0),
2559 ++ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2560 ++ BPF_FUNC_csum_update),
2561 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
2562 ++ BPF_EXIT_INSN(),
2563 ++ },
2564 ++ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2565 ++ .result = REJECT,
2566 ++ .errstr = "variable ctx access var_off=(0x0; 0x4)",
2567 ++ },
2568 + };
2569 +
2570 + static int probe_filter_length(const struct bpf_insn *fp)