
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:23
Message-Id: 1572357542.aaea871164fc7499fac550db0335462b1d12b864.mpagano@gentoo
1 commit: aaea871164fc7499fac550db0335462b1d12b864
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Sep 21 16:30:24 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Oct 29 13:59:02 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aaea8711
7
8 Linux patch 4.14.146
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1145_linux-4.14.146.patch | 1614 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1618 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 38ce5d6..a4f3b29 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -623,6 +623,10 @@ Patch: 1144_linux-4.14.145.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.145
23
24 +Patch: 1145_linux-4.14.146.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.146
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1145_linux-4.14.146.patch b/1145_linux-4.14.146.patch
33 new file mode 100644
34 index 0000000..3e02615
35 --- /dev/null
36 +++ b/1145_linux-4.14.146.patch
37 @@ -0,0 +1,1614 @@
38 +diff --git a/Makefile b/Makefile
39 +index ce521c48b35e..ad923d5eae1e 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 145
47 ++SUBLEVEL = 146
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
52 +index 28ebb4eb884a..214b9e6de2c3 100644
53 +--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
54 ++++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
55 +@@ -32,7 +32,7 @@
56 + *
57 + * Datamanual Revisions:
58 + *
59 +- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
60 ++ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
61 + * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
62 + *
63 + */
64 +@@ -229,45 +229,45 @@
65 +
66 + mmc3_pins_default: mmc3_pins_default {
67 + pinctrl-single,pins = <
68 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
69 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
70 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
71 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
72 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
73 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
74 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
75 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
76 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
77 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
78 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
79 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
80 + >;
81 + };
82 +
83 + mmc3_pins_hs: mmc3_pins_hs {
84 + pinctrl-single,pins = <
85 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
86 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
87 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
88 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
89 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
90 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
91 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
92 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
93 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
94 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
95 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
96 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
97 + >;
98 + };
99 +
100 + mmc3_pins_sdr12: mmc3_pins_sdr12 {
101 + pinctrl-single,pins = <
102 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
103 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
104 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
105 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
106 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
107 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
108 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
109 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
110 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
111 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
112 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
113 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
114 + >;
115 + };
116 +
117 + mmc3_pins_sdr25: mmc3_pins_sdr25 {
118 + pinctrl-single,pins = <
119 +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
120 +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
121 +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
122 +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
123 +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
124 +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
125 ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
126 ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
127 ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
128 ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
129 ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
130 ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
131 + >;
132 + };
133 +
134 +diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
135 +index cf65ab8bb004..e5dcbda20129 100644
136 +--- a/arch/arm/mach-omap2/omap4-common.c
137 ++++ b/arch/arm/mach-omap2/omap4-common.c
138 +@@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
139 + struct device_node *np;
140 + struct gen_pool *sram_pool;
141 +
142 ++ if (!soc_is_omap44xx() && !soc_is_omap54xx())
143 ++ return 0;
144 ++
145 + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
146 + if (!np)
147 + pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
148 +diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
149 +index 2f4f7002f38d..87b0c38b7ca5 100644
150 +--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
151 ++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
152 +@@ -389,7 +389,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
153 + static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
154 + .rev_offs = 0x0,
155 + .sysc_offs = 0x4,
156 +- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
157 ++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
158 ++ SYSC_HAS_RESET_STATUS,
159 + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
160 + .sysc_fields = &omap_hwmod_sysc_type2,
161 + };
162 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
163 +index defb7fc26428..27a40101dd3a 100644
164 +--- a/arch/arm/mm/init.c
165 ++++ b/arch/arm/mm/init.c
166 +@@ -195,6 +195,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
167 + #ifdef CONFIG_HAVE_ARCH_PFN_VALID
168 + int pfn_valid(unsigned long pfn)
169 + {
170 ++ phys_addr_t addr = __pfn_to_phys(pfn);
171 ++
172 ++ if (__phys_to_pfn(addr) != pfn)
173 ++ return 0;
174 ++
175 + return memblock_is_map_memory(__pfn_to_phys(pfn));
176 + }
177 + EXPORT_SYMBOL(pfn_valid);
178 +@@ -722,7 +727,8 @@ static void update_sections_early(struct section_perm perms[], int n)
179 + if (t->flags & PF_KTHREAD)
180 + continue;
181 + for_each_thread(t, s)
182 +- set_section_perms(perms, n, true, s->mm);
183 ++ if (s->mm)
184 ++ set_section_perms(perms, n, true, s->mm);
185 + }
186 + set_section_perms(perms, n, true, current->active_mm);
187 + set_section_perms(perms, n, true, &init_mm);
188 +diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
189 +index 17ae5c15a9e0..ba02305f121e 100644
190 +--- a/arch/powerpc/mm/pgtable-radix.c
191 ++++ b/arch/powerpc/mm/pgtable-radix.c
192 +@@ -442,14 +442,6 @@ void __init radix__early_init_devtree(void)
193 + mmu_psize_defs[MMU_PAGE_64K].shift = 16;
194 + mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
195 + found:
196 +-#ifdef CONFIG_SPARSEMEM_VMEMMAP
197 +- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
198 +- /*
199 +- * map vmemmap using 2M if available
200 +- */
201 +- mmu_vmemmap_psize = MMU_PAGE_2M;
202 +- }
203 +-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
204 + return;
205 + }
206 +
207 +@@ -527,7 +519,13 @@ void __init radix__early_init_mmu(void)
208 +
209 + #ifdef CONFIG_SPARSEMEM_VMEMMAP
210 + /* vmemmap mapping */
211 +- mmu_vmemmap_psize = mmu_virtual_psize;
212 ++ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
213 ++ /*
214 ++ * map vmemmap using 2M if available
215 ++ */
216 ++ mmu_vmemmap_psize = MMU_PAGE_2M;
217 ++ } else
218 ++ mmu_vmemmap_psize = mmu_virtual_psize;
219 + #endif
220 + /*
221 + * initialize page table size
222 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
223 +index bc9431aace05..b8bd84104843 100644
224 +--- a/arch/s390/net/bpf_jit_comp.c
225 ++++ b/arch/s390/net/bpf_jit_comp.c
226 +@@ -882,7 +882,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
227 + break;
228 + case BPF_ALU64 | BPF_NEG: /* dst = -dst */
229 + /* lcgr %dst,%dst */
230 +- EMIT4(0xb9130000, dst_reg, dst_reg);
231 ++ EMIT4(0xb9030000, dst_reg, dst_reg);
232 + break;
233 + /*
234 + * BPF_FROM_BE/LE
235 +@@ -1063,8 +1063,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
236 + /* llgf %w1,map.max_entries(%b2) */
237 + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
238 + offsetof(struct bpf_array, map.max_entries));
239 +- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
240 +- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
241 ++ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
242 ++ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
243 + REG_W1, 0, 0xa);
244 +
245 + /*
246 +@@ -1090,8 +1090,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
247 + * goto out;
248 + */
249 +
250 +- /* sllg %r1,%b3,3: %r1 = index * 8 */
251 +- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
252 ++ /* llgfr %r1,%b3: %r1 = (u32) index */
253 ++ EMIT4(0xb9160000, REG_1, BPF_REG_3);
254 ++ /* sllg %r1,%r1,3: %r1 *= 8 */
255 ++ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
256 + /* lg %r1,prog(%b2,%r1) */
257 + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
258 + REG_1, offsetof(struct bpf_array, ptrs));
259 +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
260 +index 8c51844694e2..7a86fbc07ddc 100644
261 +--- a/arch/x86/events/amd/ibs.c
262 ++++ b/arch/x86/events/amd/ibs.c
263 +@@ -672,10 +672,17 @@ fail:
264 +
265 + throttle = perf_event_overflow(event, &data, &regs);
266 + out:
267 +- if (throttle)
268 ++ if (throttle) {
269 + perf_ibs_stop(event, 0);
270 +- else
271 +- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
272 ++ } else {
273 ++ period >>= 4;
274 ++
275 ++ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
276 ++ (*config & IBS_OP_CNT_CTL))
277 ++ period |= *config & IBS_OP_CUR_CNT_RAND;
278 ++
279 ++ perf_ibs_enable_event(perf_ibs, hwc, period);
280 ++ }
281 +
282 + perf_event_update_userpage(event);
283 +
284 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
285 +index d44bb077c6cf..4a60ed8c4413 100644
286 +--- a/arch/x86/events/intel/core.c
287 ++++ b/arch/x86/events/intel/core.c
288 +@@ -3297,6 +3297,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
289 + return left;
290 + }
291 +
292 ++static u64 nhm_limit_period(struct perf_event *event, u64 left)
293 ++{
294 ++ return max(left, 32ULL);
295 ++}
296 ++
297 + PMU_FORMAT_ATTR(event, "config:0-7" );
298 + PMU_FORMAT_ATTR(umask, "config:8-15" );
299 + PMU_FORMAT_ATTR(edge, "config:18" );
300 +@@ -4092,6 +4097,7 @@ __init int intel_pmu_init(void)
301 + x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
302 + x86_pmu.enable_all = intel_pmu_nhm_enable_all;
303 + x86_pmu.extra_regs = intel_nehalem_extra_regs;
304 ++ x86_pmu.limit_period = nhm_limit_period;
305 +
306 + x86_pmu.cpu_events = nhm_events_attrs;
307 +
308 +diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
309 +index 56c9ebac946f..47718fff0b79 100644
310 +--- a/arch/x86/hyperv/mmu.c
311 ++++ b/arch/x86/hyperv/mmu.c
312 +@@ -57,12 +57,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
313 + * Lower 12 bits encode the number of additional
314 + * pages to flush (in addition to the 'cur' page).
315 + */
316 +- if (diff >= HV_TLB_FLUSH_UNIT)
317 ++ if (diff >= HV_TLB_FLUSH_UNIT) {
318 + gva_list[gva_n] |= ~PAGE_MASK;
319 +- else if (diff)
320 ++ cur += HV_TLB_FLUSH_UNIT;
321 ++ } else if (diff) {
322 + gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
323 ++ cur = end;
324 ++ }
325 +
326 +- cur += HV_TLB_FLUSH_UNIT;
327 + gva_n++;
328 +
329 + } while (cur < end);
330 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
331 +index 78241b736f2a..f6c4915a863e 100644
332 +--- a/arch/x86/include/asm/perf_event.h
333 ++++ b/arch/x86/include/asm/perf_event.h
334 +@@ -209,16 +209,20 @@ struct x86_pmu_capability {
335 + #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
336 + #define IBSCTL_LVT_OFFSET_MASK 0x0F
337 +
338 +-/* ibs fetch bits/masks */
339 ++/* IBS fetch bits/masks */
340 + #define IBS_FETCH_RAND_EN (1ULL<<57)
341 + #define IBS_FETCH_VAL (1ULL<<49)
342 + #define IBS_FETCH_ENABLE (1ULL<<48)
343 + #define IBS_FETCH_CNT 0xFFFF0000ULL
344 + #define IBS_FETCH_MAX_CNT 0x0000FFFFULL
345 +
346 +-/* ibs op bits/masks */
347 +-/* lower 4 bits of the current count are ignored: */
348 +-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
349 ++/*
350 ++ * IBS op bits/masks
351 ++ * The lower 7 bits of the current count are random bits
352 ++ * preloaded by hardware and ignored in software
353 ++ */
354 ++#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
355 ++#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
356 + #define IBS_OP_CNT_CTL (1ULL<<19)
357 + #define IBS_OP_VAL (1ULL<<18)
358 + #define IBS_OP_ENABLE (1ULL<<17)
359 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
360 +index 4111edb3188e..971830341061 100644
361 +--- a/arch/x86/include/asm/uaccess.h
362 ++++ b/arch/x86/include/asm/uaccess.h
363 +@@ -451,8 +451,10 @@ do { \
364 + ({ \
365 + int __gu_err; \
366 + __inttype(*(ptr)) __gu_val; \
367 ++ __typeof__(ptr) __gu_ptr = (ptr); \
368 ++ __typeof__(size) __gu_size = (size); \
369 + __uaccess_begin_nospec(); \
370 +- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
371 ++ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
372 + __uaccess_end(); \
373 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
374 + __builtin_expect(__gu_err, 0); \
375 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
376 +index 96a8a68f9c79..566b7bc5deaa 100644
377 +--- a/arch/x86/kernel/apic/io_apic.c
378 ++++ b/arch/x86/kernel/apic/io_apic.c
379 +@@ -2342,7 +2342,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
380 + * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
381 + * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
382 + */
383 +- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
384 ++ if (!ioapic_initialized)
385 ++ return gsi_top;
386 ++ /*
387 ++ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
388 ++ * updated. So simply return @from if ioapic_dynirq_base == 0.
389 ++ */
390 ++ return ioapic_dynirq_base ? : from;
391 + }
392 +
393 + #ifdef CONFIG_X86_32
394 +diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
395 +index 2e2efa577437..8c37294f1d1e 100644
396 +--- a/drivers/atm/Kconfig
397 ++++ b/drivers/atm/Kconfig
398 +@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
399 + make the card work).
400 +
401 + config ATM_NICSTAR_USE_IDT77105
402 +- bool "Use IDT77015 PHY driver (25Mbps)"
403 ++ bool "Use IDT77105 PHY driver (25Mbps)"
404 + depends on ATM_NICSTAR
405 + help
406 + Support for the PHYsical layer chip in ForeRunner LE25 cards. In
407 +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
408 +index a9d1430fc5ee..5f1aa3197244 100644
409 +--- a/drivers/block/floppy.c
410 ++++ b/drivers/block/floppy.c
411 +@@ -3786,7 +3786,7 @@ static int compat_getdrvprm(int drive,
412 + v.native_format = UDP->native_format;
413 + mutex_unlock(&floppy_mutex);
414 +
415 +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
416 ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
417 + return -EFAULT;
418 + return 0;
419 + }
420 +@@ -3822,7 +3822,7 @@ static int compat_getdrvstat(int drive, bool poll,
421 + v.bufblocks = UDRS->bufblocks;
422 + mutex_unlock(&floppy_mutex);
423 +
424 +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
425 ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
426 + return -EFAULT;
427 + return 0;
428 + Eintr:
429 +diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
430 +index 8c1665c8fe33..14b560facf77 100644
431 +--- a/drivers/dma/omap-dma.c
432 ++++ b/drivers/dma/omap-dma.c
433 +@@ -1534,8 +1534,10 @@ static int omap_dma_probe(struct platform_device *pdev)
434 +
435 + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
436 + IRQF_SHARED, "omap-dma-engine", od);
437 +- if (rc)
438 ++ if (rc) {
439 ++ omap_dma_free(od);
440 + return rc;
441 ++ }
442 + }
443 +
444 + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
445 +diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
446 +index 9272b173c746..6574cb5a12fe 100644
447 +--- a/drivers/dma/ti-dma-crossbar.c
448 ++++ b/drivers/dma/ti-dma-crossbar.c
449 +@@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
450 +
451 + ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
452 + nelm * 2);
453 +- if (ret)
454 ++ if (ret) {
455 ++ kfree(rsv_events);
456 + return ret;
457 ++ }
458 +
459 + for (i = 0; i < nelm; i++) {
460 + ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
461 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
462 +index 9c0f7cf920af..5eb03a5d79dc 100644
463 +--- a/drivers/firmware/google/vpd.c
464 ++++ b/drivers/firmware/google/vpd.c
465 +@@ -100,8 +100,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
466 + return VPD_OK;
467 + }
468 +
469 +-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
470 +- const u8 *value, s32 value_len,
471 ++static int vpd_section_attrib_add(const u8 *key, u32 key_len,
472 ++ const u8 *value, u32 value_len,
473 + void *arg)
474 + {
475 + int ret;
476 +diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
477 +index 943acaa8aa76..e75abe9fa122 100644
478 +--- a/drivers/firmware/google/vpd_decode.c
479 ++++ b/drivers/firmware/google/vpd_decode.c
480 +@@ -19,8 +19,8 @@
481 +
482 + #include "vpd_decode.h"
483 +
484 +-static int vpd_decode_len(const s32 max_len, const u8 *in,
485 +- s32 *length, s32 *decoded_len)
486 ++static int vpd_decode_len(const u32 max_len, const u8 *in,
487 ++ u32 *length, u32 *decoded_len)
488 + {
489 + u8 more;
490 + int i = 0;
491 +@@ -40,18 +40,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
492 + } while (more);
493 +
494 + *decoded_len = i;
495 ++ return VPD_OK;
496 ++}
497 ++
498 ++static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
499 ++ u32 *_consumed, const u8 **entry, u32 *entry_len)
500 ++{
501 ++ u32 decoded_len;
502 ++ u32 consumed = *_consumed;
503 ++
504 ++ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
505 ++ entry_len, &decoded_len) != VPD_OK)
506 ++ return VPD_FAIL;
507 ++ if (max_len - consumed < decoded_len)
508 ++ return VPD_FAIL;
509 ++
510 ++ consumed += decoded_len;
511 ++ *entry = input_buf + consumed;
512 ++
513 ++ /* entry_len is untrusted data and must be checked again. */
514 ++ if (max_len - consumed < *entry_len)
515 ++ return VPD_FAIL;
516 +
517 ++ consumed += decoded_len;
518 ++ *_consumed = consumed;
519 + return VPD_OK;
520 + }
521 +
522 +-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
523 ++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
524 + vpd_decode_callback callback, void *callback_arg)
525 + {
526 + int type;
527 +- int res;
528 +- s32 key_len;
529 +- s32 value_len;
530 +- s32 decoded_len;
531 ++ u32 key_len;
532 ++ u32 value_len;
533 + const u8 *key;
534 + const u8 *value;
535 +
536 +@@ -66,26 +87,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
537 + case VPD_TYPE_STRING:
538 + (*consumed)++;
539 +
540 +- /* key */
541 +- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
542 +- &key_len, &decoded_len);
543 +- if (res != VPD_OK || *consumed + decoded_len >= max_len)
544 ++ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
545 ++ &key_len) != VPD_OK)
546 + return VPD_FAIL;
547 +
548 +- *consumed += decoded_len;
549 +- key = &input_buf[*consumed];
550 +- *consumed += key_len;
551 +-
552 +- /* value */
553 +- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
554 +- &value_len, &decoded_len);
555 +- if (res != VPD_OK || *consumed + decoded_len > max_len)
556 ++ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
557 ++ &value_len) != VPD_OK)
558 + return VPD_FAIL;
559 +
560 +- *consumed += decoded_len;
561 +- value = &input_buf[*consumed];
562 +- *consumed += value_len;
563 +-
564 + if (type == VPD_TYPE_STRING)
565 + return callback(key, key_len, value, value_len,
566 + callback_arg);
567 +diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
568 +index be3d62c5ca2f..e921456b8e78 100644
569 +--- a/drivers/firmware/google/vpd_decode.h
570 ++++ b/drivers/firmware/google/vpd_decode.h
571 +@@ -33,8 +33,8 @@ enum {
572 + };
573 +
574 + /* Callback for vpd_decode_string to invoke. */
575 +-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
576 +- const u8 *value, s32 value_len,
577 ++typedef int vpd_decode_callback(const u8 *key, u32 key_len,
578 ++ const u8 *value, u32 value_len,
579 + void *arg);
580 +
581 + /*
582 +@@ -52,7 +52,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
583 + * If one entry is successfully decoded, sends it to callback and returns the
584 + * result.
585 + */
586 +-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
587 ++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
588 + vpd_decode_callback callback, void *callback_arg);
589 +
590 + #endif /* __VPD_DECODE_H */
591 +diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
592 +index 06d212a3d49d..19b1cf8a8252 100644
593 +--- a/drivers/fpga/altera-ps-spi.c
594 ++++ b/drivers/fpga/altera-ps-spi.c
595 +@@ -207,7 +207,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
596 + return -EIO;
597 + }
598 +
599 +- if (!IS_ERR(conf->confd)) {
600 ++ if (conf->confd) {
601 + if (!gpiod_get_raw_value_cansleep(conf->confd)) {
602 + dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
603 + return -EIO;
604 +@@ -263,10 +263,13 @@ static int altera_ps_probe(struct spi_device *spi)
605 + return PTR_ERR(conf->status);
606 + }
607 +
608 +- conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
609 ++ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
610 + if (IS_ERR(conf->confd)) {
611 +- dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
612 +- PTR_ERR(conf->confd));
613 ++ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
614 ++ PTR_ERR(conf->confd));
615 ++ return PTR_ERR(conf->confd);
616 ++ } else if (!conf->confd) {
617 ++ dev_warn(&spi->dev, "Not using confd gpio");
618 + }
619 +
620 + /* Register manager with unique name */
621 +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
622 +index ee87f11e8cd5..c4d4464c7b21 100644
623 +--- a/drivers/hid/wacom_sys.c
624 ++++ b/drivers/hid/wacom_sys.c
625 +@@ -125,14 +125,16 @@ static void wacom_feature_mapping(struct hid_device *hdev,
626 + /* leave touch_max as is if predefined */
627 + if (!features->touch_max) {
628 + /* read manually */
629 +- data = kzalloc(2, GFP_KERNEL);
630 ++ n = hid_report_len(field->report);
631 ++ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
632 + if (!data)
633 + break;
634 + data[0] = field->report->id;
635 + ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
636 +- data, 2, WAC_CMD_RETRIES);
637 +- if (ret == 2) {
638 +- features->touch_max = data[1];
639 ++ data, n, WAC_CMD_RETRIES);
640 ++ if (ret == n) {
641 ++ ret = hid_report_raw_event(hdev,
642 ++ HID_FEATURE_REPORT, data, n, 0);
643 + } else {
644 + features->touch_max = 16;
645 + hid_warn(hdev, "wacom_feature_mapping: "
646 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
647 +index 2e593874f5e0..2e0c4df6ad08 100644
648 +--- a/drivers/hid/wacom_wac.c
649 ++++ b/drivers/hid/wacom_wac.c
650 +@@ -2428,6 +2428,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
651 + struct wacom *wacom = hid_get_drvdata(hdev);
652 + struct wacom_wac *wacom_wac = &wacom->wacom_wac;
653 + unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
654 ++ struct wacom_features *features = &wacom->wacom_wac.features;
655 +
656 + switch (equivalent_usage) {
657 + case HID_GD_X:
658 +@@ -2448,6 +2449,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
659 + case HID_DG_TIPSWITCH:
660 + wacom_wac->hid_data.tipswitch = value;
661 + break;
662 ++ case HID_DG_CONTACTMAX:
663 ++ features->touch_max = value;
664 ++ return;
665 + }
666 +
667 +
668 +diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
669 +index ea9578ab19a1..fccf936f4b9b 100644
670 +--- a/drivers/i2c/busses/i2c-designware-slave.c
671 ++++ b/drivers/i2c/busses/i2c-designware-slave.c
672 +@@ -206,6 +206,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
673 +
674 + dev->disable_int(dev);
675 + dev->disable(dev);
676 ++ synchronize_irq(dev->irq);
677 + dev->slave = NULL;
678 + pm_runtime_put(dev->dev);
679 +
680 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
681 +index ad89ba143a0e..73e5d485d849 100644
682 +--- a/drivers/input/mouse/elan_i2c_core.c
683 ++++ b/drivers/input/mouse/elan_i2c_core.c
684 +@@ -1274,7 +1274,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
685 + { "ELAN0618", 0 },
686 + { "ELAN0619", 0 },
687 + { "ELAN061A", 0 },
688 +- { "ELAN061B", 0 },
689 ++/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
690 + { "ELAN061C", 0 },
691 + { "ELAN061D", 0 },
692 + { "ELAN061E", 0 },
693 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
694 +index 684f7cdd814b..a1174e61daf4 100644
695 +--- a/drivers/iommu/amd_iommu.c
696 ++++ b/drivers/iommu/amd_iommu.c
697 +@@ -1150,6 +1150,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
698 + iommu_completion_wait(iommu);
699 + }
700 +
701 ++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
702 ++{
703 ++ struct iommu_cmd cmd;
704 ++
705 ++ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
706 ++ dom_id, 1);
707 ++ iommu_queue_command(iommu, &cmd);
708 ++
709 ++ iommu_completion_wait(iommu);
710 ++}
711 ++
712 + static void amd_iommu_flush_all(struct amd_iommu *iommu)
713 + {
714 + struct iommu_cmd cmd;
715 +@@ -1326,18 +1337,21 @@ static void domain_flush_devices(struct protection_domain *domain)
716 + * another level increases the size of the address space by 9 bits to a size up
717 + * to 64 bits.
718 + */
719 +-static bool increase_address_space(struct protection_domain *domain,
720 ++static void increase_address_space(struct protection_domain *domain,
721 + gfp_t gfp)
722 + {
723 ++ unsigned long flags;
724 + u64 *pte;
725 +
726 +- if (domain->mode == PAGE_MODE_6_LEVEL)
727 ++ spin_lock_irqsave(&domain->lock, flags);
728 ++
729 ++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
730 + /* address space already 64 bit large */
731 +- return false;
732 ++ goto out;
733 +
734 + pte = (void *)get_zeroed_page(gfp);
735 + if (!pte)
736 +- return false;
737 ++ goto out;
738 +
739 + *pte = PM_LEVEL_PDE(domain->mode,
740 + iommu_virt_to_phys(domain->pt_root));
741 +@@ -1345,7 +1359,10 @@ static bool increase_address_space(struct protection_domain *domain,
742 + domain->mode += 1;
743 + domain->updated = true;
744 +
745 +- return true;
746 ++out:
747 ++ spin_unlock_irqrestore(&domain->lock, flags);
748 ++
749 ++ return;
750 + }
751 +
752 + static u64 *alloc_pte(struct protection_domain *domain,
753 +@@ -1835,6 +1852,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
754 + {
755 + u64 pte_root = 0;
756 + u64 flags = 0;
757 ++ u32 old_domid;
758 +
759 + if (domain->mode != PAGE_MODE_NONE)
760 + pte_root = iommu_virt_to_phys(domain->pt_root);
761 +@@ -1877,8 +1895,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
762 + flags &= ~DEV_DOMID_MASK;
763 + flags |= domain->id;
764 +
765 ++ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
766 + amd_iommu_dev_table[devid].data[1] = flags;
767 + amd_iommu_dev_table[devid].data[0] = pte_root;
768 ++
769 ++ /*
770 ++ * A kdump kernel might be replacing a domain ID that was copied from
771 ++ * the previous kernel--if so, it needs to flush the translation cache
772 ++ * entries for the old domain ID that is being overwritten
773 ++ */
774 ++ if (old_domid) {
775 ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
776 ++
777 ++ amd_iommu_flush_tlb_domid(iommu, old_domid);
778 ++ }
779 + }
780 +
781 + static void clear_dte_entry(u16 devid)
782 +diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
783 +index 18d0f8f5283f..8d8e9f56a8be 100644
784 +--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
785 ++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
786 +@@ -607,10 +607,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
787 + static int technisat_usb2_get_ir(struct dvb_usb_device *d)
788 + {
789 + struct technisat_usb2_state *state = d->priv;
790 +- u8 *buf = state->buf;
791 +- u8 *b;
792 +- int ret;
793 + struct ir_raw_event ev;
794 ++ u8 *buf = state->buf;
795 ++ int i, ret;
796 +
797 + buf[0] = GET_IR_DATA_VENDOR_REQUEST;
798 + buf[1] = 0x08;
799 +@@ -646,26 +645,25 @@ unlock:
800 + return 0; /* no key pressed */
801 +
802 + /* decoding */
803 +- b = buf+1;
804 +
805 + #if 0
806 + deb_rc("RC: %d ", ret);
807 +- debug_dump(b, ret, deb_rc);
808 ++ debug_dump(buf + 1, ret, deb_rc);
809 + #endif
810 +
811 + ev.pulse = 0;
812 +- while (1) {
813 +- ev.pulse = !ev.pulse;
814 +- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
815 +- ir_raw_event_store(d->rc_dev, &ev);
816 +-
817 +- b++;
818 +- if (*b == 0xff) {
819 ++ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
820 ++ if (buf[i] == 0xff) {
821 + ev.pulse = 0;
822 + ev.duration = 888888*2;
823 + ir_raw_event_store(d->rc_dev, &ev);
824 + break;
825 + }
826 ++
827 ++ ev.pulse = !ev.pulse;
828 ++ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
829 ++ FIRMWARE_CLOCK_TICK) / 1000;
830 ++ ir_raw_event_store(d->rc_dev, &ev);
831 + }
832 +
833 + ir_raw_event_handle(d->rc_dev);
834 +diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
835 +index 349f578273b6..9a2af71c2691 100644
836 +--- a/drivers/media/usb/tm6000/tm6000-dvb.c
837 ++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
838 +@@ -105,6 +105,7 @@ static void tm6000_urb_received(struct urb *urb)
839 + printk(KERN_ERR "tm6000: error %s\n", __func__);
840 + kfree(urb->transfer_buffer);
841 + usb_free_urb(urb);
842 ++ dev->dvb->bulk_urb = NULL;
843 + }
844 + }
845 + }
846 +@@ -135,6 +136,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
847 + dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
848 + if (dvb->bulk_urb->transfer_buffer == NULL) {
849 + usb_free_urb(dvb->bulk_urb);
850 ++ dvb->bulk_urb = NULL;
851 + printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
852 + return -ENOMEM;
853 + }
854 +@@ -162,6 +164,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
855 +
856 + kfree(dvb->bulk_urb->transfer_buffer);
857 + usb_free_urb(dvb->bulk_urb);
858 ++ dvb->bulk_urb = NULL;
859 + return ret;
860 + }
861 +
862 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
863 +index e31d9d1fb6a6..e4e632e025d3 100644
864 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
865 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
866 +@@ -487,13 +487,19 @@ static int __init xgbe_mod_init(void)
867 +
868 + ret = xgbe_platform_init();
869 + if (ret)
870 +- return ret;
871 ++ goto err_platform_init;
872 +
873 + ret = xgbe_pci_init();
874 + if (ret)
875 +- return ret;
876 ++ goto err_pci_init;
877 +
878 + return 0;
879 ++
880 ++err_pci_init:
881 ++ xgbe_platform_exit();
882 ++err_platform_init:
883 ++ unregister_netdevice_notifier(&xgbe_netdev_notifier);
884 ++ return ret;
885 + }
886 +
887 + static void __exit xgbe_mod_exit(void)
888 +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
889 +index 7b239af6cc04..5046efdad539 100644
890 +--- a/drivers/net/ethernet/marvell/sky2.c
891 ++++ b/drivers/net/ethernet/marvell/sky2.c
892 +@@ -4954,6 +4954,13 @@ static const struct dmi_system_id msi_blacklist[] = {
893 + DMI_MATCH(DMI_BOARD_NAME, "P6T"),
894 + },
895 + },
896 ++ {
897 ++ .ident = "ASUS P6X",
898 ++ .matches = {
899 ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
900 ++ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
901 ++ },
902 ++ },
903 + {}
904 + };
905 +
906 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
907 +index ecc2d4296526..557332f1f886 100644
908 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
909 ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
910 +@@ -1081,7 +1081,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
911 + &drv_version);
912 + if (rc) {
913 + DP_NOTICE(cdev, "Failed sending drv version command\n");
914 +- return rc;
915 ++ goto err4;
916 + }
917 + }
918 +
919 +@@ -1089,6 +1089,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
920 +
921 + return 0;
922 +
923 ++err4:
924 ++ qed_ll2_dealloc_if(cdev);
925 + err3:
926 + qed_hw_stop(cdev);
927 + err2:
928 +diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
929 +index 84a42ed97601..49a18439bea2 100644
930 +--- a/drivers/net/ethernet/seeq/sgiseeq.c
931 ++++ b/drivers/net/ethernet/seeq/sgiseeq.c
932 +@@ -792,15 +792,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
933 + printk(KERN_ERR "Sgiseeq: Cannot register net device, "
934 + "aborting.\n");
935 + err = -ENODEV;
936 +- goto err_out_free_page;
937 ++ goto err_out_free_attrs;
938 + }
939 +
940 + printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
941 +
942 + return 0;
943 +
944 +-err_out_free_page:
945 +- free_page((unsigned long) sp->srings);
946 ++err_out_free_attrs:
947 ++ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
948 ++ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
949 + err_out_free_dev:
950 + free_netdev(dev);
951 +
952 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
953 +index 66beff4d7646..455eec3c4694 100644
954 +--- a/drivers/net/usb/r8152.c
955 ++++ b/drivers/net/usb/r8152.c
956 +@@ -787,8 +787,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
957 + ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
958 + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
959 + value, index, tmp, size, 500);
960 ++ if (ret < 0)
961 ++ memset(data, 0xff, size);
962 ++ else
963 ++ memcpy(data, tmp, size);
964 +
965 +- memcpy(data, tmp, size);
966 + kfree(tmp);
967 +
968 + return ret;
969 +diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
970 +index 32853496fe8c..853b59e19922 100644
971 +--- a/drivers/net/wireless/marvell/mwifiex/ie.c
972 ++++ b/drivers/net/wireless/marvell/mwifiex/ie.c
973 +@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
974 + }
975 +
976 + vs_ie = (struct ieee_types_header *)vendor_ie;
977 ++ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
978 ++ IEEE_MAX_IE_SIZE)
979 ++ return -EINVAL;
980 + memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
981 + vs_ie, vs_ie->len + 2);
982 + le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
983 +diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
984 +index 18f7d9bf30b2..0939a8c8f3ab 100644
985 +--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
986 ++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
987 +@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
988 +
989 + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
990 + if (rate_ie) {
991 ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
992 ++ return;
993 + memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
994 + rate_len = rate_ie->len;
995 + }
996 +@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
997 + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
998 + params->beacon.tail,
999 + params->beacon.tail_len);
1000 +- if (rate_ie)
1001 ++ if (rate_ie) {
1002 ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
1003 ++ return;
1004 + memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
1005 ++ }
1006 +
1007 + return;
1008 + }
1009 +@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
1010 + params->beacon.tail_len);
1011 + if (vendor_ie) {
1012 + wmm_ie = vendor_ie;
1013 ++ if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
1014 ++ return;
1015 + memcpy(&bss_cfg->wmm_info, wmm_ie +
1016 + sizeof(struct ieee_types_header), *(wmm_ie + 1));
1017 + priv->wmm_enabled = 1;
1018 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1019 +index 4af4e5c12d53..5cb3edae586f 100644
1020 +--- a/drivers/net/xen-netfront.c
1021 ++++ b/drivers/net/xen-netfront.c
1022 +@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
1023 + __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1024 + }
1025 + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1026 +- queue->rx.rsp_cons = ++cons;
1027 ++ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
1028 + kfree_skb(nskb);
1029 + return ~0U;
1030 + }
1031 +diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
1032 +index efc317e7669d..03d88a1f1d4f 100644
1033 +--- a/drivers/pci/dwc/pcie-kirin.c
1034 ++++ b/drivers/pci/dwc/pcie-kirin.c
1035 +@@ -449,8 +449,8 @@ static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
1036 + .host_init = kirin_pcie_host_init,
1037 + };
1038 +
1039 +-static int __init kirin_add_pcie_port(struct dw_pcie *pci,
1040 +- struct platform_device *pdev)
1041 ++static int kirin_add_pcie_port(struct dw_pcie *pci,
1042 ++ struct platform_device *pdev)
1043 + {
1044 + pci->pp.ops = &kirin_pcie_host_ops;
1045 +
1046 +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
1047 +index 54c34298a000..e8fe80312820 100644
1048 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
1049 ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
1050 +@@ -64,6 +64,7 @@
1051 + USB2_OBINT_IDDIGCHG)
1052 +
1053 + /* VBCTRL */
1054 ++#define USB2_VBCTRL_OCCLREN BIT(16)
1055 + #define USB2_VBCTRL_DRVVBUSSEL BIT(8)
1056 +
1057 + /* LINECTRL1 */
1058 +@@ -278,6 +279,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
1059 + u32 val;
1060 +
1061 + val = readl(usb2_base + USB2_VBCTRL);
1062 ++ val &= ~USB2_VBCTRL_OCCLREN;
1063 + writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
1064 + writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
1065 + val = readl(usb2_base + USB2_OBINTEN);
1066 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
1067 +index f747f1a1780c..9ee41ba0e55b 100644
1068 +--- a/drivers/tty/serial/atmel_serial.c
1069 ++++ b/drivers/tty/serial/atmel_serial.c
1070 +@@ -1276,7 +1276,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1071 +
1072 + atmel_port->hd_start_rx = false;
1073 + atmel_start_rx(port);
1074 +- return;
1075 + }
1076 +
1077 + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1078 +diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
1079 +index e902494ebbd5..943619ebee38 100644
1080 +--- a/drivers/tty/serial/sprd_serial.c
1081 ++++ b/drivers/tty/serial/sprd_serial.c
1082 +@@ -240,7 +240,7 @@ static inline void sprd_rx(struct uart_port *port)
1083 +
1084 + if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
1085 + SPRD_LSR_FE | SPRD_LSR_OE))
1086 +- if (handle_lsr_errors(port, &lsr, &flag))
1087 ++ if (handle_lsr_errors(port, &flag, &lsr))
1088 + continue;
1089 + if (uart_handle_sysrq_char(port, ch))
1090 + continue;
1091 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1092 +index f105a5f4927e..d03d0e46b121 100644
1093 +--- a/drivers/usb/core/config.c
1094 ++++ b/drivers/usb/core/config.c
1095 +@@ -925,7 +925,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1096 + struct usb_bos_descriptor *bos;
1097 + struct usb_dev_cap_header *cap;
1098 + struct usb_ssp_cap_descriptor *ssp_cap;
1099 +- unsigned char *buffer;
1100 ++ unsigned char *buffer, *buffer0;
1101 + int length, total_len, num, i, ssac;
1102 + __u8 cap_type;
1103 + int ret;
1104 +@@ -970,10 +970,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1105 + ret = -ENOMSG;
1106 + goto err;
1107 + }
1108 ++
1109 ++ buffer0 = buffer;
1110 + total_len -= length;
1111 ++ buffer += length;
1112 +
1113 + for (i = 0; i < num; i++) {
1114 +- buffer += length;
1115 + cap = (struct usb_dev_cap_header *)buffer;
1116 +
1117 + if (total_len < sizeof(*cap) || total_len < cap->bLength) {
1118 +@@ -987,8 +989,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1119 + break;
1120 + }
1121 +
1122 +- total_len -= length;
1123 +-
1124 + if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
1125 + dev_warn(ddev, "descriptor type invalid, skip\n");
1126 + continue;
1127 +@@ -1023,7 +1023,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1128 + default:
1129 + break;
1130 + }
1131 ++
1132 ++ total_len -= length;
1133 ++ buffer += length;
1134 + }
1135 ++ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
1136 +
1137 + return 0;
1138 +
1139 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
1140 +index 469666df91da..8096cca87fe7 100644
1141 +--- a/fs/binfmt_elf.c
1142 ++++ b/fs/binfmt_elf.c
1143 +@@ -1116,6 +1116,17 @@ static int load_elf_binary(struct linux_binprm *bprm)
1144 + current->mm->start_stack = bprm->p;
1145 +
1146 + if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
1147 ++ /*
1148 ++ * For architectures with ELF randomization, when executing
1149 ++ * a loader directly (i.e. no interpreter listed in ELF
1150 ++ * headers), move the brk area out of the mmap region
1151 ++ * (since it grows up, and may collide early with the stack
1152 ++ * growing down), and into the unused ELF_ET_DYN_BASE region.
1153 ++ */
1154 ++ if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
1155 ++ current->mm->brk = current->mm->start_brk =
1156 ++ ELF_ET_DYN_BASE;
1157 ++
1158 + current->mm->brk = current->mm->start_brk =
1159 + arch_randomize_brk(current->mm);
1160 + #ifdef compat_brk_randomized
1161 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1162 +index 57c62ff4e8d6..f523a9ca9574 100644
1163 +--- a/fs/cifs/connect.c
1164 ++++ b/fs/cifs/connect.c
1165 +@@ -2542,6 +2542,7 @@ static int
1166 + cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1167 + {
1168 + int rc = 0;
1169 ++ int is_domain = 0;
1170 + const char *delim, *payload;
1171 + char *desc;
1172 + ssize_t len;
1173 +@@ -2589,6 +2590,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1174 + rc = PTR_ERR(key);
1175 + goto out_err;
1176 + }
1177 ++ is_domain = 1;
1178 + }
1179 +
1180 + down_read(&key->sem);
1181 +@@ -2646,6 +2648,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1182 + goto out_key_put;
1183 + }
1184 +
1185 ++ /*
1186 ++ * If we have a domain key then we must set the domainName in the
1187 ++ * for the request.
1188 ++ */
1189 ++ if (is_domain && ses->domainName) {
1190 ++ vol->domainname = kstrndup(ses->domainName,
1191 ++ strlen(ses->domainName),
1192 ++ GFP_KERNEL);
1193 ++ if (!vol->domainname) {
1194 ++ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
1195 ++ "domain\n", len);
1196 ++ rc = -ENOMEM;
1197 ++ kfree(vol->username);
1198 ++ vol->username = NULL;
1199 ++ kzfree(vol->password);
1200 ++ vol->password = NULL;
1201 ++ goto out_key_put;
1202 ++ }
1203 ++ }
1204 ++
1205 + out_key_put:
1206 + up_read(&key->sem);
1207 + key_put(key);
1208 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1209 +index 85a6fdd76e20..50c181fa0025 100644
1210 +--- a/fs/nfs/dir.c
1211 ++++ b/fs/nfs/dir.c
1212 +@@ -1470,7 +1470,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
1213 + if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
1214 + nfs_file_set_open_context(file, ctx);
1215 + else
1216 +- err = -ESTALE;
1217 ++ err = -EOPENSTALE;
1218 + out:
1219 + return err;
1220 + }
1221 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
1222 +index 2b3e0f1ca572..b8d316a338bc 100644
1223 +--- a/fs/nfs/nfs4file.c
1224 ++++ b/fs/nfs/nfs4file.c
1225 +@@ -74,13 +74,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
1226 + if (IS_ERR(inode)) {
1227 + err = PTR_ERR(inode);
1228 + switch (err) {
1229 +- case -EPERM:
1230 +- case -EACCES:
1231 +- case -EDQUOT:
1232 +- case -ENOSPC:
1233 +- case -EROFS:
1234 +- goto out_put_ctx;
1235 + default:
1236 ++ goto out_put_ctx;
1237 ++ case -ENOENT:
1238 ++ case -ESTALE:
1239 ++ case -EISDIR:
1240 ++ case -ENOTDIR:
1241 ++ case -ELOOP:
1242 + goto out_drop;
1243 + }
1244 + }
1245 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
1246 +index 132e568524df..ceb6892d9bbd 100644
1247 +--- a/fs/nfs/pagelist.c
1248 ++++ b/fs/nfs/pagelist.c
1249 +@@ -566,7 +566,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
1250 + }
1251 +
1252 + hdr->res.fattr = &hdr->fattr;
1253 +- hdr->res.count = count;
1254 ++ hdr->res.count = 0;
1255 + hdr->res.eof = 0;
1256 + hdr->res.verf = &hdr->verf;
1257 + nfs_fattr_init(&hdr->fattr);
1258 +diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
1259 +index f7fd9192d4bc..eff93315572e 100644
1260 +--- a/fs/nfs/proc.c
1261 ++++ b/fs/nfs/proc.c
1262 +@@ -589,7 +589,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
1263 + /* Emulate the eof flag, which isn't normally needed in NFSv2
1264 + * as it is guaranteed to always return the file attributes
1265 + */
1266 +- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
1267 ++ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
1268 ++ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
1269 + hdr->res.eof = 1;
1270 + }
1271 + return 0;
1272 +@@ -610,8 +611,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
1273 +
1274 + static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
1275 + {
1276 +- if (task->tk_status >= 0)
1277 ++ if (task->tk_status >= 0) {
1278 ++ hdr->res.count = hdr->args.count;
1279 + nfs_writeback_update_inode(hdr);
1280 ++ }
1281 + return 0;
1282 + }
1283 +
1284 +diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
1285 +index 5c8a4d760ee3..b5123ab8d54a 100644
1286 +--- a/include/uapi/linux/netfilter/xt_nfacct.h
1287 ++++ b/include/uapi/linux/netfilter/xt_nfacct.h
1288 +@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
1289 + struct nf_acct *nfacct;
1290 + };
1291 +
1292 ++struct xt_nfacct_match_info_v1 {
1293 ++ char name[NFACCT_NAME_MAX];
1294 ++ struct nf_acct *nfacct __attribute__((aligned(8)));
1295 ++};
1296 ++
1297 + #endif /* _XT_NFACCT_MATCH_H */
1298 +diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
1299 +index 127e7cfafa55..3e1b66366ac2 100644
1300 +--- a/kernel/kallsyms.c
1301 ++++ b/kernel/kallsyms.c
1302 +@@ -296,8 +296,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
1303 + {
1304 + char namebuf[KSYM_NAME_LEN];
1305 +
1306 +- if (is_ksym_addr(addr))
1307 +- return !!get_symbol_pos(addr, symbolsize, offset);
1308 ++ if (is_ksym_addr(addr)) {
1309 ++ get_symbol_pos(addr, symbolsize, offset);
1310 ++ return 1;
1311 ++ }
1312 + return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
1313 + !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
1314 + }
1315 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
1316 +index 8be61734fc43..e07f636160b6 100644
1317 +--- a/net/batman-adv/bat_v_ogm.c
1318 ++++ b/net/batman-adv/bat_v_ogm.c
1319 +@@ -642,17 +642,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
1320 + * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
1321 + * @buff_pos: current position in the skb
1322 + * @packet_len: total length of the skb
1323 +- * @tvlv_len: tvlv length of the previously considered OGM
1324 ++ * @ogm2_packet: potential OGM2 in buffer
1325 + *
1326 + * Return: true if there is enough space for another OGM, false otherwise.
1327 + */
1328 +-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
1329 +- __be16 tvlv_len)
1330 ++static bool
1331 ++batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
1332 ++ const struct batadv_ogm2_packet *ogm2_packet)
1333 + {
1334 + int next_buff_pos = 0;
1335 +
1336 +- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
1337 +- next_buff_pos += ntohs(tvlv_len);
1338 ++ /* check if there is enough space for the header */
1339 ++ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
1340 ++ if (next_buff_pos > packet_len)
1341 ++ return false;
1342 ++
1343 ++ /* check if there is enough space for the optional TVLV */
1344 ++ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
1345 +
1346 + return (next_buff_pos <= packet_len) &&
1347 + (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
1348 +@@ -829,7 +835,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
1349 + ogm_packet = (struct batadv_ogm2_packet *)skb->data;
1350 +
1351 + while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
1352 +- ogm_packet->tvlv_len)) {
1353 ++ ogm_packet)) {
1354 + batadv_v_ogm_process(skb, ogm_offset, if_incoming);
1355 +
1356 + ogm_offset += BATADV_OGM2_HLEN;
1357 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1358 +index 5ce069ce2a97..c1f59a53f68f 100644
1359 +--- a/net/ipv4/tcp.c
1360 ++++ b/net/ipv4/tcp.c
1361 +@@ -922,10 +922,10 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
1362 + */
1363 + static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
1364 + {
1365 +- if (skb && !skb->len) {
1366 ++ if (skb && !skb->len &&
1367 ++ TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
1368 + tcp_unlink_write_queue(skb, sk);
1369 +- if (tcp_write_queue_empty(sk))
1370 +- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1371 ++ tcp_check_send_head(sk, skb);
1372 + sk_wmem_free_skb(sk, skb);
1373 + }
1374 + }
1375 +diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
1376 +index f0e9a7511e1a..c236c7d1655d 100644
1377 +--- a/net/netfilter/nf_conntrack_ftp.c
1378 ++++ b/net/netfilter/nf_conntrack_ftp.c
1379 +@@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen,
1380 + i++;
1381 + }
1382 +
1383 +- pr_debug("Skipped up to `%c'!\n", skip);
1384 ++ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
1385 +
1386 + *numoff = i;
1387 + *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
1388 +diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
1389 +index 6f92d25590a8..ea447b437f12 100644
1390 +--- a/net/netfilter/xt_nfacct.c
1391 ++++ b/net/netfilter/xt_nfacct.c
1392 +@@ -55,25 +55,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
1393 + nfnl_acct_put(info->nfacct);
1394 + }
1395 +
1396 +-static struct xt_match nfacct_mt_reg __read_mostly = {
1397 +- .name = "nfacct",
1398 +- .family = NFPROTO_UNSPEC,
1399 +- .checkentry = nfacct_mt_checkentry,
1400 +- .match = nfacct_mt,
1401 +- .destroy = nfacct_mt_destroy,
1402 +- .matchsize = sizeof(struct xt_nfacct_match_info),
1403 +- .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
1404 +- .me = THIS_MODULE,
1405 ++static struct xt_match nfacct_mt_reg[] __read_mostly = {
1406 ++ {
1407 ++ .name = "nfacct",
1408 ++ .revision = 0,
1409 ++ .family = NFPROTO_UNSPEC,
1410 ++ .checkentry = nfacct_mt_checkentry,
1411 ++ .match = nfacct_mt,
1412 ++ .destroy = nfacct_mt_destroy,
1413 ++ .matchsize = sizeof(struct xt_nfacct_match_info),
1414 ++ .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
1415 ++ .me = THIS_MODULE,
1416 ++ },
1417 ++ {
1418 ++ .name = "nfacct",
1419 ++ .revision = 1,
1420 ++ .family = NFPROTO_UNSPEC,
1421 ++ .checkentry = nfacct_mt_checkentry,
1422 ++ .match = nfacct_mt,
1423 ++ .destroy = nfacct_mt_destroy,
1424 ++ .matchsize = sizeof(struct xt_nfacct_match_info_v1),
1425 ++ .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
1426 ++ .me = THIS_MODULE,
1427 ++ },
1428 + };
1429 +
1430 + static int __init nfacct_mt_init(void)
1431 + {
1432 +- return xt_register_match(&nfacct_mt_reg);
1433 ++ return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
1434 + }
1435 +
1436 + static void __exit nfacct_mt_exit(void)
1437 + {
1438 +- xt_unregister_match(&nfacct_mt_reg);
1439 ++ xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
1440 + }
1441 +
1442 + module_init(nfacct_mt_init);
1443 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1444 +index 79549baf5804..21b981abbacb 100644
1445 +--- a/net/sched/sch_generic.c
1446 ++++ b/net/sched/sch_generic.c
1447 +@@ -703,7 +703,11 @@ static void qdisc_rcu_free(struct rcu_head *head)
1448 +
1449 + void qdisc_destroy(struct Qdisc *qdisc)
1450 + {
1451 +- const struct Qdisc_ops *ops = qdisc->ops;
1452 ++ const struct Qdisc_ops *ops;
1453 ++
1454 ++ if (!qdisc)
1455 ++ return;
1456 ++ ops = qdisc->ops;
1457 +
1458 + if (qdisc->flags & TCQ_F_BUILTIN ||
1459 + !refcount_dec_and_test(&qdisc->refcnt))
1460 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
1461 +index c672a790df1c..f19d5a55f09e 100644
1462 +--- a/net/wireless/nl80211.c
1463 ++++ b/net/wireless/nl80211.c
1464 +@@ -9753,9 +9753,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
1465 + hyst = wdev->cqm_config->rssi_hyst;
1466 + n = wdev->cqm_config->n_rssi_thresholds;
1467 +
1468 +- for (i = 0; i < n; i++)
1469 ++ for (i = 0; i < n; i++) {
1470 ++ i = array_index_nospec(i, n);
1471 + if (last < wdev->cqm_config->rssi_thresholds[i])
1472 + break;
1473 ++ }
1474 +
1475 + low_index = i - 1;
1476 + if (low_index >= 0) {
1477 +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
1478 +index 5e515791ccd1..1d34b2a5f485 100644
1479 +--- a/security/keys/request_key_auth.c
1480 ++++ b/security/keys/request_key_auth.c
1481 +@@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
1482 + {
1483 + struct request_key_auth *rka = get_request_key_auth(key);
1484 +
1485 ++ if (!rka)
1486 ++ return;
1487 ++
1488 + seq_puts(m, "key:");
1489 + seq_puts(m, key->description);
1490 + if (key_is_positive(key))
1491 +@@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
1492 + size_t datalen;
1493 + long ret;
1494 +
1495 ++ if (!rka)
1496 ++ return -EKEYREVOKED;
1497 ++
1498 + datalen = rka->callout_len;
1499 + ret = datalen;
1500 +
1501 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
1502 +index 3e5f8b3db272..19e345cf8193 100644
1503 +--- a/tools/power/x86/turbostat/turbostat.c
1504 ++++ b/tools/power/x86/turbostat/turbostat.c
1505 +@@ -4488,7 +4488,7 @@ int initialize_counters(int cpu_id)
1506 +
1507 + void allocate_output_buffer()
1508 + {
1509 +- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
1510 ++ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
1511 + outp = output_buffer;
1512 + if (outp == NULL)
1513 + err(-1, "calloc output buffer");
1514 +diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1515 +index 65bbe627a425..2aba622d1c5a 100644
1516 +--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1517 ++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1518 +@@ -546,7 +546,7 @@ void cmdline(int argc, char **argv)
1519 +
1520 + progname = argv[0];
1521 +
1522 +- while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
1523 ++ while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
1524 + long_options, &option_index)) != -1) {
1525 + switch (opt) {
1526 + case 'a':
1527 +@@ -1260,6 +1260,15 @@ void probe_dev_msr(void)
1528 + if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1529 + err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1530 + }
1531 ++
1532 ++static void get_cpuid_or_exit(unsigned int leaf,
1533 ++ unsigned int *eax, unsigned int *ebx,
1534 ++ unsigned int *ecx, unsigned int *edx)
1535 ++{
1536 ++ if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
1537 ++ errx(1, "Processor not supported\n");
1538 ++}
1539 ++
1540 + /*
1541 + * early_cpuid()
1542 + * initialize turbo_is_enabled, has_hwp, has_epb
1543 +@@ -1267,15 +1276,10 @@ void probe_dev_msr(void)
1544 + */
1545 + void early_cpuid(void)
1546 + {
1547 +- unsigned int eax, ebx, ecx, edx, max_level;
1548 ++ unsigned int eax, ebx, ecx, edx;
1549 + unsigned int fms, family, model;
1550 +
1551 +- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
1552 +-
1553 +- if (max_level < 6)
1554 +- errx(1, "Processor not supported\n");
1555 +-
1556 +- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1557 ++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1558 + family = (fms >> 8) & 0xf;
1559 + model = (fms >> 4) & 0xf;
1560 + if (family == 6 || family == 0xf)
1561 +@@ -1289,7 +1293,7 @@ void early_cpuid(void)
1562 + bdx_highest_ratio = msr & 0xFF;
1563 + }
1564 +
1565 +- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
1566 ++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1567 + turbo_is_enabled = (eax >> 1) & 1;
1568 + has_hwp = (eax >> 7) & 1;
1569 + has_epb = (ecx >> 3) & 1;
1570 +@@ -1307,7 +1311,7 @@ void parse_cpuid(void)
1571 +
1572 + eax = ebx = ecx = edx = 0;
1573 +
1574 +- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
1575 ++ get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
1576 +
1577 + if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1578 + genuine_intel = 1;
1579 +@@ -1316,7 +1320,7 @@ void parse_cpuid(void)
1580 + fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
1581 + (char *)&ebx, (char *)&edx, (char *)&ecx);
1582 +
1583 +- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1584 ++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1585 + family = (fms >> 8) & 0xf;
1586 + model = (fms >> 4) & 0xf;
1587 + stepping = fms & 0xf;
1588 +@@ -1341,7 +1345,7 @@ void parse_cpuid(void)
1589 + errx(1, "CPUID: no MSR");
1590 +
1591 +
1592 +- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
1593 ++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1594 + /* turbo_is_enabled already set */
1595 + /* has_hwp already set */
1596 + has_hwp_notify = eax & (1 << 8);
1597 +diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
1598 +index 9e65feb6fa58..b9336693c87e 100644
1599 +--- a/virt/kvm/coalesced_mmio.c
1600 ++++ b/virt/kvm/coalesced_mmio.c
1601 +@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
1602 + return 1;
1603 + }
1604 +
1605 +-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
1606 ++static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
1607 + {
1608 + struct kvm_coalesced_mmio_ring *ring;
1609 + unsigned avail;
1610 +@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
1611 + * there is always one unused entry in the buffer
1612 + */
1613 + ring = dev->kvm->coalesced_mmio_ring;
1614 +- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
1615 ++ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
1616 + if (avail == 0) {
1617 + /* full */
1618 + return 0;
1619 +@@ -67,24 +67,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
1620 + {
1621 + struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
1622 + struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
1623 ++ __u32 insert;
1624 +
1625 + if (!coalesced_mmio_in_range(dev, addr, len))
1626 + return -EOPNOTSUPP;
1627 +
1628 + spin_lock(&dev->kvm->ring_lock);
1629 +
1630 +- if (!coalesced_mmio_has_room(dev)) {
1631 ++ insert = READ_ONCE(ring->last);
1632 ++ if (!coalesced_mmio_has_room(dev, insert) ||
1633 ++ insert >= KVM_COALESCED_MMIO_MAX) {
1634 + spin_unlock(&dev->kvm->ring_lock);
1635 + return -EOPNOTSUPP;
1636 + }
1637 +
1638 + /* copy data in first free entry of the ring */
1639 +
1640 +- ring->coalesced_mmio[ring->last].phys_addr = addr;
1641 +- ring->coalesced_mmio[ring->last].len = len;
1642 +- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
1643 ++ ring->coalesced_mmio[insert].phys_addr = addr;
1644 ++ ring->coalesced_mmio[insert].len = len;
1645 ++ memcpy(ring->coalesced_mmio[insert].data, val, len);
1646 + smp_wmb();
1647 +- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
1648 ++ ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
1649 + spin_unlock(&dev->kvm->ring_lock);
1650 + return 0;
1651 + }