
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Sun, 17 Oct 2021 13:12:21
Message-Id: 1634476325.3f283992744a023735e3b1f8f39a0c9c4d16bc74.mpagano@gentoo
commit: 3f283992744a023735e3b1f8f39a0c9c4d16bc74
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 17 13:12:05 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 17 13:12:05 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3f283992

Linux patch 5.4.154

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1153_linux-5.4.154.patch | 948 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 952 insertions(+)

diff --git a/0000_README b/0000_README
index 01db703..3c15edc 100644
--- a/0000_README
+++ b/0000_README
@@ -655,6 +655,10 @@ Patch: 1152_linux-5.4.153.patch
From: http://www.kernel.org
Desc: Linux 5.4.153

+Patch: 1153_linux-5.4.154.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.154
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1153_linux-5.4.154.patch b/1153_linux-5.4.154.patch
new file mode 100644
index 0000000..dace064
--- /dev/null
+++ b/1153_linux-5.4.154.patch
@@ -0,0 +1,948 @@
+diff --git a/Makefile b/Makefile
+index df9b1d07ca097..3358f56a37f06 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 153
++SUBLEVEL = 154
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
+index 05610e6924c16..f7121b775e5f0 100644
+--- a/arch/m68k/kernel/signal.c
++++ b/arch/m68k/kernel/signal.c
+@@ -448,7 +448,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+ fpu_version = sc->sc_fpstate[0];
+- if (CPU_IS_020_OR_030 &&
++ if (CPU_IS_020_OR_030 && !regs->stkadj &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+@@ -511,7 +511,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
+ if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+ context_size = fpstate[1];
+ fpu_version = fpstate[0];
+- if (CPU_IS_020_OR_030 &&
++ if (CPU_IS_020_OR_030 && !regs->stkadj &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+@@ -829,18 +829,24 @@ badframe:
+ return 0;
+ }
+
++static inline struct pt_regs *rte_regs(struct pt_regs *regs)
++{
++ return (void *)regs + regs->stkadj;
++}
++
+ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+ {
++ struct pt_regs *tregs = rte_regs(regs);
+ sc->sc_mask = mask;
+ sc->sc_usp = rdusp();
+ sc->sc_d0 = regs->d0;
+ sc->sc_d1 = regs->d1;
+ sc->sc_a0 = regs->a0;
+ sc->sc_a1 = regs->a1;
+- sc->sc_sr = regs->sr;
+- sc->sc_pc = regs->pc;
+- sc->sc_formatvec = regs->format << 12 | regs->vector;
++ sc->sc_sr = tregs->sr;
++ sc->sc_pc = tregs->pc;
++ sc->sc_formatvec = tregs->format << 12 | tregs->vector;
+ save_a5_state(sc, regs);
+ save_fpu_state(sc, regs);
+ }
+@@ -848,6 +854,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
+ {
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
++ struct pt_regs *tregs = rte_regs(regs);
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
+ int err = 0;
+
+@@ -868,9 +875,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
+ err |= __put_user(sw->a5, &gregs[13]);
+ err |= __put_user(sw->a6, &gregs[14]);
+ err |= __put_user(rdusp(), &gregs[15]);
+- err |= __put_user(regs->pc, &gregs[16]);
+- err |= __put_user(regs->sr, &gregs[17]);
+- err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
++ err |= __put_user(tregs->pc, &gregs[16]);
++ err |= __put_user(tregs->sr, &gregs[17]);
++ err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
+ err |= rt_save_fpu_state(uc, regs);
+ return err;
+ }
+@@ -887,13 +894,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+ {
+ struct sigframe __user *frame;
+- int fsize = frame_extra_sizes(regs->format);
++ struct pt_regs *tregs = rte_regs(regs);
++ int fsize = frame_extra_sizes(tregs->format);
+ struct sigcontext context;
+ int err = 0, sig = ksig->sig;
+
+ if (fsize < 0) {
+ pr_debug("setup_frame: Unknown frame format %#x\n",
+- regs->format);
++ tregs->format);
+ return -EFAULT;
+ }
+
+@@ -904,7 +912,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+
+ err |= __put_user(sig, &frame->sig);
+
+- err |= __put_user(regs->vector, &frame->code);
++ err |= __put_user(tregs->vector, &frame->code);
+ err |= __put_user(&frame->sc, &frame->psc);
+
+ if (_NSIG_WORDS > 1)
+@@ -929,34 +937,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+
+ push_cache ((unsigned long) &frame->retcode);
+
+- /*
+- * Set up registers for signal handler. All the state we are about
+- * to destroy is successfully copied to sigframe.
+- */
+- wrusp ((unsigned long) frame);
+- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+- adjustformat(regs);
+-
+ /*
+ * This is subtle; if we build more than one sigframe, all but the
+ * first one will see frame format 0 and have fsize == 0, so we won't
+ * screw stkadj.
+ */
+- if (fsize)
++ if (fsize) {
+ regs->stkadj = fsize;
+-
+- /* Prepare to skip over the extra stuff in the exception frame. */
+- if (regs->stkadj) {
+- struct pt_regs *tregs =
+- (struct pt_regs *)((ulong)regs + regs->stkadj);
++ tregs = rte_regs(regs);
+ pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+- /* This must be copied with decreasing addresses to
+- handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+- tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
++
++ /*
++ * Set up registers for signal handler. All the state we are about
++ * to destroy is successfully copied to sigframe.
++ */
++ wrusp ((unsigned long) frame);
++ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
++ adjustformat(regs);
++
+ return 0;
+ }
+
+@@ -964,7 +966,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+ {
+ struct rt_sigframe __user *frame;
+- int fsize = frame_extra_sizes(regs->format);
++ struct pt_regs *tregs = rte_regs(regs);
++ int fsize = frame_extra_sizes(tregs->format);
+ int err = 0, sig = ksig->sig;
+
+ if (fsize < 0) {
+@@ -1013,34 +1016,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+
+ push_cache ((unsigned long) &frame->retcode);
+
+- /*
+- * Set up registers for signal handler. All the state we are about
+- * to destroy is successfully copied to sigframe.
+- */
+- wrusp ((unsigned long) frame);
+- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+- adjustformat(regs);
+-
+ /*
+ * This is subtle; if we build more than one sigframe, all but the
+ * first one will see frame format 0 and have fsize == 0, so we won't
+ * screw stkadj.
+ */
+- if (fsize)
++ if (fsize) {
+ regs->stkadj = fsize;
+-
+- /* Prepare to skip over the extra stuff in the exception frame. */
+- if (regs->stkadj) {
+- struct pt_regs *tregs =
+- (struct pt_regs *)((ulong)regs + regs->stkadj);
++ tregs = rte_regs(regs);
+ pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+- /* This must be copied with decreasing addresses to
+- handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+- tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
++
++ /*
++ * Set up registers for signal handler. All the state we are about
++ * to destroy is successfully copied to sigframe.
++ */
++ wrusp ((unsigned long) frame);
++ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
++ adjustformat(regs);
+ return 0;
+ }
+
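The heart of the m68k change is the new rte_regs() helper: when the kernel has shrunk an oversized hardware exception frame, stkadj records by how many bytes, and the frame that the RTE instruction will actually consume lives that many bytes above the pt_regs the C code holds. Signal setup must read and write that live frame, not the stale copy. Below is a minimal userspace sketch of just that pointer adjustment; the struct is a simplified stand-in, not the real m68k pt_regs layout:

/* Illustrative model of the rte_regs() adjustment added by this patch.
 * Fields are simplified stand-ins for the real m68k pt_regs. */
#include <stdio.h>

struct fake_pt_regs {
    long stkadj;            /* bytes the exception frame was shrunk by */
    unsigned long pc;
    unsigned short sr;
};

/* Mirror of the helper above: the frame RTE will restore sits stkadj
 * bytes further up the stack. */
static struct fake_pt_regs *rte_regs(struct fake_pt_regs *regs)
{
    return (void *)regs + regs->stkadj;
}

int main(void)
{
    unsigned char stack[64] = {0};
    struct fake_pt_regs *regs = (struct fake_pt_regs *)stack;

    regs->stkadj = sizeof(struct fake_pt_regs); /* pretend a frame collapsed */
    struct fake_pt_regs *tregs = rte_regs(regs);

    tregs->pc = 0xdeadbeef; /* handler entry must land in the live frame */
    printf("regs=%p tregs=%p delta=%ld\n", (void *)regs, (void *)tregs,
           (long)((char *)tregs - (char *)regs));
    return 0;
}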
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index f642e066e67a2..85ee0e849647e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -903,6 +903,8 @@ static int gmc_v10_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ gmc_v10_0_gart_disable(adev);
++
+ if (amdgpu_sriov_vf(adev)) {
+ /* full access mode, so don't touch any GMC register */
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+@@ -910,7 +912,6 @@ static int gmc_v10_0_hw_fini(void *handle)
+ }
+
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+- gmc_v10_0_gart_disable(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 688111ef814de..63205de4a5656 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1526,6 +1526,8 @@ static int gmc_v9_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ gmc_v9_0_gart_disable(adev);
++
+ if (amdgpu_sriov_vf(adev)) {
+ /* full access mode, so don't touch any GMC register */
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+@@ -1534,7 +1536,6 @@ static int gmc_v9_0_hw_fini(void *handle)
+ }
+
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+- gmc_v9_0_gart_disable(adev);
+
+ return 0;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6909c045fece1..07df64daf7dae 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -301,12 +301,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
+
+ /*
+ * MacBook JIS keyboard has wrong logical maximum
++ * Magic Keyboard JIS has wrong logical maximum
+ */
+ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
++ if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
++ hid_info(hdev,
++ "fixing up Magic Keyboard JIS report descriptor\n");
++ rdesc[64] = rdesc[70] = 0xe7;
++ }
++
+ if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
+ rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
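The new hunk mirrors the long-standing MacBook JIS quirk directly below it: bounds-check the report descriptor, then rewrite the two wrong logical-maximum bytes (0x65) to 0xe7 so the extra JIS keys report usable codes. A self-contained userspace sketch of the same fixup logic, run against a synthetic descriptor buffer:

#include <stdio.h>

/* Same check-and-patch pattern as apple_report_fixup() above; offsets
 * follow the patch, the buffer here is synthetic. */
static void fixup(unsigned char *rdesc, unsigned int rsize)
{
    if (rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
        rdesc[64] = rdesc[70] = 0xe7;
        puts("fixing up Magic Keyboard JIS report descriptor");
    }
}

int main(void)
{
    unsigned char rdesc[71] = {0};

    rdesc[64] = rdesc[70] = 0x65;     /* the wrong logical maximum */
    fixup(rdesc, sizeof(rdesc));
    printf("rdesc[64]=%#x rdesc[70]=%#x\n", rdesc[64], rdesc[70]);
    return 0;
}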
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index d5425bc1ad61a..f6be2e70a4967 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4715,6 +4715,12 @@ static const struct wacom_features wacom_features_0x393 =
+ { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
+ INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
+ .touch_max = 10 };
++static const struct wacom_features wacom_features_0x3c6 =
++ { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
++ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
++static const struct wacom_features wacom_features_0x3c8 =
++ { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
++ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+@@ -4888,6 +4894,8 @@ const struct hid_device_id wacom_ids[] = {
+ { USB_DEVICE_WACOM(0x37A) },
+ { USB_DEVICE_WACOM(0x37B) },
+ { BT_DEVICE_WACOM(0x393) },
++ { BT_DEVICE_WACOM(0x3c6) },
++ { BT_DEVICE_WACOM(0x3c8) },
+ { USB_DEVICE_WACOM(0x4001) },
+ { USB_DEVICE_WACOM(0x4004) },
+ { USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
+index 7b982e02ea3a4..1080a2a3e13a2 100644
+--- a/drivers/net/ethernet/sun/Kconfig
++++ b/drivers/net/ethernet/sun/Kconfig
+@@ -73,6 +73,7 @@ config CASSINI
+ config SUNVNET_COMMON
+ tristate "Common routines to support Sun Virtual Networking"
+ depends on SUN_LDOMS
++ depends on INET
+ default m
+
+ config SUNVNET
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index af8eabe7a6d44..d372626c603d4 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -26,7 +26,12 @@
+ #define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
+ #define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
+ #define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
++#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0
++#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1
++#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2
+ #define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
++#define MII_BCM7XXX_SHD_3_EEE_LP 0x4
++#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5
+ #define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
+ #define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
+ #define MII_BCM7XXX_SHD_3_AN_STAT 0xb
+@@ -210,25 +215,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
+ return genphy_config_aneg(phydev);
+ }
+
+-static int phy_set_clr_bits(struct phy_device *dev, int location,
+- int set_mask, int clr_mask)
++static int __phy_set_clr_bits(struct phy_device *dev, int location,
++ int set_mask, int clr_mask)
+ {
+ int v, ret;
+
+- v = phy_read(dev, location);
++ v = __phy_read(dev, location);
+ if (v < 0)
+ return v;
+
+ v &= ~clr_mask;
+ v |= set_mask;
+
+- ret = phy_write(dev, location, v);
++ ret = __phy_write(dev, location, v);
+ if (ret < 0)
+ return ret;
+
+ return v;
+ }
+
++static int phy_set_clr_bits(struct phy_device *dev, int location,
++ int set_mask, int clr_mask)
++{
++ int ret;
++
++ mutex_lock(&dev->mdio.bus->mdio_lock);
++ ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
++ mutex_unlock(&dev->mdio.bus->mdio_lock);
++
++ return ret;
++}
++
+ static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
+ {
+ int ret;
+@@ -392,6 +409,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
+ return bcm7xxx_28nm_ephy_apd_enable(phydev);
+ }
+
++#define MII_BCM7XXX_REG_INVALID 0xff
++
++static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
++{
++ switch (regnum) {
++ case MDIO_CTRL1:
++ return MII_BCM7XXX_SHD_3_PCS_CTRL;
++ case MDIO_STAT1:
++ return MII_BCM7XXX_SHD_3_PCS_STATUS;
++ case MDIO_PCS_EEE_ABLE:
++ return MII_BCM7XXX_SHD_3_EEE_CAP;
++ case MDIO_AN_EEE_ADV:
++ return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
++ case MDIO_AN_EEE_LPABLE:
++ return MII_BCM7XXX_SHD_3_EEE_LP;
++ case MDIO_PCS_EEE_WK_ERR:
++ return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
++ default:
++ return MII_BCM7XXX_REG_INVALID;
++ }
++}
++
++static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
++{
++ return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
++}
++
++static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
++ int devnum, u16 regnum)
++{
++ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
++ int ret;
++
++ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
++ shd == MII_BCM7XXX_REG_INVALID)
++ return -EOPNOTSUPP;
++
++ /* set shadow mode 2 */
++ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
++ MII_BCM7XXX_SHD_MODE_2, 0);
++ if (ret < 0)
++ return ret;
++
++ /* Access the desired shadow register address */
++ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
++ if (ret < 0)
++ goto reset_shadow_mode;
++
++ ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
++
++reset_shadow_mode:
++ /* reset shadow mode 2 */
++ __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
++ MII_BCM7XXX_SHD_MODE_2);
++ return ret;
++}
++
++static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
++ int devnum, u16 regnum, u16 val)
++{
++ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
++ int ret;
++
++ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
++ shd == MII_BCM7XXX_REG_INVALID)
++ return -EOPNOTSUPP;
++
++ /* set shadow mode 2 */
++ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
++ MII_BCM7XXX_SHD_MODE_2, 0);
++ if (ret < 0)
++ return ret;
++
++ /* Access the desired shadow register address */
++ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
++ if (ret < 0)
++ goto reset_shadow_mode;
++
++ /* Write the desired value in the shadow register */
++ __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
++
++reset_shadow_mode:
++ /* reset shadow mode 2 */
++ return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
++ MII_BCM7XXX_SHD_MODE_2);
++}
++
+ static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
+ {
+ int ret;
+@@ -563,6 +667,8 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
+ .get_strings = bcm_phy_get_strings, \
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
+ .probe = bcm7xxx_28nm_probe, \
++ .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \
++ .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \
+ }
+
+ #define BCM7XXX_40NM_EPHY(_oui, _name) \
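The new read_mmd/write_mmd hooks both perform the same shadow-bank dance: set the shadow-mode-2 bit in the test register, write the mapped shadow register number into the address-control register, read or write the control/status register, then clear the mode bit again. Below is a self-contained model of that sequence against a fake register file; the constants and rd()/wr() helpers are simplified stand-ins for the MDIO accessors, not the driver's API:

#include <stdio.h>

#define TEST_REG   0x1f   /* stand-in for MII_BCM7XXX_TEST */
#define SHD_MODE_2 0x0004
#define ADDR_CTRL  0x0e
#define CTRL_STAT  0x0f

static unsigned short regs[32];           /* fake PHY register file */

static int rd(int loc)           { return regs[loc]; }
static void wr(int loc, int val) { regs[loc] = (unsigned short)val; }

static int shadow_read(int shd)
{
    int ret;

    wr(TEST_REG, rd(TEST_REG) | SHD_MODE_2);  /* enter shadow mode 2 */
    wr(ADDR_CTRL, shd);                       /* select shadow register */
    ret = rd(CTRL_STAT);                      /* read it back */
    wr(TEST_REG, rd(TEST_REG) & ~SHD_MODE_2); /* leave shadow mode 2 */
    return ret;
}

int main(void)
{
    regs[CTRL_STAT] = 0x1234;                 /* pretend EEE capability bits */
    printf("shadow reg 0x2 = %#x\n", shadow_read(0x2));
    return 0;
}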
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 43e682297fd5f..0a1734f34587d 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+ {
+- u32 result;
++ int result;
+
+ unsigned char cmd[] = {
+ SEND_DIAGNOSTIC,
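The ses.c hunk is a one-word type fix with real consequences: the SCSI command helpers can return a negative errno, and storing that in a u32 turns it into a huge positive value, so error checks and %d logging go wrong. A two-line demonstration:

#include <stdio.h>

int main(void)
{
    unsigned int result_u32 = (unsigned int)-5; /* e.g. an -EIO return code */
    int result_int = -5;

    /* With u32 the error is invisible: "result < 0" can never be true. */
    printf("as u32: %u\n", result_u32);
    printf("as int: %d (correctly negative)\n", result_int);
    return 0;
}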
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index bfec84aacd90b..cb833c5fb9ce2 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -297,7 +297,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+ }
+ break;
+ default:
+- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
++ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
+ }
+ }
+
+@@ -381,7 +381,7 @@ static void virtscsi_handle_event(struct work_struct *work)
+ virtscsi_handle_param_change(vscsi, event);
+ break;
+ default:
+- pr_err("Unsupport virtio scsi event %x\n", event->event);
++ pr_err("Unsupported virtio scsi event %x\n", event->event);
+ }
+ virtscsi_kick_event(vscsi, event_node);
+ }
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 46151bda62368..cdb10e9fded65 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -733,18 +733,13 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ void *kaddr;
+ struct ext4_iloc iloc;
+
+- if (unlikely(copied < len)) {
+- if (!PageUptodate(page)) {
+- copied = 0;
+- goto out;
+- }
+- }
++ if (unlikely(copied < len) && !PageUptodate(page))
++ return 0;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret) {
+ ext4_std_error(inode->i_sb, ret);
+- copied = 0;
+- goto out;
++ return ret;
+ }
+
+ ext4_write_lock_xattr(inode, &no_expand);
+@@ -757,7 +752,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ (void) ext4_find_inline_data_nolock(inode);
+
+ kaddr = kmap_atomic(page);
+- ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
++ ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+ kunmap_atomic(kaddr);
+ SetPageUptodate(page);
+ /* clear page dirty so that writepages wouldn't work for us. */
+@@ -766,7 +761,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ ext4_write_unlock_xattr(inode, &no_expand);
+ brelse(iloc.bh);
+ mark_inode_dirty(inode);
+-out:
++
+ return copied;
+ }
+
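Three things change in ext4_write_inline_data_end(): a short copy into a not-uptodate page now fails fast by returning 0, a failed inode-location lookup propagates its errno instead of being squashed into "0 bytes written", and only copied bytes (not the full requested len) are committed to the inline area, so a faulting user buffer can't land bytes the caller never supplied. A toy model of that ->write_end contract, with fake page/inode types standing in for the real ones:

#include <stdio.h>
#include <string.h>

struct fake_page { int uptodate; char data[64]; };

static int write_end(struct fake_page *pg, const char *src,
                     unsigned int len, unsigned int copied)
{
    if (copied < len && !pg->uptodate)
        return 0;        /* short copy, page unreadable: accept nothing */

    memcpy(pg->data, src, copied); /* commit only what was actually copied */
    pg->uptodate = 1;
    return (int)copied;
}

int main(void)
{
    struct fake_page pg = { .uptodate = 0 };

    printf("short copy -> %d bytes accepted\n", write_end(&pg, "abc", 8, 3));
    printf("full copy  -> %d bytes accepted\n", write_end(&pg, "abcdefgh", 8, 8));
    return 0;
}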
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 48b467353f6f1..dcbd8ac8d4711 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1439,6 +1439,7 @@ static int ext4_write_end(struct file *file,
+ goto errout;
+ }
+ copied = ret;
++ ret = 0;
+ } else
+ copied = block_write_end(file, mapping, pos,
+ len, copied, page, fsdata);
+@@ -1465,13 +1466,14 @@ static int ext4_write_end(struct file *file,
+ if (i_size_changed || inline_data)
+ ext4_mark_inode_dirty(handle, inode);
+
++errout:
+ if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
+ /* if we have allocated more blocks and copied
+ * less. We will have blocks allocated outside
+ * inode->i_size. So truncate them
+ */
+ ext4_orphan_add(handle, inode);
+-errout:
++
+ ret2 = ext4_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+@@ -1554,6 +1556,7 @@ static int ext4_journalled_write_end(struct file *file,
+ goto errout;
+ }
+ copied = ret;
++ ret = 0;
+ } else if (unlikely(copied < len) && !PageUptodate(page)) {
+ copied = 0;
+ ext4_journalled_zero_new_buffers(handle, page, from, to);
+@@ -1583,6 +1586,7 @@ static int ext4_journalled_write_end(struct file *file,
+ ret = ret2;
+ }
+
++errout:
+ if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
+ /* if we have allocated more blocks and copied
+ * less. We will have blocks allocated outside
+@@ -1590,7 +1594,6 @@ static int ext4_journalled_write_end(struct file *file,
+ */
+ ext4_orphan_add(handle, inode);
+
+-errout:
+ ret2 = ext4_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5710b80f8050a..afee5d5eb9458 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1500,7 +1500,7 @@ extern struct pid *cad_pid;
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+
+-static inline bool is_percpu_thread(void)
++static __always_inline bool is_percpu_thread(void)
+ {
+ #ifdef CONFIG_SMP
+ return (current->flags & PF_NO_SETAFFINITY) &&
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index b16f9236de147..d1585b54fb0bd 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -11,6 +11,7 @@
+ #include <uapi/linux/pkt_sched.h>
+
+ #define DEFAULT_TX_QUEUE_LEN 1000
++#define STAB_SIZE_LOG_MAX 30
+
+ struct qdisc_walker {
+ int stop;
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 8bb543b0e775e..41268612bdd4e 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
+ * things we don't know, ie. tcp syn flag or ports). If the
+ * rule is also a fragment-specific rule, non-fragments won't
+ * match it. */
++ acpar.fragoff = 0;
+ acpar.hotdrop = false;
+ acpar.state = state;
+
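The ip6_tables one-liner initializes acpar.fragoff alongside the other per-lookup fields; previously the field was only written on some match paths, so matchers could read whatever the stack happened to hold. The bug class, in miniature (the struct here is a toy, not xt_action_param):

#include <stdio.h>

struct match_params { int fragoff; int hotdrop; };

static void do_table(struct match_params *p, int is_frag, int frag_off)
{
    p->fragoff = is_frag ? frag_off : 0; /* the fix: always set, never stale */
    p->hotdrop = 0;
    printf("matching with fragoff=%d\n", p->fragoff);
}

int main(void)
{
    struct match_params acpar = { .fragoff = 1480 }; /* pretend stale state */

    do_table(&acpar, 1, 1480); /* fragment */
    do_table(&acpar, 0, 0);    /* non-fragment must see fragoff == 0 */
    return 0;
}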
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 1708b64d41094..d7ae7415d54d0 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
+ atomic_set(&newtbl->entries, 0);
+ spin_lock_init(&newtbl->gates_lock);
+ spin_lock_init(&newtbl->walk_lock);
+- rhashtable_init(&newtbl->rhead, &mesh_rht_params);
++ if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
++ kfree(newtbl);
++ return NULL;
++ }
+
+ return newtbl;
+ }
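The mesh_pathtbl fix is a classic init-failure cleanup: rhashtable_init() can fail, and returning a table whose embedded hash never initialized invites a crash later. A minimal model of the corrected allocation path, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct table { int ready; };

static int hash_init(struct table *t, int fail) /* stand-in for rhashtable_init() */
{
    if (fail)
        return -1;
    t->ready = 1;
    return 0;
}

static struct table *table_alloc(int fail)
{
    struct table *t = calloc(1, sizeof(*t));

    if (!t)
        return NULL;
    if (hash_init(t, fail)) {
        free(t);        /* don't leak the half-built table */
        return NULL;    /* and don't hand it to the caller */
    }
    return t;
}

int main(void)
{
    printf("ok alloc:    %p\n", (void *)table_alloc(0));
    printf("failed init: %p\n", (void *)table_alloc(1));
    return 0;
}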
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 670d84e54db73..c7e6bf7c22c78 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3952,7 +3952,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ if (!bssid)
+ return false;
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
++ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
++ !is_valid_ether_addr(hdr->addr2))
+ return false;
+ if (ieee80211_is_beacon(hdr->frame_control))
+ return true;
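The rx.c tightening rejects IBSS frames whose transmitter address (addr2) is not a real unicast MAC. A small sketch of the check; is_valid() mimics the semantics of the kernel's is_valid_ether_addr() (not multicast, not all-zero):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_zero(const unsigned char *a)
{
    static const unsigned char z[6];
    return memcmp(a, z, 6) == 0;
}

static bool is_valid(const unsigned char *a)
{
    return !(a[0] & 0x01) && !is_zero(a); /* unicast and non-zero only */
}

int main(void)
{
    unsigned char peer[6]  = {0x02, 0x11, 0x22, 0x33, 0x44, 0x55};
    unsigned char bogus[6] = {0};

    printf("peer accepted?  %d\n", is_valid(peer));
    printf("bogus accepted? %d\n", is_valid(bogus));
    return 0;
}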
+diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
+index 8e8a65d46345b..acd73f717a088 100644
+--- a/net/netfilter/nf_nat_masquerade.c
++++ b/net/netfilter/nf_nat_masquerade.c
+@@ -9,8 +9,19 @@
+
+ #include <net/netfilter/nf_nat_masquerade.h>
+
++struct masq_dev_work {
++ struct work_struct work;
++ struct net *net;
++ union nf_inet_addr addr;
++ int ifindex;
++ int (*iter)(struct nf_conn *i, void *data);
++};
++
++#define MAX_MASQ_WORKER_COUNT 16
++
+ static DEFINE_MUTEX(masq_mutex);
+ static unsigned int masq_refcnt __read_mostly;
++static atomic_t masq_worker_count __read_mostly;
+
+ unsigned int
+ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
+
+-static int device_cmp(struct nf_conn *i, void *ifindex)
++static void iterate_cleanup_work(struct work_struct *work)
++{
++ struct masq_dev_work *w;
++
++ w = container_of(work, struct masq_dev_work, work);
++
++ nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
++
++ put_net(w->net);
++ kfree(w);
++ atomic_dec(&masq_worker_count);
++ module_put(THIS_MODULE);
++}
++
++/* Iterate conntrack table in the background and remove conntrack entries
++ * that use the device/address being removed.
++ *
++ * In case too many work items have been queued already or memory allocation
++ * fails iteration is skipped, conntrack entries will time out eventually.
++ */
++static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
++ int ifindex,
++ int (*iter)(struct nf_conn *i, void *data),
++ gfp_t gfp_flags)
++{
++ struct masq_dev_work *w;
++
++ if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
++ return;
++
++ net = maybe_get_net(net);
++ if (!net)
++ return;
++
++ if (!try_module_get(THIS_MODULE))
++ goto err_module;
++
++ w = kzalloc(sizeof(*w), gfp_flags);
++ if (w) {
++ /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
++ atomic_inc(&masq_worker_count);
++
++ INIT_WORK(&w->work, iterate_cleanup_work);
++ w->ifindex = ifindex;
++ w->net = net;
++ w->iter = iter;
++ if (addr)
++ w->addr = *addr;
++ schedule_work(&w->work);
++ return;
++ }
++
++ module_put(THIS_MODULE);
++ err_module:
++ put_net(net);
++}
++
++static int device_cmp(struct nf_conn *i, void *arg)
+ {
+ const struct nf_conn_nat *nat = nfct_nat(i);
++ const struct masq_dev_work *w = arg;
+
+ if (!nat)
+ return 0;
+- return nat->masq_index == (int)(long)ifindex;
++ return nat->masq_index == w->ifindex;
+ }
+
+ static int masq_device_event(struct notifier_block *this,
+@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
+ * and forget them.
+ */
+
+- nf_ct_iterate_cleanup_net(net, device_cmp,
+- (void *)(long)dev->ifindex, 0, 0);
++ nf_nat_masq_schedule(net, NULL, dev->ifindex,
++ device_cmp, GFP_KERNEL);
+ }
+
+ return NOTIFY_DONE;
+@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
+
+ static int inet_cmp(struct nf_conn *ct, void *ptr)
+ {
+- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+- struct net_device *dev = ifa->ifa_dev->dev;
+ struct nf_conntrack_tuple *tuple;
++ struct masq_dev_work *w = ptr;
+
+- if (!device_cmp(ct, (void *)(long)dev->ifindex))
++ if (!device_cmp(ct, ptr))
+ return 0;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+- return ifa->ifa_address == tuple->dst.u3.ip;
++ return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
+ }
+
+ static int masq_inet_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+ {
+- struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+- struct net *net = dev_net(idev->dev);
++ const struct in_ifaddr *ifa = ptr;
++ const struct in_device *idev;
++ const struct net_device *dev;
++ union nf_inet_addr addr;
++
++ if (event != NETDEV_DOWN)
++ return NOTIFY_DONE;
+
+ /* The masq_dev_notifier will catch the case of the device going
+ * down. So if the inetdev is dead and being destroyed we have
+ * no work to do. Otherwise this is an individual address removal
+ * and we have to perform the flush.
+ */
++ idev = ifa->ifa_dev;
+ if (idev->dead)
+ return NOTIFY_DONE;
+
+- if (event == NETDEV_DOWN)
+- nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
++ memset(&addr, 0, sizeof(addr));
++
++ addr.ip = ifa->ifa_address;
++
++ dev = idev->dev;
++ nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
++ inet_cmp, GFP_KERNEL);
+
+ return NOTIFY_DONE;
+ }
+@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
+ };
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-static atomic_t v6_worker_count __read_mostly;
+-
+ static int
+ nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
+
+-struct masq_dev_work {
+- struct work_struct work;
+- struct net *net;
+- struct in6_addr addr;
+- int ifindex;
+-};
+-
+-static int inet6_cmp(struct nf_conn *ct, void *work)
+-{
+- struct masq_dev_work *w = (struct masq_dev_work *)work;
+- struct nf_conntrack_tuple *tuple;
+-
+- if (!device_cmp(ct, (void *)(long)w->ifindex))
+- return 0;
+-
+- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+-
+- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
+-}
+-
+-static void iterate_cleanup_work(struct work_struct *work)
+-{
+- struct masq_dev_work *w;
+-
+- w = container_of(work, struct masq_dev_work, work);
+-
+- nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
+-
+- put_net(w->net);
+- kfree(w);
+- atomic_dec(&v6_worker_count);
+- module_put(THIS_MODULE);
+-}
+-
+ /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
+ *
+ * Defer it to the system workqueue.
+@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
+ {
+ struct inet6_ifaddr *ifa = ptr;
+ const struct net_device *dev;
+- struct masq_dev_work *w;
+- struct net *net;
++ union nf_inet_addr addr;
+
+- if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
++ if (event != NETDEV_DOWN)
+ return NOTIFY_DONE;
+
+ dev = ifa->idev->dev;
+- net = maybe_get_net(dev_net(dev));
+- if (!net)
+- return NOTIFY_DONE;
+
+- if (!try_module_get(THIS_MODULE))
+- goto err_module;
++ memset(&addr, 0, sizeof(addr));
+
+- w = kmalloc(sizeof(*w), GFP_ATOMIC);
+- if (w) {
+- atomic_inc(&v6_worker_count);
+-
+- INIT_WORK(&w->work, iterate_cleanup_work);
+- w->ifindex = dev->ifindex;
+- w->net = net;
+- w->addr = ifa->addr;
+- schedule_work(&w->work);
++ addr.in6 = ifa->addr;
+
+- return NOTIFY_DONE;
+- }
+-
+- module_put(THIS_MODULE);
+- err_module:
+- put_net(net);
++ nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
++ GFP_ATOMIC);
+ return NOTIFY_DONE;
+ }
+
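The bulk of the netfilter change promotes the formerly IPv6-only deferred-cleanup machinery to shared code: each notifier now snapshots the interface index and address into a masq_dev_work item, the number of in-flight workers is capped, and the conntrack table walk happens on the system workqueue. If the cap is hit or allocation fails, the flush is skipped and stale entries simply time out. A compact userspace model of that scheduling policy (the workqueue deferral and table walk are stubbed; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAX_WORKERS 16

static int worker_count;

struct masq_work {
    int ifindex;
    unsigned int addr;
};

static void run_work(struct masq_work *w) /* kernel defers this via schedule_work() */
{
    printf("flushing entries for ifindex=%d addr=%#x\n", w->ifindex, w->addr);
    free(w);
    worker_count--;
}

/* Mirrors the policy of nf_nat_masq_schedule(): refuse when too many jobs
 * are queued, skip silently on allocation failure -- entries age out
 * instead of being flushed eagerly. */
static void schedule_flush(int ifindex, unsigned int addr)
{
    struct masq_work *w;

    if (worker_count > MAX_WORKERS)
        return;

    w = calloc(1, sizeof(*w));
    if (!w)
        return;

    worker_count++;
    w->ifindex = ifindex;
    w->addr = addr;
    run_work(w); /* immediate here; asynchronous in the kernel */
}

int main(void)
{
    schedule_flush(3, 0xc0a80001); /* e.g. 192.168.0.1 going away */
    return 0;
}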
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 3b1b5ee521379..e70f990334083 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -510,6 +510,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+ return stab;
+ }
+
++ if (s->size_log > STAB_SIZE_LOG_MAX ||
++ s->cell_log > STAB_SIZE_LOG_MAX) {
++ NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
++ return ERR_PTR(-EINVAL);
++ }
++
+ stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+ if (!stab)
+ return ERR_PTR(-ENOMEM);
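The sch_api guard rejects user-supplied cell_log/size_log values above the new STAB_SIZE_LOG_MAX (30) before they are ever used; the size-table math later uses them as shift counts, and shifting a 32-bit quantity by 31 or more bits overflows or is undefined behavior outright. A small demonstration of the bound:

#include <stdio.h>

#define STAB_SIZE_LOG_MAX 30

static int stab_check(unsigned int size_log)
{
    if (size_log > STAB_SIZE_LOG_MAX)
        return -1;                  /* -EINVAL, as the patch now returns */

    printf("cell size: %u bytes\n", 1U << size_log); /* safe: shift <= 30 */
    return 0;
}

int main(void)
{
    stab_check(10);                 /* fine: 1 KiB cells */
    if (stab_check(31) < 0)
        puts("rejected oversized size_log (would shift past bit 31)");
    return 0;
}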