From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 27 Apr 2022 12:03:32
Message-Id: 1651060995.30268acb55bf8f38da63af8427c716ff98501ff2.mpagano@gentoo
commit:     30268acb55bf8f38da63af8427c716ff98501ff2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 27 12:03:15 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 27 12:03:15 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=30268acb

Linux patch 4.19.240

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1239_linux-4.19.240.patch | 1623 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1627 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 24846e7c..72dcbc8d 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -995,6 +995,10 @@ Patch: 1238_linux-4.19.239.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.239
23
24 +Patch: 1239_linux-4.19.240.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.240
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1239_linux-4.19.240.patch b/1239_linux-4.19.240.patch
33 new file mode 100644
34 index 00000000..b3c17320
35 --- /dev/null
36 +++ b/1239_linux-4.19.240.patch
37 @@ -0,0 +1,1623 @@
38 +diff --git a/Makefile b/Makefile
39 +index 932bae9fbbbba..546e52f8a05fa 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 239
47 ++SUBLEVEL = 240
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
52 +index 37ad245cf9899..fb458623f3860 100644
53 +--- a/arch/arc/kernel/entry.S
54 ++++ b/arch/arc/kernel/entry.S
55 +@@ -191,6 +191,7 @@ tracesys_exit:
56 + st r0, [sp, PT_r0] ; sys call return value in pt_regs
57 +
58 + ;POST Sys Call Ptrace Hook
59 ++ mov r0, sp ; pt_regs needed
60 + bl @syscall_trace_exit
61 + b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
62 + ; we'd done before calling post hook above
63 +diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
64 +index 55bbbc3b328f0..e65c04be86ccb 100644
65 +--- a/arch/arm/mach-vexpress/spc.c
66 ++++ b/arch/arm/mach-vexpress/spc.c
67 +@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
68 + }
69 +
70 + cluster = topology_physical_package_id(cpu_dev->id);
71 +- if (init_opp_table[cluster])
72 ++ if (cluster < 0 || init_opp_table[cluster])
73 + continue;
74 +
75 + if (ve_init_opp_table(cpu_dev))
76 +diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
77 +index c07b1615ee39d..1aa083db77f18 100644
78 +--- a/arch/powerpc/perf/power9-pmu.c
79 ++++ b/arch/powerpc/perf/power9-pmu.c
80 +@@ -143,11 +143,11 @@ int p9_dd22_bl_ev[] = {
81 +
82 + /* Table of alternatives, sorted by column 0 */
83 + static const unsigned int power9_event_alternatives[][MAX_ALT] = {
84 +- { PM_INST_DISP, PM_INST_DISP_ALT },
85 +- { PM_RUN_CYC_ALT, PM_RUN_CYC },
86 +- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
87 +- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
88 + { PM_BR_2PATH, PM_BR_2PATH_ALT },
89 ++ { PM_INST_DISP, PM_INST_DISP_ALT },
90 ++ { PM_RUN_CYC_ALT, PM_RUN_CYC },
91 ++ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
92 ++ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
93 + };
94 +
95 + static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
96 +diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
97 +index fb97cf7c41371..1def972b6ca36 100644
98 +--- a/arch/x86/include/asm/compat.h
99 ++++ b/arch/x86/include/asm/compat.h
100 +@@ -46,15 +46,13 @@ typedef u64 __attribute__((aligned(4))) compat_u64;
101 + typedef u32 compat_uptr_t;
102 +
103 + struct compat_stat {
104 +- compat_dev_t st_dev;
105 +- u16 __pad1;
106 ++ u32 st_dev;
107 + compat_ino_t st_ino;
108 + compat_mode_t st_mode;
109 + compat_nlink_t st_nlink;
110 + __compat_uid_t st_uid;
111 + __compat_gid_t st_gid;
112 +- compat_dev_t st_rdev;
113 +- u16 __pad2;
114 ++ u32 st_rdev;
115 + u32 st_size;
116 + u32 st_blksize;
117 + u32 st_blocks;
118 +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
119 +index 6490b2759bcb4..9ef62d42ba5b2 100644
120 +--- a/block/compat_ioctl.c
121 ++++ b/block/compat_ioctl.c
122 +@@ -391,7 +391,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
123 + return 0;
124 + case BLKGETSIZE:
125 + size = i_size_read(bdev->bd_inode);
126 +- if ((size >> 9) > ~0UL)
127 ++ if ((size >> 9) > ~(compat_ulong_t)0)
128 + return -EFBIG;
129 + return compat_put_ulong(arg, size >> 9);
130 +
131 +diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
132 +index ff468a6fd8ddc..677f582cf3d6c 100644
133 +--- a/drivers/ata/pata_marvell.c
134 ++++ b/drivers/ata/pata_marvell.c
135 +@@ -82,6 +82,8 @@ static int marvell_cable_detect(struct ata_port *ap)
136 + switch(ap->port_no)
137 + {
138 + case 0:
139 ++ if (!ap->ioaddr.bmdma_addr)
140 ++ return ATA_CBL_PATA_UNK;
141 + if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
142 + return ATA_CBL_PATA40;
143 + return ATA_CBL_PATA80;
144 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
145 +index 04704c6376baf..d4ba0d9dd17c4 100644
146 +--- a/drivers/dma/at_xdmac.c
147 ++++ b/drivers/dma/at_xdmac.c
148 +@@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
149 + {
150 + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
151 + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
152 +- struct at_xdmac_desc *desc, *_desc;
153 ++ struct at_xdmac_desc *desc, *_desc, *iter;
154 + struct list_head *descs_list;
155 + enum dma_status ret;
156 + int residue, retry;
157 +@@ -1505,11 +1505,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
158 + * microblock.
159 + */
160 + descs_list = &desc->descs_list;
161 +- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
162 +- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
163 +- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
164 +- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
165 ++ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
166 ++ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
167 ++ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
168 ++ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
169 ++ desc = iter;
170 + break;
171 ++ }
172 + }
173 + residue += cur_ubc << dwidth;
174 +
175 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
176 +index eea89c3b54c1e..709ead443fc5f 100644
177 +--- a/drivers/dma/imx-sdma.c
178 ++++ b/drivers/dma/imx-sdma.c
179 +@@ -1771,7 +1771,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
180 + u32 reg, val, shift, num_map, i;
181 + int ret = 0;
182 +
183 +- if (IS_ERR(np) || IS_ERR(gpr_np))
184 ++ if (IS_ERR(np) || !gpr_np)
185 + goto out;
186 +
187 + event_remap = of_find_property(np, propname, NULL);
188 +@@ -1819,7 +1819,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
189 + }
190 +
191 + out:
192 +- if (!IS_ERR(gpr_np))
193 ++ if (gpr_np)
194 + of_node_put(gpr_np);
195 +
196 + return ret;
197 +diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
198 +index 1ddf07514de6d..3d8eaa25bea0f 100644
199 +--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
200 ++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
201 +@@ -188,7 +188,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
202 + drm_framebuffer_unreference(plane->state->fb);
203 +
204 + kfree(to_mdp5_plane_state(plane->state));
205 ++ plane->state = NULL;
206 + mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
207 ++ if (!mdp5_state)
208 ++ return;
209 +
210 + /* assign default blend parameters */
211 + mdp5_state->alpha = 255;
212 +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
213 +index 06bd039159738..f57eec47ef6a9 100644
214 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
215 ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
216 +@@ -233,7 +233,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
217 +
218 + ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
219 + if (ret)
220 +- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
221 ++ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
222 + }
223 +
224 + static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
225 +@@ -269,7 +269,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
226 + return 0;
227 + }
228 +
229 +-static int rpi_touchscreen_enable(struct drm_panel *panel)
230 ++static int rpi_touchscreen_prepare(struct drm_panel *panel)
231 + {
232 + struct rpi_touchscreen *ts = panel_to_ts(panel);
233 + int i;
234 +@@ -299,6 +299,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
235 + rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
236 + msleep(100);
237 +
238 ++ return 0;
239 ++}
240 ++
241 ++static int rpi_touchscreen_enable(struct drm_panel *panel)
242 ++{
243 ++ struct rpi_touchscreen *ts = panel_to_ts(panel);
244 ++
245 + /* Turn on the backlight. */
246 + rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
247 +
248 +@@ -353,7 +360,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
249 + static const struct drm_panel_funcs rpi_touchscreen_funcs = {
250 + .disable = rpi_touchscreen_disable,
251 + .unprepare = rpi_touchscreen_noop,
252 +- .prepare = rpi_touchscreen_noop,
253 ++ .prepare = rpi_touchscreen_prepare,
254 + .enable = rpi_touchscreen_enable,
255 + .get_modes = rpi_touchscreen_get_modes,
256 + };
257 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
258 +index cffd423172726..b0105d53918ab 100644
259 +--- a/drivers/md/dm-integrity.c
260 ++++ b/drivers/md/dm-integrity.c
261 +@@ -3504,6 +3504,7 @@ try_smaller_buffer:
262 + }
263 +
264 + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
265 ++ size_t recalc_tags_size;
266 + if (!ic->internal_hash) {
267 + r = -EINVAL;
268 + ti->error = "Recalculate is only valid with internal hash";
269 +@@ -3522,8 +3523,10 @@ try_smaller_buffer:
270 + r = -ENOMEM;
271 + goto bad;
272 + }
273 +- ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
274 +- ic->tag_size, GFP_KERNEL);
275 ++ recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
276 ++ if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
277 ++ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
278 ++ ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
279 + if (!ic->recalc_tags) {
280 + ti->error = "Cannot allocate tags for recalculating";
281 + r = -ENOMEM;
282 +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
283 +index df99354ec12aa..232f45f722f0c 100644
284 +--- a/drivers/net/can/usb/usb_8dev.c
285 ++++ b/drivers/net/can/usb/usb_8dev.c
286 +@@ -681,9 +681,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
287 + atomic_inc(&priv->active_tx_urbs);
288 +
289 + err = usb_submit_urb(urb, GFP_ATOMIC);
290 +- if (unlikely(err))
291 +- goto failed;
292 +- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
293 ++ if (unlikely(err)) {
294 ++ can_free_echo_skb(netdev, context->echo_index);
295 ++
296 ++ usb_unanchor_urb(urb);
297 ++ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
298 ++
299 ++ atomic_dec(&priv->active_tx_urbs);
300 ++
301 ++ if (err == -ENODEV)
302 ++ netif_device_detach(netdev);
303 ++ else
304 ++ netdev_warn(netdev, "failed tx_urb %d\n", err);
305 ++ stats->tx_dropped++;
306 ++ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
307 + /* Slow down tx path */
308 + netif_stop_queue(netdev);
309 +
310 +@@ -702,19 +713,6 @@ nofreecontext:
311 +
312 + return NETDEV_TX_BUSY;
313 +
314 +-failed:
315 +- can_free_echo_skb(netdev, context->echo_index);
316 +-
317 +- usb_unanchor_urb(urb);
318 +- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
319 +-
320 +- atomic_dec(&priv->active_tx_urbs);
321 +-
322 +- if (err == -ENODEV)
323 +- netif_device_detach(netdev);
324 +- else
325 +- netdev_warn(netdev, "failed tx_urb %d\n", err);
326 +-
327 + nomembuf:
328 + usb_free_urb(urb);
329 +
330 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
331 +index 460bb81acf2bf..d8e4842af055b 100644
332 +--- a/drivers/net/ethernet/cadence/macb_main.c
333 ++++ b/drivers/net/ethernet/cadence/macb_main.c
334 +@@ -1364,6 +1364,7 @@ static void macb_tx_restart(struct macb_queue *queue)
335 + unsigned int head = queue->tx_head;
336 + unsigned int tail = queue->tx_tail;
337 + struct macb *bp = queue->bp;
338 ++ unsigned int head_idx, tbqp;
339 +
340 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
341 + queue_writel(queue, ISR, MACB_BIT(TXUBR));
342 +@@ -1371,6 +1372,13 @@ static void macb_tx_restart(struct macb_queue *queue)
343 + if (head == tail)
344 + return;
345 +
346 ++ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
347 ++ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
348 ++ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
349 ++
350 ++ if (tbqp == head_idx)
351 ++ return;
352 ++
353 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
354 + }
355 +
356 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
357 +index 3184c8f7cdd05..6e69bcdf9c402 100644
358 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
359 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
360 +@@ -530,11 +530,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
361 + info->phc_index = -1;
362 +
363 + fman_node = of_get_parent(mac_node);
364 +- if (fman_node)
365 ++ if (fman_node) {
366 + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
367 ++ of_node_put(fman_node);
368 ++ }
369 +
370 +- if (ptp_node)
371 ++ if (ptp_node) {
372 + ptp_dev = of_find_device_by_node(ptp_node);
373 ++ of_node_put(ptp_node);
374 ++ }
375 +
376 + if (ptp_dev)
377 + ptp = platform_get_drvdata(ptp_dev);
378 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
379 +index fbad77450725a..7ec60fbb4740e 100644
380 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
381 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
382 +@@ -995,8 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
383 + {
384 + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
385 + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
386 +- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
387 +- u16 lat_enc_d = 0; /* latency decoded */
388 ++ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
389 ++ u32 lat_enc_d = 0; /* latency decoded */
390 + u16 lat_enc = 0; /* latency encoded */
391 +
392 + if (link) {
393 +diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
394 +index aa12bace8673e..b7e2f49696b74 100644
395 +--- a/drivers/net/ethernet/micrel/Kconfig
396 ++++ b/drivers/net/ethernet/micrel/Kconfig
397 +@@ -45,7 +45,6 @@ config KS8851
398 + config KS8851_MLL
399 + tristate "Micrel KS8851 MLL"
400 + depends on HAS_IOMEM
401 +- depends on PTP_1588_CLOCK_OPTIONAL
402 + select MII
403 + ---help---
404 + This platform driver is for Micrel KS8851 Address/data bus
405 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
406 +index eacc1e32d5478..1b98a888a168e 100644
407 +--- a/drivers/net/vxlan.c
408 ++++ b/drivers/net/vxlan.c
409 +@@ -524,11 +524,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
410 +
411 + rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
412 + if (rd == NULL)
413 +- return -ENOBUFS;
414 ++ return -ENOMEM;
415 +
416 + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
417 + kfree(rd);
418 +- return -ENOBUFS;
419 ++ return -ENOMEM;
420 + }
421 +
422 + rd->remote_ip = *ip;
423 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
424 +index a5195bdb4d9bd..0a96c1071e5b8 100644
425 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
426 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
427 +@@ -560,7 +560,7 @@ enum brcmf_sdio_frmtype {
428 + BRCMF_SDIO_FT_SUB,
429 + };
430 +
431 +-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
432 ++#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
433 +
434 + /* SDIO Pad drive strength to select value mappings */
435 + struct sdiod_drive_str {
436 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
437 +index 26cfda24ce085..e26947f89299d 100644
438 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
439 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
440 +@@ -73,7 +73,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
441 + mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
442 +
443 + /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
444 +- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
445 ++ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
446 +
447 + /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
448 + mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
449 +diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
450 +index 77995df7fe547..453fdfdc6311e 100644
451 +--- a/drivers/perf/arm_pmu.c
452 ++++ b/drivers/perf/arm_pmu.c
453 +@@ -321,6 +321,9 @@ validate_group(struct perf_event *event)
454 + if (!validate_event(event->pmu, &fake_pmu, leader))
455 + return -EINVAL;
456 +
457 ++ if (event == leader)
458 ++ return 0;
459 ++
460 + for_each_sibling_event(sibling, leader) {
461 + if (!validate_event(event->pmu, &fake_pmu, sibling))
462 + return -EINVAL;
463 +@@ -418,12 +421,7 @@ __hw_perf_event_init(struct perf_event *event)
464 + local64_set(&hwc->period_left, hwc->sample_period);
465 + }
466 +
467 +- if (event->group_leader != event) {
468 +- if (validate_group(event) != 0)
469 +- return -EINVAL;
470 +- }
471 +-
472 +- return 0;
473 ++ return validate_group(event);
474 + }
475 +
476 + static int armpmu_event_init(struct perf_event *event)
477 +diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
478 +index 7b160ee981152..3e66be504a0da 100644
479 +--- a/drivers/platform/x86/samsung-laptop.c
480 ++++ b/drivers/platform/x86/samsung-laptop.c
481 +@@ -1125,8 +1125,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
482 +
483 + if (value > samsung->kbd_led.max_brightness)
484 + value = samsung->kbd_led.max_brightness;
485 +- else if (value < 0)
486 +- value = 0;
487 +
488 + samsung->kbd_led_wk = value;
489 + queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
490 +diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
491 +index 5daf2ee1a396b..f9790b60f9964 100644
492 +--- a/drivers/reset/tegra/reset-bpmp.c
493 ++++ b/drivers/reset/tegra/reset-bpmp.c
494 +@@ -23,6 +23,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
495 + struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
496 + struct mrq_reset_request request;
497 + struct tegra_bpmp_message msg;
498 ++ int err;
499 +
500 + memset(&request, 0, sizeof(request));
501 + request.cmd = command;
502 +@@ -33,7 +34,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
503 + msg.tx.data = &request;
504 + msg.tx.size = sizeof(request);
505 +
506 +- return tegra_bpmp_transfer(bpmp, &msg);
507 ++ err = tegra_bpmp_transfer(bpmp, &msg);
508 ++ if (err)
509 ++ return err;
510 ++ if (msg.rx.ret)
511 ++ return -EINVAL;
512 ++
513 ++ return 0;
514 + }
515 +
516 + static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
517 +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
518 +index 45c7f829e3872..ad06fe669d98d 100644
519 +--- a/drivers/staging/android/ion/ion.c
520 ++++ b/drivers/staging/android/ion/ion.c
521 +@@ -140,6 +140,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
522 + void *vaddr;
523 +
524 + if (buffer->kmap_cnt) {
525 ++ if (buffer->kmap_cnt == INT_MAX)
526 ++ return ERR_PTR(-EOVERFLOW);
527 ++
528 + buffer->kmap_cnt++;
529 + return buffer->vaddr;
530 + }
531 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
532 +index baa1713d66958..52b1524b40cdc 100644
533 +--- a/fs/cifs/cifsfs.c
534 ++++ b/fs/cifs/cifsfs.c
535 +@@ -813,7 +813,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
536 + ssize_t rc;
537 + struct inode *inode = file_inode(iocb->ki_filp);
538 +
539 +- if (iocb->ki_filp->f_flags & O_DIRECT)
540 ++ if (iocb->ki_flags & IOCB_DIRECT)
541 + return cifs_user_readv(iocb, iter);
542 +
543 + rc = cifs_revalidate_mapping(inode);
544 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
545 +index 96cf0f57ca950..76c887108df35 100644
546 +--- a/fs/ext4/inode.c
547 ++++ b/fs/ext4/inode.c
548 +@@ -4314,7 +4314,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
549 + struct super_block *sb = inode->i_sb;
550 + ext4_lblk_t first_block, stop_block;
551 + struct address_space *mapping = inode->i_mapping;
552 +- loff_t first_block_offset, last_block_offset;
553 ++ loff_t first_block_offset, last_block_offset, max_length;
554 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
555 + handle_t *handle;
556 + unsigned int credits;
557 + int ret = 0;
558 +@@ -4360,6 +4361,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
559 + offset;
560 + }
561 +
562 ++ /*
563 ++ * For punch hole the length + offset needs to be within one block
564 ++ * before last range. Adjust the length if it goes beyond that limit.
565 ++ */
566 ++ max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
567 ++ if (offset + length > max_length)
568 ++ length = max_length - offset;
569 ++
570 + if (offset & (sb->s_blocksize - 1) ||
571 + (offset + length) & (sb->s_blocksize - 1)) {
572 + /*
573 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
574 +index 9cc79b7b0df11..3de933354a08b 100644
575 +--- a/fs/ext4/page-io.c
576 ++++ b/fs/ext4/page-io.c
577 +@@ -105,8 +105,10 @@ static void ext4_finish_bio(struct bio *bio)
578 + continue;
579 + }
580 + clear_buffer_async_write(bh);
581 +- if (bio->bi_status)
582 ++ if (bio->bi_status) {
583 ++ set_buffer_write_io_error(bh);
584 + buffer_io_error(bh);
585 ++ }
586 + } while ((bh = bh->b_this_page) != head);
587 + bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
588 + local_irq_restore(flags);
589 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
590 +index 648eb6a24e3a3..6893b87c73547 100644
591 +--- a/fs/ext4/super.c
592 ++++ b/fs/ext4/super.c
593 +@@ -3440,9 +3440,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
594 + ext4_fsblk_t first_block, last_block, b;
595 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
596 + int s, j, count = 0;
597 ++ int has_super = ext4_bg_has_super(sb, grp);
598 +
599 + if (!ext4_has_feature_bigalloc(sb))
600 +- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
601 ++ return (has_super + ext4_bg_num_gdb(sb, grp) +
602 ++ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
603 + sbi->s_itb_per_group + 2);
604 +
605 + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
606 +@@ -4421,9 +4423,18 @@ no_journal:
607 + * Get the # of file system overhead blocks from the
608 + * superblock if present.
609 + */
610 +- if (es->s_overhead_clusters)
611 +- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
612 +- else {
613 ++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
614 ++ /* ignore the precalculated value if it is ridiculous */
615 ++ if (sbi->s_overhead > ext4_blocks_count(es))
616 ++ sbi->s_overhead = 0;
617 ++ /*
618 ++ * If the bigalloc feature is not enabled recalculating the
619 ++ * overhead doesn't take long, so we might as well just redo
620 ++ * it to make sure we are using the correct value.
621 ++ */
622 ++ if (!ext4_has_feature_bigalloc(sb))
623 ++ sbi->s_overhead = 0;
624 ++ if (sbi->s_overhead == 0) {
625 + err = ext4_calculate_overhead(sb);
626 + if (err)
627 + goto failed_mount_wq;
628 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
629 +index 29dcf0973b73f..cd2d8d6744678 100644
630 +--- a/fs/gfs2/rgrp.c
631 ++++ b/fs/gfs2/rgrp.c
632 +@@ -926,15 +926,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
633 + rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
634 + spin_lock_init(&rgd->rd_rsspin);
635 +
636 +- error = compute_bitstructs(rgd);
637 +- if (error)
638 +- goto fail;
639 +-
640 + error = gfs2_glock_get(sdp, rgd->rd_addr,
641 + &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
642 + if (error)
643 + goto fail;
644 +
645 ++ error = compute_bitstructs(rgd);
646 ++ if (error)
647 ++ goto fail_glock;
648 ++
649 + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
650 + rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
651 + if (rgd->rd_data > sdp->sd_max_rg_data)
652 +@@ -951,6 +951,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
653 + }
654 +
655 + error = 0; /* someone else read in the rgrp; free it and ignore it */
656 ++fail_glock:
657 + gfs2_glock_put(rgd->rd_gl);
658 +
659 + fail:
660 +diff --git a/fs/stat.c b/fs/stat.c
661 +index f8e6fb2c36576..376543199b5a2 100644
662 +--- a/fs/stat.c
663 ++++ b/fs/stat.c
664 +@@ -286,9 +286,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
665 + # define choose_32_64(a,b) b
666 + #endif
667 +
668 +-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
669 +-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
670 +-
671 + #ifndef INIT_STRUCT_STAT_PADDING
672 + # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
673 + #endif
674 +@@ -297,7 +294,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
675 + {
676 + struct stat tmp;
677 +
678 +- if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
679 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
680 ++ return -EOVERFLOW;
681 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
682 + return -EOVERFLOW;
683 + #if BITS_PER_LONG == 32
684 + if (stat->size > MAX_NON_LFS)
685 +@@ -305,7 +304,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
686 + #endif
687 +
688 + INIT_STRUCT_STAT_PADDING(tmp);
689 +- tmp.st_dev = encode_dev(stat->dev);
690 ++ tmp.st_dev = new_encode_dev(stat->dev);
691 + tmp.st_ino = stat->ino;
692 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
693 + return -EOVERFLOW;
694 +@@ -315,7 +314,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
695 + return -EOVERFLOW;
696 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
697 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
698 +- tmp.st_rdev = encode_dev(stat->rdev);
699 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
700 + tmp.st_size = stat->size;
701 + tmp.st_atime = stat->atime.tv_sec;
702 + tmp.st_mtime = stat->mtime.tv_sec;
703 +@@ -588,11 +587,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
704 + {
705 + struct compat_stat tmp;
706 +
707 +- if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
708 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
709 ++ return -EOVERFLOW;
710 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
711 + return -EOVERFLOW;
712 +
713 + memset(&tmp, 0, sizeof(tmp));
714 +- tmp.st_dev = old_encode_dev(stat->dev);
715 ++ tmp.st_dev = new_encode_dev(stat->dev);
716 + tmp.st_ino = stat->ino;
717 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
718 + return -EOVERFLOW;
719 +@@ -602,7 +603,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
720 + return -EOVERFLOW;
721 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
722 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
723 +- tmp.st_rdev = old_encode_dev(stat->rdev);
724 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
725 + if ((u64) stat->size > MAX_NON_LFS)
726 + return -EOVERFLOW;
727 + tmp.st_size = stat->size;
728 +diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
729 +index 572e11bb86967..e1e9eff096d05 100644
730 +--- a/include/linux/etherdevice.h
731 ++++ b/include/linux/etherdevice.h
732 +@@ -130,7 +130,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
733 + #endif
734 + }
735 +
736 +-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
737 ++static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
738 + {
739 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
740 + #ifdef __BIG_ENDIAN
741 +@@ -344,8 +344,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
742 + * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
743 + */
744 +
745 +-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
746 +- const u8 addr2[6+2])
747 ++static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
748 + {
749 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
750 + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
751 +diff --git a/include/net/ax25.h b/include/net/ax25.h
752 +index 8b7eb46ad72d8..aadff553e4b73 100644
753 +--- a/include/net/ax25.h
754 ++++ b/include/net/ax25.h
755 +@@ -236,6 +236,7 @@ typedef struct ax25_dev {
756 + #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
757 + ax25_dama_info dama;
758 + #endif
759 ++ refcount_t refcount;
760 + } ax25_dev;
761 +
762 + typedef struct ax25_cb {
763 +@@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
764 + }
765 + }
766 +
767 ++static inline void ax25_dev_hold(ax25_dev *ax25_dev)
768 ++{
769 ++ refcount_inc(&ax25_dev->refcount);
770 ++}
771 ++
772 ++static inline void ax25_dev_put(ax25_dev *ax25_dev)
773 ++{
774 ++ if (refcount_dec_and_test(&ax25_dev->refcount)) {
775 ++ kfree(ax25_dev);
776 ++ }
777 ++}
778 + static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
779 + {
780 + skb->dev = dev;
781 +diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
782 +index b875dcef173c9..fa5fe23ca6aaa 100644
783 +--- a/include/net/inet_hashtables.h
784 ++++ b/include/net/inet_hashtables.h
785 +@@ -232,8 +232,9 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
786 + unsigned long low_limit,
787 + unsigned long high_limit);
788 +
789 +-bool inet_ehash_insert(struct sock *sk, struct sock *osk);
790 +-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
791 ++bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
792 ++bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
793 ++ bool *found_dup_sk);
794 + int __inet_hash(struct sock *sk, struct sock *osk);
795 + int inet_hash(struct sock *sk);
796 + void inet_unhash(struct sock *sk);
797 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
798 +index 32e573c42a68c..4b0bde304ad79 100644
799 +--- a/kernel/trace/trace_events_trigger.c
800 ++++ b/kernel/trace/trace_events_trigger.c
801 +@@ -1212,7 +1212,14 @@ static void
802 + stacktrace_trigger(struct event_trigger_data *data, void *rec,
803 + struct ring_buffer_event *event)
804 + {
805 +- trace_dump_stack(STACK_SKIP);
806 ++ struct trace_event_file *file = data->private_data;
807 ++ unsigned long flags;
808 ++
809 ++ if (file) {
810 ++ local_save_flags(flags);
811 ++ __trace_stack(file->tr, flags, STACK_SKIP, preempt_count());
812 ++ } else
813 ++ trace_dump_stack(STACK_SKIP);
814 + }
815 +
816 + static void
817 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
818 +index f60f7ad5d0ba8..9c35403d96461 100644
819 +--- a/mm/page_alloc.c
820 ++++ b/mm/page_alloc.c
821 +@@ -7117,7 +7117,7 @@ void __init mem_init_print_info(const char *str)
822 + */
823 + #define adj_init_size(start, end, size, pos, adj) \
824 + do { \
825 +- if (start <= pos && pos < end && size > adj) \
826 ++ if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
827 + size -= adj; \
828 + } while (0)
829 +
830 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
831 +index 3170b43b9f894..7861f2747f843 100644
832 +--- a/net/ax25/af_ax25.c
833 ++++ b/net/ax25/af_ax25.c
834 +@@ -92,17 +92,21 @@ again:
835 + sk = s->sk;
836 + if (!sk) {
837 + spin_unlock_bh(&ax25_list_lock);
838 +- s->ax25_dev = NULL;
839 + ax25_disconnect(s, ENETUNREACH);
840 ++ s->ax25_dev = NULL;
841 + spin_lock_bh(&ax25_list_lock);
842 + goto again;
843 + }
844 + sock_hold(sk);
845 + spin_unlock_bh(&ax25_list_lock);
846 + lock_sock(sk);
847 ++ ax25_disconnect(s, ENETUNREACH);
848 + s->ax25_dev = NULL;
849 ++ if (sk->sk_socket) {
850 ++ dev_put(ax25_dev->dev);
851 ++ ax25_dev_put(ax25_dev);
852 ++ }
853 + release_sock(sk);
854 +- ax25_disconnect(s, ENETUNREACH);
855 + spin_lock_bh(&ax25_list_lock);
856 + sock_put(sk);
857 + /* The entry could have been deleted from the
858 +@@ -368,21 +372,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
859 + if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
860 + return -EFAULT;
861 +
862 +- if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
863 +- return -ENODEV;
864 +-
865 + if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
866 + return -EINVAL;
867 +
868 + if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
869 + return -EINVAL;
870 +
871 ++ ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr);
872 ++ if (!ax25_dev)
873 ++ return -ENODEV;
874 ++
875 + digi.ndigi = ax25_ctl.digi_count;
876 + for (k = 0; k < digi.ndigi; k++)
877 + digi.calls[k] = ax25_ctl.digi_addr[k];
878 +
879 +- if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
880 ++ ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev);
881 ++ if (!ax25) {
882 ++ ax25_dev_put(ax25_dev);
883 + return -ENOTCONN;
884 ++ }
885 +
886 + switch (ax25_ctl.cmd) {
887 + case AX25_KILL:
888 +@@ -449,6 +457,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
889 + }
890 +
891 + out_put:
892 ++ ax25_dev_put(ax25_dev);
893 + ax25_cb_put(ax25);
894 + return ret;
895 +
896 +@@ -974,14 +983,16 @@ static int ax25_release(struct socket *sock)
897 + {
898 + struct sock *sk = sock->sk;
899 + ax25_cb *ax25;
900 ++ ax25_dev *ax25_dev;
901 +
902 + if (sk == NULL)
903 + return 0;
904 +
905 + sock_hold(sk);
906 +- sock_orphan(sk);
907 + lock_sock(sk);
908 ++ sock_orphan(sk);
909 + ax25 = sk_to_ax25(sk);
910 ++ ax25_dev = ax25->ax25_dev;
911 +
912 + if (sk->sk_type == SOCK_SEQPACKET) {
913 + switch (ax25->state) {
914 +@@ -1043,6 +1054,15 @@ static int ax25_release(struct socket *sock)
915 + sk->sk_state_change(sk);
916 + ax25_destroy_socket(ax25);
917 + }
918 ++ if (ax25_dev) {
919 ++ del_timer_sync(&ax25->timer);
920 ++ del_timer_sync(&ax25->t1timer);
921 ++ del_timer_sync(&ax25->t2timer);
922 ++ del_timer_sync(&ax25->t3timer);
923 ++ del_timer_sync(&ax25->idletimer);
924 ++ dev_put(ax25_dev->dev);
925 ++ ax25_dev_put(ax25_dev);
926 ++ }
927 +
928 + sock->sk = NULL;
929 + release_sock(sk);
930 +@@ -1119,8 +1139,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
931 + }
932 + }
933 +
934 +- if (ax25_dev != NULL)
935 ++ if (ax25_dev) {
936 + ax25_fillin_cb(ax25, ax25_dev);
937 ++ dev_hold(ax25_dev->dev);
938 ++ }
939 +
940 + done:
941 + ax25_cb_add(ax25);
942 +diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
943 +index d92195cd78349..55a611f7239bc 100644
944 +--- a/net/ax25/ax25_dev.c
945 ++++ b/net/ax25/ax25_dev.c
946 +@@ -40,6 +40,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
947 + for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
948 + if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) {
949 + res = ax25_dev;
950 ++ ax25_dev_hold(ax25_dev);
951 + }
952 + spin_unlock_bh(&ax25_dev_lock);
953 +
954 +@@ -59,6 +60,7 @@ void ax25_dev_device_up(struct net_device *dev)
955 + return;
956 + }
957 +
958 ++ refcount_set(&ax25_dev->refcount, 1);
959 + dev->ax25_ptr = ax25_dev;
960 + ax25_dev->dev = dev;
961 + dev_hold(dev);
962 +@@ -87,6 +89,7 @@ void ax25_dev_device_up(struct net_device *dev)
963 + ax25_dev->next = ax25_dev_list;
964 + ax25_dev_list = ax25_dev;
965 + spin_unlock_bh(&ax25_dev_lock);
966 ++ ax25_dev_hold(ax25_dev);
967 +
968 + ax25_register_dev_sysctl(ax25_dev);
969 + }
970 +@@ -116,9 +119,10 @@ void ax25_dev_device_down(struct net_device *dev)
971 + if ((s = ax25_dev_list) == ax25_dev) {
972 + ax25_dev_list = s->next;
973 + spin_unlock_bh(&ax25_dev_lock);
974 ++ ax25_dev_put(ax25_dev);
975 + dev->ax25_ptr = NULL;
976 + dev_put(dev);
977 +- kfree(ax25_dev);
978 ++ ax25_dev_put(ax25_dev);
979 + return;
980 + }
981 +
982 +@@ -126,9 +130,10 @@ void ax25_dev_device_down(struct net_device *dev)
983 + if (s->next == ax25_dev) {
984 + s->next = ax25_dev->next;
985 + spin_unlock_bh(&ax25_dev_lock);
986 ++ ax25_dev_put(ax25_dev);
987 + dev->ax25_ptr = NULL;
988 + dev_put(dev);
989 +- kfree(ax25_dev);
990 ++ ax25_dev_put(ax25_dev);
991 + return;
992 + }
993 +
994 +@@ -136,6 +141,7 @@ void ax25_dev_device_down(struct net_device *dev)
995 + }
996 + spin_unlock_bh(&ax25_dev_lock);
997 + dev->ax25_ptr = NULL;
998 ++ ax25_dev_put(ax25_dev);
999 + }
1000 +
1001 + int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
1002 +@@ -147,20 +153,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
1003 +
1004 + switch (cmd) {
1005 + case SIOCAX25ADDFWD:
1006 +- if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL)
1007 ++ fwd_dev = ax25_addr_ax25dev(&fwd->port_to);
1008 ++ if (!fwd_dev) {
1009 ++ ax25_dev_put(ax25_dev);
1010 + return -EINVAL;
1011 +- if (ax25_dev->forward != NULL)
1012 ++ }
1013 ++ if (ax25_dev->forward) {
1014 ++ ax25_dev_put(fwd_dev);
1015 ++ ax25_dev_put(ax25_dev);
1016 + return -EINVAL;
1017 ++ }
1018 + ax25_dev->forward = fwd_dev->dev;
1019 ++ ax25_dev_put(fwd_dev);
1020 ++ ax25_dev_put(ax25_dev);
1021 + break;
1022 +
1023 + case SIOCAX25DELFWD:
1024 +- if (ax25_dev->forward == NULL)
1025 ++ if (!ax25_dev->forward) {
1026 ++ ax25_dev_put(ax25_dev);
1027 + return -EINVAL;
1028 ++ }
1029 + ax25_dev->forward = NULL;
1030 ++ ax25_dev_put(ax25_dev);
1031 + break;
1032 +
1033 + default:
1034 ++ ax25_dev_put(ax25_dev);
1035 + return -EINVAL;
1036 + }
1037 +
1038 +diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
1039 +index 66d54fc11831c..8f81de88f0066 100644
1040 +--- a/net/ax25/ax25_route.c
1041 ++++ b/net/ax25/ax25_route.c
1042 +@@ -78,11 +78,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1043 + ax25_dev *ax25_dev;
1044 + int i;
1045 +
1046 +- if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
1047 +- return -EINVAL;
1048 + if (route->digi_count > AX25_MAX_DIGIS)
1049 + return -EINVAL;
1050 +
1051 ++ ax25_dev = ax25_addr_ax25dev(&route->port_addr);
1052 ++ if (!ax25_dev)
1053 ++ return -EINVAL;
1054 ++
1055 + write_lock_bh(&ax25_route_lock);
1056 +
1057 + ax25_rt = ax25_route_list;
1058 +@@ -94,6 +96,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1059 + if (route->digi_count != 0) {
1060 + if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
1061 + write_unlock_bh(&ax25_route_lock);
1062 ++ ax25_dev_put(ax25_dev);
1063 + return -ENOMEM;
1064 + }
1065 + ax25_rt->digipeat->lastrepeat = -1;
1066 +@@ -104,6 +107,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1067 + }
1068 + }
1069 + write_unlock_bh(&ax25_route_lock);
1070 ++ ax25_dev_put(ax25_dev);
1071 + return 0;
1072 + }
1073 + ax25_rt = ax25_rt->next;
1074 +@@ -111,6 +115,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1075 +
1076 + if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
1077 + write_unlock_bh(&ax25_route_lock);
1078 ++ ax25_dev_put(ax25_dev);
1079 + return -ENOMEM;
1080 + }
1081 +
1082 +@@ -123,6 +128,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1083 + if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
1084 + write_unlock_bh(&ax25_route_lock);
1085 + kfree(ax25_rt);
1086 ++ ax25_dev_put(ax25_dev);
1087 + return -ENOMEM;
1088 + }
1089 + ax25_rt->digipeat->lastrepeat = -1;
1090 +@@ -135,6 +141,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1091 + ax25_rt->next = ax25_route_list;
1092 + ax25_route_list = ax25_rt;
1093 + write_unlock_bh(&ax25_route_lock);
1094 ++ ax25_dev_put(ax25_dev);
1095 +
1096 + return 0;
1097 + }
1098 +@@ -176,6 +183,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
1099 + }
1100 + }
1101 + write_unlock_bh(&ax25_route_lock);
1102 ++ ax25_dev_put(ax25_dev);
1103 +
1104 + return 0;
1105 + }
1106 +@@ -218,6 +226,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
1107 +
1108 + out:
1109 + write_unlock_bh(&ax25_route_lock);
1110 ++ ax25_dev_put(ax25_dev);
1111 + return err;
1112 + }
1113 +
1114 +diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
1115 +index 038b109b2be70..c129865cad9f4 100644
1116 +--- a/net/ax25/ax25_subr.c
1117 ++++ b/net/ax25/ax25_subr.c
1118 +@@ -264,12 +264,20 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
1119 + {
1120 + ax25_clear_queues(ax25);
1121 +
1122 +- if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
1123 +- ax25_stop_heartbeat(ax25);
1124 +- ax25_stop_t1timer(ax25);
1125 +- ax25_stop_t2timer(ax25);
1126 +- ax25_stop_t3timer(ax25);
1127 +- ax25_stop_idletimer(ax25);
1128 ++ if (reason == ENETUNREACH) {
1129 ++ del_timer_sync(&ax25->timer);
1130 ++ del_timer_sync(&ax25->t1timer);
1131 ++ del_timer_sync(&ax25->t2timer);
1132 ++ del_timer_sync(&ax25->t3timer);
1133 ++ del_timer_sync(&ax25->idletimer);
1134 ++ } else {
1135 ++ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
1136 ++ ax25_stop_heartbeat(ax25);
1137 ++ ax25_stop_t1timer(ax25);
1138 ++ ax25_stop_t2timer(ax25);
1139 ++ ax25_stop_t3timer(ax25);
1140 ++ ax25_stop_idletimer(ax25);
1141 ++ }
1142 +
1143 + ax25->state = AX25_STATE_0;
1144 +
1145 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
1146 +index 176bddacc16eb..7e93087d13667 100644
1147 +--- a/net/dccp/ipv4.c
1148 ++++ b/net/dccp/ipv4.c
1149 +@@ -428,7 +428,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
1150 +
1151 + if (__inet_inherit_port(sk, newsk) < 0)
1152 + goto put_and_exit;
1153 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1154 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
1155 + if (*own_req)
1156 + ireq->ireq_opt = NULL;
1157 + else
1158 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
1159 +index 2cd3508a37869..ae4851fdbe9e5 100644
1160 +--- a/net/dccp/ipv6.c
1161 ++++ b/net/dccp/ipv6.c
1162 +@@ -538,7 +538,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
1163 + dccp_done(newsk);
1164 + goto out;
1165 + }
1166 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1167 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
1168 + /* Clone pktoptions received with SYN, if we own the req */
1169 + if (*own_req && ireq->pktopts) {
1170 + newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
1171 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
1172 +index 439a55d1aa993..0a69f92da71b8 100644
1173 +--- a/net/ipv4/inet_connection_sock.c
1174 ++++ b/net/ipv4/inet_connection_sock.c
1175 +@@ -793,7 +793,7 @@ static void reqsk_queue_hash_req(struct request_sock *req,
1176 + timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1177 + mod_timer(&req->rsk_timer, jiffies + timeout);
1178 +
1179 +- inet_ehash_insert(req_to_sk(req), NULL);
1180 ++ inet_ehash_insert(req_to_sk(req), NULL, NULL);
1181 + /* before letting lookups find us, make sure all req fields
1182 + * are committed to memory and refcnt initialized.
1183 + */
1184 +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
1185 +index 3a5f12f011cb4..c96a5871b49da 100644
1186 +--- a/net/ipv4/inet_hashtables.c
1187 ++++ b/net/ipv4/inet_hashtables.c
1188 +@@ -24,6 +24,9 @@
1189 + #include <net/addrconf.h>
1190 + #include <net/inet_connection_sock.h>
1191 + #include <net/inet_hashtables.h>
1192 ++#if IS_ENABLED(CONFIG_IPV6)
1193 ++#include <net/inet6_hashtables.h>
1194 ++#endif
1195 + #include <net/secure_seq.h>
1196 + #include <net/ip.h>
1197 + #include <net/tcp.h>
1198 +@@ -513,10 +516,52 @@ static u32 inet_sk_port_offset(const struct sock *sk)
1199 + inet->inet_dport);
1200 + }
1201 +
1202 +-/* insert a socket into ehash, and eventually remove another one
1203 +- * (The another one can be a SYN_RECV or TIMEWAIT
1204 ++/* Searches for an exsiting socket in the ehash bucket list.
1205 ++ * Returns true if found, false otherwise.
1206 + */
1207 +-bool inet_ehash_insert(struct sock *sk, struct sock *osk)
1208 ++static bool inet_ehash_lookup_by_sk(struct sock *sk,
1209 ++ struct hlist_nulls_head *list)
1210 ++{
1211 ++ const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
1212 ++ const int sdif = sk->sk_bound_dev_if;
1213 ++ const int dif = sk->sk_bound_dev_if;
1214 ++ const struct hlist_nulls_node *node;
1215 ++ struct net *net = sock_net(sk);
1216 ++ struct sock *esk;
1217 ++
1218 ++ INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
1219 ++
1220 ++ sk_nulls_for_each_rcu(esk, node, list) {
1221 ++ if (esk->sk_hash != sk->sk_hash)
1222 ++ continue;
1223 ++ if (sk->sk_family == AF_INET) {
1224 ++ if (unlikely(INET_MATCH(esk, net, acookie,
1225 ++ sk->sk_daddr,
1226 ++ sk->sk_rcv_saddr,
1227 ++ ports, dif, sdif))) {
1228 ++ return true;
1229 ++ }
1230 ++ }
1231 ++#if IS_ENABLED(CONFIG_IPV6)
1232 ++ else if (sk->sk_family == AF_INET6) {
1233 ++ if (unlikely(INET6_MATCH(esk, net,
1234 ++ &sk->sk_v6_daddr,
1235 ++ &sk->sk_v6_rcv_saddr,
1236 ++ ports, dif, sdif))) {
1237 ++ return true;
1238 ++ }
1239 ++ }
1240 ++#endif
1241 ++ }
1242 ++ return false;
1243 ++}
1244 ++
1245 ++/* Insert a socket into ehash, and eventually remove another one
1246 ++ * (The another one can be a SYN_RECV or TIMEWAIT)
1247 ++ * If an existing socket already exists, socket sk is not inserted,
1248 ++ * and sets found_dup_sk parameter to true.
1249 ++ */
1250 ++bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
1251 + {
1252 + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
1253 + struct hlist_nulls_head *list;
1254 +@@ -535,16 +580,23 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk)
1255 + if (osk) {
1256 + WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
1257 + ret = sk_nulls_del_node_init_rcu(osk);
1258 ++ } else if (found_dup_sk) {
1259 ++ *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
1260 ++ if (*found_dup_sk)
1261 ++ ret = false;
1262 + }
1263 ++
1264 + if (ret)
1265 + __sk_nulls_add_node_rcu(sk, list);
1266 ++
1267 + spin_unlock(lock);
1268 ++
1269 + return ret;
1270 + }
1271 +
1272 +-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
1273 ++bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
1274 + {
1275 +- bool ok = inet_ehash_insert(sk, osk);
1276 ++ bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
1277 +
1278 + if (ok) {
1279 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
1280 +@@ -588,7 +640,7 @@ int __inet_hash(struct sock *sk, struct sock *osk)
1281 + int err = 0;
1282 +
1283 + if (sk->sk_state != TCP_LISTEN) {
1284 +- inet_ehash_nolisten(sk, osk);
1285 ++ inet_ehash_nolisten(sk, osk, NULL);
1286 + return 0;
1287 + }
1288 + WARN_ON(!sk_unhashed(sk));
1289 +@@ -683,7 +735,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
1290 + tb = inet_csk(sk)->icsk_bind_hash;
1291 + spin_lock_bh(&head->lock);
1292 + if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
1293 +- inet_ehash_nolisten(sk, NULL);
1294 ++ inet_ehash_nolisten(sk, NULL, NULL);
1295 + spin_unlock_bh(&head->lock);
1296 + return 0;
1297 + }
1298 +@@ -759,7 +811,7 @@ ok:
1299 + inet_bind_hash(sk, tb, port);
1300 + if (sk_unhashed(sk)) {
1301 + inet_sk(sk)->inet_sport = htons(port);
1302 +- inet_ehash_nolisten(sk, (struct sock *)tw);
1303 ++ inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
1304 + }
1305 + if (tw)
1306 + inet_twsk_bind_unhash(tw, hinfo);
1307 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1308 +index de4edfbc9e466..c9fc6e3868be3 100644
1309 +--- a/net/ipv4/tcp_ipv4.c
1310 ++++ b/net/ipv4/tcp_ipv4.c
1311 +@@ -1415,6 +1415,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1312 + bool *own_req)
1313 + {
1314 + struct inet_request_sock *ireq;
1315 ++ bool found_dup_sk = false;
1316 + struct inet_sock *newinet;
1317 + struct tcp_sock *newtp;
1318 + struct sock *newsk;
1319 +@@ -1485,12 +1486,22 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1320 +
1321 + if (__inet_inherit_port(sk, newsk) < 0)
1322 + goto put_and_exit;
1323 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1324 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1325 ++ &found_dup_sk);
1326 + if (likely(*own_req)) {
1327 + tcp_move_syn(newtp, req);
1328 + ireq->ireq_opt = NULL;
1329 + } else {
1330 + newinet->inet_opt = NULL;
1331 ++
1332 ++ if (!req_unhash && found_dup_sk) {
1333 ++ /* This code path should only be executed in the
1334 ++ * syncookie case only
1335 ++ */
1336 ++ bh_unlock_sock(newsk);
1337 ++ sock_put(newsk);
1338 ++ newsk = NULL;
1339 ++ }
1340 + }
1341 + return newsk;
1342 +
1343 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1344 +index e8d206725cb75..c332f75f4e9aa 100644
1345 +--- a/net/ipv6/tcp_ipv6.c
1346 ++++ b/net/ipv6/tcp_ipv6.c
1347 +@@ -1090,6 +1090,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1348 + struct ipv6_txoptions *opt;
1349 + struct tcp6_sock *newtcp6sk;
1350 + struct inet_sock *newinet;
1351 ++ bool found_dup_sk = false;
1352 + struct tcp_sock *newtp;
1353 + struct sock *newsk;
1354 + #ifdef CONFIG_TCP_MD5SIG
1355 +@@ -1258,7 +1259,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1356 + tcp_done(newsk);
1357 + goto out;
1358 + }
1359 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1360 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1361 ++ &found_dup_sk);
1362 + if (*own_req) {
1363 + tcp_move_syn(newtp, req);
1364 +
1365 +@@ -1273,6 +1275,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1366 + skb_set_owner_r(newnp->pktoptions, newsk);
1367 + }
1368 + }
1369 ++ } else {
1370 ++ if (!req_unhash && found_dup_sk) {
1371 ++ /* This code path should only be executed in the
1372 ++ * syncookie case only
1373 ++ */
1374 ++ bh_unlock_sock(newsk);
1375 ++ sock_put(newsk);
1376 ++ newsk = NULL;
1377 ++ }
1378 + }
1379 +
1380 + return newsk;
1381 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1382 +index 5c6241964637f..e2120221b9578 100644
1383 +--- a/net/netlink/af_netlink.c
1384 ++++ b/net/netlink/af_netlink.c
1385 +@@ -2243,6 +2243,13 @@ static int netlink_dump(struct sock *sk)
1386 + * single netdev. The outcome is MSG_TRUNC error.
1387 + */
1388 + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
1389 ++
1390 ++ /* Make sure malicious BPF programs can not read unitialized memory
1391 ++ * from skb->head -> skb->data
1392 ++ */
1393 ++ skb_reset_network_header(skb);
1394 ++ skb_reset_mac_header(skb);
1395 ++
1396 + netlink_skb_set_owner_r(skb, sk);
1397 +
1398 + if (nlk->dump_done_errno > 0)
1399 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
1400 +index 4413ffdc1e030..180f5feb77177 100644
1401 +--- a/net/openvswitch/flow_netlink.c
1402 ++++ b/net/openvswitch/flow_netlink.c
1403 +@@ -2316,7 +2316,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
1404 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
1405 +
1406 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1407 +- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
1408 ++ if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
1409 + OVS_NLERR(log, "Flow action size exceeds max %u",
1410 + MAX_ACTIONS_BUFSIZE);
1411 + return ERR_PTR(-EMSGSIZE);
1412 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1413 +index b951f411ddedc..f654f79e3310c 100644
1414 +--- a/net/packet/af_packet.c
1415 ++++ b/net/packet/af_packet.c
1416 +@@ -2791,8 +2791,9 @@ tpacket_error:
1417 +
1418 + status = TP_STATUS_SEND_REQUEST;
1419 + err = po->xmit(skb);
1420 +- if (unlikely(err > 0)) {
1421 +- err = net_xmit_errno(err);
1422 ++ if (unlikely(err != 0)) {
1423 ++ if (err > 0)
1424 ++ err = net_xmit_errno(err);
1425 + if (err && __packet_get_status(po, ph) ==
1426 + TP_STATUS_AVAILABLE) {
1427 + /* skb was destructed already */
1428 +@@ -2993,8 +2994,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1429 + skb->no_fcs = 1;
1430 +
1431 + err = po->xmit(skb);
1432 +- if (err > 0 && (err = net_xmit_errno(err)) != 0)
1433 +- goto out_unlock;
1434 ++ if (unlikely(err != 0)) {
1435 ++ if (err > 0)
1436 ++ err = net_xmit_errno(err);
1437 ++ if (err)
1438 ++ goto out_unlock;
1439 ++ }
1440 +
1441 + dev_put(dev);
1442 +
1443 +diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
1444 +index 1b403c2573da2..39579cfcf9b88 100644
1445 +--- a/net/rxrpc/net_ns.c
1446 ++++ b/net/rxrpc/net_ns.c
1447 +@@ -117,7 +117,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
1448 + struct rxrpc_net *rxnet = rxrpc_net(net);
1449 +
1450 + rxnet->live = false;
1451 ++ del_timer_sync(&rxnet->peer_keepalive_timer);
1452 + cancel_work_sync(&rxnet->peer_keepalive_work);
1453 ++ /* Remove the timer again as the worker may have restarted it. */
1454 + del_timer_sync(&rxnet->peer_keepalive_timer);
1455 + rxrpc_destroy_all_calls(rxnet);
1456 + rxrpc_destroy_all_connections(rxnet);
1457 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1458 +index fe246e03fcd9c..5eee26cf9011f 100644
1459 +--- a/net/sched/cls_u32.c
1460 ++++ b/net/sched/cls_u32.c
1461 +@@ -873,10 +873,6 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
1462 + new->flags = n->flags;
1463 + RCU_INIT_POINTER(new->ht_down, ht);
1464 +
1465 +- /* bump reference count as long as we hold pointer to structure */
1466 +- if (ht)
1467 +- ht->refcnt++;
1468 +-
1469 + #ifdef CONFIG_CLS_U32_PERF
1470 + /* Statistics may be incremented by readers during update
1471 + * so we must keep them in tact. When the node is later destroyed
1472 +@@ -899,6 +895,10 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
1473 + return NULL;
1474 + }
1475 +
1476 ++ /* bump reference count as long as we hold pointer to structure */
1477 ++ if (ht)
1478 ++ ht->refcnt++;
1479 ++
1480 + return new;
1481 + }
1482 +
1483 +diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
1484 +index 5041f43ee5f79..06d32257ddb60 100644
1485 +--- a/sound/soc/atmel/sam9g20_wm8731.c
1486 ++++ b/sound/soc/atmel/sam9g20_wm8731.c
1487 +@@ -59,35 +59,6 @@
1488 + */
1489 + #undef ENABLE_MIC_INPUT
1490 +
1491 +-static struct clk *mclk;
1492 +-
1493 +-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
1494 +- struct snd_soc_dapm_context *dapm,
1495 +- enum snd_soc_bias_level level)
1496 +-{
1497 +- static int mclk_on;
1498 +- int ret = 0;
1499 +-
1500 +- switch (level) {
1501 +- case SND_SOC_BIAS_ON:
1502 +- case SND_SOC_BIAS_PREPARE:
1503 +- if (!mclk_on)
1504 +- ret = clk_enable(mclk);
1505 +- if (ret == 0)
1506 +- mclk_on = 1;
1507 +- break;
1508 +-
1509 +- case SND_SOC_BIAS_OFF:
1510 +- case SND_SOC_BIAS_STANDBY:
1511 +- if (mclk_on)
1512 +- clk_disable(mclk);
1513 +- mclk_on = 0;
1514 +- break;
1515 +- }
1516 +-
1517 +- return ret;
1518 +-}
1519 +-
1520 + static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
1521 + SND_SOC_DAPM_MIC("Int Mic", NULL),
1522 + SND_SOC_DAPM_SPK("Ext Spk", NULL),
1523 +@@ -146,7 +117,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
1524 + .owner = THIS_MODULE,
1525 + .dai_link = &at91sam9g20ek_dai,
1526 + .num_links = 1,
1527 +- .set_bias_level = at91sam9g20ek_set_bias_level,
1528 +
1529 + .dapm_widgets = at91sam9g20ek_dapm_widgets,
1530 + .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
1531 +@@ -159,7 +129,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1532 + {
1533 + struct device_node *np = pdev->dev.of_node;
1534 + struct device_node *codec_np, *cpu_np;
1535 +- struct clk *pllb;
1536 + struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
1537 + int ret;
1538 +
1539 +@@ -173,31 +142,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1540 + return -EINVAL;
1541 + }
1542 +
1543 +- /*
1544 +- * Codec MCLK is supplied by PCK0 - set it up.
1545 +- */
1546 +- mclk = clk_get(NULL, "pck0");
1547 +- if (IS_ERR(mclk)) {
1548 +- dev_err(&pdev->dev, "Failed to get MCLK\n");
1549 +- ret = PTR_ERR(mclk);
1550 +- goto err;
1551 +- }
1552 +-
1553 +- pllb = clk_get(NULL, "pllb");
1554 +- if (IS_ERR(pllb)) {
1555 +- dev_err(&pdev->dev, "Failed to get PLLB\n");
1556 +- ret = PTR_ERR(pllb);
1557 +- goto err_mclk;
1558 +- }
1559 +- ret = clk_set_parent(mclk, pllb);
1560 +- clk_put(pllb);
1561 +- if (ret != 0) {
1562 +- dev_err(&pdev->dev, "Failed to set MCLK parent\n");
1563 +- goto err_mclk;
1564 +- }
1565 +-
1566 +- clk_set_rate(mclk, MCLK_RATE);
1567 +-
1568 + card->dev = &pdev->dev;
1569 +
1570 + /* Parse device node info */
1571 +@@ -241,9 +185,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1572 +
1573 + return ret;
1574 +
1575 +-err_mclk:
1576 +- clk_put(mclk);
1577 +- mclk = NULL;
1578 + err:
1579 + atmel_ssc_put_audio(0);
1580 + return ret;
1581 +@@ -253,8 +194,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
1582 + {
1583 + struct snd_soc_card *card = platform_get_drvdata(pdev);
1584 +
1585 +- clk_disable(mclk);
1586 +- mclk = NULL;
1587 + snd_soc_unregister_card(card);
1588 + atmel_ssc_put_audio(0);
1589 +
1590 +diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
1591 +index e6750bda542a9..fa813ec321196 100644
1592 +--- a/sound/soc/codecs/msm8916-wcd-digital.c
1593 ++++ b/sound/soc/codecs/msm8916-wcd-digital.c
1594 +@@ -923,9 +923,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
1595 +
1596 + dev_set_drvdata(dev, priv);
1597 +
1598 +- return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
1599 ++ ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
1600 + msm8916_wcd_digital_dai,
1601 + ARRAY_SIZE(msm8916_wcd_digital_dai));
1602 ++ if (ret)
1603 ++ goto err_mclk;
1604 ++
1605 ++ return 0;
1606 ++
1607 ++err_mclk:
1608 ++ clk_disable_unprepare(priv->mclk);
1609 + err_clk:
1610 + clk_disable_unprepare(priv->ahbclk);
1611 + return ret;
1612 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
1613 +index e04c48c67458a..af9f28dd957db 100644
1614 +--- a/sound/soc/soc-dapm.c
1615 ++++ b/sound/soc/soc-dapm.c
1616 +@@ -1635,8 +1635,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
1617 + switch (w->id) {
1618 + case snd_soc_dapm_pre:
1619 + if (!w->event)
1620 +- list_for_each_entry_safe_continue(w, n, list,
1621 +- power_list);
1622 ++ continue;
1623 +
1624 + if (event == SND_SOC_DAPM_STREAM_START)
1625 + ret = w->event(w,
1626 +@@ -1648,8 +1647,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
1627 +
1628 + case snd_soc_dapm_post:
1629 + if (!w->event)
1630 +- list_for_each_entry_safe_continue(w, n, list,
1631 +- power_list);
1632 ++ continue;
1633 +
1634 + if (event == SND_SOC_DAPM_STREAM_START)
1635 + ret = w->event(w,
1636 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
1637 +index 1ac8c84c3369a..c9c604f0e1ff7 100644
1638 +--- a/sound/usb/midi.c
1639 ++++ b/sound/usb/midi.c
1640 +@@ -1211,6 +1211,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
1641 + } while (drain_urbs && timeout);
1642 + finish_wait(&ep->drain_wait, &wait);
1643 + }
1644 ++ port->active = 0;
1645 + spin_unlock_irq(&ep->buffer_lock);
1646 + }
1647 +
1648 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
1649 +index 0c7ea78317fca..0206fecfd3770 100644
1650 +--- a/sound/usb/usbaudio.h
1651 ++++ b/sound/usb/usbaudio.h
1652 +@@ -22,7 +22,7 @@
1653 + */
1654 +
1655 + /* handling of USB vendor/product ID pairs as 32-bit numbers */
1656 +-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
1657 ++#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
1658 + #define USB_ID_VENDOR(id) ((id) >> 16)
1659 + #define USB_ID_PRODUCT(id) ((u16)(id))
1660 +