
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Tue, 04 Jun 2019 11:11:03
Message-Id: 1559646642.111b09445ca154f9feee0743aa1a84f9250a2dab.mpagano@gentoo
commit: 111b09445ca154f9feee0743aa1a84f9250a2dab
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 4 11:10:42 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun 4 11:10:42 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=111b0944

Linux patch 5.0.21

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1020_linux-5.0.21.patch | 1443 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1447 insertions(+)

diff --git a/0000_README b/0000_README
index cf5191b..1fe5b3d 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch: 1019_linux-5.0.20.patch
From: http://www.kernel.org
Desc: Linux 5.0.20

+Patch: 1020_linux-5.0.21.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.21
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1020_linux-5.0.21.patch b/1020_linux-5.0.21.patch
new file mode 100644
index 0000000..47e7232
--- /dev/null
+++ b/1020_linux-5.0.21.patch
@@ -0,0 +1,1443 @@
+diff --git a/Makefile b/Makefile
+index 25390977536b..93701ca8f3a6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index dd8b8716467a..2d1a8cd35509 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -1,22 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * GHASH routines supporting VMX instructions on the Power 8
+ *
+- * Copyright (C) 2015 International Business Machines Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 only.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * Copyright (C) 2015, 2019 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@××××××.com>
++ *
++ * Extended by Daniel Axtens <dja@××××××.net> to replace the fallback
++ * mechanism. The new approach is based on arm64 code, which is:
++ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@××××××.org>
+ */
+
+ #include <linux/types.h>
+@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
+ const u8 *in, size_t len);
+
+ struct p8_ghash_ctx {
++ /* key used by vector asm */
+ u128 htable[16];
+- struct crypto_shash *fallback;
++ /* key used by software fallback */
++ be128 key;
+ };
+
+ struct p8_ghash_desc_ctx {
+ u64 shash[2];
+ u8 buffer[GHASH_DIGEST_SIZE];
+ int bytes;
+- struct shash_desc fallback_desc;
+ };
+
+-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+-{
+- const char *alg = "ghash-generic";
+- struct crypto_shash *fallback;
+- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+- if (IS_ERR(fallback)) {
+- printk(KERN_ERR
+- "Failed to allocate transformation for '%s': %ld\n",
+- alg, PTR_ERR(fallback));
+- return PTR_ERR(fallback);
+- }
+-
+- crypto_shash_set_flags(fallback,
+- crypto_shash_get_flags((struct crypto_shash
+- *) tfm));
+-
+- /* Check if the descsize defined in the algorithm is still enough. */
+- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+- + crypto_shash_descsize(fallback)) {
+- printk(KERN_ERR
+- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+- alg,
+- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+- crypto_shash_descsize(fallback));
+- return -EINVAL;
+- }
+- ctx->fallback = fallback;
+-
+- return 0;
+-}
+-
+-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+-{
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+- if (ctx->fallback) {
+- crypto_free_shash(ctx->fallback);
+- ctx->fallback = NULL;
+- }
+-}
+-
+ static int p8_ghash_init(struct shash_desc *desc)
+ {
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->bytes = 0;
+ memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+- dctx->fallback_desc.tfm = ctx->fallback;
+- dctx->fallback_desc.flags = desc->flags;
+- return crypto_shash_init(&dctx->fallback_desc);
++ return 0;
+ }
+
+ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+- return crypto_shash_setkey(ctx->fallback, key, keylen);
++
++ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
++
++ return 0;
++}
++
++static inline void __ghash_block(struct p8_ghash_ctx *ctx,
++ struct p8_ghash_desc_ctx *dctx)
++{
++ if (!IN_INTERRUPT) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
++ gcm_ghash_p8(dctx->shash, ctx->htable,
++ dctx->buffer, GHASH_DIGEST_SIZE);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++ } else {
++ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
++ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++ }
++}
++
++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
++ struct p8_ghash_desc_ctx *dctx,
++ const u8 *src, unsigned int srclen)
++{
++ if (!IN_INTERRUPT) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
++ gcm_ghash_p8(dctx->shash, ctx->htable,
++ src, srclen);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++ } else {
++ while (srclen >= GHASH_BLOCK_SIZE) {
++ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
++ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++ srclen -= GHASH_BLOCK_SIZE;
++ src += GHASH_BLOCK_SIZE;
++ }
++ }
+ }
+
+ static int p8_ghash_update(struct shash_desc *desc,
+@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc,
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (IN_INTERRUPT) {
+- return crypto_shash_update(&dctx->fallback_desc, src,
+- srclen);
+- } else {
+- if (dctx->bytes) {
+- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+- memcpy(dctx->buffer + dctx->bytes, src,
+- srclen);
+- dctx->bytes += srclen;
+- return 0;
+- }
++ if (dctx->bytes) {
++ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src,
+- GHASH_DIGEST_SIZE - dctx->bytes);
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable,
+- dctx->buffer, GHASH_DIGEST_SIZE);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- src += GHASH_DIGEST_SIZE - dctx->bytes;
+- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+- dctx->bytes = 0;
+- }
+- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+- if (len) {
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- src += len;
+- srclen -= len;
+- }
+- if (srclen) {
+- memcpy(dctx->buffer, src, srclen);
+- dctx->bytes = srclen;
++ srclen);
++ dctx->bytes += srclen;
++ return 0;
+ }
+- return 0;
++ memcpy(dctx->buffer + dctx->bytes, src,
++ GHASH_DIGEST_SIZE - dctx->bytes);
++
++ __ghash_block(ctx, dctx);
++
++ src += GHASH_DIGEST_SIZE - dctx->bytes;
++ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
++ dctx->bytes = 0;
++ }
++ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
++ if (len) {
++ __ghash_blocks(ctx, dctx, src, len);
++ src += len;
++ srclen -= len;
+ }
++ if (srclen) {
++ memcpy(dctx->buffer, src, srclen);
++ dctx->bytes = srclen;
++ }
++ return 0;
+ }
+
+ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (IN_INTERRUPT) {
+- return crypto_shash_final(&dctx->fallback_desc, out);
+- } else {
+- if (dctx->bytes) {
+- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+- dctx->buffer[i] = 0;
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable,
+- dctx->buffer, GHASH_DIGEST_SIZE);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- dctx->bytes = 0;
+- }
+- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+- return 0;
++ if (dctx->bytes) {
++ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
++ dctx->buffer[i] = 0;
++ __ghash_block(ctx, dctx);
++ dctx->bytes = 0;
+ }
++ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
++ return 0;
+ }
+
+ struct shash_alg p8_ghash_alg = {
+@@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = {
+ .cra_name = "ghash",
+ .cra_driver_name = "p8_ghash",
+ .cra_priority = 1000,
+- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+ .cra_module = THIS_MODULE,
+- .cra_init = p8_ghash_init_tfm,
+- .cra_exit = p8_ghash_exit_tfm,
+ },
+ };
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f89fc6ea6078..4eeece3576e1 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3123,13 +3123,18 @@ static int bond_slave_netdev_event(unsigned long event,
+ case NETDEV_CHANGE:
+ /* For 802.3ad mode only:
+ * Getting invalid Speed/Duplex values here will put slave
+- * in weird state. So mark it as link-fail for the time
+- * being and let link-monitoring (miimon) set it right when
+- * correct speeds/duplex are available.
++ * in weird state. Mark it as link-fail if the link was
++ * previously up or link-down if it hasn't yet come up, and
++ * let link-monitoring (miimon) set it right when correct
++ * speeds/duplex are available.
+ */
+ if (bond_update_speed_duplex(slave) &&
+- BOND_MODE(bond) == BOND_MODE_8023AD)
+- slave->link = BOND_LINK_FAIL;
++ BOND_MODE(bond) == BOND_MODE_8023AD) {
++ if (slave->last_link_up)
++ slave->link = BOND_LINK_FAIL;
++ else
++ slave->link = BOND_LINK_DOWN;
++ }
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ bond_3ad_adapter_speed_duplex_changed(slave);
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 6cba05a80892..5a81ce42b808 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -892,7 +892,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+ err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
+ if (err)
+ return U64_MAX;
+- high = reg;
++ low |= ((u32)reg) << 16;
+ }
+ break;
+ case STATS_TYPE_BANK1:
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index c6ddbc0e084e..300dbfdd4ae8 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1636,6 +1636,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (!skb) {
++ if (agg_bufs)
++ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+@@ -6336,7 +6338,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+- if (bp->flags & BNXT_FLAG_ROCE_CAP) {
++ if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = 65536;
+ extra_srqs = 8192;
+@@ -7504,22 +7506,23 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ }
+
+-int bnxt_reserve_rings(struct bnxt *bp)
++int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
+ {
+ int tcs = netdev_get_num_tc(bp->dev);
+- bool reinit_irq = false;
++ bool irq_cleared = false;
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+- if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
++ if (irq_re_init && BNXT_NEW_RM(bp) &&
++ bnxt_get_num_msix(bp) != bp->total_irqs) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+- reinit_irq = true;
++ irq_cleared = true;
+ }
+ rc = __bnxt_reserve_rings(bp);
+- if (reinit_irq) {
++ if (irq_cleared) {
+ if (!rc)
+ rc = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, rc);
+@@ -8418,7 +8421,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ return rc;
+ }
+ }
+- rc = bnxt_reserve_rings(bp);
++ rc = bnxt_reserve_rings(bp, irq_re_init);
+ if (rc)
+ return rc;
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+@@ -10276,7 +10279,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+
+ if (sh)
+ bp->flags |= BNXT_FLAG_SHARED_RINGS;
+- dflt_rings = netif_get_num_default_rss_queues();
++ dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
+ /* Reduce default rings on multi-port cards so that total default
+ * rings do not exceed CPU count.
+ */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 2fb653e0048d..c09b20b08395 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -20,6 +20,7 @@
+
+ #include <linux/interrupt.h>
+ #include <linux/rhashtable.h>
++#include <linux/crash_dump.h>
+ #include <net/devlink.h>
+ #include <net/dst_metadata.h>
+ #include <net/switchdev.h>
+@@ -1367,7 +1368,8 @@ struct bnxt {
+ #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+ #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+ #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
+- !(bp->flags & BNXT_FLAG_CHIP_P5))
++ !(bp->flags & BNXT_FLAG_CHIP_P5) && \
++ !is_kdump_kernel())
+
+ /* Chip class phase 5 */
+ #define BNXT_CHIP_P5(bp) \
+@@ -1776,7 +1778,7 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
+ int bnxt_get_avail_msix(struct bnxt *bp, int num);
+-int bnxt_reserve_rings(struct bnxt *bp);
++int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
+ void bnxt_tx_disable(struct bnxt *bp);
+ void bnxt_tx_enable(struct bnxt *bp);
+ int bnxt_hwrm_set_pause(struct bnxt *);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index adabbe94a259..e1460e391952 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -788,7 +788,7 @@ static int bnxt_set_channels(struct net_device *dev,
+ */
+ }
+ } else {
+- rc = bnxt_reserve_rings(bp);
++ rc = bnxt_reserve_rings(bp, true);
+ }
+
+ return rc;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index ea45a9b8179e..7dd3f445afb6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -150,7 +150,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+- rc = bnxt_reserve_rings(bp);
++ rc = bnxt_reserve_rings(bp, true);
+ }
+ }
+ if (rc) {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index c116f96956fe..f2aba5b160c2 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -228,6 +228,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
+ fs->val.ivlan = vlan_tci;
+ fs->mask.ivlan = vlan_tci_mask;
+
++ fs->val.ivlan_vld = 1;
++ fs->mask.ivlan_vld = 1;
++
+ /* Chelsio adapters use ivlan_vld bit to match vlan packets
+ * as 802.1Q. Also, when vlan tag is present in packets,
+ * ethtype match is used then to match on ethtype of inner
+@@ -238,8 +241,6 @@ static void cxgb4_process_flow_match(struct net_device *dev,
+ * ethtype value with ethtype of inner header.
+ */
+ if (fs->val.ethtype == ETH_P_8021Q) {
+- fs->val.ivlan_vld = 1;
+- fs->mask.ivlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 2b03f6187a24..29d3399c4995 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -7139,10 +7139,21 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size)
+ {
+ unsigned int page_shift = fls(page_size) - 1;
++ unsigned int sge_hps = page_shift - 10;
+ unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+ unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+ unsigned int fl_align_log = fls(fl_align) - 1;
+
++ t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
++ HOSTPAGESIZEPF0_V(sge_hps) |
++ HOSTPAGESIZEPF1_V(sge_hps) |
++ HOSTPAGESIZEPF2_V(sge_hps) |
++ HOSTPAGESIZEPF3_V(sge_hps) |
++ HOSTPAGESIZEPF4_V(sge_hps) |
++ HOSTPAGESIZEPF5_V(sge_hps) |
++ HOSTPAGESIZEPF6_V(sge_hps) |
++ HOSTPAGESIZEPF7_V(sge_hps));
++
+ if (is_t4(adap->params.chip)) {
+ t4_set_reg_field(adap, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a96ad20ee484..878ccce1dfcd 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3556,7 +3556,7 @@ failed_init:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ failed_reset:
+- pm_runtime_put(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ failed_regulator:
+ clk_disable_unprepare(fep->clk_ahb);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 8433fb9c3eee..ea0236a2e18b 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4619,7 +4619,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register\n");
+- goto err_free_stats;
++ goto err_netdev;
+ }
+
+ netdev_info(dev, "Using %s mac address %pM\n", mac_from,
+@@ -4630,14 +4630,12 @@ static int mvneta_probe(struct platform_device *pdev)
+ return 0;
+
+ err_netdev:
+- unregister_netdev(dev);
+ if (pp->bm_priv) {
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
+ 1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
+ }
+-err_free_stats:
+ free_percpu(pp->stats);
+ err_free_ports:
+ free_percpu(pp->ports);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 70031e2b2294..f063ba69eb17 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -1412,7 +1412,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
+ /* Set defaults to the MVPP2 port */
+ static void mvpp2_defaults_set(struct mvpp2_port *port)
+ {
+- int tx_port_num, val, queue, ptxq, lrxq;
++ int tx_port_num, val, queue, lrxq;
+
+ if (port->priv->hw_version == MVPP21) {
+ /* Update TX FIFO MIN Threshold */
+@@ -1433,11 +1433,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
+
+ /* Close bandwidth for all queues */
+- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+- ptxq = mvpp2_txq_phys(port->id, queue);
++ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
+ mvpp2_write(port->priv,
+- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+- }
++ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
+
+ /* Set refill period to 1 usec, refill tokens
+ * and bucket size to maximum
+@@ -2293,7 +2291,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
+ txq->descs_dma = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
++ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
+
+ /* Set Tx descriptors queue starting address and size */
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 2d269acdbc8e..631a600bec4d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3789,6 +3789,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+ }
+
++ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
++ features &= ~NETIF_F_RXHASH;
++ if (netdev->features & NETIF_F_RXHASH)
++ netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
++ }
++
+ mutex_unlock(&priv->state_lock);
+
+ return features;
+@@ -3915,6 +3921,9 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+ memcpy(&priv->tstamp, &config, sizeof(config));
+ mutex_unlock(&priv->state_lock);
+
++ /* might need to fix some features */
++ netdev_update_features(priv->netdev);
++
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+ }
+@@ -4744,6 +4753,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ if (!priv->channels.params.scatter_fcs_en)
+ netdev->features &= ~NETIF_F_RXFCS;
+
++ /* prefere CQE compression over rxhash */
++ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
++ netdev->features &= ~NETIF_F_RXHASH;
++
+ #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
+ if (FT_CAP(flow_modify_en) &&
+ FT_CAP(modify_root) &&
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index abbdd4906984..158b941ae911 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2247,7 +2247,7 @@ static struct mlx5_flow_root_namespace
+ cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
+
+ /* Create the root namespace */
+- root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
++ root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
+ if (!root_ns)
+ return NULL;
+
+@@ -2390,6 +2390,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
++ steering->esw_egress_root_ns = NULL;
+ }
+
+ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+@@ -2404,6 +2405,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
++ steering->esw_ingress_root_ns = NULL;
+ }
+
+ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+@@ -2572,6 +2574,7 @@ cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ kfree(steering->esw_egress_root_ns);
++ steering->esw_egress_root_ns = NULL;
+ return err;
+ }
+
+@@ -2599,6 +2602,7 @@ cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ kfree(steering->esw_ingress_root_ns);
++ steering->esw_ingress_root_ns = NULL;
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index 2941967e1cc5..2e5ebcd01b4b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -1169,13 +1169,12 @@ mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+ return -EINVAL;
+ }
+ if (si == -1) {
+- /* The masks are the same, this cannot happen.
+- * That means the caller is broken.
++ /* The masks are the same, this can happen in case eRPs with
++ * the same mask were created in both A-TCAM and C-TCAM.
++ * The only possible condition under which this can happen
++ * is identical rule insertion. Delta is not possible here.
+ */
+- WARN_ON(1);
+- *delta_start = 0;
+- *delta_mask = 0;
+- return 0;
++ return -EINVAL;
+ }
+ pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+ mask = (unsigned char) key->mask[__MASK_IDX(si)];
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 365cddbfc684..cb65f6a48eba 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -6814,6 +6814,8 @@ static int rtl8169_resume(struct device *device)
+ struct net_device *dev = dev_get_drvdata(device);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
++ rtl_rar_set(tp, dev->dev_addr);
++
+ clk_prepare_enable(tp->clk);
+
+ if (netif_running(dev))
+@@ -6847,6 +6849,7 @@ static int rtl8169_runtime_resume(struct device *device)
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct rtl8169_private *tp = netdev_priv(dev);
++
+ rtl_rar_set(tp, dev->dev_addr);
+
+ if (!tp->TxDescArray)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 3c749c327cbd..e09522c5509a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -460,7 +460,7 @@ stmmac_get_pauseparam(struct net_device *netdev,
+ } else {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ netdev->phydev->supported) ||
+- linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ netdev->phydev->supported))
+ return;
+ }
+@@ -491,7 +491,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
+ } else {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phy->supported) ||
+- linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phy->supported))
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index f0e0593e54f3..8841c5de8979 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2190,6 +2190,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
++ /* DMA CSR Channel configuration */
++ for (chan = 0; chan < dma_csr_ch; chan++)
++ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
++
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
+@@ -2215,10 +2219,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ tx_q->tx_tail_addr, chan);
+ }
+
+- /* DMA CSR Channel configuration */
+- for (chan = 0; chan < dma_csr_ch; chan++)
+- stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+-
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+index bdd351597b55..093a223fe408 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -267,7 +267,8 @@ int stmmac_mdio_reset(struct mii_bus *bus)
+ of_property_read_u32_array(np,
+ "snps,reset-delays-us", data->delays, 3);
+
+- if (gpio_request(data->reset_gpio, "mdio-reset"))
++ if (devm_gpio_request(priv->device, data->reset_gpio,
++ "mdio-reset"))
+ return 0;
+ }
+
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
+index 6bac602094bd..8438f2f40d3d 100644
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -29,6 +29,9 @@
+ #define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0
+
+ enum {
++ MV_PMA_BOOT = 0xc050,
++ MV_PMA_BOOT_FATAL = BIT(0),
++
+ MV_PCS_BASE_T = 0x0000,
+ MV_PCS_BASE_R = 0x1000,
+ MV_PCS_1000BASEX = 0x2000,
+@@ -228,6 +231,16 @@ static int mv3310_probe(struct phy_device *phydev)
+ (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
+ return -ENODEV;
+
++ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_BOOT);
++ if (ret < 0)
++ return ret;
++
++ if (ret & MV_PMA_BOOT_FATAL) {
++ dev_warn(&phydev->mdio.dev,
++ "PHY failed to boot firmware, status=%04x\n", ret);
++ return -ENODEV;
++ }
++
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 504282af27e5..921cc0571bd0 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -506,6 +506,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+
+ if (netif_running (dev->net) &&
+ netif_device_present (dev->net) &&
++ test_bit(EVENT_DEV_OPEN, &dev->flags) &&
+ !test_bit (EVENT_RX_HALT, &dev->flags) &&
+ !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
+ switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+@@ -1431,6 +1432,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ goto drop;
+ }
++ if (netif_queue_stopped(net)) {
++ usb_autopm_put_interface_async(dev->intf);
++ spin_unlock_irqrestore(&dev->txq.lock, flags);
++ goto drop;
++ }
+
+ #ifdef CONFIG_PM
+ /* if this triggers the device is still a sleep */
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index ea4a08b83fa0..787966f44589 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
+ if (pci_is_enabled(dev))
+ pci_disable_device(dev);
+
+- pci_write_config_word(dev, PCI_COMMAND, 0);
+-
+ dev->is_busmaster = 0;
+ } else {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+diff --git a/include/linux/siphash.h b/include/linux/siphash.h
+index fa7a6b9cedbf..bf21591a9e5e 100644
+--- a/include/linux/siphash.h
++++ b/include/linux/siphash.h
+@@ -21,6 +21,11 @@ typedef struct {
+ u64 key[2];
+ } siphash_key_t;
+
++static inline bool siphash_key_is_zero(const siphash_key_t *key)
++{
++ return !(key->key[0] | key->key[1]);
++}
++
+ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index 104a6669e344..7698460a3dd1 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -9,6 +9,7 @@
+ #include <linux/uidgid.h>
+ #include <net/inet_frag.h>
+ #include <linux/rcupdate.h>
++#include <linux/siphash.h>
+
+ struct tcpm_hash_bucket;
+ struct ctl_table_header;
+@@ -217,5 +218,6 @@ struct netns_ipv4 {
+ unsigned int ipmr_seq; /* protected by rtnl_mutex */
+
+ atomic_t rt_genid;
++ siphash_key_t ip_id_key;
+ };
+ #endif
+diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
+index 4b2c93b1934c..4955e1a9f1bc 100644
+--- a/include/uapi/linux/tipc_config.h
++++ b/include/uapi/linux/tipc_config.h
+@@ -307,8 +307,10 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
+ tlv_ptr = (struct tlv_desc *)tlv;
+ tlv_ptr->tlv_type = htons(type);
+ tlv_ptr->tlv_len = htons(tlv_len);
+- if (len && data)
+- memcpy(TLV_DATA(tlv_ptr), data, tlv_len);
++ if (len && data) {
++ memcpy(TLV_DATA(tlv_ptr), data, len);
++ memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
++ }
+ return TLV_SPACE(len);
+ }
+
+@@ -405,8 +407,10 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
+ tcm_hdr->tcm_len = htonl(msg_len);
+ tcm_hdr->tcm_type = htons(cmd);
+ tcm_hdr->tcm_flags = htons(flags);
+- if (data_len && data)
++ if (data_len && data) {
+ memcpy(TCM_DATA(msg), data, data_len);
++ memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
++ }
+ return TCM_SPACE(data_len);
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c8e672ac32cb..a8d017035ae9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5804,7 +5804,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+ skb_reset_mac_header(skb);
+ skb_gro_reset_offset(skb);
+
+- eth = skb_gro_header_fast(skb, 0);
+ if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ eth = skb_gro_header_slow(skb, hlen, 0);
+ if (unlikely(!eth)) {
+@@ -5814,6 +5813,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+ return NULL;
+ }
+ } else {
++ eth = (const struct ethhdr *)skb->data;
+ gro_pull_from_frag0(skb, hlen);
+ NAPI_GRO_CB(skb)->frag0 += hlen;
+ NAPI_GRO_CB(skb)->frag0_len -= hlen;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 40796b8bf820..e5bfd42fd083 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1001,7 +1001,11 @@ struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
+ uarg->len++;
+ uarg->bytelen = bytelen;
+ atomic_set(&sk->sk_zckey, ++next);
+- sock_zerocopy_get(uarg);
++
++ /* no extra ref when appending to datagram (MSG_MORE) */
++ if (sk->sk_type == SOCK_STREAM)
++ sock_zerocopy_get(uarg);
++
+ return uarg;
+ }
+ }
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 765b2b32c4a4..1e79e1bca13c 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -187,6 +187,17 @@ static void ip_ma_put(struct ip_mc_list *im)
+ pmc != NULL; \
+ pmc = rtnl_dereference(pmc->next_rcu))
+
++static void ip_sf_list_clear_all(struct ip_sf_list *psf)
++{
++ struct ip_sf_list *next;
++
++ while (psf) {
++ next = psf->sf_next;
++ kfree(psf);
++ psf = next;
++ }
++}
++
+ #ifdef CONFIG_IP_MULTICAST
+
+ /*
+@@ -632,6 +643,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
+ }
+ }
+
++static void kfree_pmc(struct ip_mc_list *pmc)
++{
++ ip_sf_list_clear_all(pmc->sources);
++ ip_sf_list_clear_all(pmc->tomb);
++ kfree(pmc);
++}
++
+ static void igmpv3_send_cr(struct in_device *in_dev)
+ {
+ struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
+@@ -668,7 +686,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
+ else
+ in_dev->mc_tomb = pmc_next;
+ in_dev_put(pmc->interface);
+- kfree(pmc);
++ kfree_pmc(pmc);
+ } else
+ pmc_prev = pmc;
+ }
+@@ -1213,14 +1231,18 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ im->interface = pmc->interface;
+ if (im->sfmode == MCAST_INCLUDE) {
+ im->tomb = pmc->tomb;
++ pmc->tomb = NULL;
++
+ im->sources = pmc->sources;
++ pmc->sources = NULL;
++
+ for (psf = im->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ } else {
+ im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ }
+ in_dev_put(pmc->interface);
+- kfree(pmc);
++ kfree_pmc(pmc);
+ }
+ spin_unlock_bh(&im->lock);
+ }
+@@ -1241,21 +1263,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
+ nextpmc = pmc->next;
+ ip_mc_clear_src(pmc);
+ in_dev_put(pmc->interface);
+- kfree(pmc);
++ kfree_pmc(pmc);
+ }
+ /* clear dead sources, too */
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, pmc) {
+- struct ip_sf_list *psf, *psf_next;
++ struct ip_sf_list *psf;
+
+ spin_lock_bh(&pmc->lock);
+ psf = pmc->tomb;
+ pmc->tomb = NULL;
+ spin_unlock_bh(&pmc->lock);
+- for (; psf; psf = psf_next) {
+- psf_next = psf->sf_next;
+- kfree(psf);
+- }
++ ip_sf_list_clear_all(psf);
+ }
+ rcu_read_unlock();
+ }
+@@ -2133,7 +2152,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
+
+ static void ip_mc_clear_src(struct ip_mc_list *pmc)
+ {
+- struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
++ struct ip_sf_list *tomb, *sources;
+
+ spin_lock_bh(&pmc->lock);
+ tomb = pmc->tomb;
+@@ -2145,14 +2164,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
+ pmc->sfcount[MCAST_EXCLUDE] = 1;
+ spin_unlock_bh(&pmc->lock);
+
+- for (psf = tomb; psf; psf = nextpsf) {
+- nextpsf = psf->sf_next;
+- kfree(psf);
+- }
+- for (psf = sources; psf; psf = nextpsf) {
+- nextpsf = psf->sf_next;
+- kfree(psf);
+- }
++ ip_sf_list_clear_all(tomb);
++ ip_sf_list_clear_all(sources);
+ }
+
+ /* Join a multicast group
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index e8bb2e85c5a4..ac770940adb9 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -883,7 +883,7 @@ static int __ip_append_data(struct sock *sk,
+ int csummode = CHECKSUM_NONE;
+ struct rtable *rt = (struct rtable *)cork->dst;
+ unsigned int wmem_alloc_delta = 0;
+- bool paged, extra_uref;
++ bool paged, extra_uref = false;
+ u32 tskey = 0;
+
+ skb = skb_peek_tail(queue);
+@@ -923,7 +923,7 @@ static int __ip_append_data(struct sock *sk,
+ uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
+ if (!uarg)
+ return -ENOBUFS;
+- extra_uref = true;
++ extra_uref = !skb; /* only extra ref if !MSG_MORE */
+ if (rt->dst.dev->features & NETIF_F_SG &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 3c89ca325947..b66f78fad98c 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -500,15 +500,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
+
+ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
+ {
+- static u32 ip_idents_hashrnd __read_mostly;
+ u32 hash, id;
+
+- net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
++ /* Note the following code is not safe, but this is okay. */
++ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
++ get_random_bytes(&net->ipv4.ip_id_key,
++ sizeof(net->ipv4.ip_id_key));
+
+- hash = jhash_3words((__force u32)iph->daddr,
++ hash = siphash_3u32((__force u32)iph->daddr,
+ (__force u32)iph->saddr,
+- iph->protocol ^ net_hash_mix(net),
+- ip_idents_hashrnd);
++ iph->protocol,
++ &net->ipv4.ip_id_key);
+ id = ip_idents_reserve(hash, segs);
+ iph->id = htons(id);
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index e71227390bec..de16c2e343ef 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1269,7 +1269,7 @@ static int __ip6_append_data(struct sock *sk,
+ int csummode = CHECKSUM_NONE;
+ unsigned int maxnonfragsize, headersize;
+ unsigned int wmem_alloc_delta = 0;
+- bool paged, extra_uref;
++ bool paged, extra_uref = false;
+
+ skb = skb_peek_tail(queue);
+ if (!skb) {
+@@ -1338,7 +1338,7 @@ emsgsize:
+ uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
+ if (!uarg)
+ return -ENOBUFS;
+- extra_uref = true;
++ extra_uref = !skb; /* only extra ref if !MSG_MORE */
+ if (rt->dst.dev->features & NETIF_F_SG &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 4fe7c90962dd..868ae23dbae1 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -10,15 +10,25 @@
+ #include <net/secure_seq.h>
+ #include <linux/netfilter.h>
+
+-static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
++static u32 __ipv6_select_ident(struct net *net,
+ const struct in6_addr *dst,
+ const struct in6_addr *src)
+ {
++ const struct {
++ struct in6_addr dst;
++ struct in6_addr src;
++ } __aligned(SIPHASH_ALIGNMENT) combined = {
++ .dst = *dst,
++ .src = *src,
++ };
+ u32 hash, id;
+
+- hash = __ipv6_addr_jhash(dst, hashrnd);
+- hash = __ipv6_addr_jhash(src, hash);
+- hash ^= net_hash_mix(net);
++ /* Note the following code is not safe, but this is okay. */
++ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
++ get_random_bytes(&net->ipv4.ip_id_key,
++ sizeof(net->ipv4.ip_id_key));
++
++ hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
+
+ /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+ * set the hight order instead thus minimizing possible future
+@@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
+ */
+ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+ {
+- static u32 ip6_proxy_idents_hashrnd __read_mostly;
+ struct in6_addr buf[2];
+ struct in6_addr *addrs;
+ u32 id;
+@@ -53,11 +62,7 @@ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+ if (!addrs)
+ return 0;
+
+- net_get_random_once(&ip6_proxy_idents_hashrnd,
+- sizeof(ip6_proxy_idents_hashrnd));
+-
+- id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
+- &addrs[1], &addrs[0]);
++ id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
+ return htonl(id);
+ }
+ EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+@@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
+ {
+- static u32 ip6_idents_hashrnd __read_mostly;
+ u32 id;
+
+- net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+-
+- id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
++ id = __ipv6_select_ident(net, daddr, saddr);
+ return htonl(id);
+ }
+ EXPORT_SYMBOL(ipv6_select_ident);
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 5a426226c762..5cb14eabfc65 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -287,7 +287,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ /* Binding to link-local address requires an interface */
+ if (!sk->sk_bound_dev_if)
+ goto out_unlock;
++ }
+
++ if (sk->sk_bound_dev_if) {
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(sock_net(sk),
+ sk->sk_bound_dev_if);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b471afce1330..457a27016e74 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2448,6 +2448,12 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
+ struct fib6_info *rt;
+ struct fib6_node *fn;
+
++ /* l3mdev_update_flow overrides oif if the device is enslaved; in
++ * this case we must match on the real ingress device, so reset it
++ */
++ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
++ fl6->flowi6_oif = skb->dev->ifindex;
++
+ /* Get the "current" route for this destination and
+ * check if the redirect has come from appropriate router.
+ *
+diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
+index 94425e421213..9e4b6bcf6920 100644
+--- a/net/llc/llc_output.c
++++ b/net/llc/llc_output.c
+@@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
+ if (likely(!rc))
+ rc = dev_queue_xmit(skb);
++ else
++ kfree_skb(skb);
+ return rc;
+ }
+
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index d4b8355737d8..9d4ed81a33b9 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -766,7 +766,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
+
+ for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ a = actions[i];
+- nest = nla_nest_start(skb, a->order);
++ nest = nla_nest_start(skb, i + 1);
+ if (nest == NULL)
+ goto nla_put_failure;
+ err = tcf_action_dump_1(skb, a, bind, ref);
+@@ -1283,7 +1283,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ ret = PTR_ERR(act);
+ goto err;
+ }
+- act->order = i;
+ attr_size += tcf_action_fill_size(act);
+ actions[i - 1] = act;
+ }
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index d7b0688c98dd..3ecca3b88bf8 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -66,10 +66,6 @@ static int __net_init tipc_init_net(struct net *net)
+ INIT_LIST_HEAD(&tn->node_list);
+ spin_lock_init(&tn->node_list_lock);
+
+- err = tipc_socket_init();
+- if (err)
+- goto out_socket;
+-
+ err = tipc_sk_rht_init(net);
+ if (err)
+ goto out_sk_rht;
+@@ -79,9 +75,6 @@ static int __net_init tipc_init_net(struct net *net)
+ goto out_nametbl;
+
+ INIT_LIST_HEAD(&tn->dist_queue);
+- err = tipc_topsrv_start(net);
+- if (err)
+- goto out_subscr;
+
+ err = tipc_bcast_init(net);
+ if (err)
+@@ -90,25 +83,19 @@ static int __net_init tipc_init_net(struct net *net)
+ return 0;
+
+ out_bclink:
+- tipc_bcast_stop(net);
+-out_subscr:
+ tipc_nametbl_stop(net);
+ out_nametbl:
+ tipc_sk_rht_destroy(net);
+ out_sk_rht:
+- tipc_socket_stop();
+-out_socket:
+ return err;
+ }
+
+ static void __net_exit tipc_exit_net(struct net *net)
+ {
+- tipc_topsrv_stop(net);
+ tipc_net_stop(net);
+ tipc_bcast_stop(net);
+ tipc_nametbl_stop(net);
+ tipc_sk_rht_destroy(net);
+- tipc_socket_stop();
+ }
+
+ static struct pernet_operations tipc_net_ops = {
+@@ -118,6 +105,11 @@ static struct pernet_operations tipc_net_ops = {
+ .size = sizeof(struct tipc_net),
+ };
+
++static struct pernet_operations tipc_topsrv_net_ops = {
++ .init = tipc_topsrv_init_net,
++ .exit = tipc_topsrv_exit_net,
++};
++
+ static int __init tipc_init(void)
+ {
+ int err;
+@@ -144,6 +136,14 @@ static int __init tipc_init(void)
+ if (err)
+ goto out_pernet;
+
++ err = tipc_socket_init();
++ if (err)
++ goto out_socket;
++
++ err = register_pernet_subsys(&tipc_topsrv_net_ops);
++ if (err)
++ goto out_pernet_topsrv;
++
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+@@ -151,6 +151,10 @@ static int __init tipc_init(void)
+ pr_info("Started in single node mode\n");
+ return 0;
+ out_bearer:
++ unregister_pernet_subsys(&tipc_topsrv_net_ops);
++out_pernet_topsrv:
++ tipc_socket_stop();
++out_socket:
+ unregister_pernet_subsys(&tipc_net_ops);
+ out_pernet:
+ tipc_unregister_sysctl();
+@@ -166,6 +170,8 @@ out_netlink:
+ static void __exit tipc_exit(void)
+ {
+ tipc_bearer_cleanup();
++ unregister_pernet_subsys(&tipc_topsrv_net_ops);
++ tipc_socket_stop();
+ unregister_pernet_subsys(&tipc_net_ops);
+ tipc_netlink_stop();
+ tipc_netlink_compat_stop();
+diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
+index d793b4343885..aa015c233898 100644
+--- a/net/tipc/subscr.h
++++ b/net/tipc/subscr.h
+@@ -77,8 +77,9 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub,
+ u32 found_lower, u32 found_upper,
+ u32 event, u32 port, u32 node,
+ u32 scope, int must);
+-int tipc_topsrv_start(struct net *net);
+-void tipc_topsrv_stop(struct net *net);
++
++int __net_init tipc_topsrv_init_net(struct net *net);
++void __net_exit tipc_topsrv_exit_net(struct net *net);
+
+ void tipc_sub_put(struct tipc_subscription *subscription);
+ void tipc_sub_get(struct tipc_subscription *subscription);
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index f5edb213d760..00f25640877a 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -637,7 +637,7 @@ static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
+ destroy_workqueue(s->send_wq);
+ }
+
+-int tipc_topsrv_start(struct net *net)
++static int tipc_topsrv_start(struct net *net)
+ {
+ struct tipc_net *tn = tipc_net(net);
+ const char name[] = "topology_server";
+@@ -671,7 +671,7 @@ int tipc_topsrv_start(struct net *net)
+ return ret;
+ }
+
+-void tipc_topsrv_stop(struct net *net)
++static void tipc_topsrv_stop(struct net *net)
+ {
+ struct tipc_topsrv *srv = tipc_topsrv(net);
+ struct socket *lsock = srv->listener;
+@@ -696,3 +696,13 @@ void tipc_topsrv_stop(struct net *net)
+ idr_destroy(&srv->conn_idr);
+ kfree(srv);
+ }
++
++int __net_init tipc_topsrv_init_net(struct net *net)
++{
++ return tipc_topsrv_start(net);
++}
++
++void __net_exit tipc_topsrv_exit_net(struct net *net)
++{
++ tipc_topsrv_stop(net);
++}
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 7d5136ecee78..84f6b6906bcc 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -923,12 +923,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
+ if (!netdev)
+ goto out;
+
+- if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+- pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
+- __func__);
+- goto out;
+- }
+-
+ netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ TLS_OFFLOAD_CTX_DIR_RX);
+
+@@ -987,7 +981,8 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event,
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+- if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
++ if (!dev->tlsdev_ops &&
++ !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
+ return NOTIFY_DONE;
+
+ switch (event) {