Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Sat, 23 Feb 2019 11:07:52
Message-Id: 1550920019.329cbc43a969a4b84509a3df1673799d84cc9413.mpagano@gentoo
commit: 329cbc43a969a4b84509a3df1673799d84cc9413
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 23 11:06:59 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb 23 11:06:59 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=329cbc43

proj/linux-patches: Linux patch 4.20.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1011_linux-4.20.12.patch | 936 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 940 insertions(+)

diff --git a/0000_README b/0000_README
index 068574e..1c8cc61 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-4.20.11.patch
From: http://www.kernel.org
Desc: Linux 4.20.11

+Patch: 1011_linux-4.20.12.patch
+From: http://www.kernel.org
+Desc: Linux 4.20.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.20.12.patch b/1011_linux-4.20.12.patch
new file mode 100644
index 0000000..aa9759c
--- /dev/null
+++ b/1011_linux-4.20.12.patch
@@ -0,0 +1,936 @@
+diff --git a/Makefile b/Makefile
+index 193cfe3a3d70..0a92b4e11621 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 20
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index 778af0b7f7fd..c67081301035 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -303,6 +303,17 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
+ _virt_addr_valid(kaddr))
+
++/*
++ * Given that the GIC architecture permits ITS implementations that can only be
++ * configured with a LPI table address once, GICv3 systems with many CPUs may
++ * end up reserving a lot of different regions after a kexec for their LPI
++ * tables (one per CPU), as we are forced to reuse the same memory after kexec
++ * (and thus reserve it persistently with EFI beforehand)
++ */
++#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
++# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + 2*(NR_CPUS + 1))
++#endif
++
+ #include <asm-generic/memory_model.h>
+
+ #endif
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index f4fc1e0544b7..953e316521fc 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
+ arm64_memblock_init();
+
+ paging_init();
+- efi_apply_persistent_mem_reservations();
+
+ acpi_table_upgrade();
+
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 17eb09d222ff..ec78a04eb136 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
+
+ int af_alg_release(struct socket *sock)
+ {
+- if (sock->sk)
++ if (sock->sk) {
+ sock_put(sock->sk);
++ sock->sk = NULL;
++ }
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(af_alg_release);
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 415849bab233..bde3822cf539 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
+
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+- return 0;
+-}
+
+-int __init efi_apply_persistent_mem_reservations(void)
+-{
+ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = efi.mem_reserve;
+
+diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
+index 3d36142cf812..30ac0c975f8a 100644
+--- a/drivers/firmware/efi/libstub/arm-stub.c
++++ b/drivers/firmware/efi/libstub/arm-stub.c
+@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
+ efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
+ efi_status_t status;
+
+- if (IS_ENABLED(CONFIG_ARM))
+- return;
+-
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
+ if (status != EFI_SUCCESS) {
+diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
+index 0e30fa00204c..f9b8e3e23a8e 100644
+--- a/drivers/hwmon/lm80.c
++++ b/drivers/hwmon/lm80.c
+@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
+ }
+
+ rv = lm80_read_value(client, LM80_REG_FANDIV);
+- if (rv < 0)
++ if (rv < 0) {
++ mutex_unlock(&data->update_lock);
+ return rv;
++ }
+ reg = (rv & ~(3 << (2 * (nr + 1))))
+ | (data->fan_div[nr] << (2 * (nr + 1)));
+ lm80_write_value(client, LM80_REG_FANDIV, reg);
+diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
+index 211ed6cffd10..578978711887 100644
+--- a/drivers/isdn/mISDN/timerdev.c
++++ b/drivers/isdn/mISDN/timerdev.c
+@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
+ spin_lock_irqsave(&timer->dev->lock, flags);
+ if (timer->id >= 0)
+ list_move_tail(&timer->list, &timer->dev->expired);
+- spin_unlock_irqrestore(&timer->dev->lock, flags);
+ wake_up_interruptible(&timer->dev->wait);
++ spin_unlock_irqrestore(&timer->dev->lock, flags);
+ }
+
+ static int
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index ef9deaa361c7..ddd98cdd33bc 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -1286,7 +1286,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ host->regs + SD_EMMC_IRQ_EN);
+
+ ret = request_threaded_irq(host->irq, meson_mmc_irq,
+- meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
++ meson_mmc_irq_thread, IRQF_SHARED,
++ dev_name(&pdev->dev), host);
+ if (ret)
+ goto err_init_clk;
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index b2a0e59b6252..b0113f6fdbb4 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
+ unsigned int sub_irq;
+ unsigned int n;
+ u16 reg;
++ u16 ctl1;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
+ if (err)
+ goto out;
+
+- for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+- if (reg & (1 << n)) {
+- sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
+- handle_nested_irq(sub_irq);
+- ++nhandled;
++ do {
++ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
++ if (reg & (1 << n)) {
++ sub_irq = irq_find_mapping(chip->g1_irq.domain,
++ n);
++ handle_nested_irq(sub_irq);
++ ++nhandled;
++ }
+ }
+- }
++
++ mutex_lock(&chip->reg_lock);
++ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
++ if (err)
++ goto unlock;
++ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
++unlock:
++ mutex_unlock(&chip->reg_lock);
++ if (err)
++ goto out;
++ ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
++ } while (reg & ctl1);
++
+ out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+ }
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 697d9b374f5e..ae2f35039343 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ INIT_WORK(&hw->restart_work, sky2_restart);
+
+ pci_set_drvdata(pdev, hw);
+- pdev->d3_delay = 200;
++ pdev->d3_delay = 300;
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 8f1180fff955..280173b48962 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -845,8 +845,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool configure = false;
+ bool pfc = false;
++ u16 thres_cells;
++ u16 delay_cells;
+ bool lossy;
+- u16 thres;
+
+ for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (prio_tc[j] == i) {
+@@ -860,10 +861,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ continue;
+
+ lossy = !(pfc || pause_en);
+- thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+- delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
+- pause_en);
+- mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
++ thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
++ delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
++ pfc, pause_en);
++ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
++ thres_cells, lossy);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+index 20299f6f65fc..736e29635b77 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
+ static int dwmac4_rx_check_timestamp(void *desc)
+ {
+ struct dma_desc *p = (struct dma_desc *)desc;
++ unsigned int rdes0 = le32_to_cpu(p->des0);
++ unsigned int rdes1 = le32_to_cpu(p->des1);
++ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 own, ctxt;
+ int ret = 1;
+
+- own = p->des3 & RDES3_OWN;
+- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
++ own = rdes3 & RDES3_OWN;
++ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
+ >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+ if (likely(!own && ctxt)) {
+- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
++ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
+ /* Corrupted value */
+ ret = -EINVAL;
+ else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 5710864fa809..9caf79ba5ef1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -692,25 +692,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
++ int ret;
+
+- priv->eee_enabled = edata->eee_enabled;
+-
+- if (!priv->eee_enabled)
++ if (!edata->eee_enabled) {
+ stmmac_disable_eee_mode(priv);
+- else {
++ } else {
+ /* We are asking for enabling the EEE but it is safe
+ * to verify all by invoking the eee_init function.
+ * In case of failure it will return an error.
+ */
+- priv->eee_enabled = stmmac_eee_init(priv);
+- if (!priv->eee_enabled)
++ edata->eee_enabled = stmmac_eee_init(priv);
++ if (!edata->eee_enabled)
+ return -EOPNOTSUPP;
+-
+- /* Do not change tx_lpi_timer in case of failure */
+- priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+- return phy_ethtool_set_eee(dev->phydev, edata);
++ ret = phy_ethtool_set_eee(dev->phydev, edata);
++ if (ret)
++ return ret;
++
++ priv->eee_enabled = edata->eee_enabled;
++ priv->tx_lpi_timer = edata->tx_lpi_timer;
++ return 0;
+ }
+
+ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index 1f612268c998..d847f672a705 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
+ const char *name;
+ char node_name[32];
+
+- if (of_property_read_string(node, "label", &name) < 0) {
++ if (of_property_read_string(child, "label", &name) < 0) {
+ snprintf(node_name, sizeof(node_name), "%pOFn", child);
+ name = node_name;
+ }
+diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
+index 74a8782313cf..bd6084e315de 100644
+--- a/drivers/net/phy/xilinx_gmii2rgmii.c
++++ b/drivers/net/phy/xilinx_gmii2rgmii.c
+@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
+ u16 val = 0;
+ int err;
+
+- err = priv->phy_drv->read_status(phydev);
++ if (priv->phy_drv->read_status)
++ err = priv->phy_drv->read_status(phydev);
++ else
++ err = genphy_read_status(phydev);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 0565f8880199..8f022964b2d1 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2072,7 +2072,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+- struct net_device *dev = skb->dev;
++ struct net_device *dev;
+ int len = skb->len;
+
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+@@ -2092,9 +2092,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ #endif
+ }
+
++ rcu_read_lock();
++ dev = skb->dev;
++ if (unlikely(!(dev->flags & IFF_UP))) {
++ kfree_skb(skb);
++ goto drop;
++ }
++
+ if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
+- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
+- vni);
++ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tx_packets++;
+@@ -2107,8 +2113,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ rx_stats->rx_bytes += len;
+ u64_stats_update_end(&rx_stats->syncp);
+ } else {
++drop:
+ dev->stats.rx_dropped++;
+ }
++ rcu_read_unlock();
+ }
+
+ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index c9d8e3c837de..c25acace7d91 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
+ } else if (!strncmp(str, "pcie_scan_all", 13)) {
+ pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+ } else if (!strncmp(str, "disable_acs_redir=", 18)) {
+- disable_acs_redir_param = str + 18;
++ disable_acs_redir_param =
++ kstrdup(str + 18, GFP_KERNEL);
+ } else {
+ printk(KERN_ERR "PCI: Unknown option `%s'\n",
+ str);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index ffa5b9f771b5..900442605c72 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -266,7 +266,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
+ }
+ ret = transport_init_session(se_sess);
+ if (ret < 0) {
+- kfree(se_sess);
++ kmem_cache_free(se_sess_cache, se_sess);
+ return ERR_PTR(ret);
+ }
+ se_sess->sup_prot_ops = sup_prot_ops;
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index cf82e7266397..5eaeca805c95 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1784,7 +1784,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+- if (ret)
++ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 100ce4a4aff6..845174e113ce 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1167,8 +1167,6 @@ static inline bool efi_enabled(int feature)
+ extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+ extern bool efi_is_table_address(unsigned long phys_addr);
+-
+-extern int efi_apply_persistent_mem_reservations(void);
+ #else
+ static inline bool efi_enabled(int feature)
+ {
+@@ -1187,11 +1185,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
+ {
+ return false;
+ }
+-
+-static inline int efi_apply_persistent_mem_reservations(void)
+-{
+- return 0;
+-}
+ #endif
+
+ extern int efi_status_to_err(efi_status_t status);
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 3ef3086ed52f..ecff64ff365d 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
+ */
+ extern unsigned long long max_possible_pfn;
+
+-#define INIT_MEMBLOCK_REGIONS 128
+-#define INIT_PHYSMEM_REGIONS 4
+-
+ /**
+ * enum memblock_flags - definition of memory region attributes
+ * @MEMBLOCK_NONE: no special request
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+index 2b2a6dce1630..4c76fe2c8488 100644
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -11,6 +11,8 @@
+ #define _LINUX_NETDEV_FEATURES_H
+
+ #include <linux/types.h>
++#include <linux/bitops.h>
++#include <asm/byteorder.h>
+
+ typedef u64 netdev_features_t;
+
+@@ -154,8 +156,26 @@ enum {
+ #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
+ #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
+
+-#define for_each_netdev_feature(mask_addr, bit) \
+- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
++/* Finds the next feature with the highest number of the range of start till 0.
++ */
++static inline int find_next_netdev_feature(u64 feature, unsigned long start)
++{
++ /* like BITMAP_LAST_WORD_MASK() for u64
++ * this sets the most significant 64 - start to 0.
++ */
++ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
++
++ return fls64(feature) - 1;
++}
++
++/* This goes for the MSB to the LSB through the set feature bits,
++ * mask_addr should be a u64 and bit an int
++ */
++#define for_each_netdev_feature(mask_addr, bit) \
++ for ((bit) = find_next_netdev_feature((mask_addr), \
++ NETDEV_FEATURE_COUNT); \
++ (bit) >= 0; \
++ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+
+ /* Features valid for ethtool to change */
+ /* = all defined minus driver/device-class-related */
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index 3f9aea8087e3..8b7eb46ad72d 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
+
+ void __ax25_put_route(ax25_route *ax25_rt);
+
++extern rwlock_t ax25_route_lock;
++
++static inline void ax25_route_lock_use(void)
++{
++ read_lock(&ax25_route_lock);
++}
++
++static inline void ax25_route_lock_unuse(void)
++{
++ read_unlock(&ax25_route_lock);
++}
++
+ static inline void ax25_put_route(ax25_route *ax25_rt)
+ {
+ if (refcount_dec_and_test(&ax25_rt->refcount))
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 00b5e7825508..74ff688568a0 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -39,6 +39,7 @@ struct inet_peer {
+
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
++ u32 n_redirects;
+ unsigned long rate_last;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == 0), following field
+diff --git a/mm/memblock.c b/mm/memblock.c
+index f45a049532fe..74ac4f89018a 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -26,6 +26,13 @@
+
+ #include "internal.h"
+
++#define INIT_MEMBLOCK_REGIONS 128
++#define INIT_PHYSMEM_REGIONS 4
++
++#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
++# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
++#endif
++
+ /**
+ * DOC: memblock overview
+ *
+@@ -92,7 +99,7 @@ unsigned long max_pfn;
+ unsigned long long max_possible_pfn;
+
+ static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
++static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
+ #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+ static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
+ #endif
+@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
+
+ .reserved.regions = memblock_reserved_init_regions,
+ .reserved.cnt = 1, /* empty dummy entry */
+- .reserved.max = INIT_MEMBLOCK_REGIONS,
++ .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
+ .reserved.name = "reserved",
+
+ #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index 70417e9b932d..314bbc8010fb 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ dst = (ax25_address *)(bp + 1);
+ src = (ax25_address *)(bp + 8);
+
++ ax25_route_lock_use();
+ route = ax25_get_route(dst, NULL);
+ if (route) {
+ digipeat = route->digipeat;
+@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ ax25_queue_xmit(skb, dev);
+
+ put:
+- if (route)
+- ax25_put_route(route);
+
++ ax25_route_lock_unuse();
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
+index a0eff323af12..66f74c85cf6b 100644
+--- a/net/ax25/ax25_route.c
++++ b/net/ax25/ax25_route.c
+@@ -40,7 +40,7 @@
+ #include <linux/export.h>
+
+ static ax25_route *ax25_route_list;
+-static DEFINE_RWLOCK(ax25_route_lock);
++DEFINE_RWLOCK(ax25_route_lock);
+
+ void ax25_rt_device_down(struct net_device *dev)
+ {
+@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
+ * Find AX.25 route
+ *
+ * Only routes with a reference count of zero can be destroyed.
++ * Must be called with ax25_route_lock read locked.
+ */
+ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ {
+@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ ax25_route *ax25_def_rt = NULL;
+ ax25_route *ax25_rt;
+
+- read_lock(&ax25_route_lock);
+ /*
+ * Bind to the physical interface we heard them on, or the default
+ * route if none is found;
+@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ if (ax25_spe_rt != NULL)
+ ax25_rt = ax25_spe_rt;
+
+- if (ax25_rt != NULL)
+- ax25_hold_route(ax25_rt);
+-
+- read_unlock(&ax25_route_lock);
+-
+ return ax25_rt;
+ }
+
+@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ ax25_route *ax25_rt;
+ int err = 0;
+
+- if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
++ ax25_route_lock_use();
++ ax25_rt = ax25_get_route(addr, NULL);
++ if (!ax25_rt) {
++ ax25_route_lock_unuse();
+ return -EHOSTUNREACH;
+-
++ }
+ if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto put;
+@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ }
+
+ put:
+- ax25_put_route(ax25_rt);
+-
++ ax25_route_lock_unuse();
+ return err;
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 43f8a4fd4968..de0690e5b3df 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8064,7 +8064,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(upper->wanted_features & feature)
+ && (features & feature)) {
+@@ -8084,7 +8084,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(features & feature) && (lower->features & feature)) {
+ netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index eebc3106d30e..fc3d652a2de0 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -353,6 +353,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ */
+ void *netdev_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -366,6 +368,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+
+ void *napi_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL(napi_alloc_frag);
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index d757b9642d0d..be778599bfed 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ atomic_set(&p->rid, 0);
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
++ p->n_redirects = 0;
+ /* 60*HZ is arbitrary, but chosen enough high so that the first
+ * calculation of tokens is at its maximum.
+ */
+diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+index a0aa13bcabda..0a8a60c1bf9a 100644
+--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
++++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
+ int snmp_version(void *context, size_t hdrlen, unsigned char tag,
+ const void *data, size_t datalen)
+ {
++ if (datalen != 1)
++ return -EINVAL;
+ if (*(unsigned char *)data > 1)
+ return -ENOTSUPP;
+ return 1;
+@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
+ const void *data, size_t datalen)
+ {
+ struct snmp_ctx *ctx = (struct snmp_ctx *)context;
+- __be32 *pdata = (__be32 *)data;
++ __be32 *pdata;
+
++ if (datalen != 4)
++ return -EINVAL;
++ pdata = (__be32 *)data;
+ if (*pdata == ctx->from) {
+ pr_debug("%s: %pI4 to %pI4\n", __func__,
+ (void *)&ctx->from, (void *)&ctx->to);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c0a9d26c06ce..d1ddf1d03721 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ /* No redirected packets during ip_rt_redirect_silence;
+ * reset the algorithm.
+ */
+- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
++ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
+ peer->rate_tokens = 0;
++ peer->n_redirects = 0;
++ }
+
+ /* Too many ignored redirects; do not send anything
+ * set dst.rate_last to the last seen redirected packet.
+ */
+- if (peer->rate_tokens >= ip_rt_redirect_number) {
++ if (peer->n_redirects >= ip_rt_redirect_number) {
+ peer->rate_last = jiffies;
+ goto out_put_peer;
+ }
+@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+ ++peer->rate_tokens;
++ ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+ if (log_martians &&
+ peer->rate_tokens == ip_rt_redirect_number)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 40cbe5609663..b102973102b9 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2532,6 +2532,7 @@ void tcp_write_queue_purge(struct sock *sk)
+ sk_mem_reclaim(sk);
+ tcp_clear_all_retrans_hints(tcp_sk(sk));
+ tcp_sk(sk)->packets_out = 0;
++ inet_csk(sk)->icsk_backoff = 0;
+ }
+
+ int tcp_disconnect(struct sock *sk, int flags)
+@@ -2580,7 +2581,6 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tp->write_seq += tp->max_window + 2;
+ if (tp->write_seq == 0)
+ tp->write_seq = 1;
+- icsk->icsk_backoff = 0;
+ tp->snd_cwnd = 2;
+ icsk->icsk_probes_out = 0;
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index de47038afdf0..b654f21064bb 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -535,14 +535,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ if (sock_owned_by_user(sk))
+ break;
+
++ skb = tcp_rtx_queue_head(sk);
++ if (WARN_ON_ONCE(!skb))
++ break;
++
+ icsk->icsk_backoff--;
+ icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+ TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
+- skb = tcp_rtx_queue_head(sk);
+- BUG_ON(!skb);
+-
+ tcp_mstamp_refresh(tp);
+ delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
+ remaining = icsk->icsk_rto -
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index d2b597674d60..9fa51ab01ac4 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (ifa == ifp)
+ continue;
+- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
++ if (ifa->prefix_len != ifp->prefix_len ||
++ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+ ifp->prefix_len))
+ continue;
+ if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 94903061f324..e83c41c53f4a 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1717,6 +1717,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
+ return 0;
+ }
+
++static void ip6erspan_set_version(struct nlattr *data[],
++ struct __ip6_tnl_parm *parms)
++{
++ parms->erspan_ver = 1;
++ if (data[IFLA_GRE_ERSPAN_VER])
++ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
++
++ if (parms->erspan_ver == 1) {
++ if (data[IFLA_GRE_ERSPAN_INDEX])
++ parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
++ } else if (parms->erspan_ver == 2) {
++ if (data[IFLA_GRE_ERSPAN_DIR])
++ parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
++ if (data[IFLA_GRE_ERSPAN_HWID])
++ parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
++ }
++}
++
+ static void ip6gre_netlink_parms(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+ {
+@@ -1765,20 +1783,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
+
+ if (data[IFLA_GRE_COLLECT_METADATA])
+ parms->collect_md = true;
+-
+- parms->erspan_ver = 1;
+- if (data[IFLA_GRE_ERSPAN_VER])
+- parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+-
+- if (parms->erspan_ver == 1) {
+- if (data[IFLA_GRE_ERSPAN_INDEX])
+- parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+- } else if (parms->erspan_ver == 2) {
+- if (data[IFLA_GRE_ERSPAN_DIR])
+- parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+- if (data[IFLA_GRE_ERSPAN_HWID])
+- parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+- }
+ }
+
+ static int ip6gre_tap_init(struct net_device *dev)
+@@ -2207,6 +2211,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
+ int err;
+
+ ip6gre_netlink_parms(data, &nt->parms);
++ ip6erspan_set_version(data, &nt->parms);
+ ign = net_generic(net, ip6gre_net_id);
+
+ if (nt->parms.collect_md) {
+@@ -2252,6 +2257,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
++ ip6erspan_set_version(data, &p);
+ ip6gre_tunnel_unlink_md(ign, t);
+ ip6gre_tunnel_unlink(ign, t);
+ ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index d0945253f43b..3b1a78906bc0 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out_free;
+ } else if (reserve) {
+ skb_reserve(skb, -reserve);
+- if (len < reserve)
++ if (len < reserve + sizeof(struct ipv6hdr) &&
++ dev->min_header_len != dev->hard_header_len)
+ skb_reset_network_header(skb);
+ }
+
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index c361ce782412..c3d5ab01fba7 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
+
+ static void vmci_transport_destruct(struct vsock_sock *vsk)
+ {
++ /* transport can be NULL if we hit a failure at init() time */
++ if (!vmci_trans(vsk))
++ return;
++
+ /* Ensure that the detach callback doesn't use the sk/vsk
+ * we are about to destruct.
+ */
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 5121729b8b63..ec3a828672ef 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
+ unsigned int lci = 1;
+ struct sock *sk;
+
+- read_lock_bh(&x25_list_lock);
+-
+- while ((sk = __x25_find_socket(lci, nb)) != NULL) {
++ while ((sk = x25_find_socket(lci, nb)) != NULL) {
+ sock_put(sk);
+ if (++lci == 4096) {
+ lci = 0;
+ break;
+ }
++ cond_resched();
+ }
+
+- read_unlock_bh(&x25_list_lock);
+ return lci;
+}
973 +