
From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 06 Feb 2019 20:14:42
Message-Id: 1549484056.35c2ea6ad304109f6f512a74f585da8d9194ebae.mpagano@gentoo
commit: 35c2ea6ad304109f6f512a74f585da8d9194ebae
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Feb 6 20:14:16 2019 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Feb 6 20:14:16 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=35c2ea6ad304109f6f512a74f585da8d9194ebae

proj/linux-patches: Linux patch 4.9.155

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1154_linux-4.9.155.patch | 1021 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1025 insertions(+)

diff --git a/0000_README b/0000_README
index dfd18f6..0ed3743 100644
--- a/0000_README
+++ b/0000_README
@@ -659,6 +659,10 @@ Patch: 1153_linux-4.9.154.patch
From: http://www.kernel.org
Desc: Linux 4.9.154

+Patch: 1154_linux-4.9.155.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.155
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1154_linux-4.9.155.patch b/1154_linux-4.9.155.patch
new file mode 100644
index 0000000..b8dc104
--- /dev/null
+++ b/1154_linux-4.9.155.patch
@@ -0,0 +1,1021 @@
+diff --git a/Makefile b/Makefile
+index 9964792e200f..1933ac9c3406 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 154
++SUBLEVEL = 155
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 318394ed5c7a..5e11ad3164e0 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
+ } else /* remote PCI bus */
+ base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
+
+- return base + (where & 0xffc) + (devfn << 12);
++ return base + where + (devfn << 12);
+ }
+
+ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index f6e71c73cceb..76c9b51fa7f1 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -297,8 +297,10 @@ int swsusp_arch_suspend(void)
+ dcache_clean_range(__idmap_text_start, __idmap_text_end);
+
+ /* Clean kvm setup code to PoC? */
+- if (el2_reset_needed())
++ if (el2_reset_needed()) {
+ dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
++ dcache_clean_range(__hyp_text_start, __hyp_text_end);
++ }
+
+ /*
+ * Tell the hibernation core that we've just restored
+diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
+index d3b5f75e652e..fcb486d09555 100644
+--- a/arch/arm64/kernel/hyp-stub.S
++++ b/arch/arm64/kernel/hyp-stub.S
+@@ -28,6 +28,8 @@
+ #include <asm/virt.h>
+
+ .text
++ .pushsection .hyp.text, "ax"
++
+ .align 11
+
+ ENTRY(__hyp_stub_vectors)
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index 2a21318fed1d..c9ca903462a6 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+ * we end up running with module randomization disabled.
+ */
+ module_alloc_base = (u64)_etext - MODULES_VSIZE;
++ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+
+ /*
+ * Try to map the FDT early. If this fails, we simply bail,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index f43caad30e1e..901aec4bb01d 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -862,6 +862,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ return;
+
+ mutex_lock(&gdp_mutex);
++ if (!kobject_has_children(glue_dir))
++ kobject_del(glue_dir);
+ kobject_put(glue_dir);
+ mutex_unlock(&gdp_mutex);
+ }
+diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
+index 524c8e0b72fd..40bdeca6d692 100644
+--- a/drivers/mmc/host/sdhci-iproc.c
++++ b/drivers/mmc/host/sdhci-iproc.c
+@@ -242,7 +242,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
+
+ iproc_host->data = iproc_data;
+
+- mmc_of_parse(host->mmc);
++ ret = mmc_of_parse(host->mmc);
++ if (ret)
++ goto err;
++
+ sdhci_get_of_property(pdev);
+
+ host->mmc->caps |= iproc_host->data->mmc_caps;
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
+index ef9bc26ebc1a..714593023bbc 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
+ u16 i, j;
+ u8 __iomem *bd;
+
++ netdev_reset_queue(ugeth->ndev);
++
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
+index 84bab9f0732e..9af0887c8a29 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -2037,9 +2037,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ {
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
++ u64 qword_field;
+ u32 dword_field;
+- int err;
++ u16 word_field;
+ u8 byte_field;
++ int err;
+ static const u8 a0_dmfs_query_hw_steering[] = {
+ [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
+ [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
+@@ -2067,19 +2069,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
++ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
++ param->qpc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
++ param->log_num_qps = byte_field & 0x1f;
++ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
++ param->srqc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
++ param->log_num_srqs = byte_field & 0x1f;
++ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
++ param->cqc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
++ param->log_num_cqs = byte_field & 0x1f;
++ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
++ param->altc_base = qword_field;
++ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
++ param->auxc_base = qword_field;
++ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
++ param->eqc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
++ param->log_num_eqs = byte_field & 0x1f;
++ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
++ param->num_sys_eqs = word_field & 0xfff;
++ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
++ param->rdmarc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
++ param->log_rd_per_qp = byte_field & 0x7;
+
+ MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+ if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+@@ -2098,22 +2113,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ /* steering attributes */
+ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+- MLX4_GET(param->log_mc_entry_sz, outbox,
+- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+- MLX4_GET(param->log_mc_table_sz, outbox,
+- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+- MLX4_GET(byte_field, outbox,
+- INIT_HCA_FS_A0_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
++ param->log_mc_entry_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
++ param->log_mc_table_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
+ param->dmfs_high_steer_mode =
+ a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
+ } else {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+- MLX4_GET(param->log_mc_entry_sz, outbox,
+- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+- MLX4_GET(param->log_mc_hash_sz, outbox,
+- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+- MLX4_GET(param->log_mc_table_sz, outbox,
+- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
++ param->log_mc_entry_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
++ param->log_mc_hash_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++ param->log_mc_table_sz = byte_field & 0x1f;
+ }
+
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+@@ -2137,15 +2151,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
+- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
++ param->mw_enabled = byte_field >> 7;
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++ param->log_mpt_sz = byte_field & 0x3f;
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++ param->log_uar_sz = byte_field & 0xf;
+
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
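The mlx4_QUERY_HCA() hunks above all follow one pattern: instead of letting MLX4_GET() write straight into a narrow struct field, the code reads into a naturally sized local (byte_field, word_field, qword_field) and then masks off the bits defined for that field, so neighbouring reserved bits returned by firmware can no longer leak into the parsed parameter. A minimal stand-alone C sketch of that pattern (the 5-bit width mirrors the "& 0x1f" masks above; the raw value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Read the whole byte the device returned, then keep only the
     * documented field width -- the low 5 bits here, as in
     * "param->log_num_qps = byte_field & 0x1f" above. */
    static uint8_t parse_log_num_qps(uint8_t byte_field)
    {
        return byte_field & 0x1f;
    }

    int main(void)
    {
        uint8_t raw = 0xe7; /* firmware left reserved high bits set */
        printf("log_num_qps = %u\n", parse_log_num_qps(raw)); /* 7, not 231 */
        return 0;
    }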
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 5d6eab19a9d8..da9246f6c31e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1216,14 +1216,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ int err = 0;
+ u8 *smac_v;
+
+- if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+- mlx5_core_warn(esw->dev,
+- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+- vport->vport);
+- return -EPERM;
+-
+- }
+-
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+@@ -1709,13 +1701,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+ mutex_lock(&esw->state_lock);
+ evport = &esw->vports[vport];
+
+- if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
++ if (evport->info.spoofchk && !is_valid_ether_addr(mac))
+ mlx5_core_warn(esw->dev,
+- "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
++ "Set invalid MAC while spoofchk is on, vport(%d)\n",
+ vport);
+- err = -EPERM;
+- goto unlock;
+- }
+
+ err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
+ if (err) {
+@@ -1859,6 +1848,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ evport = &esw->vports[vport];
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
++ if (pschk && !is_valid_ether_addr(evport->info.mac))
++ mlx5_core_warn(esw->dev,
++ "Spoofchk in set while MAC is invalid, vport(%d)\n",
++ evport->vport);
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index b299277361b7..4a2609c4dd6e 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -85,12 +85,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+ err = ipvlan_register_nf_hook();
+ if (!err) {
+ mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+- mdev->priv_flags |= IFF_L3MDEV_MASTER;
++ mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
+ } else
+ goto fail;
+ } else if (port->mode == IPVLAN_MODE_L3S) {
+ /* Old mode was L3S */
+- mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
++ mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+ ipvlan_unregister_nf_hook();
+ mdev->l3mdev_ops = NULL;
+ }
+@@ -158,7 +158,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
+
+ dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+ if (port->mode == IPVLAN_MODE_L3S) {
+- dev->priv_flags &= ~IFF_L3MDEV_MASTER;
++ dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+ ipvlan_unregister_nf_hook();
+ dev->l3mdev_ops = NULL;
+ }
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index c857d2d7bbec..69ffbd7b76f7 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -477,8 +477,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x32, { KEY_MUTE } },
+- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
+- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
++ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ { KE_KEY, 0x41, { KEY_NEXTSONG } },
+ { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 441d434a48c1..33e65b71c49a 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -48,6 +48,7 @@
+ #include "cifs_unicode.h"
+ #include "cifs_debug.h"
+ #include "cifs_fs_sb.h"
++#include "dns_resolve.h"
+ #include "ntlmssp.h"
+ #include "nterr.h"
+ #include "rfc1002pdu.h"
+@@ -306,6 +307,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
+ static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
+ const char *devname);
+
++/*
++ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
++ * get their ip addresses changed at some point.
++ *
++ * This should be called with server->srv_mutex held.
++ */
++#ifdef CONFIG_CIFS_DFS_UPCALL
++static int reconn_set_ipaddr(struct TCP_Server_Info *server)
++{
++ int rc;
++ int len;
++ char *unc, *ipaddr = NULL;
++
++ if (!server->hostname)
++ return -EINVAL;
++
++ len = strlen(server->hostname) + 3;
++
++ unc = kmalloc(len, GFP_KERNEL);
++ if (!unc) {
++ cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
++ return -ENOMEM;
++ }
++ snprintf(unc, len, "\\\\%s", server->hostname);
++
++ rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
++ kfree(unc);
++
++ if (rc < 0) {
++ cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
++ __func__, server->hostname, rc);
++ return rc;
++ }
++
++ rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
++ strlen(ipaddr));
++ kfree(ipaddr);
++
++ return !rc ? -1 : 0;
++}
++#else
++static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
++{
++ return 0;
++}
++#endif
++
+ /*
+ * cifs tcp session reconnection
+ *
+@@ -403,6 +451,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ rc = generic_ip_connect(server);
+ if (rc) {
+ cifs_dbg(FYI, "reconnect error %d\n", rc);
++ rc = reconn_set_ipaddr(server);
++ if (rc) {
++ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
++ __func__, rc);
++ }
+ mutex_unlock(&server->srv_mutex);
+ msleep(3000);
+ } else {
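For reference, the "len = strlen(server->hostname) + 3" sizing in reconn_set_ipaddr() above covers the two leading backslashes of the UNC plus the NUL terminator. A stand-alone sketch of the same arithmetic (the hostname is a made-up example):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *hostname = "fileserver.example.com";
        size_t len = strlen(hostname) + 3; /* "\\" (2 chars) + '\0' (1) */
        char *unc = malloc(len);

        if (!unc)
            return 1;
        snprintf(unc, len, "\\\\%s", hostname);
        printf("%s\n", unc); /* \\fileserver.example.com */
        free(unc);
        return 0;
    }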
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 50251a8af0ce..52b6e4a40748 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2686,8 +2686,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+ srch_inf->endOfSearch = true;
+ rc = 0;
+- }
+- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
++ } else
++ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ goto qdir_exit;
+ }
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index f903b86b06e5..29c0286bd638 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1164,15 +1164,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
+ */
+ void shrink_dcache_sb(struct super_block *sb)
+ {
+- long freed;
+-
+ do {
+ LIST_HEAD(dispose);
+
+- freed = list_lru_walk(&sb->s_dentry_lru,
++ list_lru_walk(&sb->s_dentry_lru,
+ dentry_lru_isolate_shrink, &dispose, 1024);
+-
+- this_cpu_sub(nr_dentry_unused, freed);
+ shrink_dentry_list(&dispose);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 05f1ec728840..073126707270 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -1705,9 +1705,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
+ goto next_iter;
+ }
+ if (ret == -E2BIG) {
+- n += rbm->bii - initial_bii;
+ rbm->bii = 0;
+ rbm->offset = 0;
++ n += (rbm->bii - initial_bii);
+ goto res_covered_end_of_rgrp;
+ }
+ return ret;
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index a64adc2fced9..56b4f855fa9b 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -101,9 +101,9 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
+ parent = dget_parent(dentry);
+ p_inode = parent->d_inode;
+
+- if (unlikely(!fsnotify_inode_watches_children(p_inode)))
++ if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
+ __fsnotify_update_child_dentry_flags(p_inode);
+- else if (p_inode->i_fsnotify_mask & mask) {
++ } else if (p_inode->i_fsnotify_mask & mask & ~FS_EVENT_ON_CHILD) {
+ struct name_snapshot name;
+
+ /* we are notifying a parent so come up with the new mask which
+@@ -207,6 +207,10 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ else
+ mnt = NULL;
+
++ /* An event "on child" is not intended for a mount mark */
++ if (mask & FS_EVENT_ON_CHILD)
++ mnt = NULL;
++
+ /*
+ * Optimization: srcu_read_lock() has a memory barrier which can
+ * be expensive. It protects walking the *_fsnotify_marks lists.
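The first fsnotify hunk masks the parent's interest with ~FS_EVENT_ON_CHILD, so a parent whose mark only carries the "events on children" marker bit no longer receives event types it never subscribed to. A compilable sketch of the two checks (the flag values follow include/linux/fsnotify_backend.h of that era, but treat them as illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FS_MODIFY         0x00000002u
    #define FS_EVENT_ON_CHILD 0x08000000u

    int main(void)
    {
        uint32_t p_mask = FS_EVENT_ON_CHILD;            /* marker bit only */
        uint32_t event  = FS_MODIFY | FS_EVENT_ON_CHILD;

        /* Old test: fires, because the marker bit alone overlaps. */
        printf("old: %s\n", (p_mask & event) ? "notify" : "skip");
        /* New test: the marker bit is ignored, so no spurious event. */
        printf("new: %s\n",
               (p_mask & event & ~FS_EVENT_ON_CHILD) ? "notify" : "skip");
        return 0;
    }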
+diff --git a/fs/read_write.c b/fs/read_write.c
+index ba280596ec78..9819f7c6c8c5 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -392,8 +392,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
+ iter->type |= WRITE;
+ ret = file->f_op->write_iter(&kiocb, iter);
+ BUG_ON(ret == -EIOCBQUEUED);
+- if (ret > 0)
++ if (ret > 0) {
+ *ppos = kiocb.ki_pos;
++ fsnotify_modify(file);
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL(vfs_iter_write);
+diff --git a/fs/super.c b/fs/super.c
+index 7e9beab77259..abe2541fb28c 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -119,13 +119,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
+ sb = container_of(shrink, struct super_block, s_shrink);
+
+ /*
+- * Don't call trylock_super as it is a potential
+- * scalability bottleneck. The counts could get updated
+- * between super_cache_count and super_cache_scan anyway.
+- * Call to super_cache_count with shrinker_rwsem held
+- * ensures the safety of call to list_lru_shrink_count() and
+- * s_op->nr_cached_objects().
++ * We don't call trylock_super() here as it is a scalability bottleneck,
++ * so we're exposed to partial setup state. The shrinker rwsem does not
++ * protect filesystem operations backing list_lru_shrink_count() or
++ * s_op->nr_cached_objects(). Counts can change between
++ * super_cache_count and super_cache_scan, so we really don't need locks
++ * here.
++ *
++ * However, if we are currently mounting the superblock, the underlying
++ * filesystem might be in a state of partial construction and hence it
++ * is dangerous to access it. trylock_super() uses a MS_BORN check to
++ * avoid this situation, so do the same here. The memory barrier is
++ * matched with the one in mount_fs() as we don't hold locks here.
+ */
++ if (!(sb->s_flags & MS_BORN))
++ return 0;
++ smp_rmb();
++
+ if (sb->s_op && sb->s_op->nr_cached_objects)
+ total_objects = sb->s_op->nr_cached_objects(sb, sc);
+
+@@ -1193,6 +1203,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
+ sb = root->d_sb;
+ BUG_ON(!sb);
+ WARN_ON(!sb->s_bdi);
++
++ /*
++ * Write barrier is for super_cache_count(). We place it before setting
++ * MS_BORN as the data dependency between the two functions is the
++ * superblock structure contents that we just set up, not the MS_BORN
++ * flag.
++ */
++ smp_wmb();
+ sb->s_flags |= MS_BORN;
+
+ error = security_sb_kern_mount(sb, flags, secdata);
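The smp_wmb()/smp_rmb() pair above is a standard publish/subscribe ordering: mount_fs() fully initialises the superblock before publishing MS_BORN, and super_cache_count() may touch the superblock contents only after observing the flag. A stand-alone C11 sketch of the same pairing (all names here are invented for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct sb_sketch {
        void *contents;   /* stands in for the superblock fields */
        atomic_bool born; /* stands in for MS_BORN */
    };

    /* Writer, as in mount_fs(): set everything up, then publish. */
    static void publish(struct sb_sketch *sb, void *contents)
    {
        sb->contents = contents;
        atomic_thread_fence(memory_order_release); /* smp_wmb() */
        atomic_store_explicit(&sb->born, true, memory_order_relaxed);
    }

    /* Reader, as in super_cache_count(): check the flag first. */
    static size_t count(struct sb_sketch *sb)
    {
        if (!atomic_load_explicit(&sb->born, memory_order_relaxed))
            return 0; /* still being mounted, don't look inside */
        atomic_thread_fence(memory_order_acquire); /* smp_rmb() */
        return sb->contents ? 1 : 0;
    }

    int main(void)
    {
        static struct sb_sketch sb;
        static int dummy;

        publish(&sb, &dummy);
        return count(&sb) == 1 ? 0 : 1;
    }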
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index e6284591599e..5957c6a3fd7f 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -113,6 +113,23 @@ extern void kobject_put(struct kobject *kobj);
+ extern const void *kobject_namespace(struct kobject *kobj);
+ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+
++/**
++ * kobject_has_children - Returns whether a kobject has children.
++ * @kobj: the object to test
++ *
++ * This will return whether a kobject has other kobjects as children.
++ *
++ * It does NOT account for the presence of attribute files, only sub
++ * directories. It also assumes there is no concurrent addition or
++ * removal of such children, and thus relies on external locking.
++ */
++static inline bool kobject_has_children(struct kobject *kobj)
++{
++ WARN_ON_ONCE(atomic_read(&kobj->kref.refcount) == 0);
++
++ return kobj->sd && kobj->sd->dir.subdirs;
++}
++
+ struct kobj_type {
+ void (*release)(struct kobject *kobj);
+ const struct sysfs_ops *sysfs_ops;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index f254982e1a8f..2ecf0f32444e 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1368,6 +1368,7 @@ struct net_device_ops {
+ * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
+ * entity (i.e. the master device for bridged veth)
+ * @IFF_MACSEC: device is a MACsec device
++ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ */
+ enum netdev_priv_flags {
+ IFF_802_1Q_VLAN = 1<<0,
+@@ -1398,6 +1399,7 @@ enum netdev_priv_flags {
+ IFF_RXFH_CONFIGURED = 1<<25,
+ IFF_PHONY_HEADROOM = 1<<26,
+ IFF_MACSEC = 1<<27,
++ IFF_L3MDEV_RX_HANDLER = 1<<28,
+ };
+
+ #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
+@@ -1427,6 +1429,7 @@ enum netdev_priv_flags {
+ #define IFF_TEAM IFF_TEAM
+ #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
+ #define IFF_MACSEC IFF_MACSEC
++#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
+
+ /**
+ * struct net_device - The DEVICE structure.
+@@ -4244,6 +4247,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
+ return dev->priv_flags & IFF_SUPP_NOFCS;
+ }
+
++static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
++{
++ return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
++}
++
+ static inline bool netif_is_l3_master(const struct net_device *dev)
+ {
+ return dev->priv_flags & IFF_L3MDEV_MASTER;
+diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
+index 3832099289c5..128487658ff7 100644
+--- a/include/net/l3mdev.h
++++ b/include/net/l3mdev.h
+@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
+
+ if (netif_is_l3_slave(skb->dev))
+ master = netdev_master_upper_dev_get_rcu(skb->dev);
+- else if (netif_is_l3_master(skb->dev))
++ else if (netif_is_l3_master(skb->dev) ||
++ netif_has_l3_rx_handler(skb->dev))
+ master = skb->dev;
+
+ if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 6dd7ff4b337a..d9394fcd0e2c 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -525,12 +525,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
+ return NULL;
+ }
+
+-static struct task_struct *find_child_reaper(struct task_struct *father)
++static struct task_struct *find_child_reaper(struct task_struct *father,
++ struct list_head *dead)
+ __releases(&tasklist_lock)
+ __acquires(&tasklist_lock)
+ {
+ struct pid_namespace *pid_ns = task_active_pid_ns(father);
+ struct task_struct *reaper = pid_ns->child_reaper;
++ struct task_struct *p, *n;
+
+ if (likely(reaper != father))
+ return reaper;
+@@ -546,6 +548,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
+ panic("Attempted to kill init! exitcode=0x%08x\n",
+ father->signal->group_exit_code ?: father->exit_code);
+ }
++
++ list_for_each_entry_safe(p, n, dead, ptrace_entry) {
++ list_del_init(&p->ptrace_entry);
++ release_task(p);
++ }
++
+ zap_pid_ns_processes(pid_ns);
+ write_lock_irq(&tasklist_lock);
+
+@@ -632,7 +640,7 @@ static void forget_original_parent(struct task_struct *father,
+ exit_ptrace(father, dead);
+
+ /* Can drop and reacquire tasklist_lock */
+- reaper = find_child_reaper(father);
++ reaper = find_child_reaper(father, dead);
+ if (list_empty(&father->children))
+ return;
+
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 851efb004857..4f1f5fd12042 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -336,7 +336,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+ if (fail || tk->addr_valid == 0) {
+ pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
+ pfn, tk->tsk->comm, tk->tsk->pid);
+- force_sig(SIGKILL, tk->tsk);
++ do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
++ tk->tsk, PIDTYPE_PID);
+ }
+
+ /*
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 821623fc7091..b08c1a4a1c22 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1044,10 +1044,13 @@ out:
+ * If migration is successful, decrease refcount of the newpage
+ * which will not free the page because new page owner increased
+ * refcounter. As well, if it is LRU page, add the page to LRU
+- * list in here.
++ * list in here. Use the old state of the isolated source page to
++ * determine if we migrated a LRU page. newpage was already unlocked
++ * and possibly modified by its owner - don't rely on the page
++ * state.
+ */
+ if (rc == MIGRATEPAGE_SUCCESS) {
+- if (unlikely(__PageMovable(newpage)))
++ if (unlikely(!is_lru))
+ put_page(newpage);
+ else
+ putback_lru_page(newpage);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 4a184157cc3d..1de3695cb419 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -861,6 +861,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
+ * still freeing memory.
+ */
+ read_lock(&tasklist_lock);
++
++ /*
++ * The task 'p' might have already exited before reaching here. The
++ * put_task_struct() will free task_struct 'p' while the loop still try
++ * to access the field of 'p', so, get an extra reference.
++ */
++ get_task_struct(p);
+ for_each_thread(p, t) {
+ list_for_each_entry(child, &t->children, sibling) {
+ unsigned int child_points;
+@@ -880,6 +887,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
+ }
+ }
+ }
++ put_task_struct(p);
+ read_unlock(&tasklist_lock);
+
+ p = find_lock_task_mm(victim);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 496f8d86b503..c7334d1e392a 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -423,6 +423,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ * fragment.
+ */
+
++ err = -EINVAL;
+ /* Find out where to put this fragment. */
+ prev_tail = qp->q.fragments_tail;
+ if (!prev_tail)
+@@ -499,7 +500,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+
+ discard_qp:
+ inet_frag_kill(&qp->q);
+- err = -EINVAL;
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+ err:
+ kfree_skb(skb);
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index c81b2c5caf26..8885dbad217b 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -359,6 +359,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ err = -EINVAL;
+ goto out_unlock;
+ }
++ }
++
++ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ err = -ENODEV;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index b96dbe38ecad..4ae758bcb2cf 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -83,8 +83,7 @@
+ #define L2TP_SLFLAG_S 0x40000000
+ #define L2TP_SL_SEQ_MASK 0x00ffffff
+
+-#define L2TP_HDR_SIZE_SEQ 10
+-#define L2TP_HDR_SIZE_NOSEQ 6
++#define L2TP_HDR_SIZE_MAX 14
+
+ /* Default trace flags */
+ #define L2TP_DEFAULT_DEBUG_FLAGS 0
+@@ -796,11 +795,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ "%s: recv data ns=%u, session nr=%u\n",
+ session->name, ns, session->nr);
+ }
++ ptr += 4;
+ }
+
+- /* Advance past L2-specific header, if present */
+- ptr += session->l2specific_len;
+-
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Received a packet with sequence numbers. If we're the LNS,
+ * check if we sre sending sequence numbers and if not,
+@@ -944,7 +941,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ __skb_pull(skb, sizeof(struct udphdr));
+
+ /* Short packet? */
+- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
++ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: recv short packet (len=%d)\n",
+ tunnel->name, skb->len);
+@@ -1023,6 +1020,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ goto error;
+ }
+
++ if (tunnel->version == L2TP_HDR_VER_3 &&
++ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++ goto error;
++
+ l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_session_dec_refcount(session);
+
+@@ -1122,21 +1123,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
+ memcpy(bufp, &session->cookie[0], session->cookie_len);
+ bufp += session->cookie_len;
+ }
+- if (session->l2specific_len) {
+- if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+- u32 l2h = 0;
+- if (session->send_seq) {
+- l2h = 0x40000000 | session->ns;
+- session->ns++;
+- session->ns &= 0xffffff;
+- l2tp_dbg(session, L2TP_MSG_SEQ,
+- "%s: updated ns to %u\n",
+- session->name, session->ns);
+- }
++ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
++ u32 l2h = 0;
+
+- *((__be32 *) bufp) = htonl(l2h);
++ if (session->send_seq) {
++ l2h = 0x40000000 | session->ns;
++ session->ns++;
++ session->ns &= 0xffffff;
++ l2tp_dbg(session, L2TP_MSG_SEQ,
++ "%s: updated ns to %u\n",
++ session->name, session->ns);
+ }
+- bufp += session->l2specific_len;
++
++ *((__be32 *)bufp) = htonl(l2h);
++ bufp += 4;
+ }
+
+ return bufp - optr;
+@@ -1813,7 +1813,7 @@ int l2tp_session_delete(struct l2tp_session *session)
+ EXPORT_SYMBOL_GPL(l2tp_session_delete);
+
+ /* We come here whenever a session's send_seq, cookie_len or
+- * l2specific_len parameters are set.
++ * l2specific_type parameters are set.
+ */
+ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+ {
+@@ -1822,7 +1822,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+ if (session->send_seq)
+ session->hdr_len += 4;
+ } else {
+- session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
++ session->hdr_len = 4 + session->cookie_len;
++ session->hdr_len += l2tp_get_l2specific_len(session);
+ if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ session->hdr_len += 4;
+ }
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 86356a23a0a7..7cc49715606e 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -314,6 +314,37 @@ do { \
+ #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
+ #endif
+
++static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
++{
++ switch (session->l2specific_type) {
++ case L2TP_L2SPECTYPE_DEFAULT:
++ return 4;
++ case L2TP_L2SPECTYPE_NONE:
++ default:
++ return 0;
++ }
++}
++
++static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
++ unsigned char **ptr, unsigned char **optr)
++{
++ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
++
++ if (opt_len > 0) {
++ int off = *ptr - *optr;
++
++ if (!pskb_may_pull(skb, off + opt_len))
++ return -1;
++
++ if (skb->data != *optr) {
++ *optr = skb->data;
++ *ptr = skb->data + off;
++ }
++ }
++
++ return 0;
++}
++
+ #define l2tp_printk(ptr, type, func, fmt, ...) \
+ do { \
+ if (((ptr)->debug) & (type)) \
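l2tp_v3_ensure_opt_in_linear() above recomputes *ptr and *optr from skb->data because pskb_may_pull() may reallocate the skb head and move the data; any cached pointer into the old buffer would dangle. A user-space analogy using realloc() (purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *buf = malloc(8);
        char *cursor, *grown;
        size_t off;

        if (!buf)
            return 1;
        memcpy(buf, "l2tpv3", 7);
        cursor = buf + 6;             /* parse position inside the buffer */
        off = (size_t)(cursor - buf); /* save the offset, like *ptr - *optr */

        grown = realloc(buf, 4096);   /* may move the block, like pskb_may_pull() */
        if (!grown) {
            free(buf);
            return 1;
        }
        buf = grown;
        cursor = buf + off;           /* recompute from the new base */
        printf("cursor offset still %zu\n", (size_t)(cursor - buf));
        free(buf);
        return 0;
    }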
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 9d77a54e8854..03a696d3bcd9 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -157,6 +157,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ }
+
++ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++ goto discard_sess;
++
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
+
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 247097289fd0..5e6d09863480 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -169,6 +169,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ }
+
++ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++ goto discard_sess;
++
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
+ tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index 94d05806a9a2..f0ecaec1ff3d 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
+ {
+ struct nr_sock *nr = nr_sk(sk);
+
+- mod_timer(&nr->t1timer, jiffies + nr->t1);
++ sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
+ }
+
+ void nr_start_t2timer(struct sock *sk)
+ {
+ struct nr_sock *nr = nr_sk(sk);
+
+- mod_timer(&nr->t2timer, jiffies + nr->t2);
++ sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
+ }
+
+ void nr_start_t4timer(struct sock *sk)
+ {
+ struct nr_sock *nr = nr_sk(sk);
+
+- mod_timer(&nr->t4timer, jiffies + nr->t4);
++ sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
+ }
+
+ void nr_start_idletimer(struct sock *sk)
+@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
+ struct nr_sock *nr = nr_sk(sk);
+
+ if (nr->idle > 0)
+- mod_timer(&nr->idletimer, jiffies + nr->idle);
++ sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
+ }
+
+ void nr_start_heartbeat(struct sock *sk)
+ {
+- mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
++ sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
+ }
+
+ void nr_stop_t1timer(struct sock *sk)
+ {
+- del_timer(&nr_sk(sk)->t1timer);
++ sk_stop_timer(sk, &nr_sk(sk)->t1timer);
+ }
+
+ void nr_stop_t2timer(struct sock *sk)
+ {
+- del_timer(&nr_sk(sk)->t2timer);
++ sk_stop_timer(sk, &nr_sk(sk)->t2timer);
+ }
+
+ void nr_stop_t4timer(struct sock *sk)
+ {
+- del_timer(&nr_sk(sk)->t4timer);
++ sk_stop_timer(sk, &nr_sk(sk)->t4timer);
+ }
+
+ void nr_stop_idletimer(struct sock *sk)
+ {
+- del_timer(&nr_sk(sk)->idletimer);
++ sk_stop_timer(sk, &nr_sk(sk)->idletimer);
+ }
+
+ void nr_stop_heartbeat(struct sock *sk)
+ {
+- del_timer(&sk->sk_timer);
++ sk_stop_timer(sk, &sk->sk_timer);
+ }
+
+ int nr_t1timer_running(struct sock *sk)
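The nr_timer.c conversion matters because sk_reset_timer()/sk_stop_timer() keep a reference on the socket for as long as a timer is pending, so the handler can never run against a freed sock, which plain mod_timer()/del_timer() cannot guarantee. Their behaviour is roughly the following sketch (paraphrased from memory of net/core/sock.c, not copied from this patch):

    void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                        unsigned long expires)
    {
        if (!mod_timer(timer, expires))
            sock_hold(sk); /* timer was not pending: take a reference */
    }

    void sk_stop_timer(struct sock *sk, struct timer_list *timer)
    {
        if (del_timer(timer))
            __sock_put(sk); /* cancelled a pending timer: drop its reference */
    }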
+diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
+index 0fc76d845103..9f704a7f2a28 100644
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev)
+
+ /*
+ * Route a frame to an appropriate AX.25 connection.
++ * A NULL ax25_cb indicates an internally generated frame.
+ */
+ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ {
+@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+
+ if (skb->len < ROSE_MIN_LEN)
+ return res;
++
++ if (!ax25)
++ return rose_loopback_queue(skb, NULL);
++
+ frametype = skb->data[2];
+ lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+ if (frametype == ROSE_CALL_REQUEST &&