Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 02 May 2019 10:16:13
Message-Id: 1556792152.e52d5380938fb702f641bac06fa791a7477aa322.mpagano@gentoo
1 commit: e52d5380938fb702f641bac06fa791a7477aa322
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 2 10:15:52 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 2 10:15:52 2019 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e52d5380938fb702f641bac06fa791a7477aa322
7
8 Linux patch 4.9.172
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1171_linux-4.9.172.patch | 3013 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3017 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5425b73..ab73916 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -727,6 +727,10 @@ Patch: 1170_linux-4.9.171.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.171
23
24 +Patch: 1171_linux-4.9.172.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.172
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
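The README entry above registers 1171_linux-4.9.172.patch alongside the rest of the genpatches series. Normally the gentoo-sources ebuild applies the whole series automatically; purely as a sketch, the new increment could also be applied by hand to a plain 4.9.171 tree roughly as follows (the source-tree path is an assumption, not part of this commit):

    # Sketch only: manual application of the incremental patch.
    # /usr/src/linux-4.9.171 is an assumed location of an unpatched 4.9.171 tree.
    cd /usr/src/linux-4.9.171
    patch -p1 < /path/to/1171_linux-4.9.172.patch
    # The Makefile hunk below bumps SUBLEVEL from 171 to 172,
    # so this should now show "SUBLEVEL = 172":
    grep '^SUBLEVEL' Makefile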
32 diff --git a/1171_linux-4.9.172.patch b/1171_linux-4.9.172.patch
33 new file mode 100644
34 index 0000000..709313d
35 --- /dev/null
36 +++ b/1171_linux-4.9.172.patch
37 @@ -0,0 +1,3013 @@
38 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
39 +index c708a50b060e..a1472b48ee22 100644
40 +--- a/Documentation/kernel-parameters.txt
41 ++++ b/Documentation/kernel-parameters.txt
42 +@@ -2758,6 +2758,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
43 +
44 + nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
45 +
46 ++ nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
47 ++ check bypass). With this option data leaks are possible
48 ++ in the system.
49 ++
50 + nosmt [KNL,S390] Disable symmetric multithreading (SMT).
51 + Equivalent to smt=1.
52 +
53 +@@ -2765,7 +2769,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
54 + nosmt=force: Force disable SMT, cannot be undone
55 + via the sysfs control file.
56 +
57 +- nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
58 ++ nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
59 + (indirect branch prediction) vulnerability. System may
60 + allow data leaks with this option, which is equivalent
61 + to spectre_v2=off.
62 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
63 +index dbdc4130e149..0335285f3918 100644
64 +--- a/Documentation/networking/ip-sysctl.txt
65 ++++ b/Documentation/networking/ip-sysctl.txt
66 +@@ -405,6 +405,7 @@ tcp_min_rtt_wlen - INTEGER
67 + minimum RTT when it is moved to a longer path (e.g., due to traffic
68 + engineering). A longer window makes the filter more resistant to RTT
69 + inflations such as transient congestion. The unit is seconds.
70 ++ Possible values: 0 - 86400 (1 day)
71 + Default: 300
72 +
73 + tcp_moderate_rcvbuf - BOOLEAN
74 +diff --git a/Makefile b/Makefile
75 +index dbdef749e1c8..75cba5fbdb46 100644
76 +--- a/Makefile
77 ++++ b/Makefile
78 +@@ -1,6 +1,6 @@
79 + VERSION = 4
80 + PATCHLEVEL = 9
81 +-SUBLEVEL = 171
82 ++SUBLEVEL = 172
83 + EXTRAVERSION =
84 + NAME = Roaring Lionus
85 +
86 +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
87 +index 2d7f2bb0d66a..a67ed746b0e3 100644
88 +--- a/arch/arm/boot/compressed/head.S
89 ++++ b/arch/arm/boot/compressed/head.S
90 +@@ -1383,7 +1383,21 @@ ENTRY(efi_stub_entry)
91 +
92 + @ Preserve return value of efi_entry() in r4
93 + mov r4, r0
94 +- bl cache_clean_flush
95 ++
96 ++ @ our cache maintenance code relies on CP15 barrier instructions
97 ++ @ but since we arrived here with the MMU and caches configured
98 ++ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
99 ++ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
100 ++ @ the enable path will be executed on v7+ only.
101 ++ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
102 ++ tst r1, #(1 << 5) @ CP15BEN bit set?
103 ++ bne 0f
104 ++ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
105 ++ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
106 ++ ARM( .inst 0xf57ff06f @ v7+ isb )
107 ++ THUMB( isb )
108 ++
109 ++0: bl cache_clean_flush
110 + bl cache_off
111 +
112 + @ Set parameters for booting zImage according to boot protocol
113 +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
114 +index 7913a5cf6806..b9c788790c0f 100644
115 +--- a/arch/mips/kernel/scall64-o32.S
116 ++++ b/arch/mips/kernel/scall64-o32.S
117 +@@ -125,7 +125,7 @@ trace_a_syscall:
118 + subu t1, v0, __NR_O32_Linux
119 + move a1, v0
120 + bnez t1, 1f /* __NR_syscall at offset 0 */
121 +- lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
122 ++ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
123 + .set pop
124 +
125 + 1: jal syscall_trace_enter
126 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
127 +index 28ce17405aab..9f840d9fdfcb 100644
128 +--- a/drivers/block/loop.c
129 ++++ b/drivers/block/loop.c
130 +@@ -82,7 +82,6 @@
131 +
132 + static DEFINE_IDR(loop_index_idr);
133 + static DEFINE_MUTEX(loop_index_mutex);
134 +-static DEFINE_MUTEX(loop_ctl_mutex);
135 +
136 + static int max_part;
137 + static int part_shift;
138 +@@ -1034,7 +1033,7 @@ static int loop_clr_fd(struct loop_device *lo)
139 + */
140 + if (atomic_read(&lo->lo_refcnt) > 1) {
141 + lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
142 +- mutex_unlock(&loop_ctl_mutex);
143 ++ mutex_unlock(&lo->lo_ctl_mutex);
144 + return 0;
145 + }
146 +
147 +@@ -1083,12 +1082,12 @@ static int loop_clr_fd(struct loop_device *lo)
148 + if (!part_shift)
149 + lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
150 + loop_unprepare_queue(lo);
151 +- mutex_unlock(&loop_ctl_mutex);
152 ++ mutex_unlock(&lo->lo_ctl_mutex);
153 + /*
154 +- * Need not hold loop_ctl_mutex to fput backing file.
155 +- * Calling fput holding loop_ctl_mutex triggers a circular
156 ++ * Need not hold lo_ctl_mutex to fput backing file.
157 ++ * Calling fput holding lo_ctl_mutex triggers a circular
158 + * lock dependency possibility warning as fput can take
159 +- * bd_mutex which is usually taken before loop_ctl_mutex.
160 ++ * bd_mutex which is usually taken before lo_ctl_mutex.
161 + */
162 + fput(filp);
163 + return 0;
164 +@@ -1351,7 +1350,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
165 + struct loop_device *lo = bdev->bd_disk->private_data;
166 + int err;
167 +
168 +- mutex_lock_nested(&loop_ctl_mutex, 1);
169 ++ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
170 + switch (cmd) {
171 + case LOOP_SET_FD:
172 + err = loop_set_fd(lo, mode, bdev, arg);
173 +@@ -1360,7 +1359,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
174 + err = loop_change_fd(lo, bdev, arg);
175 + break;
176 + case LOOP_CLR_FD:
177 +- /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
178 ++ /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
179 + err = loop_clr_fd(lo);
180 + if (!err)
181 + goto out_unlocked;
182 +@@ -1396,7 +1395,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
183 + default:
184 + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
185 + }
186 +- mutex_unlock(&loop_ctl_mutex);
187 ++ mutex_unlock(&lo->lo_ctl_mutex);
188 +
189 + out_unlocked:
190 + return err;
191 +@@ -1529,16 +1528,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
192 +
193 + switch(cmd) {
194 + case LOOP_SET_STATUS:
195 +- mutex_lock(&loop_ctl_mutex);
196 ++ mutex_lock(&lo->lo_ctl_mutex);
197 + err = loop_set_status_compat(
198 + lo, (const struct compat_loop_info __user *) arg);
199 +- mutex_unlock(&loop_ctl_mutex);
200 ++ mutex_unlock(&lo->lo_ctl_mutex);
201 + break;
202 + case LOOP_GET_STATUS:
203 +- mutex_lock(&loop_ctl_mutex);
204 ++ mutex_lock(&lo->lo_ctl_mutex);
205 + err = loop_get_status_compat(
206 + lo, (struct compat_loop_info __user *) arg);
207 +- mutex_unlock(&loop_ctl_mutex);
208 ++ mutex_unlock(&lo->lo_ctl_mutex);
209 + break;
210 + case LOOP_SET_CAPACITY:
211 + case LOOP_CLR_FD:
212 +@@ -1582,7 +1581,7 @@ static void __lo_release(struct loop_device *lo)
213 + if (atomic_dec_return(&lo->lo_refcnt))
214 + return;
215 +
216 +- mutex_lock(&loop_ctl_mutex);
217 ++ mutex_lock(&lo->lo_ctl_mutex);
218 + if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
219 + /*
220 + * In autoclear mode, stop the loop thread
221 +@@ -1599,7 +1598,7 @@ static void __lo_release(struct loop_device *lo)
222 + loop_flush(lo);
223 + }
224 +
225 +- mutex_unlock(&loop_ctl_mutex);
226 ++ mutex_unlock(&lo->lo_ctl_mutex);
227 + }
228 +
229 + static void lo_release(struct gendisk *disk, fmode_t mode)
230 +@@ -1645,10 +1644,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
231 + struct loop_device *lo = ptr;
232 + struct loop_func_table *xfer = data;
233 +
234 +- mutex_lock(&loop_ctl_mutex);
235 ++ mutex_lock(&lo->lo_ctl_mutex);
236 + if (lo->lo_encryption == xfer)
237 + loop_release_xfer(lo);
238 +- mutex_unlock(&loop_ctl_mutex);
239 ++ mutex_unlock(&lo->lo_ctl_mutex);
240 + return 0;
241 + }
242 +
243 +@@ -1814,6 +1813,7 @@ static int loop_add(struct loop_device **l, int i)
244 + if (!part_shift)
245 + disk->flags |= GENHD_FL_NO_PART_SCAN;
246 + disk->flags |= GENHD_FL_EXT_DEVT;
247 ++ mutex_init(&lo->lo_ctl_mutex);
248 + atomic_set(&lo->lo_refcnt, 0);
249 + lo->lo_number = i;
250 + spin_lock_init(&lo->lo_lock);
251 +@@ -1926,19 +1926,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
252 + ret = loop_lookup(&lo, parm);
253 + if (ret < 0)
254 + break;
255 +- mutex_lock(&loop_ctl_mutex);
256 ++ mutex_lock(&lo->lo_ctl_mutex);
257 + if (lo->lo_state != Lo_unbound) {
258 + ret = -EBUSY;
259 +- mutex_unlock(&loop_ctl_mutex);
260 ++ mutex_unlock(&lo->lo_ctl_mutex);
261 + break;
262 + }
263 + if (atomic_read(&lo->lo_refcnt) > 0) {
264 + ret = -EBUSY;
265 +- mutex_unlock(&loop_ctl_mutex);
266 ++ mutex_unlock(&lo->lo_ctl_mutex);
267 + break;
268 + }
269 + lo->lo_disk->private_data = NULL;
270 +- mutex_unlock(&loop_ctl_mutex);
271 ++ mutex_unlock(&lo->lo_ctl_mutex);
272 + idr_remove(&loop_index_idr, lo->lo_number);
273 + loop_remove(lo);
274 + break;
275 +diff --git a/drivers/block/loop.h b/drivers/block/loop.h
276 +index a923e74495ce..60f0fd2c0c65 100644
277 +--- a/drivers/block/loop.h
278 ++++ b/drivers/block/loop.h
279 +@@ -55,6 +55,7 @@ struct loop_device {
280 +
281 + spinlock_t lo_lock;
282 + int lo_state;
283 ++ struct mutex lo_ctl_mutex;
284 + struct kthread_worker worker;
285 + struct task_struct *worker_task;
286 + bool use_dio;
287 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
288 +index d032032337e7..f37a6ef4f544 100644
289 +--- a/drivers/dma/sh/rcar-dmac.c
290 ++++ b/drivers/dma/sh/rcar-dmac.c
291 +@@ -1311,6 +1311,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
292 + enum dma_status status;
293 + unsigned long flags;
294 + unsigned int residue;
295 ++ bool cyclic;
296 +
297 + status = dma_cookie_status(chan, cookie, txstate);
298 + if (status == DMA_COMPLETE || !txstate)
299 +@@ -1318,10 +1319,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
300 +
301 + spin_lock_irqsave(&rchan->lock, flags);
302 + residue = rcar_dmac_chan_get_residue(rchan, cookie);
303 ++ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
304 + spin_unlock_irqrestore(&rchan->lock, flags);
305 +
306 + /* if there's no residue, the cookie is complete */
307 +- if (!residue)
308 ++ if (!residue && !cyclic)
309 + return DMA_COMPLETE;
310 +
311 + dma_set_residue(txstate, residue);
312 +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
313 +index c7e6c9839c9a..51d34e7275ab 100644
314 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c
315 ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
316 +@@ -846,7 +846,7 @@ static void
317 + vc4_crtc_reset(struct drm_crtc *crtc)
318 + {
319 + if (crtc->state)
320 +- __drm_atomic_helper_crtc_destroy_state(crtc->state);
321 ++ vc4_crtc_destroy_state(crtc, crtc->state);
322 +
323 + crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
324 + if (crtc->state)
325 +diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
326 +index b0502e2782c1..98a4cb5d4993 100644
327 +--- a/drivers/hwtracing/intel_th/gth.c
328 ++++ b/drivers/hwtracing/intel_th/gth.c
329 +@@ -605,7 +605,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
330 + othdev->output.port = -1;
331 + othdev->output.active = false;
332 + gth->output[port].output = NULL;
333 +- for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
334 ++ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
335 + if (gth->master[master] == port)
336 + gth->master[master] = -1;
337 + spin_unlock(&gth->gth_lock);
338 +diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
339 +index 46b64970058e..49d55a0322f6 100644
340 +--- a/drivers/infiniband/sw/rdmavt/mr.c
341 ++++ b/drivers/infiniband/sw/rdmavt/mr.c
342 +@@ -497,11 +497,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
343 + if (unlikely(mapped_segs == mr->mr.max_segs))
344 + return -ENOMEM;
345 +
346 +- if (mr->mr.length == 0) {
347 +- mr->mr.user_base = addr;
348 +- mr->mr.iova = addr;
349 +- }
350 +-
351 + m = mapped_segs / RVT_SEGSZ;
352 + n = mapped_segs % RVT_SEGSZ;
353 + mr->mr.map[m]->segs[n].vaddr = (void *)addr;
354 +@@ -518,17 +513,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
355 + * @sg_nents: number of entries in sg
356 + * @sg_offset: offset in bytes into sg
357 + *
358 ++ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
359 ++ *
360 + * Return: number of sg elements mapped to the memory region
361 + */
362 + int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
363 + int sg_nents, unsigned int *sg_offset)
364 + {
365 + struct rvt_mr *mr = to_imr(ibmr);
366 ++ int ret;
367 +
368 + mr->mr.length = 0;
369 + mr->mr.page_shift = PAGE_SHIFT;
370 +- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
371 +- rvt_set_page);
372 ++ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
373 ++ mr->mr.user_base = ibmr->iova;
374 ++ mr->mr.iova = ibmr->iova;
375 ++ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
376 ++ mr->mr.length = (size_t)ibmr->length;
377 ++ return ret;
378 + }
379 +
380 + /**
381 +@@ -559,6 +561,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
382 + ibmr->rkey = key;
383 + mr->mr.lkey = key;
384 + mr->mr.access_flags = access;
385 ++ mr->mr.iova = ibmr->iova;
386 + atomic_set(&mr->mr.lkey_invalid, 0);
387 +
388 + return 0;
389 +diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
390 +index f798f427a46f..275f957604f7 100644
391 +--- a/drivers/input/rmi4/rmi_f11.c
392 ++++ b/drivers/input/rmi4/rmi_f11.c
393 +@@ -1198,7 +1198,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
394 + ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
395 +
396 + rc = f11_write_control_regs(fn, &f11->sens_query,
397 +- &f11->dev_controls, fn->fd.query_base_addr);
398 ++ &f11->dev_controls, fn->fd.control_base_addr);
399 + if (rc)
400 + dev_warn(&fn->dev, "Failed to write control registers\n");
401 +
402 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
403 +index 2aae6f88dca0..a52663745051 100644
404 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
405 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
406 +@@ -58,6 +58,8 @@ static int __init fm10k_init_module(void)
407 + /* create driver workqueue */
408 + fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
409 + fm10k_driver_name);
410 ++ if (!fm10k_workqueue)
411 ++ return -ENOMEM;
412 +
413 + fm10k_dbg_init();
414 +
415 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
416 +index d5e8ac86c195..54872f8f2f7d 100644
417 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
418 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
419 +@@ -1365,7 +1365,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
420 + break;
421 + case MLX5_MODULE_ID_SFP:
422 + modinfo->type = ETH_MODULE_SFF_8472;
423 +- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
424 ++ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
425 + break;
426 + default:
427 + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
428 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
429 +index 43d7c8378fb4..0bad09d06206 100644
430 +--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
431 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
432 +@@ -368,10 +368,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
433 + size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
434 +
435 + i2c_addr = MLX5_I2C_ADDR_LOW;
436 +- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
437 +- i2c_addr = MLX5_I2C_ADDR_HIGH;
438 +- offset -= MLX5_EEPROM_PAGE_LENGTH;
439 +- }
440 +
441 + MLX5_SET(mcia_reg, in, l, 0);
442 + MLX5_SET(mcia_reg, in, module, module_num);
443 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
444 +index cc847e0cac2d..e3ed70a24029 100644
445 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
446 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
447 +@@ -2059,11 +2059,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
448 + if (err)
449 + return err;
450 +
451 ++ mlxsw_sp_port->link.autoneg = autoneg;
452 ++
453 + if (!netif_running(dev))
454 + return 0;
455 +
456 +- mlxsw_sp_port->link.autoneg = autoneg;
457 +-
458 + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
459 + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
460 +
461 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
462 +index b46b56ad7517..2c04a0739fd6 100644
463 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
464 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
465 +@@ -1796,8 +1796,6 @@ static int stmmac_open(struct net_device *dev)
466 + struct stmmac_priv *priv = netdev_priv(dev);
467 + int ret;
468 +
469 +- stmmac_check_ether_addr(priv);
470 +-
471 + if (priv->hw->pcs != STMMAC_PCS_RGMII &&
472 + priv->hw->pcs != STMMAC_PCS_TBI &&
473 + priv->hw->pcs != STMMAC_PCS_RTBI) {
474 +@@ -3355,6 +3353,8 @@ int stmmac_dvr_probe(struct device *device,
475 + if (ret)
476 + goto error_hw_init;
477 +
478 ++ stmmac_check_ether_addr(priv);
479 ++
480 + ndev->netdev_ops = &stmmac_netdev_ops;
481 +
482 + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
483 +diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
484 +index cfd81eb1b532..ddceed3c5a4a 100644
485 +--- a/drivers/net/slip/slhc.c
486 ++++ b/drivers/net/slip/slhc.c
487 +@@ -153,7 +153,7 @@ out_fail:
488 + void
489 + slhc_free(struct slcompress *comp)
490 + {
491 +- if ( comp == NULLSLCOMPR )
492 ++ if ( IS_ERR_OR_NULL(comp) )
493 + return;
494 +
495 + if ( comp->tstate != NULLSLSTATE )
496 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
497 +index b8874faaa813..3eb6d48c3148 100644
498 +--- a/drivers/net/team/team.c
499 ++++ b/drivers/net/team/team.c
500 +@@ -1163,6 +1163,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
501 + return -EINVAL;
502 + }
503 +
504 ++ if (netdev_has_upper_dev(dev, port_dev)) {
505 ++ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
506 ++ portname);
507 ++ return -EBUSY;
508 ++ }
509 ++
510 + if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
511 + vlan_uses_dev(dev)) {
512 + netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
513 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
514 +index e9d6cf146fcc..c17b254e4f64 100644
515 +--- a/drivers/usb/core/driver.c
516 ++++ b/drivers/usb/core/driver.c
517 +@@ -1888,14 +1888,11 @@ int usb_runtime_idle(struct device *dev)
518 + return -EBUSY;
519 + }
520 +
521 +-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
522 ++static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
523 + {
524 + struct usb_hcd *hcd = bus_to_hcd(udev->bus);
525 + int ret = -EPERM;
526 +
527 +- if (enable && !udev->usb2_hw_lpm_allowed)
528 +- return 0;
529 +-
530 + if (hcd->driver->set_usb2_hw_lpm) {
531 + ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
532 + if (!ret)
533 +@@ -1905,6 +1902,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
534 + return ret;
535 + }
536 +
537 ++int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
538 ++{
539 ++ if (!udev->usb2_hw_lpm_capable ||
540 ++ !udev->usb2_hw_lpm_allowed ||
541 ++ udev->usb2_hw_lpm_enabled)
542 ++ return 0;
543 ++
544 ++ return usb_set_usb2_hardware_lpm(udev, 1);
545 ++}
546 ++
547 ++int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
548 ++{
549 ++ if (!udev->usb2_hw_lpm_enabled)
550 ++ return 0;
551 ++
552 ++ return usb_set_usb2_hardware_lpm(udev, 0);
553 ++}
554 ++
555 + #endif /* CONFIG_PM */
556 +
557 + struct bus_type usb_bus_type = {
558 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
559 +index 7b6919086539..8fddb94f1874 100644
560 +--- a/drivers/usb/core/hub.c
561 ++++ b/drivers/usb/core/hub.c
562 +@@ -3168,8 +3168,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
563 + }
564 +
565 + /* disable USB2 hardware LPM */
566 +- if (udev->usb2_hw_lpm_enabled == 1)
567 +- usb_set_usb2_hardware_lpm(udev, 0);
568 ++ usb_disable_usb2_hardware_lpm(udev);
569 +
570 + if (usb_disable_ltm(udev)) {
571 + dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
572 +@@ -3215,8 +3214,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
573 + usb_enable_ltm(udev);
574 + err_ltm:
575 + /* Try to enable USB2 hardware LPM again */
576 +- if (udev->usb2_hw_lpm_capable == 1)
577 +- usb_set_usb2_hardware_lpm(udev, 1);
578 ++ usb_enable_usb2_hardware_lpm(udev);
579 +
580 + if (udev->do_remote_wakeup)
581 + (void) usb_disable_remote_wakeup(udev);
582 +@@ -3499,8 +3497,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
583 + hub_port_logical_disconnect(hub, port1);
584 + } else {
585 + /* Try to enable USB2 hardware LPM */
586 +- if (udev->usb2_hw_lpm_capable == 1)
587 +- usb_set_usb2_hardware_lpm(udev, 1);
588 ++ usb_enable_usb2_hardware_lpm(udev);
589 +
590 + /* Try to enable USB3 LTM and LPM */
591 + usb_enable_ltm(udev);
592 +@@ -4337,7 +4334,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
593 + if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
594 + connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
595 + udev->usb2_hw_lpm_allowed = 1;
596 +- usb_set_usb2_hardware_lpm(udev, 1);
597 ++ usb_enable_usb2_hardware_lpm(udev);
598 + }
599 + }
600 +
601 +@@ -5481,8 +5478,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
602 + /* Disable USB2 hardware LPM.
603 + * It will be re-enabled by the enumeration process.
604 + */
605 +- if (udev->usb2_hw_lpm_enabled == 1)
606 +- usb_set_usb2_hardware_lpm(udev, 0);
607 ++ usb_disable_usb2_hardware_lpm(udev);
608 +
609 + /* Disable LPM and LTM while we reset the device and reinstall the alt
610 + * settings. Device-initiated LPM settings, and system exit latency
611 +@@ -5592,7 +5588,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
612 +
613 + done:
614 + /* Now that the alt settings are re-installed, enable LTM and LPM. */
615 +- usb_set_usb2_hardware_lpm(udev, 1);
616 ++ usb_enable_usb2_hardware_lpm(udev);
617 + usb_unlocked_enable_lpm(udev);
618 + usb_enable_ltm(udev);
619 + usb_release_bos_descriptor(udev);
620 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
621 +index c0c5d5b3ec40..0e6ab0a17c08 100644
622 +--- a/drivers/usb/core/message.c
623 ++++ b/drivers/usb/core/message.c
624 +@@ -1181,8 +1181,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
625 + dev->actconfig->interface[i] = NULL;
626 + }
627 +
628 +- if (dev->usb2_hw_lpm_enabled == 1)
629 +- usb_set_usb2_hardware_lpm(dev, 0);
630 ++ usb_disable_usb2_hardware_lpm(dev);
631 + usb_unlocked_disable_lpm(dev);
632 + usb_disable_ltm(dev);
633 +
634 +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
635 +index c953a0f1c695..1a232b4ffe71 100644
636 +--- a/drivers/usb/core/sysfs.c
637 ++++ b/drivers/usb/core/sysfs.c
638 +@@ -494,7 +494,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
639 +
640 + if (!ret) {
641 + udev->usb2_hw_lpm_allowed = value;
642 +- ret = usb_set_usb2_hardware_lpm(udev, value);
643 ++ if (value)
644 ++ ret = usb_enable_usb2_hardware_lpm(udev);
645 ++ else
646 ++ ret = usb_disable_usb2_hardware_lpm(udev);
647 + }
648 +
649 + usb_unlock_device(udev);
650 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
651 +index 53318126ed91..6b2f11544283 100644
652 +--- a/drivers/usb/core/usb.h
653 ++++ b/drivers/usb/core/usb.h
654 +@@ -84,7 +84,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
655 + extern int usb_runtime_suspend(struct device *dev);
656 + extern int usb_runtime_resume(struct device *dev);
657 + extern int usb_runtime_idle(struct device *dev);
658 +-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
659 ++extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
660 ++extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
661 +
662 + #else
663 +
664 +@@ -104,7 +105,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
665 + return 0;
666 + }
667 +
668 +-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
669 ++static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
670 ++{
671 ++ return 0;
672 ++}
673 ++
674 ++static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
675 + {
676 + return 0;
677 + }
678 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
679 +index cec25691cbae..2ffc7fe8da52 100644
680 +--- a/fs/ceph/dir.c
681 ++++ b/fs/ceph/dir.c
682 +@@ -1471,6 +1471,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
683 + unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
684 + {
685 + struct ceph_inode_info *dci = ceph_inode(dir);
686 ++ unsigned hash;
687 +
688 + switch (dci->i_dir_layout.dl_dir_hash) {
689 + case 0: /* for backward compat */
690 +@@ -1478,8 +1479,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
691 + return dn->d_name.hash;
692 +
693 + default:
694 +- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
695 ++ spin_lock(&dn->d_lock);
696 ++ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
697 + dn->d_name.name, dn->d_name.len);
698 ++ spin_unlock(&dn->d_lock);
699 ++ return hash;
700 + }
701 + }
702 +
703 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
704 +index 6cbd0d805c9d..67cb9d078bfa 100644
705 +--- a/fs/ceph/mds_client.c
706 ++++ b/fs/ceph/mds_client.c
707 +@@ -1187,6 +1187,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
708 + list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
709 + ci->i_prealloc_cap_flush = NULL;
710 + }
711 ++
712 ++ if (drop &&
713 ++ ci->i_wrbuffer_ref_head == 0 &&
714 ++ ci->i_wr_ref == 0 &&
715 ++ ci->i_dirty_caps == 0 &&
716 ++ ci->i_flushing_caps == 0) {
717 ++ ceph_put_snap_context(ci->i_head_snapc);
718 ++ ci->i_head_snapc = NULL;
719 ++ }
720 + }
721 + spin_unlock(&ci->i_ceph_lock);
722 + while (!list_empty(&to_remove)) {
723 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
724 +index 411e9df0d40e..3a76ae001360 100644
725 +--- a/fs/ceph/snap.c
726 ++++ b/fs/ceph/snap.c
727 +@@ -563,7 +563,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
728 + old_snapc = NULL;
729 +
730 + update_snapc:
731 +- if (ci->i_head_snapc) {
732 ++ if (ci->i_wrbuffer_ref_head == 0 &&
733 ++ ci->i_wr_ref == 0 &&
734 ++ ci->i_dirty_caps == 0 &&
735 ++ ci->i_flushing_caps == 0) {
736 ++ ci->i_head_snapc = NULL;
737 ++ } else {
738 + ci->i_head_snapc = ceph_get_snap_context(new_snapc);
739 + dout(" new snapc is %p\n", new_snapc);
740 + }
741 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
742 +index a8a2fc9ae056..786f67bee43a 100644
743 +--- a/fs/cifs/inode.c
744 ++++ b/fs/cifs/inode.c
745 +@@ -1722,6 +1722,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
746 + if (rc == 0 || rc != -EBUSY)
747 + goto do_rename_exit;
748 +
749 ++ /* Don't fall back to using SMB on SMB 2+ mount */
750 ++ if (server->vals->protocol_id != 0)
751 ++ goto do_rename_exit;
752 ++
753 + /* open-file renames don't work across directories */
754 + if (to_dentry->d_parent != from_dentry->d_parent)
755 + goto do_rename_exit;
756 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
757 +index 659ad12e33ba..42c31587a936 100644
758 +--- a/fs/nfs/super.c
759 ++++ b/fs/nfs/super.c
760 +@@ -2047,7 +2047,8 @@ static int nfs23_validate_mount_data(void *options,
761 + memcpy(sap, &data->addr, sizeof(data->addr));
762 + args->nfs_server.addrlen = sizeof(data->addr);
763 + args->nfs_server.port = ntohs(data->addr.sin_port);
764 +- if (!nfs_verify_server_address(sap))
765 ++ if (sap->sa_family != AF_INET ||
766 ++ !nfs_verify_server_address(sap))
767 + goto out_no_address;
768 +
769 + if (!(data->flags & NFS_MOUNT_TCP))
770 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
771 +index 3069cd46ea66..8d842282111b 100644
772 +--- a/fs/nfsd/nfs4callback.c
773 ++++ b/fs/nfsd/nfs4callback.c
774 +@@ -934,8 +934,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
775 + cb->cb_seq_status = 1;
776 + cb->cb_status = 0;
777 + if (minorversion) {
778 +- if (!nfsd41_cb_get_slot(clp, task))
779 ++ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
780 + return;
781 ++ cb->cb_holds_slot = true;
782 + }
783 + rpc_call_start(task);
784 + }
785 +@@ -962,6 +963,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
786 + return true;
787 + }
788 +
789 ++ if (!cb->cb_holds_slot)
790 ++ goto need_restart;
791 ++
792 + switch (cb->cb_seq_status) {
793 + case 0:
794 + /*
795 +@@ -999,6 +1003,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
796 + cb->cb_seq_status);
797 + }
798 +
799 ++ cb->cb_holds_slot = false;
800 + clear_bit(0, &clp->cl_cb_slot_busy);
801 + rpc_wake_up_next(&clp->cl_cb_waitq);
802 + dprintk("%s: freed slot, new seqid=%d\n", __func__,
803 +@@ -1206,6 +1211,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
804 + cb->cb_seq_status = 1;
805 + cb->cb_status = 0;
806 + cb->cb_need_restart = false;
807 ++ cb->cb_holds_slot = false;
808 + }
809 +
810 + void nfsd4_run_cb(struct nfsd4_callback *cb)
811 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
812 +index 86aa92d200e1..133d8bf62a5c 100644
813 +--- a/fs/nfsd/state.h
814 ++++ b/fs/nfsd/state.h
815 +@@ -69,6 +69,7 @@ struct nfsd4_callback {
816 + int cb_seq_status;
817 + int cb_status;
818 + bool cb_need_restart;
819 ++ bool cb_holds_slot;
820 + };
821 +
822 + struct nfsd4_callback_ops {
823 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
824 +index 6f30cf8ef7a1..5b32c054df71 100644
825 +--- a/fs/proc/proc_sysctl.c
826 ++++ b/fs/proc/proc_sysctl.c
827 +@@ -1604,9 +1604,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
828 + if (--header->nreg)
829 + return;
830 +
831 +- if (parent)
832 ++ if (parent) {
833 + put_links(header);
834 +- start_unregistering(header);
835 ++ start_unregistering(header);
836 ++ }
837 ++
838 + if (!--header->count)
839 + kfree_rcu(header, rcu);
840 +
841 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
842 +index a3812e9c8fee..c2c724abde57 100644
843 +--- a/include/net/inet_frag.h
844 ++++ b/include/net/inet_frag.h
845 +@@ -76,8 +76,8 @@ struct inet_frag_queue {
846 + struct timer_list timer;
847 + spinlock_t lock;
848 + atomic_t refcnt;
849 +- struct sk_buff *fragments; /* Used in IPv6. */
850 +- struct rb_root rb_fragments; /* Used in IPv4. */
851 ++ struct sk_buff *fragments; /* used in 6lopwpan IPv6. */
852 ++ struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
853 + struct sk_buff *fragments_tail;
854 + struct sk_buff *last_run_head;
855 + ktime_t stamp;
856 +@@ -152,4 +152,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
857 +
858 + extern const u8 ip_frag_ecn_table[16];
859 +
860 ++/* Return values of inet_frag_queue_insert() */
861 ++#define IPFRAG_OK 0
862 ++#define IPFRAG_DUP 1
863 ++#define IPFRAG_OVERLAP 2
864 ++int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
865 ++ int offset, int end);
866 ++void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
867 ++ struct sk_buff *parent);
868 ++void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
869 ++ void *reasm_data);
870 ++struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
871 ++
872 + #endif
873 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
874 +index 7cb100d25bb5..168009eef5e4 100644
875 +--- a/include/net/ipv6.h
876 ++++ b/include/net/ipv6.h
877 +@@ -511,35 +511,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
878 + }
879 + #endif
880 +
881 +-struct inet_frag_queue;
882 +-
883 +-enum ip6_defrag_users {
884 +- IP6_DEFRAG_LOCAL_DELIVER,
885 +- IP6_DEFRAG_CONNTRACK_IN,
886 +- __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
887 +- IP6_DEFRAG_CONNTRACK_OUT,
888 +- __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
889 +- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
890 +- __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
891 +-};
892 +-
893 +-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
894 +-extern const struct rhashtable_params ip6_rhash_params;
895 +-
896 +-/*
897 +- * Equivalent of ipv4 struct ip
898 +- */
899 +-struct frag_queue {
900 +- struct inet_frag_queue q;
901 +-
902 +- int iif;
903 +- unsigned int csum;
904 +- __u16 nhoffset;
905 +- u8 ecn;
906 +-};
907 +-
908 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
909 +-
910 + static inline bool ipv6_addr_any(const struct in6_addr *a)
911 + {
912 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
913 +diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
914 +new file mode 100644
915 +index 000000000000..28aa9b30aece
916 +--- /dev/null
917 ++++ b/include/net/ipv6_frag.h
918 +@@ -0,0 +1,111 @@
919 ++/* SPDX-License-Identifier: GPL-2.0 */
920 ++#ifndef _IPV6_FRAG_H
921 ++#define _IPV6_FRAG_H
922 ++#include <linux/kernel.h>
923 ++#include <net/addrconf.h>
924 ++#include <net/ipv6.h>
925 ++#include <net/inet_frag.h>
926 ++
927 ++enum ip6_defrag_users {
928 ++ IP6_DEFRAG_LOCAL_DELIVER,
929 ++ IP6_DEFRAG_CONNTRACK_IN,
930 ++ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
931 ++ IP6_DEFRAG_CONNTRACK_OUT,
932 ++ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
933 ++ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
934 ++ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
935 ++};
936 ++
937 ++/*
938 ++ * Equivalent of ipv4 struct ip
939 ++ */
940 ++struct frag_queue {
941 ++ struct inet_frag_queue q;
942 ++
943 ++ int iif;
944 ++ __u16 nhoffset;
945 ++ u8 ecn;
946 ++};
947 ++
948 ++#if IS_ENABLED(CONFIG_IPV6)
949 ++static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
950 ++{
951 ++ struct frag_queue *fq = container_of(q, struct frag_queue, q);
952 ++ const struct frag_v6_compare_key *key = a;
953 ++
954 ++ q->key.v6 = *key;
955 ++ fq->ecn = 0;
956 ++}
957 ++
958 ++static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
959 ++{
960 ++ return jhash2(data,
961 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
962 ++}
963 ++
964 ++static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
965 ++{
966 ++ const struct inet_frag_queue *fq = data;
967 ++
968 ++ return jhash2((const u32 *)&fq->key.v6,
969 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
970 ++}
971 ++
972 ++static inline int
973 ++ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
974 ++{
975 ++ const struct frag_v6_compare_key *key = arg->key;
976 ++ const struct inet_frag_queue *fq = ptr;
977 ++
978 ++ return !!memcmp(&fq->key, key, sizeof(*key));
979 ++}
980 ++
981 ++static inline void
982 ++ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
983 ++{
984 ++ struct net_device *dev = NULL;
985 ++ struct sk_buff *head;
986 ++
987 ++ rcu_read_lock();
988 ++ spin_lock(&fq->q.lock);
989 ++
990 ++ if (fq->q.flags & INET_FRAG_COMPLETE)
991 ++ goto out;
992 ++
993 ++ inet_frag_kill(&fq->q);
994 ++
995 ++ dev = dev_get_by_index_rcu(net, fq->iif);
996 ++ if (!dev)
997 ++ goto out;
998 ++
999 ++ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
1000 ++ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
1001 ++
1002 ++ /* Don't send error if the first segment did not arrive. */
1003 ++ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
1004 ++ goto out;
1005 ++
1006 ++ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
1007 ++ * pull the head out of the tree in order to be able to
1008 ++ * deal with head->dev.
1009 ++ */
1010 ++ head = inet_frag_pull_head(&fq->q);
1011 ++ if (!head)
1012 ++ goto out;
1013 ++
1014 ++ head->dev = dev;
1015 ++ skb_get(head);
1016 ++ spin_unlock(&fq->q.lock);
1017 ++
1018 ++ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
1019 ++ kfree_skb(head);
1020 ++ goto out_rcu_unlock;
1021 ++
1022 ++out:
1023 ++ spin_unlock(&fq->q.lock);
1024 ++out_rcu_unlock:
1025 ++ rcu_read_unlock();
1026 ++ inet_frag_put(&fq->q);
1027 ++}
1028 ++#endif
1029 ++#endif
1030 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1031 +index 4b1e0669740c..f0c9b6925687 100644
1032 +--- a/kernel/sched/fair.c
1033 ++++ b/kernel/sched/fair.c
1034 +@@ -1925,6 +1925,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1035 + if (p->last_task_numa_placement) {
1036 + delta = runtime - p->last_sum_exec_runtime;
1037 + *period = now - p->last_task_numa_placement;
1038 ++
1039 ++ /* Avoid time going backwards, prevent potential divide error: */
1040 ++ if (unlikely((s64)*period < 0))
1041 ++ *period = 0;
1042 + } else {
1043 + delta = p->se.avg.load_sum / p->se.load.weight;
1044 + *period = LOAD_AVG_MAX;
1045 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1046 +index 5473dcaaca8d..2cfe11e1190b 100644
1047 +--- a/kernel/trace/ring_buffer.c
1048 ++++ b/kernel/trace/ring_buffer.c
1049 +@@ -701,7 +701,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
1050 +
1051 + preempt_disable_notrace();
1052 + time = rb_time_stamp(buffer);
1053 +- preempt_enable_no_resched_notrace();
1054 ++ preempt_enable_notrace();
1055 +
1056 + return time;
1057 + }
1058 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1059 +index d4773939c054..a2d8bd68c16e 100644
1060 +--- a/kernel/trace/trace.c
1061 ++++ b/kernel/trace/trace.c
1062 +@@ -500,8 +500,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
1063 + * not modified.
1064 + */
1065 + pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
1066 +- if (!pid_list)
1067 ++ if (!pid_list) {
1068 ++ trace_parser_put(&parser);
1069 + return -ENOMEM;
1070 ++ }
1071 +
1072 + pid_list->pid_max = READ_ONCE(pid_max);
1073 +
1074 +@@ -511,6 +513,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
1075 +
1076 + pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
1077 + if (!pid_list->pids) {
1078 ++ trace_parser_put(&parser);
1079 + kfree(pid_list);
1080 + return -ENOMEM;
1081 + }
1082 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1083 +index c7e5aaf2eeb8..142ccaae9c7b 100644
1084 +--- a/net/bridge/netfilter/ebtables.c
1085 ++++ b/net/bridge/netfilter/ebtables.c
1086 +@@ -2056,7 +2056,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1087 + if (match_kern)
1088 + match_kern->match_size = ret;
1089 +
1090 +- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
1091 ++ /* rule should have no remaining data after target */
1092 ++ if (type == EBT_COMPAT_TARGET && size_left)
1093 + return -EINVAL;
1094 +
1095 + match32 = (struct compat_ebt_entry_mwt *) buf;
1096 +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
1097 +index aab1e2dfdfca..c01df341b5f6 100644
1098 +--- a/net/ieee802154/6lowpan/reassembly.c
1099 ++++ b/net/ieee802154/6lowpan/reassembly.c
1100 +@@ -25,7 +25,7 @@
1101 +
1102 + #include <net/ieee802154_netdev.h>
1103 + #include <net/6lowpan.h>
1104 +-#include <net/ipv6.h>
1105 ++#include <net/ipv6_frag.h>
1106 + #include <net/inet_frag.h>
1107 +
1108 + #include "6lowpan_i.h"
1109 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
1110 +index 0fb49dedc9fb..2325cd3454a6 100644
1111 +--- a/net/ipv4/inet_fragment.c
1112 ++++ b/net/ipv4/inet_fragment.c
1113 +@@ -24,6 +24,62 @@
1114 + #include <net/sock.h>
1115 + #include <net/inet_frag.h>
1116 + #include <net/inet_ecn.h>
1117 ++#include <net/ip.h>
1118 ++#include <net/ipv6.h>
1119 ++
1120 ++/* Use skb->cb to track consecutive/adjacent fragments coming at
1121 ++ * the end of the queue. Nodes in the rb-tree queue will
1122 ++ * contain "runs" of one or more adjacent fragments.
1123 ++ *
1124 ++ * Invariants:
1125 ++ * - next_frag is NULL at the tail of a "run";
1126 ++ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
1127 ++ */
1128 ++struct ipfrag_skb_cb {
1129 ++ union {
1130 ++ struct inet_skb_parm h4;
1131 ++ struct inet6_skb_parm h6;
1132 ++ };
1133 ++ struct sk_buff *next_frag;
1134 ++ int frag_run_len;
1135 ++};
1136 ++
1137 ++#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
1138 ++
1139 ++static void fragcb_clear(struct sk_buff *skb)
1140 ++{
1141 ++ RB_CLEAR_NODE(&skb->rbnode);
1142 ++ FRAG_CB(skb)->next_frag = NULL;
1143 ++ FRAG_CB(skb)->frag_run_len = skb->len;
1144 ++}
1145 ++
1146 ++/* Append skb to the last "run". */
1147 ++static void fragrun_append_to_last(struct inet_frag_queue *q,
1148 ++ struct sk_buff *skb)
1149 ++{
1150 ++ fragcb_clear(skb);
1151 ++
1152 ++ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
1153 ++ FRAG_CB(q->fragments_tail)->next_frag = skb;
1154 ++ q->fragments_tail = skb;
1155 ++}
1156 ++
1157 ++/* Create a new "run" with the skb. */
1158 ++static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
1159 ++{
1160 ++ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
1161 ++ fragcb_clear(skb);
1162 ++
1163 ++ if (q->last_run_head)
1164 ++ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
1165 ++ &q->last_run_head->rbnode.rb_right);
1166 ++ else
1167 ++ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
1168 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
1169 ++
1170 ++ q->fragments_tail = skb;
1171 ++ q->last_run_head = skb;
1172 ++}
1173 +
1174 + /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
1175 + * Value : 0xff if frame should be dropped.
1176 +@@ -122,6 +178,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
1177 + kmem_cache_free(f->frags_cachep, q);
1178 + }
1179 +
1180 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root)
1181 ++{
1182 ++ struct rb_node *p = rb_first(root);
1183 ++ unsigned int sum = 0;
1184 ++
1185 ++ while (p) {
1186 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
1187 ++
1188 ++ p = rb_next(p);
1189 ++ rb_erase(&skb->rbnode, root);
1190 ++ while (skb) {
1191 ++ struct sk_buff *next = FRAG_CB(skb)->next_frag;
1192 ++
1193 ++ sum += skb->truesize;
1194 ++ kfree_skb(skb);
1195 ++ skb = next;
1196 ++ }
1197 ++ }
1198 ++ return sum;
1199 ++}
1200 ++EXPORT_SYMBOL(inet_frag_rbtree_purge);
1201 ++
1202 + void inet_frag_destroy(struct inet_frag_queue *q)
1203 + {
1204 + struct sk_buff *fp;
1205 +@@ -223,3 +301,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
1206 + return fq;
1207 + }
1208 + EXPORT_SYMBOL(inet_frag_find);
1209 ++
1210 ++int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
1211 ++ int offset, int end)
1212 ++{
1213 ++ struct sk_buff *last = q->fragments_tail;
1214 ++
1215 ++ /* RFC5722, Section 4, amended by Errata ID : 3089
1216 ++ * When reassembling an IPv6 datagram, if
1217 ++ * one or more its constituent fragments is determined to be an
1218 ++ * overlapping fragment, the entire datagram (and any constituent
1219 ++ * fragments) MUST be silently discarded.
1220 ++ *
1221 ++ * Duplicates, however, should be ignored (i.e. skb dropped, but the
1222 ++ * queue/fragments kept for later reassembly).
1223 ++ */
1224 ++ if (!last)
1225 ++ fragrun_create(q, skb); /* First fragment. */
1226 ++ else if (last->ip_defrag_offset + last->len < end) {
1227 ++ /* This is the common case: skb goes to the end. */
1228 ++ /* Detect and discard overlaps. */
1229 ++ if (offset < last->ip_defrag_offset + last->len)
1230 ++ return IPFRAG_OVERLAP;
1231 ++ if (offset == last->ip_defrag_offset + last->len)
1232 ++ fragrun_append_to_last(q, skb);
1233 ++ else
1234 ++ fragrun_create(q, skb);
1235 ++ } else {
1236 ++ /* Binary search. Note that skb can become the first fragment,
1237 ++ * but not the last (covered above).
1238 ++ */
1239 ++ struct rb_node **rbn, *parent;
1240 ++
1241 ++ rbn = &q->rb_fragments.rb_node;
1242 ++ do {
1243 ++ struct sk_buff *curr;
1244 ++ int curr_run_end;
1245 ++
1246 ++ parent = *rbn;
1247 ++ curr = rb_to_skb(parent);
1248 ++ curr_run_end = curr->ip_defrag_offset +
1249 ++ FRAG_CB(curr)->frag_run_len;
1250 ++ if (end <= curr->ip_defrag_offset)
1251 ++ rbn = &parent->rb_left;
1252 ++ else if (offset >= curr_run_end)
1253 ++ rbn = &parent->rb_right;
1254 ++ else if (offset >= curr->ip_defrag_offset &&
1255 ++ end <= curr_run_end)
1256 ++ return IPFRAG_DUP;
1257 ++ else
1258 ++ return IPFRAG_OVERLAP;
1259 ++ } while (*rbn);
1260 ++ /* Here we have parent properly set, and rbn pointing to
1261 ++ * one of its NULL left/right children. Insert skb.
1262 ++ */
1263 ++ fragcb_clear(skb);
1264 ++ rb_link_node(&skb->rbnode, parent, rbn);
1265 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
1266 ++ }
1267 ++
1268 ++ skb->ip_defrag_offset = offset;
1269 ++
1270 ++ return IPFRAG_OK;
1271 ++}
1272 ++EXPORT_SYMBOL(inet_frag_queue_insert);
1273 ++
1274 ++void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
1275 ++ struct sk_buff *parent)
1276 ++{
1277 ++ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
1278 ++ struct sk_buff **nextp;
1279 ++ int delta;
1280 ++
1281 ++ if (head != skb) {
1282 ++ fp = skb_clone(skb, GFP_ATOMIC);
1283 ++ if (!fp)
1284 ++ return NULL;
1285 ++ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
1286 ++ if (RB_EMPTY_NODE(&skb->rbnode))
1287 ++ FRAG_CB(parent)->next_frag = fp;
1288 ++ else
1289 ++ rb_replace_node(&skb->rbnode, &fp->rbnode,
1290 ++ &q->rb_fragments);
1291 ++ if (q->fragments_tail == skb)
1292 ++ q->fragments_tail = fp;
1293 ++ skb_morph(skb, head);
1294 ++ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
1295 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
1296 ++ &q->rb_fragments);
1297 ++ consume_skb(head);
1298 ++ head = skb;
1299 ++ }
1300 ++ WARN_ON(head->ip_defrag_offset != 0);
1301 ++
1302 ++ delta = -head->truesize;
1303 ++
1304 ++ /* Head of list must not be cloned. */
1305 ++ if (skb_unclone(head, GFP_ATOMIC))
1306 ++ return NULL;
1307 ++
1308 ++ delta += head->truesize;
1309 ++ if (delta)
1310 ++ add_frag_mem_limit(q->net, delta);
1311 ++
1312 ++ /* If the first fragment is fragmented itself, we split
1313 ++ * it to two chunks: the first with data and paged part
1314 ++ * and the second, holding only fragments.
1315 ++ */
1316 ++ if (skb_has_frag_list(head)) {
1317 ++ struct sk_buff *clone;
1318 ++ int i, plen = 0;
1319 ++
1320 ++ clone = alloc_skb(0, GFP_ATOMIC);
1321 ++ if (!clone)
1322 ++ return NULL;
1323 ++ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
1324 ++ skb_frag_list_init(head);
1325 ++ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
1326 ++ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
1327 ++ clone->data_len = head->data_len - plen;
1328 ++ clone->len = clone->data_len;
1329 ++ head->truesize += clone->truesize;
1330 ++ clone->csum = 0;
1331 ++ clone->ip_summed = head->ip_summed;
1332 ++ add_frag_mem_limit(q->net, clone->truesize);
1333 ++ skb_shinfo(head)->frag_list = clone;
1334 ++ nextp = &clone->next;
1335 ++ } else {
1336 ++ nextp = &skb_shinfo(head)->frag_list;
1337 ++ }
1338 ++
1339 ++ return nextp;
1340 ++}
1341 ++EXPORT_SYMBOL(inet_frag_reasm_prepare);
1342 ++
1343 ++void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
1344 ++ void *reasm_data)
1345 ++{
1346 ++ struct sk_buff **nextp = (struct sk_buff **)reasm_data;
1347 ++ struct rb_node *rbn;
1348 ++ struct sk_buff *fp;
1349 ++
1350 ++ skb_push(head, head->data - skb_network_header(head));
1351 ++
1352 ++ /* Traverse the tree in order, to build frag_list. */
1353 ++ fp = FRAG_CB(head)->next_frag;
1354 ++ rbn = rb_next(&head->rbnode);
1355 ++ rb_erase(&head->rbnode, &q->rb_fragments);
1356 ++ while (rbn || fp) {
1357 ++ /* fp points to the next sk_buff in the current run;
1358 ++ * rbn points to the next run.
1359 ++ */
1360 ++ /* Go through the current run. */
1361 ++ while (fp) {
1362 ++ *nextp = fp;
1363 ++ nextp = &fp->next;
1364 ++ fp->prev = NULL;
1365 ++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
1366 ++ fp->sk = NULL;
1367 ++ head->data_len += fp->len;
1368 ++ head->len += fp->len;
1369 ++ if (head->ip_summed != fp->ip_summed)
1370 ++ head->ip_summed = CHECKSUM_NONE;
1371 ++ else if (head->ip_summed == CHECKSUM_COMPLETE)
1372 ++ head->csum = csum_add(head->csum, fp->csum);
1373 ++ head->truesize += fp->truesize;
1374 ++ fp = FRAG_CB(fp)->next_frag;
1375 ++ }
1376 ++ /* Move to the next run. */
1377 ++ if (rbn) {
1378 ++ struct rb_node *rbnext = rb_next(rbn);
1379 ++
1380 ++ fp = rb_to_skb(rbn);
1381 ++ rb_erase(rbn, &q->rb_fragments);
1382 ++ rbn = rbnext;
1383 ++ }
1384 ++ }
1385 ++ sub_frag_mem_limit(q->net, head->truesize);
1386 ++
1387 ++ *nextp = NULL;
1388 ++ head->next = NULL;
1389 ++ head->prev = NULL;
1390 ++ head->tstamp = q->stamp;
1391 ++}
1392 ++EXPORT_SYMBOL(inet_frag_reasm_finish);
1393 ++
1394 ++struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
1395 ++{
1396 ++ struct sk_buff *head;
1397 ++
1398 ++ if (q->fragments) {
1399 ++ head = q->fragments;
1400 ++ q->fragments = head->next;
1401 ++ } else {
1402 ++ struct sk_buff *skb;
1403 ++
1404 ++ head = skb_rb_first(&q->rb_fragments);
1405 ++ if (!head)
1406 ++ return NULL;
1407 ++ skb = FRAG_CB(head)->next_frag;
1408 ++ if (skb)
1409 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
1410 ++ &q->rb_fragments);
1411 ++ else
1412 ++ rb_erase(&head->rbnode, &q->rb_fragments);
1413 ++ memset(&head->rbnode, 0, sizeof(head->rbnode));
1414 ++ barrier();
1415 ++ }
1416 ++ if (head == q->fragments_tail)
1417 ++ q->fragments_tail = NULL;
1418 ++
1419 ++ sub_frag_mem_limit(q->net, head->truesize);
1420 ++
1421 ++ return head;
1422 ++}
1423 ++EXPORT_SYMBOL(inet_frag_pull_head);
1424 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
1425 +index c7334d1e392a..6e9ba9dfb5b2 100644
1426 +--- a/net/ipv4/ip_fragment.c
1427 ++++ b/net/ipv4/ip_fragment.c
1428 +@@ -56,57 +56,6 @@
1429 + */
1430 + static const char ip_frag_cache_name[] = "ip4-frags";
1431 +
1432 +-/* Use skb->cb to track consecutive/adjacent fragments coming at
1433 +- * the end of the queue. Nodes in the rb-tree queue will
1434 +- * contain "runs" of one or more adjacent fragments.
1435 +- *
1436 +- * Invariants:
1437 +- * - next_frag is NULL at the tail of a "run";
1438 +- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
1439 +- */
1440 +-struct ipfrag_skb_cb {
1441 +- struct inet_skb_parm h;
1442 +- struct sk_buff *next_frag;
1443 +- int frag_run_len;
1444 +-};
1445 +-
1446 +-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
1447 +-
1448 +-static void ip4_frag_init_run(struct sk_buff *skb)
1449 +-{
1450 +- BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
1451 +-
1452 +- FRAG_CB(skb)->next_frag = NULL;
1453 +- FRAG_CB(skb)->frag_run_len = skb->len;
1454 +-}
1455 +-
1456 +-/* Append skb to the last "run". */
1457 +-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
1458 +- struct sk_buff *skb)
1459 +-{
1460 +- RB_CLEAR_NODE(&skb->rbnode);
1461 +- FRAG_CB(skb)->next_frag = NULL;
1462 +-
1463 +- FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
1464 +- FRAG_CB(q->fragments_tail)->next_frag = skb;
1465 +- q->fragments_tail = skb;
1466 +-}
1467 +-
1468 +-/* Create a new "run" with the skb. */
1469 +-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
1470 +-{
1471 +- if (q->last_run_head)
1472 +- rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
1473 +- &q->last_run_head->rbnode.rb_right);
1474 +- else
1475 +- rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
1476 +- rb_insert_color(&skb->rbnode, &q->rb_fragments);
1477 +-
1478 +- ip4_frag_init_run(skb);
1479 +- q->fragments_tail = skb;
1480 +- q->last_run_head = skb;
1481 +-}
1482 +-
1483 + /* Describe an entry in the "incomplete datagrams" queue. */
1484 + struct ipq {
1485 + struct inet_frag_queue q;
1486 +@@ -210,27 +159,9 @@ static void ip_expire(unsigned long arg)
1487 + * pull the head out of the tree in order to be able to
1488 + * deal with head->dev.
1489 + */
1490 +- if (qp->q.fragments) {
1491 +- head = qp->q.fragments;
1492 +- qp->q.fragments = head->next;
1493 +- } else {
1494 +- head = skb_rb_first(&qp->q.rb_fragments);
1495 +- if (!head)
1496 +- goto out;
1497 +- if (FRAG_CB(head)->next_frag)
1498 +- rb_replace_node(&head->rbnode,
1499 +- &FRAG_CB(head)->next_frag->rbnode,
1500 +- &qp->q.rb_fragments);
1501 +- else
1502 +- rb_erase(&head->rbnode, &qp->q.rb_fragments);
1503 +- memset(&head->rbnode, 0, sizeof(head->rbnode));
1504 +- barrier();
1505 +- }
1506 +- if (head == qp->q.fragments_tail)
1507 +- qp->q.fragments_tail = NULL;
1508 +-
1509 +- sub_frag_mem_limit(qp->q.net, head->truesize);
1510 +-
1511 ++ head = inet_frag_pull_head(&qp->q);
1512 ++ if (!head)
1513 ++ goto out;
1514 + head->dev = dev_get_by_index_rcu(net, qp->iif);
1515 + if (!head->dev)
1516 + goto out;
1517 +@@ -343,12 +274,10 @@ static int ip_frag_reinit(struct ipq *qp)
1518 + static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
1519 + {
1520 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
1521 +- struct rb_node **rbn, *parent;
1522 +- struct sk_buff *skb1, *prev_tail;
1523 +- int ihl, end, skb1_run_end;
1524 ++ int ihl, end, flags, offset;
1525 ++ struct sk_buff *prev_tail;
1526 + struct net_device *dev;
1527 + unsigned int fragsize;
1528 +- int flags, offset;
1529 + int err = -ENOENT;
1530 + u8 ecn;
1531 +
1532 +@@ -380,7 +309,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
1533 + */
1534 + if (end < qp->q.len ||
1535 + ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
1536 +- goto err;
1537 ++ goto discard_qp;
1538 + qp->q.flags |= INET_FRAG_LAST_IN;
1539 + qp->q.len = end;
1540 + } else {
1541 +@@ -392,82 +321,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
1542 + if (end > qp->q.len) {
1543 + /* Some bits beyond end -> corruption. */
1544 + if (qp->q.flags & INET_FRAG_LAST_IN)
1545 +- goto err;
1546 ++ goto discard_qp;
1547 + qp->q.len = end;
1548 + }
1549 + }
1550 + if (end == offset)
1551 +- goto err;
1552 ++ goto discard_qp;
1553 +
1554 + err = -ENOMEM;
1555 + if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
1556 +- goto err;
1557 ++ goto discard_qp;
1558 +
1559 + err = pskb_trim_rcsum(skb, end - offset);
1560 + if (err)
1561 +- goto err;
1562 ++ goto discard_qp;
1563 +
1564 + /* Note : skb->rbnode and skb->dev share the same location. */
1565 + dev = skb->dev;
1566 + /* Makes sure compiler wont do silly aliasing games */
1567 + barrier();
1568 +
1569 +- /* RFC5722, Section 4, amended by Errata ID : 3089
1570 +- * When reassembling an IPv6 datagram, if
1571 +- * one or more its constituent fragments is determined to be an
1572 +- * overlapping fragment, the entire datagram (and any constituent
1573 +- * fragments) MUST be silently discarded.
1574 +- *
1575 +- * We do the same here for IPv4 (and increment an snmp counter) but
1576 +- * we do not want to drop the whole queue in response to a duplicate
1577 +- * fragment.
1578 +- */
1579 +-
1580 +- err = -EINVAL;
1581 +- /* Find out where to put this fragment. */
1582 + prev_tail = qp->q.fragments_tail;
1583 +- if (!prev_tail)
1584 +- ip4_frag_create_run(&qp->q, skb); /* First fragment. */
1585 +- else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
1586 +- /* This is the common case: skb goes to the end. */
1587 +- /* Detect and discard overlaps. */
1588 +- if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
1589 +- goto discard_qp;
1590 +- if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
1591 +- ip4_frag_append_to_last_run(&qp->q, skb);
1592 +- else
1593 +- ip4_frag_create_run(&qp->q, skb);
1594 +- } else {
1595 +- /* Binary search. Note that skb can become the first fragment,
1596 +- * but not the last (covered above).
1597 +- */
1598 +- rbn = &qp->q.rb_fragments.rb_node;
1599 +- do {
1600 +- parent = *rbn;
1601 +- skb1 = rb_to_skb(parent);
1602 +- skb1_run_end = skb1->ip_defrag_offset +
1603 +- FRAG_CB(skb1)->frag_run_len;
1604 +- if (end <= skb1->ip_defrag_offset)
1605 +- rbn = &parent->rb_left;
1606 +- else if (offset >= skb1_run_end)
1607 +- rbn = &parent->rb_right;
1608 +- else if (offset >= skb1->ip_defrag_offset &&
1609 +- end <= skb1_run_end)
1610 +- goto err; /* No new data, potential duplicate */
1611 +- else
1612 +- goto discard_qp; /* Found an overlap */
1613 +- } while (*rbn);
1614 +- /* Here we have parent properly set, and rbn pointing to
1615 +- * one of its NULL left/right children. Insert skb.
1616 +- */
1617 +- ip4_frag_init_run(skb);
1618 +- rb_link_node(&skb->rbnode, parent, rbn);
1619 +- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
1620 +- }
1621 ++ err = inet_frag_queue_insert(&qp->q, skb, offset, end);
1622 ++ if (err)
1623 ++ goto insert_error;
1624 +
1625 + if (dev)
1626 + qp->iif = dev->ifindex;
1627 +- skb->ip_defrag_offset = offset;
1628 +
1629 + qp->q.stamp = skb->tstamp;
1630 + qp->q.meat += skb->len;
1631 +@@ -492,15 +372,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
1632 + skb->_skb_refdst = 0UL;
1633 + err = ip_frag_reasm(qp, skb, prev_tail, dev);
1634 + skb->_skb_refdst = orefdst;
1635 ++ if (err)
1636 ++ inet_frag_kill(&qp->q);
1637 + return err;
1638 + }
1639 +
1640 + skb_dst_drop(skb);
1641 + return -EINPROGRESS;
1642 +
1643 ++insert_error:
1644 ++ if (err == IPFRAG_DUP) {
1645 ++ kfree_skb(skb);
1646 ++ return -EINVAL;
1647 ++ }
1648 ++ err = -EINVAL;
1649 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
1650 + discard_qp:
1651 + inet_frag_kill(&qp->q);
1652 +- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
1653 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
1654 + err:
1655 + kfree_skb(skb);
1656 + return err;
1657 +@@ -512,12 +401,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
1658 + {
1659 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
1660 + struct iphdr *iph;
1661 +- struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
1662 +- struct sk_buff **nextp; /* To build frag_list. */
1663 +- struct rb_node *rbn;
1664 +- int len;
1665 +- int ihlen;
1666 +- int err;
1667 ++ void *reasm_data;
1668 ++ int len, err;
1669 + u8 ecn;
1670 +
1671 + ipq_kill(qp);
1672 +@@ -527,111 +412,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
1673 + err = -EINVAL;
1674 + goto out_fail;
1675 + }
1676 +- /* Make the one we just received the head. */
1677 +- if (head != skb) {
1678 +- fp = skb_clone(skb, GFP_ATOMIC);
1679 +- if (!fp)
1680 +- goto out_nomem;
1681 +- FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
1682 +- if (RB_EMPTY_NODE(&skb->rbnode))
1683 +- FRAG_CB(prev_tail)->next_frag = fp;
1684 +- else
1685 +- rb_replace_node(&skb->rbnode, &fp->rbnode,
1686 +- &qp->q.rb_fragments);
1687 +- if (qp->q.fragments_tail == skb)
1688 +- qp->q.fragments_tail = fp;
1689 +- skb_morph(skb, head);
1690 +- FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
1691 +- rb_replace_node(&head->rbnode, &skb->rbnode,
1692 +- &qp->q.rb_fragments);
1693 +- consume_skb(head);
1694 +- head = skb;
1695 +- }
1696 +-
1697 +- WARN_ON(head->ip_defrag_offset != 0);
1698 +
1699 +- /* Allocate a new buffer for the datagram. */
1700 +- ihlen = ip_hdrlen(head);
1701 +- len = ihlen + qp->q.len;
1702 ++ /* Make the one we just received the head. */
1703 ++ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
1704 ++ if (!reasm_data)
1705 ++ goto out_nomem;
1706 +
1707 ++ len = ip_hdrlen(skb) + qp->q.len;
1708 + err = -E2BIG;
1709 + if (len > 65535)
1710 + goto out_oversize;
1711 +
1712 +- /* Head of list must not be cloned. */
1713 +- if (skb_unclone(head, GFP_ATOMIC))
1714 +- goto out_nomem;
1715 +-
1716 +- /* If the first fragment is fragmented itself, we split
1717 +- * it to two chunks: the first with data and paged part
1718 +- * and the second, holding only fragments. */
1719 +- if (skb_has_frag_list(head)) {
1720 +- struct sk_buff *clone;
1721 +- int i, plen = 0;
1722 +-
1723 +- clone = alloc_skb(0, GFP_ATOMIC);
1724 +- if (!clone)
1725 +- goto out_nomem;
1726 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
1727 +- skb_frag_list_init(head);
1728 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
1729 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
1730 +- clone->len = clone->data_len = head->data_len - plen;
1731 +- head->truesize += clone->truesize;
1732 +- clone->csum = 0;
1733 +- clone->ip_summed = head->ip_summed;
1734 +- add_frag_mem_limit(qp->q.net, clone->truesize);
1735 +- skb_shinfo(head)->frag_list = clone;
1736 +- nextp = &clone->next;
1737 +- } else {
1738 +- nextp = &skb_shinfo(head)->frag_list;
1739 +- }
1740 ++ inet_frag_reasm_finish(&qp->q, skb, reasm_data);
1741 +
1742 +- skb_push(head, head->data - skb_network_header(head));
1743 ++ skb->dev = dev;
1744 ++ IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
1745 +
1746 +- /* Traverse the tree in order, to build frag_list. */
1747 +- fp = FRAG_CB(head)->next_frag;
1748 +- rbn = rb_next(&head->rbnode);
1749 +- rb_erase(&head->rbnode, &qp->q.rb_fragments);
1750 +- while (rbn || fp) {
1751 +- /* fp points to the next sk_buff in the current run;
1752 +- * rbn points to the next run.
1753 +- */
1754 +- /* Go through the current run. */
1755 +- while (fp) {
1756 +- *nextp = fp;
1757 +- nextp = &fp->next;
1758 +- fp->prev = NULL;
1759 +- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
1760 +- fp->sk = NULL;
1761 +- head->data_len += fp->len;
1762 +- head->len += fp->len;
1763 +- if (head->ip_summed != fp->ip_summed)
1764 +- head->ip_summed = CHECKSUM_NONE;
1765 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
1766 +- head->csum = csum_add(head->csum, fp->csum);
1767 +- head->truesize += fp->truesize;
1768 +- fp = FRAG_CB(fp)->next_frag;
1769 +- }
1770 +- /* Move to the next run. */
1771 +- if (rbn) {
1772 +- struct rb_node *rbnext = rb_next(rbn);
1773 +-
1774 +- fp = rb_to_skb(rbn);
1775 +- rb_erase(rbn, &qp->q.rb_fragments);
1776 +- rbn = rbnext;
1777 +- }
1778 +- }
1779 +- sub_frag_mem_limit(qp->q.net, head->truesize);
1780 +-
1781 +- *nextp = NULL;
1782 +- head->next = NULL;
1783 +- head->prev = NULL;
1784 +- head->dev = dev;
1785 +- head->tstamp = qp->q.stamp;
1786 +- IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
1787 +-
1788 +- iph = ip_hdr(head);
1789 ++ iph = ip_hdr(skb);
1790 + iph->tot_len = htons(len);
1791 + iph->tos |= ecn;
1792 +
1793 +@@ -644,7 +441,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
1794 + * from one very small df-fragment and one large non-df frag.
1795 + */
1796 + if (qp->max_df_size == qp->q.max_size) {
1797 +- IPCB(head)->flags |= IPSKB_FRAG_PMTU;
1798 ++ IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
1799 + iph->frag_off = htons(IP_DF);
1800 + } else {
1801 + iph->frag_off = 0;
1802 +@@ -742,28 +539,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
1803 + }
1804 + EXPORT_SYMBOL(ip_check_defrag);
1805 +
1806 +-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
1807 +-{
1808 +- struct rb_node *p = rb_first(root);
1809 +- unsigned int sum = 0;
1810 +-
1811 +- while (p) {
1812 +- struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
1813 +-
1814 +- p = rb_next(p);
1815 +- rb_erase(&skb->rbnode, root);
1816 +- while (skb) {
1817 +- struct sk_buff *next = FRAG_CB(skb)->next_frag;
1818 +-
1819 +- sum += skb->truesize;
1820 +- kfree_skb(skb);
1821 +- skb = next;
1822 +- }
1823 +- }
1824 +- return sum;
1825 +-}
1826 +-EXPORT_SYMBOL(inet_frag_rbtree_purge);
1827 +-
1828 + #ifdef CONFIG_SYSCTL
1829 + static int dist_min;
1830 +
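For context on the ip_fragment.c hunks above: the open-coded rbtree insertion and RFC 5722 overlap handling are replaced by the shared inet_frag_queue_insert() helper, which reports a duplicate fragment (IPFRAG_DUP, dropped on its own) separately from a genuine overlap (the whole queue is discarded and IPSTATS_MIB_REASM_OVERLAPS is bumped). A minimal user-space sketch of that classification over [offset, end) ranges — a plain array stands in for the kernel's rbtree of runs, and every name below is illustrative, not the kernel API:

#include <stdio.h>

enum frag_verdict { FRAG_OK, FRAG_DUP, FRAG_OVERLAP };

struct frag_range { int offset, end; };      /* [offset, end) */

static enum frag_verdict classify_fragment(const struct frag_range *stored,
                                           int nstored, int offset, int end)
{
    for (int i = 0; i < nstored; i++) {
        if (end <= stored[i].offset || offset >= stored[i].end)
            continue;                        /* disjoint, keep looking */
        if (offset >= stored[i].offset && end <= stored[i].end)
            return FRAG_DUP;                 /* no new data: drop this skb only */
        return FRAG_OVERLAP;                 /* partial overlap: drop the queue */
    }
    return FRAG_OK;
}

int main(void)
{
    struct frag_range q[] = { { 0, 1480 }, { 1480, 2960 } };

    printf("%d\n", classify_fragment(q, 2, 2960, 4440)); /* 0: FRAG_OK */
    printf("%d\n", classify_fragment(q, 2, 0, 1480));    /* 1: FRAG_DUP */
    printf("%d\n", classify_fragment(q, 2, 1000, 2000)); /* 2: FRAG_OVERLAP */
    return 0;
}

The split between the two error paths is what lets ip_frag_queue() keep counting overlaps under REASM_OVERLAPS while charging everything else to REASMFAILS, as the relabelled goto targets above show.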
1831 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1832 +index 0e2cf9634541..02c49857b5a7 100644
1833 +--- a/net/ipv4/route.c
1834 ++++ b/net/ipv4/route.c
1835 +@@ -1168,25 +1168,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1836 + return dst;
1837 + }
1838 +
1839 +-static void ipv4_link_failure(struct sk_buff *skb)
1840 ++static void ipv4_send_dest_unreach(struct sk_buff *skb)
1841 + {
1842 + struct ip_options opt;
1843 +- struct rtable *rt;
1844 + int res;
1845 +
1846 + /* Recompile ip options since IPCB may not be valid anymore.
1847 ++ * Also check we have a reasonable ipv4 header.
1848 + */
1849 +- memset(&opt, 0, sizeof(opt));
1850 +- opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
1851 ++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1852 ++ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1853 ++ return;
1854 +
1855 +- rcu_read_lock();
1856 +- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1857 +- rcu_read_unlock();
1858 ++ memset(&opt, 0, sizeof(opt));
1859 ++ if (ip_hdr(skb)->ihl > 5) {
1860 ++ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1861 ++ return;
1862 ++ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1863 +
1864 +- if (res)
1865 +- return;
1866 ++ rcu_read_lock();
1867 ++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1868 ++ rcu_read_unlock();
1869 +
1870 ++ if (res)
1871 ++ return;
1872 ++ }
1873 + __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1874 ++}
1875 ++
1876 ++static void ipv4_link_failure(struct sk_buff *skb)
1877 ++{
1878 ++ struct rtable *rt;
1879 ++
1880 ++ ipv4_send_dest_unreach(skb);
1881 +
1882 + rt = skb_rtable(skb);
1883 + if (rt)
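The route.c hunk above splits the ICMP side of ipv4_link_failure() into ipv4_send_dest_unreach() and, before recompiling IP options, checks that the packet still carries a plausible IPv4 header. A user-space sketch of that sanity check — plain buffers stand in for the skb and pskb_network_may_pull(), so this is illustrative only, not the kernel code path:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool ipv4_header_is_reasonable(const uint8_t *pkt, size_t len)
{
    if (len < 20)                        /* base IPv4 header must be present */
        return false;
    uint8_t version = pkt[0] >> 4;
    uint8_t ihl = pkt[0] & 0x0f;         /* header length in 32-bit words */
    if (version != 4 || ihl < 5)
        return false;
    if (ihl > 5 && len < (size_t)ihl * 4)
        return false;                    /* options advertised but truncated */
    return true;
}

int main(void)
{
    uint8_t ok[20]  = { 0x45 };          /* version 4, ihl 5 */
    uint8_t bad[20] = { 0x4f };          /* claims ihl 15 (60 bytes), only 20 present */

    printf("%d %d\n", ipv4_header_is_reasonable(ok, sizeof(ok)),
           ipv4_header_is_reasonable(bad, sizeof(bad)));
    return 0;
}

Note that the patched kernel function also skips __ip_options_compile() entirely when ihl == 5, i.e. when there are no options to recompile.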
1884 +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
1885 +index 024ab833557d..85713adf2770 100644
1886 +--- a/net/ipv4/sysctl_net_ipv4.c
1887 ++++ b/net/ipv4/sysctl_net_ipv4.c
1888 +@@ -41,6 +41,7 @@ static int tcp_syn_retries_min = 1;
1889 + static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
1890 + static int ip_ping_group_range_min[] = { 0, 0 };
1891 + static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
1892 ++static int one_day_secs = 24 * 3600;
1893 +
1894 + /* Update system visible IP port range */
1895 + static void set_local_port_range(struct net *net, int range[2])
1896 +@@ -460,7 +461,9 @@ static struct ctl_table ipv4_table[] = {
1897 + .data = &sysctl_tcp_min_rtt_wlen,
1898 + .maxlen = sizeof(int),
1899 + .mode = 0644,
1900 +- .proc_handler = proc_dointvec
1901 ++ .proc_handler = proc_dointvec_minmax,
1902 ++ .extra1 = &zero,
1903 ++ .extra2 = &one_day_secs
1904 + },
1905 + {
1906 + .procname = "tcp_low_latency",
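The sysctl hunk above pairs with the ip-sysctl.txt documentation change earlier in this patch: tcp_min_rtt_wlen moves from proc_dointvec to proc_dointvec_minmax with bounds of 0 and one_day_secs, so a write outside [0, 86400] is refused rather than stored. A tiny stand-in for that behaviour, with set_tcp_min_rtt_wlen() as an illustrative name rather than a kernel function:

#include <errno.h>
#include <stdio.h>

#define ONE_DAY_SECS (24 * 3600)

static int set_tcp_min_rtt_wlen(int *sysctl, int value)
{
    if (value < 0 || value > ONE_DAY_SECS)
        return -EINVAL;      /* proc_dointvec_minmax refuses the write */
    *sysctl = value;
    return 0;
}

int main(void)
{
    int wlen = 300;          /* default from ip-sysctl.txt */

    printf("%d\n", set_tcp_min_rtt_wlen(&wlen, 600));    /* 0: accepted */
    printf("%d\n", set_tcp_min_rtt_wlen(&wlen, 100000)); /* -EINVAL: out of range */
    printf("wlen=%d\n", wlen);                           /* still 600 */
    return 0;
}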
1907 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
1908 +index e46185377981..1e1fa99b3243 100644
1909 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
1910 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
1911 +@@ -33,9 +33,8 @@
1912 +
1913 + #include <net/sock.h>
1914 + #include <net/snmp.h>
1915 +-#include <net/inet_frag.h>
1916 ++#include <net/ipv6_frag.h>
1917 +
1918 +-#include <net/ipv6.h>
1919 + #include <net/protocol.h>
1920 + #include <net/transp_v6.h>
1921 + #include <net/rawv6.h>
1922 +@@ -52,14 +51,6 @@
1923 +
1924 + static const char nf_frags_cache_name[] = "nf-frags";
1925 +
1926 +-struct nf_ct_frag6_skb_cb
1927 +-{
1928 +- struct inet6_skb_parm h;
1929 +- int offset;
1930 +-};
1931 +-
1932 +-#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb))
1933 +-
1934 + static struct inet_frags nf_frags;
1935 +
1936 + #ifdef CONFIG_SYSCTL
1937 +@@ -145,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
1938 + }
1939 + #endif
1940 +
1941 ++static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
1942 ++ struct sk_buff *prev_tail, struct net_device *dev);
1943 ++
1944 + static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
1945 + {
1946 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
1947 +@@ -158,7 +152,7 @@ static void nf_ct_frag6_expire(unsigned long data)
1948 + fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
1949 + net = container_of(fq->q.net, struct net, nf_frag.frags);
1950 +
1951 +- ip6_expire_frag_queue(net, fq);
1952 ++ ip6frag_expire_frag_queue(net, fq);
1953 + }
1954 +
1955 + /* Creation primitives. */
1956 +@@ -185,9 +179,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
1957 + static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
1958 + const struct frag_hdr *fhdr, int nhoff)
1959 + {
1960 +- struct sk_buff *prev, *next;
1961 + unsigned int payload_len;
1962 +- int offset, end;
1963 ++ struct net_device *dev;
1964 ++ struct sk_buff *prev;
1965 ++ int offset, end, err;
1966 + u8 ecn;
1967 +
1968 + if (fq->q.flags & INET_FRAG_COMPLETE) {
1969 +@@ -262,55 +257,19 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
1970 + goto err;
1971 + }
1972 +
1973 +- /* Find out which fragments are in front and at the back of us
1974 +- * in the chain of fragments so far. We must know where to put
1975 +- * this fragment, right?
1976 +- */
1977 ++ /* Note : skb->rbnode and skb->dev share the same location. */
1978 ++ dev = skb->dev;
1979 ++ /* Makes sure compiler wont do silly aliasing games */
1980 ++ barrier();
1981 ++
1982 + prev = fq->q.fragments_tail;
1983 +- if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
1984 +- next = NULL;
1985 +- goto found;
1986 +- }
1987 +- prev = NULL;
1988 +- for (next = fq->q.fragments; next != NULL; next = next->next) {
1989 +- if (NFCT_FRAG6_CB(next)->offset >= offset)
1990 +- break; /* bingo! */
1991 +- prev = next;
1992 +- }
1993 ++ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
1994 ++ if (err)
1995 ++ goto insert_error;
1996 +
1997 +-found:
1998 +- /* RFC5722, Section 4:
1999 +- * When reassembling an IPv6 datagram, if
2000 +- * one or more its constituent fragments is determined to be an
2001 +- * overlapping fragment, the entire datagram (and any constituent
2002 +- * fragments, including those not yet received) MUST be silently
2003 +- * discarded.
2004 +- */
2005 ++ if (dev)
2006 ++ fq->iif = dev->ifindex;
2007 +
2008 +- /* Check for overlap with preceding fragment. */
2009 +- if (prev &&
2010 +- (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
2011 +- goto discard_fq;
2012 +-
2013 +- /* Look for overlap with succeeding segment. */
2014 +- if (next && NFCT_FRAG6_CB(next)->offset < end)
2015 +- goto discard_fq;
2016 +-
2017 +- NFCT_FRAG6_CB(skb)->offset = offset;
2018 +-
2019 +- /* Insert this fragment in the chain of fragments. */
2020 +- skb->next = next;
2021 +- if (!next)
2022 +- fq->q.fragments_tail = skb;
2023 +- if (prev)
2024 +- prev->next = skb;
2025 +- else
2026 +- fq->q.fragments = skb;
2027 +-
2028 +- if (skb->dev) {
2029 +- fq->iif = skb->dev->ifindex;
2030 +- skb->dev = NULL;
2031 +- }
2032 + fq->q.stamp = skb->tstamp;
2033 + fq->q.meat += skb->len;
2034 + fq->ecn |= ecn;
2035 +@@ -326,11 +285,25 @@ found:
2036 + fq->q.flags |= INET_FRAG_FIRST_IN;
2037 + }
2038 +
2039 +- return 0;
2040 ++ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
2041 ++ fq->q.meat == fq->q.len) {
2042 ++ unsigned long orefdst = skb->_skb_refdst;
2043 +
2044 +-discard_fq:
2045 ++ skb->_skb_refdst = 0UL;
2046 ++ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
2047 ++ skb->_skb_refdst = orefdst;
2048 ++ return err;
2049 ++ }
2050 ++
2051 ++ skb_dst_drop(skb);
2052 ++ return -EINPROGRESS;
2053 ++
2054 ++insert_error:
2055 ++ if (err == IPFRAG_DUP)
2056 ++ goto err;
2057 + inet_frag_kill(&fq->q);
2058 + err:
2059 ++ skb_dst_drop(skb);
2060 + return -EINVAL;
2061 + }
2062 +
2063 +@@ -340,141 +313,67 @@ err:
2064 + * It is called with locked fq, and caller must check that
2065 + * queue is eligible for reassembly i.e. it is not COMPLETE,
2066 + * the last and the first frames arrived and all the bits are here.
2067 +- *
2068 +- * returns true if *prev skb has been transformed into the reassembled
2069 +- * skb, false otherwise.
2070 + */
2071 +-static bool
2072 +-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
2073 ++static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
2074 ++ struct sk_buff *prev_tail, struct net_device *dev)
2075 + {
2076 +- struct sk_buff *fp, *head = fq->q.fragments;
2077 +- int payload_len;
2078 ++ void *reasm_data;
2079 ++ int payload_len;
2080 + u8 ecn;
2081 +
2082 + inet_frag_kill(&fq->q);
2083 +
2084 +- WARN_ON(head == NULL);
2085 +- WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
2086 +-
2087 + ecn = ip_frag_ecn_table[fq->ecn];
2088 + if (unlikely(ecn == 0xff))
2089 +- return false;
2090 ++ goto err;
2091 +
2092 +- /* Unfragmented part is taken from the first segment. */
2093 +- payload_len = ((head->data - skb_network_header(head)) -
2094 ++ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
2095 ++ if (!reasm_data)
2096 ++ goto err;
2097 ++
2098 ++ payload_len = ((skb->data - skb_network_header(skb)) -
2099 + sizeof(struct ipv6hdr) + fq->q.len -
2100 + sizeof(struct frag_hdr));
2101 + if (payload_len > IPV6_MAXPLEN) {
2102 + net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
2103 + payload_len);
2104 +- return false;
2105 +- }
2106 +-
2107 +- /* Head of list must not be cloned. */
2108 +- if (skb_unclone(head, GFP_ATOMIC))
2109 +- return false;
2110 +-
2111 +- /* If the first fragment is fragmented itself, we split
2112 +- * it to two chunks: the first with data and paged part
2113 +- * and the second, holding only fragments. */
2114 +- if (skb_has_frag_list(head)) {
2115 +- struct sk_buff *clone;
2116 +- int i, plen = 0;
2117 +-
2118 +- clone = alloc_skb(0, GFP_ATOMIC);
2119 +- if (clone == NULL)
2120 +- return false;
2121 +-
2122 +- clone->next = head->next;
2123 +- head->next = clone;
2124 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
2125 +- skb_frag_list_init(head);
2126 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
2127 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
2128 +- clone->len = clone->data_len = head->data_len - plen;
2129 +- head->data_len -= clone->len;
2130 +- head->len -= clone->len;
2131 +- clone->csum = 0;
2132 +- clone->ip_summed = head->ip_summed;
2133 +-
2134 +- add_frag_mem_limit(fq->q.net, clone->truesize);
2135 +- }
2136 +-
2137 +- /* morph head into last received skb: prev.
2138 +- *
2139 +- * This allows callers of ipv6 conntrack defrag to continue
2140 +- * to use the last skb(frag) passed into the reasm engine.
2141 +- * The last skb frag 'silently' turns into the full reassembled skb.
2142 +- *
2143 +- * Since prev is also part of q->fragments we have to clone it first.
2144 +- */
2145 +- if (head != prev) {
2146 +- struct sk_buff *iter;
2147 +-
2148 +- fp = skb_clone(prev, GFP_ATOMIC);
2149 +- if (!fp)
2150 +- return false;
2151 +-
2152 +- fp->next = prev->next;
2153 +-
2154 +- iter = head;
2155 +- while (iter) {
2156 +- if (iter->next == prev) {
2157 +- iter->next = fp;
2158 +- break;
2159 +- }
2160 +- iter = iter->next;
2161 +- }
2162 +-
2163 +- skb_morph(prev, head);
2164 +- prev->next = head->next;
2165 +- consume_skb(head);
2166 +- head = prev;
2167 ++ goto err;
2168 + }
2169 +
2170 + /* We have to remove fragment header from datagram and to relocate
2171 + * header in order to calculate ICV correctly. */
2172 +- skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
2173 +- memmove(head->head + sizeof(struct frag_hdr), head->head,
2174 +- (head->data - head->head) - sizeof(struct frag_hdr));
2175 +- head->mac_header += sizeof(struct frag_hdr);
2176 +- head->network_header += sizeof(struct frag_hdr);
2177 +-
2178 +- skb_shinfo(head)->frag_list = head->next;
2179 +- skb_reset_transport_header(head);
2180 +- skb_push(head, head->data - skb_network_header(head));
2181 +-
2182 +- for (fp = head->next; fp; fp = fp->next) {
2183 +- head->data_len += fp->len;
2184 +- head->len += fp->len;
2185 +- if (head->ip_summed != fp->ip_summed)
2186 +- head->ip_summed = CHECKSUM_NONE;
2187 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
2188 +- head->csum = csum_add(head->csum, fp->csum);
2189 +- head->truesize += fp->truesize;
2190 +- fp->sk = NULL;
2191 +- }
2192 +- sub_frag_mem_limit(fq->q.net, head->truesize);
2193 ++ skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
2194 ++ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
2195 ++ (skb->data - skb->head) - sizeof(struct frag_hdr));
2196 ++ skb->mac_header += sizeof(struct frag_hdr);
2197 ++ skb->network_header += sizeof(struct frag_hdr);
2198 ++
2199 ++ skb_reset_transport_header(skb);
2200 +
2201 +- head->ignore_df = 1;
2202 +- head->next = NULL;
2203 +- head->dev = dev;
2204 +- head->tstamp = fq->q.stamp;
2205 +- ipv6_hdr(head)->payload_len = htons(payload_len);
2206 +- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
2207 +- IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
2208 ++ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
2209 ++
2210 ++ skb->ignore_df = 1;
2211 ++ skb->dev = dev;
2212 ++ ipv6_hdr(skb)->payload_len = htons(payload_len);
2213 ++ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
2214 ++ IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
2215 +
2216 + /* Yes, and fold redundant checksum back. 8) */
2217 +- if (head->ip_summed == CHECKSUM_COMPLETE)
2218 +- head->csum = csum_partial(skb_network_header(head),
2219 +- skb_network_header_len(head),
2220 +- head->csum);
2221 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
2222 ++ skb->csum = csum_partial(skb_network_header(skb),
2223 ++ skb_network_header_len(skb),
2224 ++ skb->csum);
2225 +
2226 + fq->q.fragments = NULL;
2227 + fq->q.rb_fragments = RB_ROOT;
2228 + fq->q.fragments_tail = NULL;
2229 ++ fq->q.last_run_head = NULL;
2230 ++
2231 ++ return 0;
2232 +
2233 +- return true;
2234 ++err:
2235 ++ inet_frag_kill(&fq->q);
2236 ++ return -EINVAL;
2237 + }
2238 +
2239 + /*
2240 +@@ -543,7 +442,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
2241 + int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
2242 + {
2243 + u16 savethdr = skb->transport_header;
2244 +- struct net_device *dev = skb->dev;
2245 + int fhoff, nhoff, ret;
2246 + struct frag_hdr *fhdr;
2247 + struct frag_queue *fq;
2248 +@@ -566,10 +464,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
2249 + hdr = ipv6_hdr(skb);
2250 + fhdr = (struct frag_hdr *)skb_transport_header(skb);
2251 +
2252 +- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
2253 +- fhdr->frag_off & htons(IP6_MF))
2254 +- return -EINVAL;
2255 +-
2256 + skb_orphan(skb);
2257 + fq = fq_find(net, fhdr->identification, user, hdr,
2258 + skb->dev ? skb->dev->ifindex : 0);
2259 +@@ -581,24 +475,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
2260 + spin_lock_bh(&fq->q.lock);
2261 +
2262 + ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
2263 +- if (ret < 0) {
2264 +- if (ret == -EPROTO) {
2265 +- skb->transport_header = savethdr;
2266 +- ret = 0;
2267 +- }
2268 +- goto out_unlock;
2269 ++ if (ret == -EPROTO) {
2270 ++ skb->transport_header = savethdr;
2271 ++ ret = 0;
2272 + }
2273 +
2274 + /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
2275 + * must be returned.
2276 + */
2277 +- ret = -EINPROGRESS;
2278 +- if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
2279 +- fq->q.meat == fq->q.len &&
2280 +- nf_ct_frag6_reasm(fq, skb, dev))
2281 +- ret = 0;
2282 ++ if (ret)
2283 ++ ret = -EINPROGRESS;
2284 +
2285 +-out_unlock:
2286 + spin_unlock_bh(&fq->q.lock);
2287 + inet_frag_put(&fq->q);
2288 + return ret;
2289 +@@ -634,16 +521,24 @@ static struct pernet_operations nf_ct_net_ops = {
2290 + .exit = nf_ct_net_exit,
2291 + };
2292 +
2293 ++static const struct rhashtable_params nfct_rhash_params = {
2294 ++ .head_offset = offsetof(struct inet_frag_queue, node),
2295 ++ .hashfn = ip6frag_key_hashfn,
2296 ++ .obj_hashfn = ip6frag_obj_hashfn,
2297 ++ .obj_cmpfn = ip6frag_obj_cmpfn,
2298 ++ .automatic_shrinking = true,
2299 ++};
2300 ++
2301 + int nf_ct_frag6_init(void)
2302 + {
2303 + int ret = 0;
2304 +
2305 +- nf_frags.constructor = ip6_frag_init;
2306 ++ nf_frags.constructor = ip6frag_init;
2307 + nf_frags.destructor = NULL;
2308 + nf_frags.qsize = sizeof(struct frag_queue);
2309 + nf_frags.frag_expire = nf_ct_frag6_expire;
2310 + nf_frags.frags_cache_name = nf_frags_cache_name;
2311 +- nf_frags.rhash_params = ip6_rhash_params;
2312 ++ nf_frags.rhash_params = nfct_rhash_params;
2313 + ret = inet_frags_init(&nf_frags);
2314 + if (ret)
2315 + goto out;
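One detail the nf_conntrack_reasm.c rework keeps intact is how the fragment header is removed before the ICV is calculated: everything in front of it is shifted forward by sizeof(struct frag_hdr) with memmove(), and the mac/network header offsets are bumped so the packet simply starts eight bytes later. A flat-buffer sketch of that shift — not the skb API, and the byte layout is made up for the example:

#include <stdio.h>
#include <string.h>

#define FRAG_HDR_LEN 8

/* buf holds: [preceding headers, hdr_len bytes][frag header][payload].
 * Returns the new offset of the first header byte inside buf.
 */
static size_t strip_frag_header(char *buf, size_t hdr_len)
{
    memmove(buf + FRAG_HDR_LEN, buf, hdr_len);
    return FRAG_HDR_LEN;     /* headers now start here; frag header is gone */
}

int main(void)
{
    /* 4 bytes of "headers", an 8-byte frag header, then the payload */
    char pkt[] = "HDRSffffffffPAYLOAD";
    size_t start = strip_frag_header(pkt, 4);

    printf("%s\n", pkt + start);   /* prints "HDRSPAYLOAD" */
    return 0;
}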
2316 +diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
2317 +index f06b0471f39f..c4070e9c4260 100644
2318 +--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
2319 ++++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
2320 +@@ -14,8 +14,7 @@
2321 + #include <linux/skbuff.h>
2322 + #include <linux/icmp.h>
2323 + #include <linux/sysctl.h>
2324 +-#include <net/ipv6.h>
2325 +-#include <net/inet_frag.h>
2326 ++#include <net/ipv6_frag.h>
2327 +
2328 + #include <linux/netfilter_ipv6.h>
2329 + #include <linux/netfilter_bridge.h>
2330 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
2331 +index 74ffbcb306a6..4aed9c45a91a 100644
2332 +--- a/net/ipv6/reassembly.c
2333 ++++ b/net/ipv6/reassembly.c
2334 +@@ -57,18 +57,11 @@
2335 + #include <net/rawv6.h>
2336 + #include <net/ndisc.h>
2337 + #include <net/addrconf.h>
2338 +-#include <net/inet_frag.h>
2339 ++#include <net/ipv6_frag.h>
2340 + #include <net/inet_ecn.h>
2341 +
2342 + static const char ip6_frag_cache_name[] = "ip6-frags";
2343 +
2344 +-struct ip6frag_skb_cb {
2345 +- struct inet6_skb_parm h;
2346 +- int offset;
2347 +-};
2348 +-
2349 +-#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb))
2350 +-
2351 + static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
2352 + {
2353 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
2354 +@@ -76,63 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
2355 +
2356 + static struct inet_frags ip6_frags;
2357 +
2358 +-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
2359 +- struct net_device *dev);
2360 +-
2361 +-void ip6_frag_init(struct inet_frag_queue *q, const void *a)
2362 +-{
2363 +- struct frag_queue *fq = container_of(q, struct frag_queue, q);
2364 +- const struct frag_v6_compare_key *key = a;
2365 +-
2366 +- q->key.v6 = *key;
2367 +- fq->ecn = 0;
2368 +-}
2369 +-EXPORT_SYMBOL(ip6_frag_init);
2370 +-
2371 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
2372 +-{
2373 +- struct net_device *dev = NULL;
2374 +- struct sk_buff *head;
2375 +-
2376 +- rcu_read_lock();
2377 +- spin_lock(&fq->q.lock);
2378 +-
2379 +- if (fq->q.flags & INET_FRAG_COMPLETE)
2380 +- goto out;
2381 +-
2382 +- inet_frag_kill(&fq->q);
2383 +-
2384 +- dev = dev_get_by_index_rcu(net, fq->iif);
2385 +- if (!dev)
2386 +- goto out;
2387 +-
2388 +- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
2389 +- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
2390 +-
2391 +- /* Don't send error if the first segment did not arrive. */
2392 +- head = fq->q.fragments;
2393 +- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
2394 +- goto out;
2395 +-
2396 +- /* But use as source device on which LAST ARRIVED
2397 +- * segment was received. And do not use fq->dev
2398 +- * pointer directly, device might already disappeared.
2399 +- */
2400 +- head->dev = dev;
2401 +- skb_get(head);
2402 +- spin_unlock(&fq->q.lock);
2403 +-
2404 +- icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
2405 +- kfree_skb(head);
2406 +- goto out_rcu_unlock;
2407 +-
2408 +-out:
2409 +- spin_unlock(&fq->q.lock);
2410 +-out_rcu_unlock:
2411 +- rcu_read_unlock();
2412 +- inet_frag_put(&fq->q);
2413 +-}
2414 +-EXPORT_SYMBOL(ip6_expire_frag_queue);
2415 ++static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
2416 ++ struct sk_buff *prev_tail, struct net_device *dev);
2417 +
2418 + static void ip6_frag_expire(unsigned long data)
2419 + {
2420 +@@ -142,7 +80,7 @@ static void ip6_frag_expire(unsigned long data)
2421 + fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
2422 + net = container_of(fq->q.net, struct net, ipv6.frags);
2423 +
2424 +- ip6_expire_frag_queue(net, fq);
2425 ++ ip6frag_expire_frag_queue(net, fq);
2426 + }
2427 +
2428 + static struct frag_queue *
2429 +@@ -169,27 +107,29 @@ fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
2430 + }
2431 +
2432 + static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
2433 +- struct frag_hdr *fhdr, int nhoff)
2434 ++ struct frag_hdr *fhdr, int nhoff,
2435 ++ u32 *prob_offset)
2436 + {
2437 +- struct sk_buff *prev, *next;
2438 +- struct net_device *dev;
2439 +- int offset, end;
2440 + struct net *net = dev_net(skb_dst(skb)->dev);
2441 ++ int offset, end, fragsize;
2442 ++ struct sk_buff *prev_tail;
2443 ++ struct net_device *dev;
2444 ++ int err = -ENOENT;
2445 + u8 ecn;
2446 +
2447 + if (fq->q.flags & INET_FRAG_COMPLETE)
2448 + goto err;
2449 +
2450 ++ err = -EINVAL;
2451 + offset = ntohs(fhdr->frag_off) & ~0x7;
2452 + end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
2453 + ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
2454 +
2455 + if ((unsigned int)end > IPV6_MAXPLEN) {
2456 +- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2457 +- IPSTATS_MIB_INHDRERRORS);
2458 +- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
2459 +- ((u8 *)&fhdr->frag_off -
2460 +- skb_network_header(skb)));
2461 ++ *prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
2462 ++ /* note that if prob_offset is set, the skb is freed elsewhere,
2463 ++ * we do not free it here.
2464 ++ */
2465 + return -1;
2466 + }
2467 +
2468 +@@ -209,7 +149,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
2469 + */
2470 + if (end < fq->q.len ||
2471 + ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
2472 +- goto err;
2473 ++ goto discard_fq;
2474 + fq->q.flags |= INET_FRAG_LAST_IN;
2475 + fq->q.len = end;
2476 + } else {
2477 +@@ -220,84 +160,51 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
2478 + /* RFC2460 says always send parameter problem in
2479 + * this case. -DaveM
2480 + */
2481 +- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2482 +- IPSTATS_MIB_INHDRERRORS);
2483 +- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
2484 +- offsetof(struct ipv6hdr, payload_len));
2485 ++ *prob_offset = offsetof(struct ipv6hdr, payload_len);
2486 + return -1;
2487 + }
2488 + if (end > fq->q.len) {
2489 + /* Some bits beyond end -> corruption. */
2490 + if (fq->q.flags & INET_FRAG_LAST_IN)
2491 +- goto err;
2492 ++ goto discard_fq;
2493 + fq->q.len = end;
2494 + }
2495 + }
2496 +
2497 + if (end == offset)
2498 +- goto err;
2499 ++ goto discard_fq;
2500 +
2501 ++ err = -ENOMEM;
2502 + /* Point into the IP datagram 'data' part. */
2503 + if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
2504 +- goto err;
2505 +-
2506 +- if (pskb_trim_rcsum(skb, end - offset))
2507 +- goto err;
2508 +-
2509 +- /* Find out which fragments are in front and at the back of us
2510 +- * in the chain of fragments so far. We must know where to put
2511 +- * this fragment, right?
2512 +- */
2513 +- prev = fq->q.fragments_tail;
2514 +- if (!prev || FRAG6_CB(prev)->offset < offset) {
2515 +- next = NULL;
2516 +- goto found;
2517 +- }
2518 +- prev = NULL;
2519 +- for (next = fq->q.fragments; next != NULL; next = next->next) {
2520 +- if (FRAG6_CB(next)->offset >= offset)
2521 +- break; /* bingo! */
2522 +- prev = next;
2523 +- }
2524 +-
2525 +-found:
2526 +- /* RFC5722, Section 4, amended by Errata ID : 3089
2527 +- * When reassembling an IPv6 datagram, if
2528 +- * one or more its constituent fragments is determined to be an
2529 +- * overlapping fragment, the entire datagram (and any constituent
2530 +- * fragments) MUST be silently discarded.
2531 +- */
2532 +-
2533 +- /* Check for overlap with preceding fragment. */
2534 +- if (prev &&
2535 +- (FRAG6_CB(prev)->offset + prev->len) > offset)
2536 + goto discard_fq;
2537 +
2538 +- /* Look for overlap with succeeding segment. */
2539 +- if (next && FRAG6_CB(next)->offset < end)
2540 ++ err = pskb_trim_rcsum(skb, end - offset);
2541 ++ if (err)
2542 + goto discard_fq;
2543 +
2544 +- FRAG6_CB(skb)->offset = offset;
2545 ++ /* Note : skb->rbnode and skb->dev share the same location. */
2546 ++ dev = skb->dev;
2547 ++ /* Makes sure compiler wont do silly aliasing games */
2548 ++ barrier();
2549 +
2550 +- /* Insert this fragment in the chain of fragments. */
2551 +- skb->next = next;
2552 +- if (!next)
2553 +- fq->q.fragments_tail = skb;
2554 +- if (prev)
2555 +- prev->next = skb;
2556 +- else
2557 +- fq->q.fragments = skb;
2558 ++ prev_tail = fq->q.fragments_tail;
2559 ++ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
2560 ++ if (err)
2561 ++ goto insert_error;
2562 +
2563 +- dev = skb->dev;
2564 +- if (dev) {
2565 ++ if (dev)
2566 + fq->iif = dev->ifindex;
2567 +- skb->dev = NULL;
2568 +- }
2569 ++
2570 + fq->q.stamp = skb->tstamp;
2571 + fq->q.meat += skb->len;
2572 + fq->ecn |= ecn;
2573 + add_frag_mem_limit(fq->q.net, skb->truesize);
2574 +
2575 ++ fragsize = -skb_network_offset(skb) + skb->len;
2576 ++ if (fragsize > fq->q.max_size)
2577 ++ fq->q.max_size = fragsize;
2578 ++
2579 + /* The first fragment.
2580 + * nhoffset is obtained from the first fragment, of course.
2581 + */
2582 +@@ -308,44 +215,48 @@ found:
2583 +
2584 + if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
2585 + fq->q.meat == fq->q.len) {
2586 +- int res;
2587 + unsigned long orefdst = skb->_skb_refdst;
2588 +
2589 + skb->_skb_refdst = 0UL;
2590 +- res = ip6_frag_reasm(fq, prev, dev);
2591 ++ err = ip6_frag_reasm(fq, skb, prev_tail, dev);
2592 + skb->_skb_refdst = orefdst;
2593 +- return res;
2594 ++ return err;
2595 + }
2596 +
2597 + skb_dst_drop(skb);
2598 +- return -1;
2599 ++ return -EINPROGRESS;
2600 +
2601 ++insert_error:
2602 ++ if (err == IPFRAG_DUP) {
2603 ++ kfree_skb(skb);
2604 ++ return -EINVAL;
2605 ++ }
2606 ++ err = -EINVAL;
2607 ++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2608 ++ IPSTATS_MIB_REASM_OVERLAPS);
2609 + discard_fq:
2610 + inet_frag_kill(&fq->q);
2611 +-err:
2612 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2613 + IPSTATS_MIB_REASMFAILS);
2614 ++err:
2615 + kfree_skb(skb);
2616 +- return -1;
2617 ++ return err;
2618 + }
2619 +
2620 + /*
2621 + * Check if this packet is complete.
2622 +- * Returns NULL on failure by any reason, and pointer
2623 +- * to current nexthdr field in reassembled frame.
2624 + *
2625 + * It is called with locked fq, and caller must check that
2626 + * queue is eligible for reassembly i.e. it is not COMPLETE,
2627 + * the last and the first frames arrived and all the bits are here.
2628 + */
2629 +-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
2630 +- struct net_device *dev)
2631 ++static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
2632 ++ struct sk_buff *prev_tail, struct net_device *dev)
2633 + {
2634 + struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
2635 +- struct sk_buff *fp, *head = fq->q.fragments;
2636 +- int payload_len;
2637 + unsigned int nhoff;
2638 +- int sum_truesize;
2639 ++ void *reasm_data;
2640 ++ int payload_len;
2641 + u8 ecn;
2642 +
2643 + inet_frag_kill(&fq->q);
2644 +@@ -354,113 +265,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
2645 + if (unlikely(ecn == 0xff))
2646 + goto out_fail;
2647 +
2648 +- /* Make the one we just received the head. */
2649 +- if (prev) {
2650 +- head = prev->next;
2651 +- fp = skb_clone(head, GFP_ATOMIC);
2652 +-
2653 +- if (!fp)
2654 +- goto out_oom;
2655 +-
2656 +- fp->next = head->next;
2657 +- if (!fp->next)
2658 +- fq->q.fragments_tail = fp;
2659 +- prev->next = fp;
2660 +-
2661 +- skb_morph(head, fq->q.fragments);
2662 +- head->next = fq->q.fragments->next;
2663 +-
2664 +- consume_skb(fq->q.fragments);
2665 +- fq->q.fragments = head;
2666 +- }
2667 +-
2668 +- WARN_ON(head == NULL);
2669 +- WARN_ON(FRAG6_CB(head)->offset != 0);
2670 ++ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
2671 ++ if (!reasm_data)
2672 ++ goto out_oom;
2673 +
2674 +- /* Unfragmented part is taken from the first segment. */
2675 +- payload_len = ((head->data - skb_network_header(head)) -
2676 ++ payload_len = ((skb->data - skb_network_header(skb)) -
2677 + sizeof(struct ipv6hdr) + fq->q.len -
2678 + sizeof(struct frag_hdr));
2679 + if (payload_len > IPV6_MAXPLEN)
2680 + goto out_oversize;
2681 +
2682 +- /* Head of list must not be cloned. */
2683 +- if (skb_unclone(head, GFP_ATOMIC))
2684 +- goto out_oom;
2685 +-
2686 +- /* If the first fragment is fragmented itself, we split
2687 +- * it to two chunks: the first with data and paged part
2688 +- * and the second, holding only fragments. */
2689 +- if (skb_has_frag_list(head)) {
2690 +- struct sk_buff *clone;
2691 +- int i, plen = 0;
2692 +-
2693 +- clone = alloc_skb(0, GFP_ATOMIC);
2694 +- if (!clone)
2695 +- goto out_oom;
2696 +- clone->next = head->next;
2697 +- head->next = clone;
2698 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
2699 +- skb_frag_list_init(head);
2700 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
2701 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
2702 +- clone->len = clone->data_len = head->data_len - plen;
2703 +- head->data_len -= clone->len;
2704 +- head->len -= clone->len;
2705 +- clone->csum = 0;
2706 +- clone->ip_summed = head->ip_summed;
2707 +- add_frag_mem_limit(fq->q.net, clone->truesize);
2708 +- }
2709 +-
2710 + /* We have to remove fragment header from datagram and to relocate
2711 + * header in order to calculate ICV correctly. */
2712 + nhoff = fq->nhoffset;
2713 +- skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
2714 +- memmove(head->head + sizeof(struct frag_hdr), head->head,
2715 +- (head->data - head->head) - sizeof(struct frag_hdr));
2716 +- if (skb_mac_header_was_set(head))
2717 +- head->mac_header += sizeof(struct frag_hdr);
2718 +- head->network_header += sizeof(struct frag_hdr);
2719 +-
2720 +- skb_reset_transport_header(head);
2721 +- skb_push(head, head->data - skb_network_header(head));
2722 +-
2723 +- sum_truesize = head->truesize;
2724 +- for (fp = head->next; fp;) {
2725 +- bool headstolen;
2726 +- int delta;
2727 +- struct sk_buff *next = fp->next;
2728 +-
2729 +- sum_truesize += fp->truesize;
2730 +- if (head->ip_summed != fp->ip_summed)
2731 +- head->ip_summed = CHECKSUM_NONE;
2732 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
2733 +- head->csum = csum_add(head->csum, fp->csum);
2734 +-
2735 +- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
2736 +- kfree_skb_partial(fp, headstolen);
2737 +- } else {
2738 +- if (!skb_shinfo(head)->frag_list)
2739 +- skb_shinfo(head)->frag_list = fp;
2740 +- head->data_len += fp->len;
2741 +- head->len += fp->len;
2742 +- head->truesize += fp->truesize;
2743 +- }
2744 +- fp = next;
2745 +- }
2746 +- sub_frag_mem_limit(fq->q.net, sum_truesize);
2747 ++ skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
2748 ++ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
2749 ++ (skb->data - skb->head) - sizeof(struct frag_hdr));
2750 ++ if (skb_mac_header_was_set(skb))
2751 ++ skb->mac_header += sizeof(struct frag_hdr);
2752 ++ skb->network_header += sizeof(struct frag_hdr);
2753 +
2754 +- head->next = NULL;
2755 +- head->dev = dev;
2756 +- head->tstamp = fq->q.stamp;
2757 +- ipv6_hdr(head)->payload_len = htons(payload_len);
2758 +- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
2759 +- IP6CB(head)->nhoff = nhoff;
2760 +- IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
2761 ++ skb_reset_transport_header(skb);
2762 ++
2763 ++ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
2764 ++
2765 ++ skb->dev = dev;
2766 ++ ipv6_hdr(skb)->payload_len = htons(payload_len);
2767 ++ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
2768 ++ IP6CB(skb)->nhoff = nhoff;
2769 ++ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
2770 ++ IP6CB(skb)->frag_max_size = fq->q.max_size;
2771 +
2772 + /* Yes, and fold redundant checksum back. 8) */
2773 +- skb_postpush_rcsum(head, skb_network_header(head),
2774 +- skb_network_header_len(head));
2775 ++ skb_postpush_rcsum(skb, skb_network_header(skb),
2776 ++ skb_network_header_len(skb));
2777 +
2778 + rcu_read_lock();
2779 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
2780 +@@ -468,6 +306,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
2781 + fq->q.fragments = NULL;
2782 + fq->q.rb_fragments = RB_ROOT;
2783 + fq->q.fragments_tail = NULL;
2784 ++ fq->q.last_run_head = NULL;
2785 + return 1;
2786 +
2787 + out_oversize:
2788 +@@ -479,6 +318,7 @@ out_fail:
2789 + rcu_read_lock();
2790 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
2791 + rcu_read_unlock();
2792 ++ inet_frag_kill(&fq->q);
2793 + return -1;
2794 + }
2795 +
2796 +@@ -517,22 +357,26 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
2797 + return 1;
2798 + }
2799 +
2800 +- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
2801 +- fhdr->frag_off & htons(IP6_MF))
2802 +- goto fail_hdr;
2803 +-
2804 + iif = skb->dev ? skb->dev->ifindex : 0;
2805 + fq = fq_find(net, fhdr->identification, hdr, iif);
2806 + if (fq) {
2807 ++ u32 prob_offset = 0;
2808 + int ret;
2809 +
2810 + spin_lock(&fq->q.lock);
2811 +
2812 + fq->iif = iif;
2813 +- ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
2814 ++ ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
2815 ++ &prob_offset);
2816 +
2817 + spin_unlock(&fq->q.lock);
2818 + inet_frag_put(&fq->q);
2819 ++ if (prob_offset) {
2820 ++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2821 ++ IPSTATS_MIB_INHDRERRORS);
2822 ++ /* icmpv6_param_prob() calls kfree_skb(skb) */
2823 ++ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
2824 ++ }
2825 + return ret;
2826 + }
2827 +
2828 +@@ -700,42 +544,19 @@ static struct pernet_operations ip6_frags_ops = {
2829 + .exit = ipv6_frags_exit_net,
2830 + };
2831 +
2832 +-static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
2833 +-{
2834 +- return jhash2(data,
2835 +- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
2836 +-}
2837 +-
2838 +-static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
2839 +-{
2840 +- const struct inet_frag_queue *fq = data;
2841 +-
2842 +- return jhash2((const u32 *)&fq->key.v6,
2843 +- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
2844 +-}
2845 +-
2846 +-static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
2847 +-{
2848 +- const struct frag_v6_compare_key *key = arg->key;
2849 +- const struct inet_frag_queue *fq = ptr;
2850 +-
2851 +- return !!memcmp(&fq->key, key, sizeof(*key));
2852 +-}
2853 +-
2854 +-const struct rhashtable_params ip6_rhash_params = {
2855 ++static const struct rhashtable_params ip6_rhash_params = {
2856 + .head_offset = offsetof(struct inet_frag_queue, node),
2857 +- .hashfn = ip6_key_hashfn,
2858 +- .obj_hashfn = ip6_obj_hashfn,
2859 +- .obj_cmpfn = ip6_obj_cmpfn,
2860 ++ .hashfn = ip6frag_key_hashfn,
2861 ++ .obj_hashfn = ip6frag_obj_hashfn,
2862 ++ .obj_cmpfn = ip6frag_obj_cmpfn,
2863 + .automatic_shrinking = true,
2864 + };
2865 +-EXPORT_SYMBOL(ip6_rhash_params);
2866 +
2867 + int __init ipv6_frag_init(void)
2868 + {
2869 + int ret;
2870 +
2871 +- ip6_frags.constructor = ip6_frag_init;
2872 ++ ip6_frags.constructor = ip6frag_init;
2873 + ip6_frags.destructor = NULL;
2874 + ip6_frags.qsize = sizeof(struct frag_queue);
2875 + ip6_frags.frag_expire = ip6_frag_expire;
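The net/ipv6/reassembly.c hunks keep the long-standing size check in ip6_frag_reasm(): the reassembled payload is the unfragmentable part measured on the head skb, plus fq->q.len, minus the fragment header that gets stripped, and it must not exceed IPV6_MAXPLEN. A sketch of that arithmetic with plain integers standing in for the skb offsets (reassembled_payload_len() is an illustrative name, not a kernel symbol):

#include <stdio.h>

#define IPV6_HDR_LEN  40
#define FRAG_HDR_LEN  8
#define IPV6_MAXPLEN  65535

/* unfrag_len: bytes from the network header to the fragment data on the
 * first fragment (IPv6 header + extension headers + fragment header).
 * total_frag_len: fq->q.len, the length of the reassembled fragmentable part.
 */
static int reassembled_payload_len(int unfrag_len, int total_frag_len)
{
    int payload_len = unfrag_len - IPV6_HDR_LEN + total_frag_len - FRAG_HDR_LEN;

    return payload_len > IPV6_MAXPLEN ? -1 : payload_len;
}

int main(void)
{
    printf("%d\n", reassembled_payload_len(48, 3000));   /* 3000: fits */
    printf("%d\n", reassembled_payload_len(48, 70000));  /* -1: oversize */
    return 0;
}

The other behavioural change in this file is that header-field errors are now reported through prob_offset and the icmpv6_param_prob() call happens in ipv6_frag_rcv() after the queue lock is dropped, with the comment above noting that the helper frees the skb itself.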
2876 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
2877 +index f135814c34ad..02d6f38f7869 100644
2878 +--- a/net/openvswitch/conntrack.c
2879 ++++ b/net/openvswitch/conntrack.c
2880 +@@ -23,6 +23,7 @@
2881 + #include <net/netfilter/nf_conntrack_seqadj.h>
2882 + #include <net/netfilter/nf_conntrack_zones.h>
2883 + #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
2884 ++#include <net/ipv6_frag.h>
2885 +
2886 + #ifdef CONFIG_NF_NAT_NEEDED
2887 + #include <linux/netfilter/nf_nat.h>
2888 +diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
2889 +index 4fe8f4fec4ee..da84d6b2f72c 100644
2890 +--- a/net/rds/ib_fmr.c
2891 ++++ b/net/rds/ib_fmr.c
2892 +@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
2893 + else
2894 + pool = rds_ibdev->mr_1m_pool;
2895 +
2896 ++ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
2897 ++ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
2898 ++
2899 ++ /* Switch pools if one of the pool is reaching upper limit */
2900 ++ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
2901 ++ if (pool->pool_type == RDS_IB_MR_8K_POOL)
2902 ++ pool = rds_ibdev->mr_1m_pool;
2903 ++ else
2904 ++ pool = rds_ibdev->mr_8k_pool;
2905 ++ }
2906 ++
2907 + ibmr = rds_ib_try_reuse_ibmr(pool);
2908 + if (ibmr)
2909 + return ibmr;
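The rds/ib_fmr.c hunk moves the flush trigger into rds_ib_alloc_fmr() and adds a fallback: once a pool's dirty count reaches 10% of max_items the flush worker is queued, and at 90% the allocation switches to the other pool. A small sketch of those thresholds — the struct and pick_pool() below are stand-ins, not the rds types:

#include <stdbool.h>
#include <stdio.h>

struct mr_pool {
    const char *name;
    int dirty_count;
    int max_items;
    bool flush_queued;
};

static struct mr_pool *pick_pool(struct mr_pool *preferred, struct mr_pool *other)
{
    if (preferred->dirty_count >= preferred->max_items / 10)
        preferred->flush_queued = true;          /* queue_delayed_work() in the patch */
    if (preferred->dirty_count >= preferred->max_items * 9 / 10)
        return other;                            /* pool nearly full: switch */
    return preferred;
}

int main(void)
{
    struct mr_pool p8k = { "8k", 95, 100, false };
    struct mr_pool p1m = { "1m",  5, 100, false };

    struct mr_pool *use = pick_pool(&p8k, &p1m);
    printf("use=%s flush_queued=%d\n", use->name, p8k.flush_queued);
    return 0;
}

The companion ib_rdma.c hunk removes the old 10% check from rds_ib_try_reuse_ibmr(), since the caller now performs it before choosing a pool.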
2910 +diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
2911 +index 977f69886c00..91b53d462fc0 100644
2912 +--- a/net/rds/ib_rdma.c
2913 ++++ b/net/rds/ib_rdma.c
2914 +@@ -442,9 +442,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
2915 + struct rds_ib_mr *ibmr = NULL;
2916 + int iter = 0;
2917 +
2918 +- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
2919 +- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
2920 +-
2921 + while (1) {
2922 + ibmr = rds_ib_reuse_mr(pool);
2923 + if (ibmr)
2924 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
2925 +index cab50ece6f3d..cdcc0fea9f5a 100644
2926 +--- a/net/sunrpc/cache.c
2927 ++++ b/net/sunrpc/cache.c
2928 +@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
2929 + h->last_refresh = now;
2930 + }
2931 +
2932 ++static inline int cache_is_valid(struct cache_head *h);
2933 + static void cache_fresh_locked(struct cache_head *head, time_t expiry,
2934 + struct cache_detail *detail);
2935 + static void cache_fresh_unlocked(struct cache_head *head,
2936 +@@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
2937 + if (cache_is_expired(detail, tmp)) {
2938 + hlist_del_init(&tmp->cache_list);
2939 + detail->entries --;
2940 ++ if (cache_is_valid(tmp) == -EAGAIN)
2941 ++ set_bit(CACHE_NEGATIVE, &tmp->flags);
2942 + cache_fresh_locked(tmp, 0, detail);
2943 + freeme = tmp;
2944 + break;
2945 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
2946 +index d947b8210399..0cf9403b4c44 100644
2947 +--- a/net/tipc/netlink_compat.c
2948 ++++ b/net/tipc/netlink_compat.c
2949 +@@ -262,8 +262,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
2950 + if (msg->rep_type)
2951 + tipc_tlv_init(msg->rep, msg->rep_type);
2952 +
2953 +- if (cmd->header)
2954 +- (*cmd->header)(msg);
2955 ++ if (cmd->header) {
2956 ++ err = (*cmd->header)(msg);
2957 ++ if (err) {
2958 ++ kfree_skb(msg->rep);
2959 ++ msg->rep = NULL;
2960 ++ return err;
2961 ++ }
2962 ++ }
2963 +
2964 + arg = nlmsg_new(0, GFP_KERNEL);
2965 + if (!arg) {
2966 +@@ -388,7 +394,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
2967 + if (!bearer)
2968 + return -EMSGSIZE;
2969 +
2970 +- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
2971 ++ len = TLV_GET_DATA_LEN(msg->req);
2972 ++ len -= offsetof(struct tipc_bearer_config, name);
2973 ++ if (len <= 0)
2974 ++ return -EINVAL;
2975 ++
2976 ++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
2977 + if (!string_is_valid(b->name, len))
2978 + return -EINVAL;
2979 +
2980 +@@ -757,7 +768,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
2981 +
2982 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
2983 +
2984 +- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
2985 ++ len = TLV_GET_DATA_LEN(msg->req);
2986 ++ len -= offsetof(struct tipc_link_config, name);
2987 ++ if (len <= 0)
2988 ++ return -EINVAL;
2989 ++
2990 ++ len = min_t(int, len, TIPC_MAX_LINK_NAME);
2991 + if (!string_is_valid(lc->name, len))
2992 + return -EINVAL;
2993 +
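Both tipc_nl_compat hunks above apply the same fix: the usable length of the embedded name is the TLV payload length minus the offset of the name field, and a payload too short to even reach that field is rejected instead of being passed to string_is_valid() with a bogus length. A sketch of that computation using offsetof() — the struct below is simplified, not the real tipc layout:

#include <stddef.h>
#include <stdio.h>

#define MAX_NAME_LEN 32

struct bearer_config {
    unsigned int priority;
    unsigned int disc_domain;
    char name[MAX_NAME_LEN];
};

/* Returns the usable name length, or -1 if the payload cannot hold a name. */
static int name_len_from_tlv(int tlv_data_len)
{
    int len = tlv_data_len - (int)offsetof(struct bearer_config, name);

    if (len <= 0)
        return -1;
    return len < MAX_NAME_LEN ? len : MAX_NAME_LEN;
}

int main(void)
{
    printf("%d\n", name_len_from_tlv(4));                     /* -1: too short */
    printf("%d\n", name_len_from_tlv(8 + 5));                 /* 5 bytes of name */
    printf("%d\n", name_len_from_tlv(8 + 2 * MAX_NAME_LEN));  /* clamped to 32 */
    return 0;
}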
2994 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
2995 +index 9c07c76c504d..cc4b4abb2759 100644
2996 +--- a/net/vmw_vsock/virtio_transport_common.c
2997 ++++ b/net/vmw_vsock/virtio_transport_common.c
2998 +@@ -601,6 +601,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
2999 + */
3000 + static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
3001 + {
3002 ++ const struct virtio_transport *t;
3003 ++ struct virtio_vsock_pkt *reply;
3004 + struct virtio_vsock_pkt_info info = {
3005 + .op = VIRTIO_VSOCK_OP_RST,
3006 + .type = le16_to_cpu(pkt->hdr.type),
3007 +@@ -611,15 +613,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
3008 + if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
3009 + return 0;
3010 +
3011 +- pkt = virtio_transport_alloc_pkt(&info, 0,
3012 +- le64_to_cpu(pkt->hdr.dst_cid),
3013 +- le32_to_cpu(pkt->hdr.dst_port),
3014 +- le64_to_cpu(pkt->hdr.src_cid),
3015 +- le32_to_cpu(pkt->hdr.src_port));
3016 +- if (!pkt)
3017 ++ reply = virtio_transport_alloc_pkt(&info, 0,
3018 ++ le64_to_cpu(pkt->hdr.dst_cid),
3019 ++ le32_to_cpu(pkt->hdr.dst_port),
3020 ++ le64_to_cpu(pkt->hdr.src_cid),
3021 ++ le32_to_cpu(pkt->hdr.src_port));
3022 ++ if (!reply)
3023 + return -ENOMEM;
3024 +
3025 +- return virtio_transport_get_ops()->send_pkt(pkt);
3026 ++ t = virtio_transport_get_ops();
3027 ++ if (!t) {
3028 ++ virtio_transport_free_pkt(reply);
3029 ++ return -ENOTCONN;
3030 ++ }
3031 ++
3032 ++ return t->send_pkt(reply);
3033 + }
3034 +
3035 + static void virtio_transport_wait_close(struct sock *sk, long timeout)
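The virtio_transport hunk above stops reusing the received packet pointer for the reset reply, checks that a transport is still registered before calling through its ops, and frees the reply if none is. A minimal sketch of that shape — the types, send_reset_reply(), and registered_ops are stand-ins, not the vsock API, and the success path assumes send_pkt() consumes the reply as it does in the kernel:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int op; };
struct transport_ops { int (*send_pkt)(struct pkt *p); };

static const struct transport_ops *registered_ops;   /* may be NULL */

static int send_reset_reply(const struct pkt *received)
{
    if (received->op == 42)            /* never answer a reset with a reset */
        return 0;

    struct pkt *reply = calloc(1, sizeof(*reply));
    if (!reply)
        return -ENOMEM;
    reply->op = 42;                    /* stand-in for VIRTIO_VSOCK_OP_RST */

    if (!registered_ops) {             /* transport already gone */
        free(reply);
        return -ENOTCONN;
    }
    return registered_ops->send_pkt(reply);   /* send_pkt() owns reply now */
}

int main(void)
{
    struct pkt incoming = { 1 };

    printf("%d\n", send_reset_reply(&incoming));   /* -ENOTCONN: no transport */
    return 0;
}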
3036 +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
3037 +index 7f430778f418..558dea61db11 100644
3038 +--- a/scripts/Kbuild.include
3039 ++++ b/scripts/Kbuild.include
3040 +@@ -166,9 +166,7 @@ cc-ldoption = $(call try-run,\
3041 +
3042 + # ld-option
3043 + # Usage: LDFLAGS += $(call ld-option, -X)
3044 +-ld-option = $(call try-run,\
3045 +- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
3046 +- $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
3047 ++ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
3048 +
3049 + # ar-option
3050 + # Usage: KBUILD_ARFLAGS := $(call ar-option,D)