From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 14 Nov 2018 14:01:06
Message-Id: 1542204041.f48fbe77bdf41c85593fae02d1b760d2f357872e.mpagano@gentoo
1 commit: f48fbe77bdf41c85593fae02d1b760d2f357872e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 19 22:39:57 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 14 14:00:41 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f48fbe77
7
8 Linux patch 4.14.71
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1070_linux-4.14.71.patch | 6264 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6268 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 2e98e70..cc63ee7 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -323,6 +323,10 @@ Patch: 1069_linux-4.14.70.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.14.70
23
24 +Patch: 1070_linux-4.14.71.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.14.71
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1070_linux-4.14.71.patch b/1070_linux-4.14.71.patch
33 new file mode 100644
34 index 0000000..cdd7c12
35 --- /dev/null
36 +++ b/1070_linux-4.14.71.patch
37 @@ -0,0 +1,6264 @@
38 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
39 +index d499676890d8..a054b5ad410a 100644
40 +--- a/Documentation/networking/ip-sysctl.txt
41 ++++ b/Documentation/networking/ip-sysctl.txt
42 +@@ -133,14 +133,11 @@ min_adv_mss - INTEGER
43 +
44 + IP Fragmentation:
45 +
46 +-ipfrag_high_thresh - INTEGER
47 +- Maximum memory used to reassemble IP fragments. When
48 +- ipfrag_high_thresh bytes of memory is allocated for this purpose,
49 +- the fragment handler will toss packets until ipfrag_low_thresh
50 +- is reached. This also serves as a maximum limit to namespaces
51 +- different from the initial one.
52 +-
53 +-ipfrag_low_thresh - INTEGER
54 ++ipfrag_high_thresh - LONG INTEGER
55 ++ Maximum memory used to reassemble IP fragments.
56 ++
57 ++ipfrag_low_thresh - LONG INTEGER
58 ++ (Obsolete since linux-4.17)
59 + Maximum memory used to reassemble IP fragments before the kernel
60 + begins to remove incomplete fragment queues to free up resources.
61 + The kernel still accepts new fragments for defragmentation.
62 +diff --git a/Makefile b/Makefile
63 +index aa458afa7fa2..dd4eaeeb2050 100644
64 +--- a/Makefile
65 ++++ b/Makefile
66 +@@ -1,7 +1,7 @@
67 + # SPDX-License-Identifier: GPL-2.0
68 + VERSION = 4
69 + PATCHLEVEL = 14
70 +-SUBLEVEL = 70
71 ++SUBLEVEL = 71
72 + EXTRAVERSION =
73 + NAME = Petit Gorille
74 +
75 +diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
76 +index a8242362e551..ece78630d711 100644
77 +--- a/arch/arc/configs/axs101_defconfig
78 ++++ b/arch/arc/configs/axs101_defconfig
79 +@@ -1,5 +1,4 @@
80 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
81 +-# CONFIG_SWAP is not set
82 + CONFIG_SYSVIPC=y
83 + CONFIG_POSIX_MQUEUE=y
84 + # CONFIG_CROSS_MEMORY_ATTACH is not set
85 +diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
86 +index ef3c31cd7737..240c9251a7d4 100644
87 +--- a/arch/arc/configs/axs103_defconfig
88 ++++ b/arch/arc/configs/axs103_defconfig
89 +@@ -1,5 +1,4 @@
90 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
91 +-# CONFIG_SWAP is not set
92 + CONFIG_SYSVIPC=y
93 + CONFIG_POSIX_MQUEUE=y
94 + # CONFIG_CROSS_MEMORY_ATTACH is not set
95 +diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
96 +index 1757ac9cecbc..af54b96abee0 100644
97 +--- a/arch/arc/configs/axs103_smp_defconfig
98 ++++ b/arch/arc/configs/axs103_smp_defconfig
99 +@@ -1,5 +1,4 @@
100 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
101 +-# CONFIG_SWAP is not set
102 + CONFIG_SYSVIPC=y
103 + CONFIG_POSIX_MQUEUE=y
104 + # CONFIG_CROSS_MEMORY_ATTACH is not set
105 +diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
106 +index 8505db478904..1d92efb82c37 100644
107 +--- a/arch/mips/cavium-octeon/octeon-platform.c
108 ++++ b/arch/mips/cavium-octeon/octeon-platform.c
109 +@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
110 + return 0;
111 +
112 + pd = of_find_device_by_node(ehci_node);
113 ++ of_node_put(ehci_node);
114 + if (!pd)
115 + return 0;
116 +
117 +@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
118 + return 0;
119 +
120 + pd = of_find_device_by_node(ohci_node);
121 ++ of_node_put(ohci_node);
122 + if (!pd)
123 + return 0;
124 +
125 +diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
126 +index 5ba6fcc26fa7..94a78dbbc91f 100644
127 +--- a/arch/mips/generic/init.c
128 ++++ b/arch/mips/generic/init.c
129 +@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
130 + "mti,cpu-interrupt-controller");
131 + if (!cpu_has_veic && !intc_node)
132 + mips_cpu_irq_init();
133 ++ of_node_put(intc_node);
134 +
135 + irqchip_init();
136 + }
137 +diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
138 +index cea8ad864b3f..57b34257be2b 100644
139 +--- a/arch/mips/include/asm/io.h
140 ++++ b/arch/mips/include/asm/io.h
141 +@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
142 + /*
143 + * ISA I/O bus memory addresses are 1:1 with the physical address.
144 + */
145 +-static inline unsigned long isa_virt_to_bus(volatile void * address)
146 ++static inline unsigned long isa_virt_to_bus(volatile void *address)
147 + {
148 +- return (unsigned long)address - PAGE_OFFSET;
149 ++ return virt_to_phys(address);
150 + }
151 +
152 +-static inline void * isa_bus_to_virt(unsigned long address)
153 ++static inline void *isa_bus_to_virt(unsigned long address)
154 + {
155 +- return (void *)(address + PAGE_OFFSET);
156 ++ return phys_to_virt(address);
157 + }
158 +
159 + #define isa_page_to_bus page_to_phys
160 +diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
161 +index 019035d7225c..8f845f6e5f42 100644
162 +--- a/arch/mips/kernel/vdso.c
163 ++++ b/arch/mips/kernel/vdso.c
164 +@@ -13,6 +13,7 @@
165 + #include <linux/err.h>
166 + #include <linux/init.h>
167 + #include <linux/ioport.h>
168 ++#include <linux/kernel.h>
169 + #include <linux/mm.h>
170 + #include <linux/sched.h>
171 + #include <linux/slab.h>
172 +@@ -20,6 +21,7 @@
173 +
174 + #include <asm/abi.h>
175 + #include <asm/mips-cps.h>
176 ++#include <asm/page.h>
177 + #include <asm/vdso.h>
178 +
179 + /* Kernel-provided data used by the VDSO. */
180 +@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
181 + vvar_size = gic_size + PAGE_SIZE;
182 + size = vvar_size + image->size;
183 +
184 ++ /*
185 ++ * Find a region that's large enough for us to perform the
186 ++ * colour-matching alignment below.
187 ++ */
188 ++ if (cpu_has_dc_aliases)
189 ++ size += shm_align_mask + 1;
190 ++
191 + base = get_unmapped_area(NULL, 0, size, 0, 0);
192 + if (IS_ERR_VALUE(base)) {
193 + ret = base;
194 + goto out;
195 + }
196 +
197 ++ /*
198 ++ * If we suffer from dcache aliasing, ensure that the VDSO data page
199 ++ * mapping is coloured the same as the kernel's mapping of that memory.
200 ++ * This ensures that when the kernel updates the VDSO data userland
201 ++ * will observe it without requiring cache invalidations.
202 ++ */
203 ++ if (cpu_has_dc_aliases) {
204 ++ base = __ALIGN_MASK(base, shm_align_mask);
205 ++ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
206 ++ }
207 ++
208 + data_addr = base + gic_size;
209 + vdso_addr = data_addr + PAGE_SIZE;
210 +
211 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
212 +index e12dfa48b478..a5893b2cdc0e 100644
213 +--- a/arch/mips/mm/c-r4k.c
214 ++++ b/arch/mips/mm/c-r4k.c
215 +@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
216 + static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
217 + {
218 + /* Catch bad driver code */
219 +- BUG_ON(size == 0);
220 ++ if (WARN_ON(size == 0))
221 ++ return;
222 +
223 + preempt_disable();
224 + if (cpu_has_inclusive_pcaches) {
225 +@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
226 + static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
227 + {
228 + /* Catch bad driver code */
229 +- BUG_ON(size == 0);
230 ++ if (WARN_ON(size == 0))
231 ++ return;
232 +
233 + preempt_disable();
234 + if (cpu_has_inclusive_pcaches) {
235 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
236 +index 63f007f2de7e..4b95bdde22aa 100644
237 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
238 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
239 +@@ -427,8 +427,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
240 + int i;
241 +
242 + for (i = 0; i < npu->mmio_atsd_count; i++) {
243 +- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
244 +- return i;
245 ++ if (!test_bit(i, &npu->mmio_atsd_usage))
246 ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
247 ++ return i;
248 + }
249 +
250 + return -ENOSPC;
251 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
252 +index 4f1f5fc8139d..061906f98dc5 100644
253 +--- a/arch/s390/kvm/vsie.c
254 ++++ b/arch/s390/kvm/vsie.c
255 +@@ -170,7 +170,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
256 + return set_validity_icpt(scb_s, 0x0039U);
257 +
258 + /* copy only the wrapping keys */
259 +- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
260 ++ if (read_guest_real(vcpu, crycb_addr + 72,
261 ++ vsie_page->crycb.dea_wrapping_key_mask, 56))
262 + return set_validity_icpt(scb_s, 0x0035U);
263 +
264 + scb_s->ecb3 |= ecb3_flags;
265 +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
266 +index 48179928ff38..9d33dbf2489e 100644
267 +--- a/arch/x86/kernel/cpu/microcode/amd.c
268 ++++ b/arch/x86/kernel/cpu/microcode/amd.c
269 +@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
270 + struct microcode_amd *mc_amd;
271 + struct ucode_cpu_info *uci;
272 + struct ucode_patch *p;
273 ++ enum ucode_state ret;
274 + u32 rev, dummy;
275 +
276 + BUG_ON(raw_smp_processor_id() != cpu);
277 +@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
278 +
279 + /* need to apply patch? */
280 + if (rev >= mc_amd->hdr.patch_id) {
281 +- c->microcode = rev;
282 +- uci->cpu_sig.rev = rev;
283 +- return UCODE_OK;
284 ++ ret = UCODE_OK;
285 ++ goto out;
286 + }
287 +
288 + if (__apply_microcode_amd(mc_amd)) {
289 +@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
290 + cpu, mc_amd->hdr.patch_id);
291 + return UCODE_ERROR;
292 + }
293 +- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
294 +- mc_amd->hdr.patch_id);
295 +
296 +- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
297 +- c->microcode = mc_amd->hdr.patch_id;
298 ++ rev = mc_amd->hdr.patch_id;
299 ++ ret = UCODE_UPDATED;
300 ++
301 ++ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
302 +
303 +- return UCODE_UPDATED;
304 ++out:
305 ++ uci->cpu_sig.rev = rev;
306 ++ c->microcode = rev;
307 ++
308 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
309 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
310 ++ boot_cpu_data.microcode = rev;
311 ++
312 ++ return ret;
313 + }
314 +
315 + static int install_equiv_cpu_table(const u8 *buf)
316 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
317 +index 97ccf4c3b45b..16936a24795c 100644
318 +--- a/arch/x86/kernel/cpu/microcode/intel.c
319 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
320 +@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
321 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
322 + struct cpuinfo_x86 *c = &cpu_data(cpu);
323 + struct microcode_intel *mc;
324 ++ enum ucode_state ret;
325 + static int prev_rev;
326 + u32 rev;
327 +
328 +@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
329 + */
330 + rev = intel_get_microcode_revision();
331 + if (rev >= mc->hdr.rev) {
332 +- uci->cpu_sig.rev = rev;
333 +- c->microcode = rev;
334 +- return UCODE_OK;
335 ++ ret = UCODE_OK;
336 ++ goto out;
337 + }
338 +
339 + /*
340 +@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
341 + prev_rev = rev;
342 + }
343 +
344 ++ ret = UCODE_UPDATED;
345 ++
346 ++out:
347 + uci->cpu_sig.rev = rev;
348 +- c->microcode = rev;
349 ++ c->microcode = rev;
350 ++
351 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
352 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
353 ++ boot_cpu_data.microcode = rev;
354 +
355 +- return UCODE_UPDATED;
356 ++ return ret;
357 + }
358 +
359 + static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
360 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
361 +index 4e5a8e30cc4e..fd46d890296c 100644
362 +--- a/arch/x86/kvm/vmx.c
363 ++++ b/arch/x86/kvm/vmx.c
364 +@@ -6965,8 +6965,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
365 + if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
366 + return kvm_skip_emulated_instruction(vcpu);
367 + else
368 +- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
369 +- NULL, 0) == EMULATE_DONE;
370 ++ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
371 ++ EMULATE_DONE;
372 + }
373 +
374 + ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
375 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
376 +index c2faff548f59..794c35c4ca73 100644
377 +--- a/arch/x86/mm/fault.c
378 ++++ b/arch/x86/mm/fault.c
379 +@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
380 + if (!(address >= VMALLOC_START && address < VMALLOC_END))
381 + return -1;
382 +
383 +- WARN_ON_ONCE(in_nmi());
384 +-
385 + /*
386 + * Synchronize this task's top level page-table
387 + * with the 'reference' page table.
388 +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
389 +index 4b571f3ea009..afbbe5750a1f 100644
390 +--- a/block/bfq-cgroup.c
391 ++++ b/block/bfq-cgroup.c
392 +@@ -224,9 +224,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
393 +
394 + void bfqg_and_blkg_put(struct bfq_group *bfqg)
395 + {
396 +- bfqg_put(bfqg);
397 +-
398 + blkg_put(bfqg_to_blkg(bfqg));
399 ++
400 ++ bfqg_put(bfqg);
401 + }
402 +
403 + void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
404 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
405 +index 6714507aa6c7..3d2ab65d2dd1 100644
406 +--- a/block/blk-mq-tag.c
407 ++++ b/block/blk-mq-tag.c
408 +@@ -416,8 +416,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
409 + if (tdepth <= tags->nr_reserved_tags)
410 + return -EINVAL;
411 +
412 +- tdepth -= tags->nr_reserved_tags;
413 +-
414 + /*
415 + * If we are allowed to grow beyond the original size, allocate
416 + * a new set of tags before freeing the old one.
417 +@@ -437,7 +435,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
418 + if (tdepth > 16 * BLKDEV_MAX_RQ)
419 + return -EINVAL;
420 +
421 +- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
422 ++ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
423 ++ tags->nr_reserved_tags);
424 + if (!new)
425 + return -ENOMEM;
426 + ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
427 +@@ -454,7 +453,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
428 + * Don't need (or can't) update reserved tags here, they
429 + * remain static and should never need resizing.
430 + */
431 +- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
432 ++ sbitmap_queue_resize(&tags->bitmap_tags,
433 ++ tdepth - tags->nr_reserved_tags);
434 + }
435 +
436 + return 0;
437 +diff --git a/block/partitions/aix.c b/block/partitions/aix.c
438 +index 007f95eea0e1..903f3ed175d0 100644
439 +--- a/block/partitions/aix.c
440 ++++ b/block/partitions/aix.c
441 +@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
442 + u32 vgda_sector = 0;
443 + u32 vgda_len = 0;
444 + int numlvs = 0;
445 +- struct pvd *pvd;
446 ++ struct pvd *pvd = NULL;
447 + struct lv_info {
448 + unsigned short pps_per_lv;
449 + unsigned short pps_found;
450 +@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
451 + if (lvip[i].pps_per_lv)
452 + foundlvs += 1;
453 + }
454 ++ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
455 ++ pvd = alloc_pvd(state, vgda_sector + 17);
456 + }
457 + put_dev_sector(sect);
458 + }
459 +- pvd = alloc_pvd(state, vgda_sector + 17);
460 + if (pvd) {
461 + int numpps = be16_to_cpu(pvd->pp_count);
462 + int psn_part1 = be32_to_cpu(pvd->psn_part1);
463 +@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
464 + next_lp_ix += 1;
465 + }
466 + for (i = 0; i < state->limit; i += 1)
467 +- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
468 ++ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
469 ++ char tmp[sizeof(n[i].name) + 1]; // null char
470 ++
471 ++ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
472 + pr_warn("partition %s (%u pp's found) is "
473 + "not contiguous\n",
474 +- n[i].name, lvip[i].pps_found);
475 ++ tmp, lvip[i].pps_found);
476 ++ }
477 + kfree(pvd);
478 + }
479 + kfree(n);
480 +diff --git a/crypto/Makefile b/crypto/Makefile
481 +index adaf2c63baeb..56282e2d75ad 100644
482 +--- a/crypto/Makefile
483 ++++ b/crypto/Makefile
484 +@@ -98,7 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
485 + obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
486 + CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
487 + obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
488 +-CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
489 ++CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
490 + obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
491 + obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
492 + obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
493 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
494 +index 6cb148268676..58e4658f9dd6 100644
495 +--- a/drivers/android/binder_alloc.c
496 ++++ b/drivers/android/binder_alloc.c
497 +@@ -324,6 +324,34 @@ err_no_vma:
498 + return vma ? -ENOMEM : -ESRCH;
499 + }
500 +
501 ++static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
502 ++ struct vm_area_struct *vma)
503 ++{
504 ++ if (vma)
505 ++ alloc->vma_vm_mm = vma->vm_mm;
506 ++ /*
507 ++	 * If we see alloc->vma is not NULL, the buffer data structures are
508 ++	 * set up completely. See the smp_rmb() side in binder_alloc_get_vma().
509 ++	 * We also want to guarantee the new alloc->vma_vm_mm is always visible
510 ++	 * if alloc->vma is set.
511 ++ */
512 ++ smp_wmb();
513 ++ alloc->vma = vma;
514 ++}
515 ++
516 ++static inline struct vm_area_struct *binder_alloc_get_vma(
517 ++ struct binder_alloc *alloc)
518 ++{
519 ++ struct vm_area_struct *vma = NULL;
520 ++
521 ++ if (alloc->vma) {
522 ++ /* Look at description in binder_alloc_set_vma */
523 ++ smp_rmb();
524 ++ vma = alloc->vma;
525 ++ }
526 ++ return vma;
527 ++}
528 ++
529 + struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
530 + size_t data_size,
531 + size_t offsets_size,
532 +@@ -339,7 +367,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
533 + size_t size, data_offsets_size;
534 + int ret;
535 +
536 +- if (alloc->vma == NULL) {
537 ++ if (!binder_alloc_get_vma(alloc)) {
538 + pr_err("%d: binder_alloc_buf, no vma\n",
539 + alloc->pid);
540 + return ERR_PTR(-ESRCH);
541 +@@ -712,9 +740,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
542 + buffer->free = 1;
543 + binder_insert_free_buffer(alloc, buffer);
544 + alloc->free_async_space = alloc->buffer_size / 2;
545 +- barrier();
546 +- alloc->vma = vma;
547 +- alloc->vma_vm_mm = vma->vm_mm;
548 ++ binder_alloc_set_vma(alloc, vma);
549 + mmgrab(alloc->vma_vm_mm);
550 +
551 + return 0;
552 +@@ -741,10 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
553 + int buffers, page_count;
554 + struct binder_buffer *buffer;
555 +
556 +- BUG_ON(alloc->vma);
557 +-
558 + buffers = 0;
559 + mutex_lock(&alloc->mutex);
560 ++ BUG_ON(alloc->vma);
561 ++
562 + while ((n = rb_first(&alloc->allocated_buffers))) {
563 + buffer = rb_entry(n, struct binder_buffer, rb_node);
564 +
565 +@@ -886,7 +912,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
566 + */
567 + void binder_alloc_vma_close(struct binder_alloc *alloc)
568 + {
569 +- WRITE_ONCE(alloc->vma, NULL);
570 ++ binder_alloc_set_vma(alloc, NULL);
571 + }
572 +
573 + /**
574 +@@ -921,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
575 +
576 + index = page - alloc->pages;
577 + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
578 +- vma = alloc->vma;
579 ++ vma = binder_alloc_get_vma(alloc);
580 + if (vma) {
581 + if (!mmget_not_zero(alloc->vma_vm_mm))
582 + goto err_mmget;
583 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
584 +index bc562fd2b0a0..cda9a0b5bdaa 100644
585 +--- a/drivers/ata/libahci.c
586 ++++ b/drivers/ata/libahci.c
587 +@@ -2096,7 +2096,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
588 + struct ahci_host_priv *hpriv = ap->host->private_data;
589 + void __iomem *port_mmio = ahci_port_base(ap);
590 + struct ata_device *dev = ap->link.device;
591 +- u32 devslp, dm, dito, mdat, deto;
592 ++ u32 devslp, dm, dito, mdat, deto, dito_conf;
593 + int rc;
594 + unsigned int err_mask;
595 +
596 +@@ -2120,8 +2120,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
597 + return;
598 + }
599 +
600 +- /* device sleep was already enabled */
601 +- if (devslp & PORT_DEVSLP_ADSE)
602 ++ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
603 ++ dito = devslp_idle_timeout / (dm + 1);
604 ++ if (dito > 0x3ff)
605 ++ dito = 0x3ff;
606 ++
607 ++ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
608 ++
609 ++ /* device sleep was already enabled and same dito */
610 ++ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
611 + return;
612 +
613 + /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
614 +@@ -2129,11 +2136,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
615 + if (rc)
616 + return;
617 +
618 +- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
619 +- dito = devslp_idle_timeout / (dm + 1);
620 +- if (dito > 0x3ff)
621 +- dito = 0x3ff;
622 +-
623 + /* Use the nominal value 10 ms if the read MDAT is zero,
624 + * the nominal value of DETO is 20 ms.
625 + */
626 +@@ -2151,6 +2153,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
627 + deto = 20;
628 + }
629 +
630 ++	/* Clear the dito, mdat and deto bits */
631 ++ devslp &= ~GENMASK_ULL(24, 2);
632 + devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
633 + (mdat << PORT_DEVSLP_MDAT_OFFSET) |
634 + (deto << PORT_DEVSLP_DETO_OFFSET) |
635 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
636 +index 5e55d03d3d01..fe1414df0f33 100644
637 +--- a/drivers/block/nbd.c
638 ++++ b/drivers/block/nbd.c
639 +@@ -1228,6 +1228,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
640 + case NBD_SET_SOCK:
641 + return nbd_add_socket(nbd, arg, false);
642 + case NBD_SET_BLKSIZE:
643 ++ if (!arg || !is_power_of_2(arg) || arg < 512 ||
644 ++ arg > PAGE_SIZE)
645 ++ return -EINVAL;
646 + nbd_size_set(nbd, arg,
647 + div_s64(config->bytesize, arg));
648 + return 0;
649 +diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
650 +index 531a0915066b..11ec92e47455 100644
651 +--- a/drivers/block/pktcdvd.c
652 ++++ b/drivers/block/pktcdvd.c
653 +@@ -67,7 +67,7 @@
654 + #include <scsi/scsi.h>
655 + #include <linux/debugfs.h>
656 + #include <linux/device.h>
657 +-
658 ++#include <linux/nospec.h>
659 + #include <linux/uaccess.h>
660 +
661 + #define DRIVER_NAME "pktcdvd"
662 +@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
663 + {
664 + if (dev_minor >= MAX_WRITERS)
665 + return NULL;
666 ++
667 ++ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
668 + return pkt_devs[dev_minor];
669 + }
670 +
671 +diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
672 +index b33c8d6eb8c7..500d4d632e48 100644
673 +--- a/drivers/bluetooth/Kconfig
674 ++++ b/drivers/bluetooth/Kconfig
675 +@@ -146,6 +146,7 @@ config BT_HCIUART_LL
676 + config BT_HCIUART_3WIRE
677 + bool "Three-wire UART (H5) protocol support"
678 + depends on BT_HCIUART
679 ++ depends on BT_HCIUART_SERDEV
680 + help
681 + The HCI Three-wire UART Transport Layer makes it possible to
682 +	  use the Bluetooth HCI over a serial port interface. The HCI
683 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
684 +index 86b526b7d990..a2070ab86c82 100644
685 +--- a/drivers/char/tpm/tpm-interface.c
686 ++++ b/drivers/char/tpm/tpm-interface.c
687 +@@ -369,10 +369,13 @@ err_len:
688 + return -EINVAL;
689 + }
690 +
691 +-static int tpm_request_locality(struct tpm_chip *chip)
692 ++static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
693 + {
694 + int rc;
695 +
696 ++ if (flags & TPM_TRANSMIT_RAW)
697 ++ return 0;
698 ++
699 + if (!chip->ops->request_locality)
700 + return 0;
701 +
702 +@@ -385,10 +388,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
703 + return 0;
704 + }
705 +
706 +-static void tpm_relinquish_locality(struct tpm_chip *chip)
707 ++static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
708 + {
709 + int rc;
710 +
711 ++ if (flags & TPM_TRANSMIT_RAW)
712 ++ return;
713 ++
714 + if (!chip->ops->relinquish_locality)
715 + return;
716 +
717 +@@ -399,6 +405,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
718 + chip->locality = -1;
719 + }
720 +
721 ++static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
722 ++{
723 ++ if (flags & TPM_TRANSMIT_RAW)
724 ++ return 0;
725 ++
726 ++ if (!chip->ops->cmd_ready)
727 ++ return 0;
728 ++
729 ++ return chip->ops->cmd_ready(chip);
730 ++}
731 ++
732 ++static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
733 ++{
734 ++ if (flags & TPM_TRANSMIT_RAW)
735 ++ return 0;
736 ++
737 ++ if (!chip->ops->go_idle)
738 ++ return 0;
739 ++
740 ++ return chip->ops->go_idle(chip);
741 ++}
742 ++
743 + static ssize_t tpm_try_transmit(struct tpm_chip *chip,
744 + struct tpm_space *space,
745 + u8 *buf, size_t bufsiz,
746 +@@ -449,14 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
747 + /* Store the decision as chip->locality will be changed. */
748 + need_locality = chip->locality == -1;
749 +
750 +- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
751 +- rc = tpm_request_locality(chip);
752 ++ if (need_locality) {
753 ++ rc = tpm_request_locality(chip, flags);
754 + if (rc < 0)
755 + goto out_no_locality;
756 + }
757 +
758 +- if (chip->dev.parent)
759 +- pm_runtime_get_sync(chip->dev.parent);
760 ++ rc = tpm_cmd_ready(chip, flags);
761 ++ if (rc)
762 ++ goto out;
763 +
764 + rc = tpm2_prepare_space(chip, space, ordinal, buf);
765 + if (rc)
766 +@@ -516,13 +545,16 @@ out_recv:
767 + }
768 +
769 + rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
770 ++ if (rc)
771 ++ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
772 +
773 + out:
774 +- if (chip->dev.parent)
775 +- pm_runtime_put_sync(chip->dev.parent);
776 ++ rc = tpm_go_idle(chip, flags);
777 ++ if (rc)
778 ++ goto out;
779 +
780 + if (need_locality)
781 +- tpm_relinquish_locality(chip);
782 ++ tpm_relinquish_locality(chip, flags);
783 +
784 + out_no_locality:
785 + if (chip->ops->clk_enable != NULL)
786 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
787 +index b83b30a3eea5..4bb9b4aa9b49 100644
788 +--- a/drivers/char/tpm/tpm.h
789 ++++ b/drivers/char/tpm/tpm.h
790 +@@ -511,9 +511,17 @@ extern const struct file_operations tpm_fops;
791 + extern const struct file_operations tpmrm_fops;
792 + extern struct idr dev_nums_idr;
793 +
794 ++/**
795 ++ * enum tpm_transmit_flags
796 ++ *
797 ++ * @TPM_TRANSMIT_UNLOCKED: used to lock sequence of tpm_transmit calls.
798 ++ * @TPM_TRANSMIT_RAW: prevent recursive calls into setup steps
799 ++ * (go idle, locality,..). Always use with UNLOCKED
800 ++ * as it will fail on double locking.
801 ++ */
802 + enum tpm_transmit_flags {
803 +- TPM_TRANSMIT_UNLOCKED = BIT(0),
804 +- TPM_TRANSMIT_RAW = BIT(1),
805 ++ TPM_TRANSMIT_UNLOCKED = BIT(0),
806 ++ TPM_TRANSMIT_RAW = BIT(1),
807 + };
808 +
809 + ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
810 +diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
811 +index d26ea7513226..dabb2ae4e779 100644
812 +--- a/drivers/char/tpm/tpm2-space.c
813 ++++ b/drivers/char/tpm/tpm2-space.c
814 +@@ -39,7 +39,8 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
815 + for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
816 + if (space->session_tbl[i])
817 + tpm2_flush_context_cmd(chip, space->session_tbl[i],
818 +- TPM_TRANSMIT_UNLOCKED);
819 ++ TPM_TRANSMIT_UNLOCKED |
820 ++ TPM_TRANSMIT_RAW);
821 + }
822 + }
823 +
824 +@@ -84,7 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
825 + tpm_buf_append(&tbuf, &buf[*offset], body_size);
826 +
827 + rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
828 +- TPM_TRANSMIT_UNLOCKED, NULL);
829 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
830 + if (rc < 0) {
831 + dev_warn(&chip->dev, "%s: failed with a system error %d\n",
832 + __func__, rc);
833 +@@ -133,7 +134,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
834 + tpm_buf_append_u32(&tbuf, handle);
835 +
836 + rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
837 +- TPM_TRANSMIT_UNLOCKED, NULL);
838 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
839 + if (rc < 0) {
840 + dev_warn(&chip->dev, "%s: failed with a system error %d\n",
841 + __func__, rc);
842 +@@ -170,7 +171,8 @@ static void tpm2_flush_space(struct tpm_chip *chip)
843 + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
844 + if (space->context_tbl[i] && ~space->context_tbl[i])
845 + tpm2_flush_context_cmd(chip, space->context_tbl[i],
846 +- TPM_TRANSMIT_UNLOCKED);
847 ++ TPM_TRANSMIT_UNLOCKED |
848 ++ TPM_TRANSMIT_RAW);
849 +
850 + tpm2_flush_sessions(chip, space);
851 + }
852 +@@ -377,7 +379,8 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
853 +
854 + return 0;
855 + out_no_slots:
856 +- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
857 ++ tpm2_flush_context_cmd(chip, phandle,
858 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW);
859 + dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
860 + phandle);
861 + return -ENOMEM;
862 +@@ -465,7 +468,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
863 + return rc;
864 +
865 + tpm2_flush_context_cmd(chip, space->context_tbl[i],
866 +- TPM_TRANSMIT_UNLOCKED);
867 ++ TPM_TRANSMIT_UNLOCKED |
868 ++ TPM_TRANSMIT_RAW);
869 + space->context_tbl[i] = ~0;
870 + }
871 +
872 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
873 +index bb756ad7897e..5c7ce5aaaf6f 100644
874 +--- a/drivers/char/tpm/tpm_crb.c
875 ++++ b/drivers/char/tpm/tpm_crb.c
876 +@@ -137,7 +137,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
877 + }
878 +
879 + /**
880 +- * crb_go_idle - request tpm crb device to go the idle state
881 ++ * __crb_go_idle - request tpm crb device to go to the idle state
882 + *
883 + * @dev: crb device
884 + * @priv: crb private data
885 +@@ -151,7 +151,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
886 + *
887 + * Return: 0 always
888 + */
889 +-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
890 ++static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
891 + {
892 + if ((priv->flags & CRB_FL_ACPI_START) ||
893 + (priv->flags & CRB_FL_CRB_SMC_START))
894 +@@ -166,11 +166,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
895 + dev_warn(dev, "goIdle timed out\n");
896 + return -ETIME;
897 + }
898 ++
899 + return 0;
900 + }
901 +
902 ++static int crb_go_idle(struct tpm_chip *chip)
903 ++{
904 ++ struct device *dev = &chip->dev;
905 ++ struct crb_priv *priv = dev_get_drvdata(dev);
906 ++
907 ++ return __crb_go_idle(dev, priv);
908 ++}
909 ++
910 + /**
911 +- * crb_cmd_ready - request tpm crb device to enter ready state
912 ++ * __crb_cmd_ready - request tpm crb device to enter ready state
913 + *
914 + * @dev: crb device
915 + * @priv: crb private data
916 +@@ -183,7 +192,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
917 + *
918 + * Return: 0 on success -ETIME on timeout;
919 + */
920 +-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
921 ++static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
922 + {
923 + if ((priv->flags & CRB_FL_ACPI_START) ||
924 + (priv->flags & CRB_FL_CRB_SMC_START))
925 +@@ -201,6 +210,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
926 + return 0;
927 + }
928 +
929 ++static int crb_cmd_ready(struct tpm_chip *chip)
930 ++{
931 ++ struct device *dev = &chip->dev;
932 ++ struct crb_priv *priv = dev_get_drvdata(dev);
933 ++
934 ++ return __crb_cmd_ready(dev, priv);
935 ++}
936 ++
937 + static int __crb_request_locality(struct device *dev,
938 + struct crb_priv *priv, int loc)
939 + {
940 +@@ -393,6 +410,8 @@ static const struct tpm_class_ops tpm_crb = {
941 + .send = crb_send,
942 + .cancel = crb_cancel,
943 + .req_canceled = crb_req_canceled,
944 ++ .go_idle = crb_go_idle,
945 ++ .cmd_ready = crb_cmd_ready,
946 + .request_locality = crb_request_locality,
947 + .relinquish_locality = crb_relinquish_locality,
948 + .req_complete_mask = CRB_DRV_STS_COMPLETE,
949 +@@ -508,7 +527,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
950 + * PTT HW bug w/a: wake up the device to access
951 + * possibly not retained registers.
952 + */
953 +- ret = crb_cmd_ready(dev, priv);
954 ++ ret = __crb_cmd_ready(dev, priv);
955 + if (ret)
956 + return ret;
957 +
958 +@@ -553,7 +572,7 @@ out:
959 + if (!ret)
960 + priv->cmd_size = cmd_size;
961 +
962 +- crb_go_idle(dev, priv);
963 ++ __crb_go_idle(dev, priv);
964 +
965 + __crb_relinquish_locality(dev, priv, 0);
966 +
967 +@@ -624,32 +643,7 @@ static int crb_acpi_add(struct acpi_device *device)
968 + chip->acpi_dev_handle = device->handle;
969 + chip->flags = TPM_CHIP_FLAG_TPM2;
970 +
971 +- rc = __crb_request_locality(dev, priv, 0);
972 +- if (rc)
973 +- return rc;
974 +-
975 +- rc = crb_cmd_ready(dev, priv);
976 +- if (rc)
977 +- goto out;
978 +-
979 +- pm_runtime_get_noresume(dev);
980 +- pm_runtime_set_active(dev);
981 +- pm_runtime_enable(dev);
982 +-
983 +- rc = tpm_chip_register(chip);
984 +- if (rc) {
985 +- crb_go_idle(dev, priv);
986 +- pm_runtime_put_noidle(dev);
987 +- pm_runtime_disable(dev);
988 +- goto out;
989 +- }
990 +-
991 +- pm_runtime_put_sync(dev);
992 +-
993 +-out:
994 +- __crb_relinquish_locality(dev, priv, 0);
995 +-
996 +- return rc;
997 ++ return tpm_chip_register(chip);
998 + }
999 +
1000 + static int crb_acpi_remove(struct acpi_device *device)
1001 +@@ -659,52 +653,11 @@ static int crb_acpi_remove(struct acpi_device *device)
1002 +
1003 + tpm_chip_unregister(chip);
1004 +
1005 +- pm_runtime_disable(dev);
1006 +-
1007 + return 0;
1008 + }
1009 +
1010 +-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
1011 +-{
1012 +- struct tpm_chip *chip = dev_get_drvdata(dev);
1013 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
1014 +-
1015 +- return crb_go_idle(dev, priv);
1016 +-}
1017 +-
1018 +-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
1019 +-{
1020 +- struct tpm_chip *chip = dev_get_drvdata(dev);
1021 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
1022 +-
1023 +- return crb_cmd_ready(dev, priv);
1024 +-}
1025 +-
1026 +-static int __maybe_unused crb_pm_suspend(struct device *dev)
1027 +-{
1028 +- int ret;
1029 +-
1030 +- ret = tpm_pm_suspend(dev);
1031 +- if (ret)
1032 +- return ret;
1033 +-
1034 +- return crb_pm_runtime_suspend(dev);
1035 +-}
1036 +-
1037 +-static int __maybe_unused crb_pm_resume(struct device *dev)
1038 +-{
1039 +- int ret;
1040 +-
1041 +- ret = crb_pm_runtime_resume(dev);
1042 +- if (ret)
1043 +- return ret;
1044 +-
1045 +- return tpm_pm_resume(dev);
1046 +-}
1047 +-
1048 + static const struct dev_pm_ops crb_pm = {
1049 +- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
1050 +- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
1051 ++ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
1052 + };
1053 +
1054 + static const struct acpi_device_id crb_device_ids[] = {
1055 +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
1056 +index d5b44cadac56..c619e76ce827 100644
1057 +--- a/drivers/char/tpm/tpm_i2c_infineon.c
1058 ++++ b/drivers/char/tpm/tpm_i2c_infineon.c
1059 +@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1060 + /* Lock the adapter for the duration of the whole sequence. */
1061 + if (!tpm_dev.client->adapter->algo->master_xfer)
1062 + return -EOPNOTSUPP;
1063 +- i2c_lock_adapter(tpm_dev.client->adapter);
1064 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1065 +
1066 + if (tpm_dev.chip_type == SLB9645) {
1067 + /* use a combined read for newer chips
1068 +@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1069 + }
1070 +
1071 + out:
1072 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1073 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1074 + /* take care of 'guard time' */
1075 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1076 +
1077 +@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1078 +
1079 + if (!tpm_dev.client->adapter->algo->master_xfer)
1080 + return -EOPNOTSUPP;
1081 +- i2c_lock_adapter(tpm_dev.client->adapter);
1082 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1083 +
1084 + /* prepend the 'register address' to the buffer */
1085 + tpm_dev.buf[0] = addr;
1086 +@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1087 + usleep_range(sleep_low, sleep_hi);
1088 + }
1089 +
1090 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1091 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1092 + /* take care of 'guard time' */
1093 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1094 +
1095 +diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1096 +index 8ab0bd8445f6..b00388fc41c8 100644
1097 +--- a/drivers/char/tpm/tpm_tis_spi.c
1098 ++++ b/drivers/char/tpm/tpm_tis_spi.c
1099 +@@ -188,6 +188,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
1100 + static int tpm_tis_spi_probe(struct spi_device *dev)
1101 + {
1102 + struct tpm_tis_spi_phy *phy;
1103 ++ int irq;
1104 +
1105 + phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
1106 + GFP_KERNEL);
1107 +@@ -200,7 +201,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
1108 + if (!phy->iobuf)
1109 + return -ENOMEM;
1110 +
1111 +- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
1112 ++ /* If the SPI device has an IRQ then use that */
1113 ++ if (dev->irq > 0)
1114 ++ irq = dev->irq;
1115 ++ else
1116 ++ irq = -1;
1117 ++
1118 ++ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
1119 + NULL);
1120 + }
1121 +
1122 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
1123 +index e4b40f2b4627..9c0f7cf920af 100644
1124 +--- a/drivers/firmware/google/vpd.c
1125 ++++ b/drivers/firmware/google/vpd.c
1126 +@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
1127 + sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
1128 + kfree(sec->raw_name);
1129 + memunmap(sec->baseaddr);
1130 ++ sec->enabled = false;
1131 + }
1132 +
1133 + return 0;
1134 +@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
1135 + ret = vpd_section_init("rw", &rw_vpd,
1136 + physaddr + sizeof(struct vpd_cbmem) +
1137 + header.ro_size, header.rw_size);
1138 +- if (ret)
1139 ++ if (ret) {
1140 ++ vpd_section_destroy(&ro_vpd);
1141 + return ret;
1142 ++ }
1143 + }
1144 +
1145 + return 0;
1146 +diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
1147 +index 4b80e996d976..1022fe8d09c7 100644
1148 +--- a/drivers/gpio/gpio-ml-ioh.c
1149 ++++ b/drivers/gpio/gpio-ml-ioh.c
1150 +@@ -497,9 +497,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
1151 + return 0;
1152 +
1153 + err_gpiochip_add:
1154 ++ chip = chip_save;
1155 + while (--i >= 0) {
1156 +- chip--;
1157 + gpiochip_remove(&chip->gpio);
1158 ++ chip++;
1159 + }
1160 + kfree(chip_save);
1161 +
1162 +diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
1163 +index fbaf974277df..1eb857e2f62f 100644
1164 +--- a/drivers/gpio/gpio-tegra.c
1165 ++++ b/drivers/gpio/gpio-tegra.c
1166 +@@ -728,4 +728,4 @@ static int __init tegra_gpio_init(void)
1167 + {
1168 + return platform_driver_register(&tegra_gpio_driver);
1169 + }
1170 +-postcore_initcall(tegra_gpio_init);
1171 ++subsys_initcall(tegra_gpio_init);
1172 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1173 +index be813b2738c1..2e706f1abe64 100644
1174 +--- a/drivers/gpu/drm/i915/i915_reg.h
1175 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1176 +@@ -8462,6 +8462,7 @@ enum skl_power_gate {
1177 + #define TRANS_MSA_10_BPC (2<<5)
1178 + #define TRANS_MSA_12_BPC (3<<5)
1179 + #define TRANS_MSA_16_BPC (4<<5)
1180 ++#define TRANS_MSA_CEA_RANGE (1<<3)
1181 +
1182 + /* LCPLL Control */
1183 + #define LCPLL_CTL _MMIO(0x130040)
1184 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
1185 +index 5e5fe03b638c..3a4a581345c4 100644
1186 +--- a/drivers/gpu/drm/i915/intel_ddi.c
1187 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
1188 +@@ -1396,6 +1396,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
1189 + WARN_ON(transcoder_is_dsi(cpu_transcoder));
1190 +
1191 + temp = TRANS_MSA_SYNC_CLK;
1192 ++
1193 ++ if (crtc_state->limited_color_range)
1194 ++ temp |= TRANS_MSA_CEA_RANGE;
1195 ++
1196 + switch (crtc_state->pipe_bpp) {
1197 + case 18:
1198 + temp |= TRANS_MSA_6_BPC;
1199 +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
1200 +index 658fa2d3e40c..2c8411b8d050 100644
1201 +--- a/drivers/gpu/ipu-v3/ipu-common.c
1202 ++++ b/drivers/gpu/ipu-v3/ipu-common.c
1203 +@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
1204 + return -ENODEV;
1205 +
1206 + ipu->id = of_alias_get_id(np, "ipu");
1207 ++ if (ipu->id < 0)
1208 ++ ipu->id = 0;
1209 +
1210 + if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
1211 + IS_ENABLED(CONFIG_DRM)) {
1212 +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
1213 +index 8267439dd1ee..d8101cd28dfa 100644
1214 +--- a/drivers/hv/hv.c
1215 ++++ b/drivers/hv/hv.c
1216 +@@ -196,6 +196,10 @@ int hv_synic_alloc(void)
1217 +
1218 + return 0;
1219 + err:
1220 ++ /*
1221 ++ * Any memory allocations that succeeded will be freed when
1222 ++ * the caller cleans up by calling hv_synic_free()
1223 ++ */
1224 + return -ENOMEM;
1225 + }
1226 +
1227 +@@ -208,12 +212,10 @@ void hv_synic_free(void)
1228 + struct hv_per_cpu_context *hv_cpu
1229 + = per_cpu_ptr(hv_context.cpu_context, cpu);
1230 +
1231 +- if (hv_cpu->synic_event_page)
1232 +- free_page((unsigned long)hv_cpu->synic_event_page);
1233 +- if (hv_cpu->synic_message_page)
1234 +- free_page((unsigned long)hv_cpu->synic_message_page);
1235 +- if (hv_cpu->post_msg_page)
1236 +- free_page((unsigned long)hv_cpu->post_msg_page);
1237 ++ kfree(hv_cpu->clk_evt);
1238 ++ free_page((unsigned long)hv_cpu->synic_event_page);
1239 ++ free_page((unsigned long)hv_cpu->synic_message_page);
1240 ++ free_page((unsigned long)hv_cpu->post_msg_page);
1241 + }
1242 +
1243 + kfree(hv_context.hv_numa_map);
1244 +diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
1245 +index 284f8670dbeb..2feae9a421e6 100644
1246 +--- a/drivers/i2c/busses/i2c-aspeed.c
1247 ++++ b/drivers/i2c/busses/i2c-aspeed.c
1248 +@@ -859,7 +859,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
1249 + if (!match)
1250 + bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
1251 + else
1252 +- bus->get_clk_reg_val = match->data;
1253 ++ bus->get_clk_reg_val = (u32 (*)(u32))match->data;
1254 +
1255 + /* Initialize the I2C adapter */
1256 + spin_lock_init(&bus->lock);
1257 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1258 +index ba8df2fde1b2..67cbd9f61acc 100644
1259 +--- a/drivers/i2c/busses/i2c-i801.c
1260 ++++ b/drivers/i2c/busses/i2c-i801.c
1261 +@@ -138,6 +138,7 @@
1262 +
1263 + #define SBREG_BAR 0x10
1264 + #define SBREG_SMBCTRL 0xc6000c
1265 ++#define SBREG_SMBCTRL_DNV 0xcf000c
1266 +
1267 + /* Host status bits for SMBPCISTS */
1268 + #define SMBPCISTS_INTS BIT(3)
1269 +@@ -1395,7 +1396,11 @@ static void i801_add_tco(struct i801_priv *priv)
1270 + spin_unlock(&p2sb_spinlock);
1271 +
1272 + res = &tco_res[ICH_RES_MEM_OFF];
1273 +- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1274 ++ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
1275 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
1276 ++ else
1277 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1278 ++
1279 + res->end = res->start + 3;
1280 + res->flags = IORESOURCE_MEM;
1281 +
1282 +diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
1283 +index ae6ed254e01d..732d6c456a6f 100644
1284 +--- a/drivers/i2c/busses/i2c-xiic.c
1285 ++++ b/drivers/i2c/busses/i2c-xiic.c
1286 +@@ -538,6 +538,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1287 + {
1288 + u8 rx_watermark;
1289 + struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
1290 ++ unsigned long flags;
1291 +
1292 + /* Clear and enable Rx full interrupt. */
1293 + xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
1294 +@@ -553,6 +554,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1295 + rx_watermark = IIC_RX_FIFO_DEPTH;
1296 + xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
1297 +
1298 ++ local_irq_save(flags);
1299 + if (!(msg->flags & I2C_M_NOSTART))
1300 + /* write the address */
1301 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1302 +@@ -563,6 +565,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1303 +
1304 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1305 + msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
1306 ++ local_irq_restore(flags);
1307 ++
1308 + if (i2c->nmsgs == 1)
1309 + /* very last, enable bus not busy as well */
1310 + xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
1311 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1312 +index 79843a3ca9dc..752dbc388c27 100644
1313 +--- a/drivers/infiniband/core/cma.c
1314 ++++ b/drivers/infiniband/core/cma.c
1315 +@@ -1459,9 +1459,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
1316 + (addr->src_addr.ss_family == AF_IB ||
1317 + cma_protocol_roce_dev_port(id->device, port_num));
1318 +
1319 +- return !addr->dev_addr.bound_dev_if ||
1320 +- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1321 +- addr->dev_addr.bound_dev_if == net_dev->ifindex);
1322 ++ /*
1323 ++	 * Net namespaces must match, and if the listener is listening
1324 ++	 * on a specific netdevice then the netdevice must match as well.
1325 ++ */
1326 ++ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1327 ++ (!!addr->dev_addr.bound_dev_if ==
1328 ++ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1329 ++ return true;
1330 ++ else
1331 ++ return false;
1332 + }
1333 +
1334 + static struct rdma_id_private *cma_find_listener(
1335 +diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1336 +index fc149ea64be7..59aaac43db91 100644
1337 +--- a/drivers/input/touchscreen/atmel_mxt_ts.c
1338 ++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1339 +@@ -1647,10 +1647,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
1340 + break;
1341 + case MXT_TOUCH_MULTI_T9:
1342 + data->multitouch = MXT_TOUCH_MULTI_T9;
1343 ++ /* Only handle messages from first T9 instance */
1344 + data->T9_reportid_min = min_id;
1345 +- data->T9_reportid_max = max_id;
1346 +- data->num_touchids = object->num_report_ids
1347 +- * mxt_obj_instances(object);
1348 ++ data->T9_reportid_max = min_id +
1349 ++ object->num_report_ids - 1;
1350 ++ data->num_touchids = object->num_report_ids;
1351 + break;
1352 + case MXT_SPT_MESSAGECOUNT_T44:
1353 + data->T44_address = object->start_address;
1354 +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
1355 +index 195d6e93ac71..5d0ba5f644c4 100644
1356 +--- a/drivers/iommu/ipmmu-vmsa.c
1357 ++++ b/drivers/iommu/ipmmu-vmsa.c
1358 +@@ -54,7 +54,7 @@ struct ipmmu_vmsa_domain {
1359 + struct io_pgtable_ops *iop;
1360 +
1361 + unsigned int context_id;
1362 +- spinlock_t lock; /* Protects mappings */
1363 ++ struct mutex mutex; /* Protects mappings */
1364 + };
1365 +
1366 + struct ipmmu_vmsa_iommu_priv {
1367 +@@ -523,7 +523,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
1368 + if (!domain)
1369 + return NULL;
1370 +
1371 +- spin_lock_init(&domain->lock);
1372 ++ mutex_init(&domain->mutex);
1373 +
1374 + return &domain->io_domain;
1375 + }
1376 +@@ -548,7 +548,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
1377 + struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1378 + struct ipmmu_vmsa_device *mmu = priv->mmu;
1379 + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
1380 +- unsigned long flags;
1381 + unsigned int i;
1382 + int ret = 0;
1383 +
1384 +@@ -557,7 +556,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
1385 + return -ENXIO;
1386 + }
1387 +
1388 +- spin_lock_irqsave(&domain->lock, flags);
1389 ++ mutex_lock(&domain->mutex);
1390 +
1391 + if (!domain->mmu) {
1392 + /* The domain hasn't been used yet, initialize it. */
1393 +@@ -574,7 +573,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
1394 + } else
1395 + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
1396 +
1397 +- spin_unlock_irqrestore(&domain->lock, flags);
1398 ++ mutex_unlock(&domain->mutex);
1399 +
1400 + if (ret < 0)
1401 + return ret;
1402 +diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
1403 +index c4c2b3b85ebc..f6e040fcad9a 100644
1404 +--- a/drivers/macintosh/via-pmu.c
1405 ++++ b/drivers/macintosh/via-pmu.c
1406 +@@ -532,8 +532,9 @@ init_pmu(void)
1407 + int timeout;
1408 + struct adb_request req;
1409 +
1410 +- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
1411 +- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
1412 ++ /* Negate TREQ. Set TACK to input and TREQ to output. */
1413 ++ out_8(&via[B], in_8(&via[B]) | TREQ);
1414 ++ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
1415 +
1416 + pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
1417 + timeout = 100000;
1418 +@@ -1455,8 +1456,8 @@ pmu_sr_intr(void)
1419 + struct adb_request *req;
1420 + int bite = 0;
1421 +
1422 +- if (via[B] & TREQ) {
1423 +- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
1424 ++ if (in_8(&via[B]) & TREQ) {
1425 ++ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
1426 + out_8(&via[IFR], SR_INT);
1427 + return NULL;
1428 + }
1429 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
1430 +index 71c3507df9a0..a4b7c2698096 100644
1431 +--- a/drivers/md/dm-cache-target.c
1432 ++++ b/drivers/md/dm-cache-target.c
1433 +@@ -2330,7 +2330,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1434 + {0, 2, "Invalid number of cache feature arguments"},
1435 + };
1436 +
1437 +- int r;
1438 ++ int r, mode_ctr = 0;
1439 + unsigned argc;
1440 + const char *arg;
1441 + struct cache_features *cf = &ca->features;
1442 +@@ -2344,14 +2344,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1443 + while (argc--) {
1444 + arg = dm_shift_arg(as);
1445 +
1446 +- if (!strcasecmp(arg, "writeback"))
1447 ++ if (!strcasecmp(arg, "writeback")) {
1448 + cf->io_mode = CM_IO_WRITEBACK;
1449 ++ mode_ctr++;
1450 ++ }
1451 +
1452 +- else if (!strcasecmp(arg, "writethrough"))
1453 ++ else if (!strcasecmp(arg, "writethrough")) {
1454 + cf->io_mode = CM_IO_WRITETHROUGH;
1455 ++ mode_ctr++;
1456 ++ }
1457 +
1458 +- else if (!strcasecmp(arg, "passthrough"))
1459 ++ else if (!strcasecmp(arg, "passthrough")) {
1460 + cf->io_mode = CM_IO_PASSTHROUGH;
1461 ++ mode_ctr++;
1462 ++ }
1463 +
1464 + else if (!strcasecmp(arg, "metadata2"))
1465 + cf->metadata_version = 2;
1466 +@@ -2362,6 +2368,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1467 + }
1468 + }
1469 +
1470 ++ if (mode_ctr > 1) {
1471 ++ *error = "Duplicate cache io_mode features requested";
1472 ++ return -EINVAL;
1473 ++ }
1474 ++
1475 + return 0;
1476 + }
1477 +
1478 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1479 +index 07ca2fd10189..5018fb2352c2 100644
1480 +--- a/drivers/md/raid5.c
1481 ++++ b/drivers/md/raid5.c
1482 +@@ -4516,6 +4516,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
1483 + s->failed++;
1484 + if (rdev && !test_bit(Faulty, &rdev->flags))
1485 + do_recovery = 1;
1486 ++ else if (!rdev) {
1487 ++ rdev = rcu_dereference(
1488 ++ conf->disks[i].replacement);
1489 ++ if (rdev && !test_bit(Faulty, &rdev->flags))
1490 ++ do_recovery = 1;
1491 ++ }
1492 + }
1493 +
1494 + if (test_bit(R5_InJournal, &dev->flags))
1495 +diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
1496 +index 2ab8d83e5576..fcfe658a4328 100644
1497 +--- a/drivers/media/dvb-frontends/helene.c
1498 ++++ b/drivers/media/dvb-frontends/helene.c
1499 +@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
1500 + helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
1501 +
1502 + /* 0x81 - 0x94 */
1503 +- data[0] = 0x18; /* xtal 24 MHz */
1504 ++ if (priv->xtal == SONY_HELENE_XTAL_16000)
1505 ++ data[0] = 0x10; /* xtal 16 MHz */
1506 ++ else
1507 ++ data[0] = 0x18; /* xtal 24 MHz */
1508 + data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
1509 + data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
1510 + data[3] = 0x80; /* REFOUT signal output 500mVpp */
1511 +diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
1512 +index 56fe4e5b396e..4a65861433d6 100644
1513 +--- a/drivers/media/platform/davinci/vpif_display.c
1514 ++++ b/drivers/media/platform/davinci/vpif_display.c
1515 +@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
1516 + return err;
1517 + }
1518 +
1519 ++static void free_vpif_objs(void)
1520 ++{
1521 ++ int i;
1522 ++
1523 ++ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
1524 ++ kfree(vpif_obj.dev[i]);
1525 ++}
1526 ++
1527 + static int vpif_async_bound(struct v4l2_async_notifier *notifier,
1528 + struct v4l2_subdev *subdev,
1529 + struct v4l2_async_subdev *asd)
1530 +@@ -1250,11 +1258,6 @@ static __init int vpif_probe(struct platform_device *pdev)
1531 + return -EINVAL;
1532 + }
1533 +
1534 +- if (!pdev->dev.platform_data) {
1535 +- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
1536 +- return -EINVAL;
1537 +- }
1538 +-
1539 + vpif_dev = &pdev->dev;
1540 + err = initialize_vpif();
1541 +
1542 +@@ -1266,7 +1269,7 @@ static __init int vpif_probe(struct platform_device *pdev)
1543 + err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
1544 + if (err) {
1545 + v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
1546 +- return err;
1547 ++ goto vpif_free;
1548 + }
1549 +
1550 + while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
1551 +@@ -1309,7 +1312,10 @@ static __init int vpif_probe(struct platform_device *pdev)
1552 + if (vpif_obj.sd[i])
1553 + vpif_obj.sd[i]->grp_id = 1 << i;
1554 + }
1555 +- vpif_probe_complete();
1556 ++ err = vpif_probe_complete();
1557 ++ if (err) {
1558 ++ goto probe_subdev_out;
1559 ++ }
1560 + } else {
1561 + vpif_obj.notifier.subdevs = vpif_obj.config->asd;
1562 + vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
1563 +@@ -1330,6 +1336,8 @@ probe_subdev_out:
1564 + kfree(vpif_obj.sd);
1565 + vpif_unregister:
1566 + v4l2_device_unregister(&vpif_obj.v4l2_dev);
1567 ++vpif_free:
1568 ++ free_vpif_objs();
1569 +
1570 + return err;
1571 + }
1572 +@@ -1351,8 +1359,8 @@ static int vpif_remove(struct platform_device *device)
1573 + ch = vpif_obj.dev[i];
1574 + /* Unregister video device */
1575 + video_unregister_device(&ch->video_dev);
1576 +- kfree(vpif_obj.dev[i]);
1577 + }
1578 ++ free_vpif_objs();
1579 +
1580 + return 0;
1581 + }
1582 +diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
1583 +index 64df82817de3..4882ee25bd75 100644
1584 +--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
1585 ++++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
1586 +@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1587 + !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
1588 + return -ENOLINK;
1589 +
1590 +- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
1591 +- data_type;
1592 +-
1593 + if (tg->enabled) {
1594 + /* Config Test Generator */
1595 + struct v4l2_mbus_framefmt *f =
1596 +@@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1597 + writel_relaxed(val, csid->base +
1598 + CAMSS_CSID_TG_DT_n_CGG_0(0));
1599 +
1600 ++ dt = csid_get_fmt_entry(
1601 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
1602 ++
1603 + /* 5:0 data type */
1604 + val = dt;
1605 + writel_relaxed(val, csid->base +
1606 +@@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1607 + val = tg->payload_mode;
1608 + writel_relaxed(val, csid->base +
1609 + CAMSS_CSID_TG_DT_n_CGG_2(0));
1610 ++
1611 ++ df = csid_get_fmt_entry(
1612 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
1613 + } else {
1614 + struct csid_phy_config *phy = &csid->phy;
1615 +
1616 +@@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1617 +
1618 + writel_relaxed(val,
1619 + csid->base + CAMSS_CSID_CORE_CTRL_1);
1620 ++
1621 ++ dt = csid_get_fmt_entry(
1622 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
1623 ++ df = csid_get_fmt_entry(
1624 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
1625 + }
1626 +
1627 + /* Config LUT */
1628 +
1629 + dt_shift = (cid % 4) * 8;
1630 +- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
1631 +- decode_format;
1632 +
1633 + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
1634 + val &= ~(0xff << dt_shift);
1635 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1636 +index 8e9531f7f83f..9942932ecbf9 100644
1637 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
1638 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1639 +@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
1640 + static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
1641 + {
1642 + struct s5p_mfc_dev *dev = ctx->dev;
1643 +- struct s5p_mfc_buf *dst_buf, *src_buf;
1644 +- size_t dec_y_addr;
1645 ++ struct s5p_mfc_buf *dst_buf, *src_buf;
1646 ++ u32 dec_y_addr;
1647 + unsigned int frame_type;
1648 +
1649 + /* Make sure we actually have a new frame before continuing. */
1650 + frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
1651 + if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
1652 + return;
1653 +- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
1654 ++ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
1655 +
1656 + /* Copy timestamp / timecode from decoded src to dst and set
1657 + appropriate flags. */
1658 + src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1659 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
1660 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
1661 +- == dec_y_addr) {
1662 +- dst_buf->b->timecode =
1663 +- src_buf->b->timecode;
1664 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
1665 ++
1666 ++ if (addr == dec_y_addr) {
1667 ++ dst_buf->b->timecode = src_buf->b->timecode;
1668 + dst_buf->b->vb2_buf.timestamp =
1669 + src_buf->b->vb2_buf.timestamp;
1670 + dst_buf->b->flags &=
1671 +@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
1672 + {
1673 + struct s5p_mfc_dev *dev = ctx->dev;
1674 + struct s5p_mfc_buf *dst_buf;
1675 +- size_t dspl_y_addr;
1676 ++ u32 dspl_y_addr;
1677 + unsigned int frame_type;
1678 +
1679 +- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
1680 ++ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
1681 + if (IS_MFCV6_PLUS(dev))
1682 + frame_type = s5p_mfc_hw_call(dev->mfc_ops,
1683 + get_disp_frame_type, ctx);
1684 +@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
1685 + /* The MFC returns address of the buffer, now we have to
1686 + * check which videobuf does it correspond to */
1687 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
1688 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
1689 ++
1690 + /* Check if this is the buffer we're looking for */
1691 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
1692 +- == dspl_y_addr) {
1693 ++ if (addr == dspl_y_addr) {
1694 + list_del(&dst_buf->list);
1695 + ctx->dst_queue_cnt--;
1696 + dst_buf->b->sequence = ctx->sequence;
1697 +diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
1698 +index b421329b21fa..3d09e1c87921 100644
1699 +--- a/drivers/media/usb/dvb-usb/dw2102.c
1700 ++++ b/drivers/media/usb/dvb-usb/dw2102.c
1701 +@@ -2103,14 +2103,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
1702 + }
1703 + };
1704 +
1705 +-static struct dvb_usb_device_properties *p1100;
1706 + static const struct dvb_usb_device_description d1100 = {
1707 + "Prof 1100 USB ",
1708 + {&dw2102_table[PROF_1100], NULL},
1709 + {NULL},
1710 + };
1711 +
1712 +-static struct dvb_usb_device_properties *s660;
1713 + static const struct dvb_usb_device_description d660 = {
1714 + "TeVii S660 USB",
1715 + {&dw2102_table[TEVII_S660], NULL},
1716 +@@ -2129,14 +2127,12 @@ static const struct dvb_usb_device_description d480_2 = {
1717 + {NULL},
1718 + };
1719 +
1720 +-static struct dvb_usb_device_properties *p7500;
1721 + static const struct dvb_usb_device_description d7500 = {
1722 + "Prof 7500 USB DVB-S2",
1723 + {&dw2102_table[PROF_7500], NULL},
1724 + {NULL},
1725 + };
1726 +
1727 +-static struct dvb_usb_device_properties *s421;
1728 + static const struct dvb_usb_device_description d421 = {
1729 + "TeVii S421 PCI",
1730 + {&dw2102_table[TEVII_S421], NULL},
1731 +@@ -2336,6 +2332,11 @@ static int dw2102_probe(struct usb_interface *intf,
1732 + const struct usb_device_id *id)
1733 + {
1734 + int retval = -ENOMEM;
1735 ++ struct dvb_usb_device_properties *p1100;
1736 ++ struct dvb_usb_device_properties *s660;
1737 ++ struct dvb_usb_device_properties *p7500;
1738 ++ struct dvb_usb_device_properties *s421;
1739 ++
1740 + p1100 = kmemdup(&s6x0_properties,
1741 + sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
1742 + if (!p1100)
1743 +@@ -2404,8 +2405,16 @@ static int dw2102_probe(struct usb_interface *intf,
1744 + 0 == dvb_usb_device_init(intf, &t220_properties,
1745 + THIS_MODULE, NULL, adapter_nr) ||
1746 + 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
1747 +- THIS_MODULE, NULL, adapter_nr))
1748 ++ THIS_MODULE, NULL, adapter_nr)) {
1749 ++
1750 ++ /* clean up copied properties */
1751 ++ kfree(s421);
1752 ++ kfree(p7500);
1753 ++ kfree(s660);
1754 ++ kfree(p1100);
1755 ++
1756 + return 0;
1757 ++ }
1758 +
1759 + retval = -ENODEV;
1760 + kfree(s421);
1761 +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1762 +index 0f3fab47fe48..7dc1cbcd2fb8 100644
1763 +--- a/drivers/mfd/ti_am335x_tscadc.c
1764 ++++ b/drivers/mfd/ti_am335x_tscadc.c
1765 +@@ -210,14 +210,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1766 + * The TSC_ADC_SS controller design assumes the OCP clock is
1767 + * at least 6x faster than the ADC clock.
1768 + */
1769 +- clk = clk_get(&pdev->dev, "adc_tsc_fck");
1770 ++ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
1771 + if (IS_ERR(clk)) {
1772 + dev_err(&pdev->dev, "failed to get TSC fck\n");
1773 + err = PTR_ERR(clk);
1774 + goto err_disable_clk;
1775 + }
1776 + clock_rate = clk_get_rate(clk);
1777 +- clk_put(clk);
1778 + tscadc->clk_div = clock_rate / ADC_CLK;
1779 +
1780 + /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
1781 +diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
1782 +index ddc9e4b08b5c..56efa9d18a9a 100644
1783 +--- a/drivers/misc/mic/scif/scif_api.c
1784 ++++ b/drivers/misc/mic/scif/scif_api.c
1785 +@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
1786 + goto scif_bind_exit;
1787 + }
1788 + } else {
1789 +- pn = scif_get_new_port();
1790 +- if (!pn) {
1791 +- ret = -ENOSPC;
1792 ++ ret = scif_get_new_port();
1793 ++ if (ret < 0)
1794 + goto scif_bind_exit;
1795 +- }
1796 ++ pn = ret;
1797 + }
1798 +
1799 + ep->state = SCIFEP_BOUND;
1800 +@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
1801 + err = -EISCONN;
1802 + break;
1803 + case SCIFEP_UNBOUND:
1804 +- ep->port.port = scif_get_new_port();
1805 +- if (!ep->port.port) {
1806 +- err = -ENOSPC;
1807 +- } else {
1808 +- ep->port.node = scif_info.nodeid;
1809 +- ep->conn_async_state = ASYNC_CONN_IDLE;
1810 +- }
1811 ++ err = scif_get_new_port();
1812 ++ if (err < 0)
1813 ++ break;
1814 ++ ep->port.port = err;
1815 ++ ep->port.node = scif_info.nodeid;
1816 ++ ep->conn_async_state = ASYNC_CONN_IDLE;
1817 + /* Fall through */
1818 + case SCIFEP_BOUND:
1819 + /*
1820 +diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
1821 +index b77aacafc3fc..dda3ed72d05b 100644
1822 +--- a/drivers/misc/ti-st/st_kim.c
1823 ++++ b/drivers/misc/ti-st/st_kim.c
1824 +@@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
1825 + err = gpio_request(kim_gdata->nshutdown, "kim");
1826 + if (unlikely(err)) {
1827 + pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
1828 +- return err;
1829 ++ goto err_sysfs_group;
1830 + }
1831 +
1832 + /* Configure nShutdown GPIO as output=0 */
1833 + err = gpio_direction_output(kim_gdata->nshutdown, 0);
1834 + if (unlikely(err)) {
1835 + pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
1836 +- return err;
1837 ++ goto err_sysfs_group;
1838 + }
1839 + /* get reference of pdev for request_firmware
1840 + */
1841 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1842 +index 23a6986d512b..a8f74d9bba4f 100644
1843 +--- a/drivers/mtd/ubi/wl.c
1844 ++++ b/drivers/mtd/ubi/wl.c
1845 +@@ -1615,8 +1615,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1846 + cond_resched();
1847 +
1848 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1849 +- if (!e)
1850 ++ if (!e) {
1851 ++ err = -ENOMEM;
1852 + goto out_free;
1853 ++ }
1854 +
1855 + e->pnum = aeb->pnum;
1856 + e->ec = aeb->ec;
1857 +@@ -1635,8 +1637,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1858 + cond_resched();
1859 +
1860 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1861 +- if (!e)
1862 ++ if (!e) {
1863 ++ err = -ENOMEM;
1864 + goto out_free;
1865 ++ }
1866 +
1867 + e->pnum = aeb->pnum;
1868 + e->ec = aeb->ec;
1869 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1870 +index 3deaa3413313..074a5b79d691 100644
1871 +--- a/drivers/net/ethernet/marvell/mvneta.c
1872 ++++ b/drivers/net/ethernet/marvell/mvneta.c
1873 +@@ -3195,7 +3195,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
1874 +
1875 + on_each_cpu(mvneta_percpu_enable, pp, true);
1876 + mvneta_start_dev(pp);
1877 +- mvneta_port_up(pp);
1878 +
1879 + netdev_update_features(dev);
1880 +
1881 +diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
1882 +index 0c5b68e7da51..9b3167054843 100644
1883 +--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
1884 ++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
1885 +@@ -22,7 +22,7 @@
1886 + #include <linux/mdio-mux.h>
1887 + #include <linux/delay.h>
1888 +
1889 +-#define MDIO_PARAM_OFFSET 0x00
1890 ++#define MDIO_PARAM_OFFSET 0x23c
1891 + #define MDIO_PARAM_MIIM_CYCLE 29
1892 + #define MDIO_PARAM_INTERNAL_SEL 25
1893 + #define MDIO_PARAM_BUS_ID 22
1894 +@@ -30,20 +30,22 @@
1895 + #define MDIO_PARAM_PHY_ID 16
1896 + #define MDIO_PARAM_PHY_DATA 0
1897 +
1898 +-#define MDIO_READ_OFFSET 0x04
1899 ++#define MDIO_READ_OFFSET 0x240
1900 + #define MDIO_READ_DATA_MASK 0xffff
1901 +-#define MDIO_ADDR_OFFSET 0x08
1902 ++#define MDIO_ADDR_OFFSET 0x244
1903 +
1904 +-#define MDIO_CTRL_OFFSET 0x0C
1905 ++#define MDIO_CTRL_OFFSET 0x248
1906 + #define MDIO_CTRL_WRITE_OP 0x1
1907 + #define MDIO_CTRL_READ_OP 0x2
1908 +
1909 +-#define MDIO_STAT_OFFSET 0x10
1910 ++#define MDIO_STAT_OFFSET 0x24c
1911 + #define MDIO_STAT_DONE 1
1912 +
1913 + #define BUS_MAX_ADDR 32
1914 + #define EXT_BUS_START_ADDR 16
1915 +
1916 ++#define MDIO_REG_ADDR_SPACE_SIZE 0x250
1917 ++
1918 + struct iproc_mdiomux_desc {
1919 + void *mux_handle;
1920 + void __iomem *base;
1921 +@@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
1922 + md->dev = &pdev->dev;
1923 +
1924 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1925 ++ if (res->start & 0xfff) {
1926 ++ /* For backward compatibility in case the
1927 ++ * base address is specified with an offset.
1928 ++ */
1929 ++ dev_info(&pdev->dev, "fix base address in dt-blob\n");
1930 ++ res->start &= ~0xfff;
1931 ++ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
1932 ++ }
1933 + md->base = devm_ioremap_resource(&pdev->dev, res);
1934 + if (IS_ERR(md->base)) {
1935 + dev_err(&pdev->dev, "failed to ioremap register\n");
1936 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1937 +index cb17ffadfc30..e0baea2dfd3c 100644
1938 +--- a/drivers/net/tun.c
1939 ++++ b/drivers/net/tun.c
1940 +@@ -534,14 +534,6 @@ static void tun_queue_purge(struct tun_file *tfile)
1941 + skb_queue_purge(&tfile->sk.sk_error_queue);
1942 + }
1943 +
1944 +-static void tun_cleanup_tx_array(struct tun_file *tfile)
1945 +-{
1946 +- if (tfile->tx_array.ring.queue) {
1947 +- skb_array_cleanup(&tfile->tx_array);
1948 +- memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
1949 +- }
1950 +-}
1951 +-
1952 + static void __tun_detach(struct tun_file *tfile, bool clean)
1953 + {
1954 + struct tun_file *ntfile;
1955 +@@ -583,7 +575,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
1956 + tun->dev->reg_state == NETREG_REGISTERED)
1957 + unregister_netdevice(tun->dev);
1958 + }
1959 +- tun_cleanup_tx_array(tfile);
1960 ++ skb_array_cleanup(&tfile->tx_array);
1961 + sock_put(&tfile->sk);
1962 + }
1963 + }
1964 +@@ -623,13 +615,11 @@ static void tun_detach_all(struct net_device *dev)
1965 + /* Drop read queue */
1966 + tun_queue_purge(tfile);
1967 + sock_put(&tfile->sk);
1968 +- tun_cleanup_tx_array(tfile);
1969 + }
1970 + list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
1971 + tun_enable_queue(tfile);
1972 + tun_queue_purge(tfile);
1973 + sock_put(&tfile->sk);
1974 +- tun_cleanup_tx_array(tfile);
1975 + }
1976 + BUG_ON(tun->numdisabled != 0);
1977 +
1978 +@@ -675,7 +665,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
1979 + }
1980 +
1981 + if (!tfile->detached &&
1982 +- skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
1983 ++ skb_array_resize(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
1984 + err = -ENOMEM;
1985 + goto out;
1986 + }
1987 +@@ -2624,6 +2614,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1988 + &tun_proto, 0);
1989 + if (!tfile)
1990 + return -ENOMEM;
1991 ++ if (skb_array_init(&tfile->tx_array, 0, GFP_KERNEL)) {
1992 ++ sk_free(&tfile->sk);
1993 ++ return -ENOMEM;
1994 ++ }
1995 ++
1996 + RCU_INIT_POINTER(tfile->tun, NULL);
1997 + tfile->flags = 0;
1998 + tfile->ifindex = 0;
1999 +@@ -2644,8 +2639,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2000 +
2001 + sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2002 +
2003 +- memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
2004 +-
2005 + return 0;
2006 + }
2007 +
2008 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2009 +index 52ebed1f55a1..6fa9c223ff93 100644
2010 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2011 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2012 +@@ -3074,6 +3074,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2013 + passive = channel->flags & IEEE80211_CHAN_NO_IR;
2014 + ch->passive = passive;
2015 +
2016 ++ /* the firmware is ignoring the "radar" flag of the
2017 ++ * channel and is scanning actively using Probe Requests
2018 ++ * on "Radar detection"/DFS channels which are not
2019 ++ * marked as "available"
2020 ++ */
2021 ++ ch->passive |= ch->chan_radar;
2022 ++
2023 + ch->freq = channel->center_freq;
2024 + ch->band_center_freq1 = channel->center_freq;
2025 + ch->min_power = 0;
2026 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2027 +index 7616c1c4bbd3..baec856af90f 100644
2028 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2029 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2030 +@@ -1451,6 +1451,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
2031 + cfg->keep_alive_pattern_size = __cpu_to_le32(0);
2032 + cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
2033 + cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
2034 ++ cfg->wmi_send_separate = __cpu_to_le32(0);
2035 ++ cfg->num_ocb_vdevs = __cpu_to_le32(0);
2036 ++ cfg->num_ocb_channels = __cpu_to_le32(0);
2037 ++ cfg->num_ocb_schedules = __cpu_to_le32(0);
2038 ++ cfg->host_capab = __cpu_to_le32(0);
2039 +
2040 + ath10k_wmi_put_host_mem_chunks(ar, chunks);
2041 +
2042 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2043 +index 22cf011e839a..e75bba0bbf67 100644
2044 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2045 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2046 +@@ -1228,6 +1228,11 @@ struct wmi_tlv_resource_config {
2047 + __le32 keep_alive_pattern_size;
2048 + __le32 max_tdls_concurrent_sleep_sta;
2049 + __le32 max_tdls_concurrent_buffer_sta;
2050 ++ __le32 wmi_send_separate;
2051 ++ __le32 num_ocb_vdevs;
2052 ++ __le32 num_ocb_channels;
2053 ++ __le32 num_ocb_schedules;
2054 ++ __le32 host_capab;
2055 + } __packed;
2056 +
2057 + struct wmi_tlv_init_cmd {
2058 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2059 +index 8c5c2dd8fa7f..a7f506eb7b36 100644
2060 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2061 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2062 +@@ -2915,16 +2915,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2063 + struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2064 + struct ieee80211_channel *channel;
2065 + int chan_pwr, new_pwr;
2066 ++ u16 ctl = NO_CTL;
2067 +
2068 + if (!chan)
2069 + return;
2070 +
2071 ++ if (!test)
2072 ++ ctl = ath9k_regd_get_ctl(reg, chan);
2073 ++
2074 + channel = chan->chan;
2075 + chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
2076 + new_pwr = min_t(int, chan_pwr, reg->power_limit);
2077 +
2078 +- ah->eep_ops->set_txpower(ah, chan,
2079 +- ath9k_regd_get_ctl(reg, chan),
2080 ++ ah->eep_ops->set_txpower(ah, chan, ctl,
2081 + get_antenna_gain(ah, chan), new_pwr, test);
2082 + }
2083 +
2084 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2085 +index d8b041f48ca8..fa64c1cc94ae 100644
2086 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2087 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2088 +@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
2089 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2090 + struct ieee80211_sta *sta = info->status.status_driver_data[0];
2091 +
2092 +- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
2093 ++ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
2094 ++ IEEE80211_TX_STATUS_EOSP)) {
2095 + ieee80211_tx_status(hw, skb);
2096 + return;
2097 + }
2098 +diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
2099 +index 0f15696195f8..078a4940bc5c 100644
2100 +--- a/drivers/net/wireless/ti/wlcore/rx.c
2101 ++++ b/drivers/net/wireless/ti/wlcore/rx.c
2102 +@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
2103 + static void wl1271_rx_status(struct wl1271 *wl,
2104 + struct wl1271_rx_descriptor *desc,
2105 + struct ieee80211_rx_status *status,
2106 +- u8 beacon)
2107 ++ u8 beacon, u8 probe_rsp)
2108 + {
2109 + memset(status, 0, sizeof(struct ieee80211_rx_status));
2110 +
2111 +@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
2112 + }
2113 + }
2114 +
2115 ++ if (beacon || probe_rsp)
2116 ++ status->boottime_ns = ktime_get_boot_ns();
2117 ++
2118 + if (beacon)
2119 + wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
2120 + status->band);
2121 +@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
2122 + if (ieee80211_is_data_present(hdr->frame_control))
2123 + is_data = 1;
2124 +
2125 +- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
2126 ++ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
2127 ++ ieee80211_is_probe_resp(hdr->frame_control));
2128 + wlcore_hw_set_rx_csum(wl, desc, skb);
2129 +
2130 + seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
2131 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
2132 +index af81b2dec42e..620f5b995a12 100644
2133 +--- a/drivers/pci/switch/switchtec.c
2134 ++++ b/drivers/pci/switch/switchtec.c
2135 +@@ -24,6 +24,8 @@
2136 + #include <linux/cdev.h>
2137 + #include <linux/wait.h>
2138 +
2139 ++#include <linux/nospec.h>
2140 ++
2141 + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
2142 + MODULE_VERSION("0.1");
2143 + MODULE_LICENSE("GPL");
2144 +@@ -1173,6 +1175,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
2145 + default:
2146 + if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
2147 + return -EINVAL;
2148 ++ p.port = array_index_nospec(p.port,
2149 ++ ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
2150 + p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
2151 + break;
2152 + }
2153 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
2154 +index 6e472691d8ee..17f2c5a505b2 100644
2155 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
2156 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
2157 +@@ -389,7 +389,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
2158 + const char *name;
2159 + int i, ret;
2160 +
2161 +- if (group > pctldev->num_groups)
2162 ++ if (group >= pctldev->num_groups)
2163 + return;
2164 +
2165 + seq_printf(s, "\n");
2166 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
2167 +index 433af328d981..b78f42abff2f 100644
2168 +--- a/drivers/pinctrl/pinctrl-amd.c
2169 ++++ b/drivers/pinctrl/pinctrl-amd.c
2170 +@@ -530,7 +530,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
2171 + /* Each status bit covers four pins */
2172 + for (i = 0; i < 4; i++) {
2173 + regval = readl(regs + i);
2174 +- if (!(regval & PIN_IRQ_PENDING))
2175 ++ if (!(regval & PIN_IRQ_PENDING) ||
2176 ++ !(regval & BIT(INTERRUPT_MASK_OFF)))
2177 + continue;
2178 + irq = irq_find_mapping(gc->irqdomain, irqnr + i);
2179 + generic_handle_irq(irq);
2180 +diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
2181 +index dffa3aab7178..cec4c3223044 100644
2182 +--- a/drivers/rpmsg/rpmsg_core.c
2183 ++++ b/drivers/rpmsg/rpmsg_core.c
2184 +@@ -23,6 +23,7 @@
2185 + #include <linux/module.h>
2186 + #include <linux/rpmsg.h>
2187 + #include <linux/of_device.h>
2188 ++#include <linux/pm_domain.h>
2189 + #include <linux/slab.h>
2190 +
2191 + #include "rpmsg_internal.h"
2192 +@@ -418,6 +419,10 @@ static int rpmsg_dev_probe(struct device *dev)
2193 + struct rpmsg_endpoint *ept = NULL;
2194 + int err;
2195 +
2196 ++ err = dev_pm_domain_attach(dev, true);
2197 ++ if (err)
2198 ++ goto out;
2199 ++
2200 + if (rpdrv->callback) {
2201 + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
2202 + chinfo.src = rpdev->src;
2203 +@@ -459,6 +464,8 @@ static int rpmsg_dev_remove(struct device *dev)
2204 +
2205 + rpdrv->remove(rpdev);
2206 +
2207 ++ dev_pm_domain_detach(dev, true);
2208 ++
2209 + if (rpdev->ept)
2210 + rpmsg_destroy_ept(rpdev->ept);
2211 +
2212 +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
2213 +index a1388842e17e..dd342207095a 100644
2214 +--- a/drivers/scsi/3w-9xxx.c
2215 ++++ b/drivers/scsi/3w-9xxx.c
2216 +@@ -2042,6 +2042,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2217 +
2218 + if (twa_initialize_device_extension(tw_dev)) {
2219 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2220 ++ retval = -ENOMEM;
2221 + goto out_free_device_extension;
2222 + }
2223 +
2224 +@@ -2064,6 +2065,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2225 + tw_dev->base_addr = ioremap(mem_addr, mem_len);
2226 + if (!tw_dev->base_addr) {
2227 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2228 ++ retval = -ENOMEM;
2229 + goto out_release_mem_region;
2230 + }
2231 +
2232 +@@ -2071,8 +2073,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2233 + TW_DISABLE_INTERRUPTS(tw_dev);
2234 +
2235 + /* Initialize the card */
2236 +- if (twa_reset_sequence(tw_dev, 0))
2237 ++ if (twa_reset_sequence(tw_dev, 0)) {
2238 ++ retval = -ENOMEM;
2239 + goto out_iounmap;
2240 ++ }
2241 +
2242 + /* Set host specific parameters */
2243 + if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2244 +diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
2245 +index b150e131b2e7..aa317d6909e8 100644
2246 +--- a/drivers/scsi/3w-sas.c
2247 ++++ b/drivers/scsi/3w-sas.c
2248 +@@ -1597,6 +1597,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2249 +
2250 + if (twl_initialize_device_extension(tw_dev)) {
2251 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
2252 ++ retval = -ENOMEM;
2253 + goto out_free_device_extension;
2254 + }
2255 +
2256 +@@ -1611,6 +1612,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2257 + tw_dev->base_addr = pci_iomap(pdev, 1, 0);
2258 + if (!tw_dev->base_addr) {
2259 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
2260 ++ retval = -ENOMEM;
2261 + goto out_release_mem_region;
2262 + }
2263 +
2264 +@@ -1620,6 +1622,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2265 + /* Initialize the card */
2266 + if (twl_reset_sequence(tw_dev, 0)) {
2267 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
2268 ++ retval = -ENOMEM;
2269 + goto out_iounmap;
2270 + }
2271 +
2272 +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
2273 +index f6179e3d6953..961ea6f7def8 100644
2274 +--- a/drivers/scsi/3w-xxxx.c
2275 ++++ b/drivers/scsi/3w-xxxx.c
2276 +@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2277 +
2278 + if (tw_initialize_device_extension(tw_dev)) {
2279 + printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
2280 ++ retval = -ENOMEM;
2281 + goto out_free_device_extension;
2282 + }
2283 +
2284 +@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2285 + tw_dev->base_addr = pci_resource_start(pdev, 0);
2286 + if (!tw_dev->base_addr) {
2287 + printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
2288 ++ retval = -ENOMEM;
2289 + goto out_release_mem_region;
2290 + }
2291 +
2292 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
2293 +index 8eb3f96fe068..bc61cc8bc6f0 100644
2294 +--- a/drivers/scsi/lpfc/lpfc.h
2295 ++++ b/drivers/scsi/lpfc/lpfc.h
2296 +@@ -676,7 +676,7 @@ struct lpfc_hba {
2297 + #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
2298 + #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
2299 + #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
2300 +-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
2301 ++#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
2302 +
2303 + uint32_t hba_flag; /* hba generic flags */
2304 + #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
2305 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2306 +index e6d51135d105..0d0be7d8b9d6 100644
2307 +--- a/drivers/target/target_core_transport.c
2308 ++++ b/drivers/target/target_core_transport.c
2309 +@@ -317,6 +317,7 @@ void __transport_register_session(
2310 + {
2311 + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
2312 + unsigned char buf[PR_REG_ISID_LEN];
2313 ++ unsigned long flags;
2314 +
2315 + se_sess->se_tpg = se_tpg;
2316 + se_sess->fabric_sess_ptr = fabric_sess_ptr;
2317 +@@ -353,7 +354,7 @@ void __transport_register_session(
2318 + se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
2319 + }
2320 +
2321 +- spin_lock_irq(&se_nacl->nacl_sess_lock);
2322 ++ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
2323 + /*
2324 + * The se_nacl->nacl_sess pointer will be set to the
2325 + * last active I_T Nexus for each struct se_node_acl.
2326 +@@ -362,7 +363,7 @@ void __transport_register_session(
2327 +
2328 + list_add_tail(&se_sess->sess_acl_list,
2329 + &se_nacl->acl_sess_list);
2330 +- spin_unlock_irq(&se_nacl->nacl_sess_lock);
2331 ++ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
2332 + }
2333 + list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
2334 +
2335 +diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
2336 +index 20d79a6007d5..070733ca94d5 100644
2337 +--- a/drivers/tty/rocket.c
2338 ++++ b/drivers/tty/rocket.c
2339 +@@ -1894,7 +1894,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
2340 + ByteIO_t UPCIRingInd = 0;
2341 +
2342 + if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
2343 +- pci_enable_device(dev))
2344 ++ pci_enable_device(dev) || i >= NUM_BOARDS)
2345 + return 0;
2346 +
2347 + rcktpt_io_addr[i] = pci_resource_start(dev, 0);
2348 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
2349 +index ff04b7f8549f..41784798c789 100644
2350 +--- a/drivers/uio/uio.c
2351 ++++ b/drivers/uio/uio.c
2352 +@@ -841,8 +841,6 @@ int __uio_register_device(struct module *owner,
2353 + if (ret)
2354 + goto err_uio_dev_add_attributes;
2355 +
2356 +- info->uio_dev = idev;
2357 +-
2358 + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
2359 + /*
2360 + * Note that we deliberately don't use devm_request_irq
2361 +@@ -858,6 +856,7 @@ int __uio_register_device(struct module *owner,
2362 + goto err_request_irq;
2363 + }
2364 +
2365 ++ info->uio_dev = idev;
2366 + return 0;
2367 +
2368 + err_request_irq:
2369 +diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
2370 +index 4737615f0eaa..ce696d6c4641 100644
2371 +--- a/fs/autofs4/autofs_i.h
2372 ++++ b/fs/autofs4/autofs_i.h
2373 +@@ -26,6 +26,7 @@
2374 + #include <linux/list.h>
2375 + #include <linux/completion.h>
2376 + #include <asm/current.h>
2377 ++#include <linux/magic.h>
2378 +
2379 + /* This is the range of ioctl() numbers we claim as ours */
2380 + #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
2381 +@@ -124,7 +125,8 @@ struct autofs_sb_info {
2382 +
2383 + static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
2384 + {
2385 +- return (struct autofs_sb_info *)(sb->s_fs_info);
2386 ++ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
2387 ++ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
2388 + }
2389 +
2390 + static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
2391 +diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
2392 +index 09e7d68dff02..3c7e727612fa 100644
2393 +--- a/fs/autofs4/inode.c
2394 ++++ b/fs/autofs4/inode.c
2395 +@@ -14,7 +14,6 @@
2396 + #include <linux/pagemap.h>
2397 + #include <linux/parser.h>
2398 + #include <linux/bitops.h>
2399 +-#include <linux/magic.h>
2400 + #include "autofs_i.h"
2401 + #include <linux/module.h>
2402 +
2403 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2404 +index 7303ba108112..a507c0d25354 100644
2405 +--- a/fs/btrfs/ioctl.c
2406 ++++ b/fs/btrfs/ioctl.c
2407 +@@ -3158,6 +3158,25 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2408 +
2409 + same_lock_start = min_t(u64, loff, dst_loff);
2410 + same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
2411 ++ } else {
2412 ++ /*
2413 ++ * If the source and destination inodes are different, the
2414 ++ * source's range end offset matches the source's i_size, that
2415 ++ * i_size is not a multiple of the sector size, and the
2416 ++ * destination range does not go past the destination's i_size,
2417 ++ * we must round down the length to the nearest sector size
2418 ++ * multiple. If we don't do this adjustment we end replacing
2419 ++ * with zeroes the bytes in the range that starts at the
2420 ++ * deduplication range's end offset and ends at the next sector
2421 ++ * size multiple.
2422 ++ */
2423 ++ if (loff + olen == i_size_read(src) &&
2424 ++ dst_loff + len < i_size_read(dst)) {
2425 ++ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
2426 ++
2427 ++ len = round_down(i_size_read(src), sz) - loff;
2428 ++ olen = len;
2429 ++ }
2430 + }
2431 +
2432 + /* don't make the dst file partly checksummed */
2433 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2434 +index caf9cf91b825..2cd0b3053439 100644
2435 +--- a/fs/cifs/inode.c
2436 ++++ b/fs/cifs/inode.c
2437 +@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
2438 + oparms.cifs_sb = cifs_sb;
2439 + oparms.desired_access = GENERIC_READ;
2440 + oparms.create_options = CREATE_NOT_DIR;
2441 ++ if (backup_cred(cifs_sb))
2442 ++ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
2443 + oparms.disposition = FILE_OPEN;
2444 + oparms.path = path;
2445 + oparms.fid = &fid;
2446 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2447 +index e9f246fe9d80..759cbbf7b1af 100644
2448 +--- a/fs/cifs/smb2ops.c
2449 ++++ b/fs/cifs/smb2ops.c
2450 +@@ -385,7 +385,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
2451 + oparms.tcon = tcon;
2452 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2453 + oparms.disposition = FILE_OPEN;
2454 +- oparms.create_options = 0;
2455 ++ if (backup_cred(cifs_sb))
2456 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2457 ++ else
2458 ++ oparms.create_options = 0;
2459 + oparms.fid = &fid;
2460 + oparms.reconnect = false;
2461 +
2462 +@@ -534,7 +537,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
2463 + oparms.tcon = tcon;
2464 + oparms.desired_access = FILE_READ_EA;
2465 + oparms.disposition = FILE_OPEN;
2466 +- oparms.create_options = 0;
2467 ++ if (backup_cred(cifs_sb))
2468 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2469 ++ else
2470 ++ oparms.create_options = 0;
2471 + oparms.fid = &fid;
2472 + oparms.reconnect = false;
2473 +
2474 +@@ -613,7 +619,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
2475 + oparms.tcon = tcon;
2476 + oparms.desired_access = FILE_WRITE_EA;
2477 + oparms.disposition = FILE_OPEN;
2478 +- oparms.create_options = 0;
2479 ++ if (backup_cred(cifs_sb))
2480 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2481 ++ else
2482 ++ oparms.create_options = 0;
2483 + oparms.fid = &fid;
2484 + oparms.reconnect = false;
2485 +
2486 +@@ -1215,7 +1224,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2487 + oparms.tcon = tcon;
2488 + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2489 + oparms.disposition = FILE_OPEN;
2490 +- oparms.create_options = 0;
2491 ++ if (backup_cred(cifs_sb))
2492 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2493 ++ else
2494 ++ oparms.create_options = 0;
2495 + oparms.fid = fid;
2496 + oparms.reconnect = false;
2497 +
2498 +@@ -1491,7 +1503,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2499 + oparms.tcon = tcon;
2500 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2501 + oparms.disposition = FILE_OPEN;
2502 +- oparms.create_options = 0;
2503 ++ if (backup_cred(cifs_sb))
2504 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2505 ++ else
2506 ++ oparms.create_options = 0;
2507 + oparms.fid = &fid;
2508 + oparms.reconnect = false;
2509 +
2510 +@@ -3200,7 +3215,7 @@ struct smb_version_values smb21_values = {
2511 + struct smb_version_values smb3any_values = {
2512 + .version_string = SMB3ANY_VERSION_STRING,
2513 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2514 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2515 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2516 + .large_lock_type = 0,
2517 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2518 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2519 +@@ -3220,7 +3235,7 @@ struct smb_version_values smb3any_values = {
2520 + struct smb_version_values smbdefault_values = {
2521 + .version_string = SMBDEFAULT_VERSION_STRING,
2522 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2523 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2524 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2525 + .large_lock_type = 0,
2526 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2527 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2528 +@@ -3240,7 +3255,7 @@ struct smb_version_values smbdefault_values = {
2529 + struct smb_version_values smb30_values = {
2530 + .version_string = SMB30_VERSION_STRING,
2531 + .protocol_id = SMB30_PROT_ID,
2532 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2533 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2534 + .large_lock_type = 0,
2535 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2536 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2537 +@@ -3260,7 +3275,7 @@ struct smb_version_values smb30_values = {
2538 + struct smb_version_values smb302_values = {
2539 + .version_string = SMB302_VERSION_STRING,
2540 + .protocol_id = SMB302_PROT_ID,
2541 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2542 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2543 + .large_lock_type = 0,
2544 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2545 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2546 +@@ -3281,7 +3296,7 @@ struct smb_version_values smb302_values = {
2547 + struct smb_version_values smb311_values = {
2548 + .version_string = SMB311_VERSION_STRING,
2549 + .protocol_id = SMB311_PROT_ID,
2550 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2551 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2552 + .large_lock_type = 0,
2553 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2554 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2555 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2556 +index 58842b36481d..078ec705a5cc 100644
2557 +--- a/fs/cifs/smb2pdu.c
2558 ++++ b/fs/cifs/smb2pdu.c
2559 +@@ -1816,6 +1816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2560 + if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
2561 + *oplock == SMB2_OPLOCK_LEVEL_NONE)
2562 + req->RequestedOplockLevel = *oplock;
2563 ++ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2564 ++ (oparms->create_options & CREATE_NOT_FILE))
2565 ++ req->RequestedOplockLevel = *oplock; /* no srv lease support */
2566 + else {
2567 + rc = add_lease_context(server, iov, &n_iov, oplock);
2568 + if (rc) {
2569 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2570 +index 3b34004a71c1..54f8520ad7a2 100644
2571 +--- a/fs/f2fs/f2fs.h
2572 ++++ b/fs/f2fs/f2fs.h
2573 +@@ -1766,8 +1766,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2574 + pgoff_t index, bool for_write)
2575 + {
2576 + #ifdef CONFIG_F2FS_FAULT_INJECTION
2577 +- struct page *page = find_lock_page(mapping, index);
2578 ++ struct page *page;
2579 +
2580 ++ if (!for_write)
2581 ++ page = find_get_page_flags(mapping, index,
2582 ++ FGP_LOCK | FGP_ACCESSED);
2583 ++ else
2584 ++ page = find_lock_page(mapping, index);
2585 + if (page)
2586 + return page;
2587 +
2588 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2589 +index 87e654c53c31..6f589730782d 100644
2590 +--- a/fs/f2fs/file.c
2591 ++++ b/fs/f2fs/file.c
2592 +@@ -1803,7 +1803,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2593 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2594 + struct super_block *sb = sbi->sb;
2595 + __u32 in;
2596 +- int ret;
2597 ++ int ret = 0;
2598 +
2599 + if (!capable(CAP_SYS_ADMIN))
2600 + return -EPERM;
2601 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
2602 +index f2f897cd23c9..f22884418e92 100644
2603 +--- a/fs/f2fs/gc.c
2604 ++++ b/fs/f2fs/gc.c
2605 +@@ -958,7 +958,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
2606 + goto next;
2607 +
2608 + sum = page_address(sum_page);
2609 +- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
2610 ++ if (type != GET_SUM_TYPE((&sum->footer))) {
2611 ++ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
2612 ++ "type [%d, %d] in SSA and SIT",
2613 ++ segno, type, GET_SUM_TYPE((&sum->footer)));
2614 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
2615 ++ goto next;
2616 ++ }
2617 +
2618 + /*
2619 + * this is to avoid deadlock:
2620 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
2621 +index 8322e4e7bb3f..888a9dc13677 100644
2622 +--- a/fs/f2fs/inline.c
2623 ++++ b/fs/f2fs/inline.c
2624 +@@ -128,6 +128,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
2625 + if (err)
2626 + return err;
2627 +
2628 ++ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
2629 ++ f2fs_put_dnode(dn);
2630 ++ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
2631 ++ f2fs_msg(fio.sbi->sb, KERN_WARNING,
2632 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2633 ++ "run fsck to fix.",
2634 ++ __func__, dn->inode->i_ino, dn->data_blkaddr);
2635 ++ return -EINVAL;
2636 ++ }
2637 ++
2638 + f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
2639 +
2640 + read_inline_data(page, dn->inode_page);
2641 +@@ -365,6 +375,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
2642 + if (err)
2643 + goto out;
2644 +
2645 ++ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
2646 ++ f2fs_put_dnode(&dn);
2647 ++ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
2648 ++ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
2649 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2650 ++ "run fsck to fix.",
2651 ++ __func__, dir->i_ino, dn.data_blkaddr);
2652 ++ err = -EINVAL;
2653 ++ goto out;
2654 ++ }
2655 ++
2656 + f2fs_wait_on_page_writeback(page, DATA, true);
2657 + zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE);
2658 +
2659 +@@ -481,6 +502,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
2660 + return 0;
2661 + recover:
2662 + lock_page(ipage);
2663 ++ f2fs_wait_on_page_writeback(ipage, NODE, true);
2664 + memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
2665 + f2fs_i_depth_write(dir, 0);
2666 + f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
2667 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2668 +index f623da26159f..712505ec5de4 100644
2669 +--- a/fs/f2fs/node.c
2670 ++++ b/fs/f2fs/node.c
2671 +@@ -1610,7 +1610,9 @@ next_step:
2672 + !is_cold_node(page)))
2673 + continue;
2674 + lock_node:
2675 +- if (!trylock_page(page))
2676 ++ if (wbc->sync_mode == WB_SYNC_ALL)
2677 ++ lock_page(page);
2678 ++ else if (!trylock_page(page))
2679 + continue;
2680 +
2681 + if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
2682 +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
2683 +index 39ada30889b6..4dfb5080098f 100644
2684 +--- a/fs/f2fs/segment.h
2685 ++++ b/fs/f2fs/segment.h
2686 +@@ -414,6 +414,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
2687 + if (test_and_clear_bit(segno, free_i->free_segmap)) {
2688 + free_i->free_segments++;
2689 +
2690 ++ if (IS_CURSEC(sbi, secno))
2691 ++ goto skip_free;
2692 + next = find_next_bit(free_i->free_segmap,
2693 + start_segno + sbi->segs_per_sec, start_segno);
2694 + if (next >= start_segno + sbi->segs_per_sec) {
2695 +@@ -421,6 +423,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
2696 + free_i->free_sections++;
2697 + }
2698 + }
2699 ++skip_free:
2700 + spin_unlock(&free_i->segmap_lock);
2701 + }
2702 +
2703 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2704 +index 400c00058bad..eae35909fa51 100644
2705 +--- a/fs/f2fs/super.c
2706 ++++ b/fs/f2fs/super.c
2707 +@@ -1883,12 +1883,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2708 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2709 + unsigned int ovp_segments, reserved_segments;
2710 + unsigned int main_segs, blocks_per_seg;
2711 ++ unsigned int sit_segs, nat_segs;
2712 ++ unsigned int sit_bitmap_size, nat_bitmap_size;
2713 ++ unsigned int log_blocks_per_seg;
2714 + int i;
2715 +
2716 + total = le32_to_cpu(raw_super->segment_count);
2717 + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2718 +- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
2719 +- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
2720 ++ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
2721 ++ fsmeta += sit_segs;
2722 ++ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
2723 ++ fsmeta += nat_segs;
2724 + fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2725 + fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
2726 +
2727 +@@ -1919,6 +1924,18 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2728 + return 1;
2729 + }
2730 +
2731 ++ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2732 ++ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2733 ++ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2734 ++
2735 ++ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2736 ++ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2737 ++ f2fs_msg(sbi->sb, KERN_ERR,
2738 ++ "Wrong bitmap size: sit: %u, nat:%u",
2739 ++ sit_bitmap_size, nat_bitmap_size);
2740 ++ return 1;
2741 ++ }
2742 ++
2743 + if (unlikely(f2fs_cp_error(sbi))) {
2744 + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
2745 + return 1;
2746 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
2747 +index e2c258f717cd..93af9d7dfcdc 100644
2748 +--- a/fs/f2fs/sysfs.c
2749 ++++ b/fs/f2fs/sysfs.c
2750 +@@ -9,6 +9,7 @@
2751 + * it under the terms of the GNU General Public License version 2 as
2752 + * published by the Free Software Foundation.
2753 + */
2754 ++#include <linux/compiler.h>
2755 + #include <linux/proc_fs.h>
2756 + #include <linux/f2fs_fs.h>
2757 + #include <linux/seq_file.h>
2758 +@@ -381,7 +382,8 @@ static struct kobject f2fs_feat = {
2759 + .kset = &f2fs_kset,
2760 + };
2761 +
2762 +-static int segment_info_seq_show(struct seq_file *seq, void *offset)
2763 ++static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
2764 ++ void *offset)
2765 + {
2766 + struct super_block *sb = seq->private;
2767 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
2768 +@@ -408,7 +410,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
2769 + return 0;
2770 + }
2771 +
2772 +-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
2773 ++static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
2774 ++ void *offset)
2775 + {
2776 + struct super_block *sb = seq->private;
2777 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
2778 +@@ -432,7 +435,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
2779 + return 0;
2780 + }
2781 +
2782 +-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
2783 ++static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
2784 ++ void *offset)
2785 + {
2786 + struct super_block *sb = seq->private;
2787 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
2788 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2789 +index 2c3f398995f6..b8d55da2f04d 100644
2790 +--- a/fs/nfs/callback_proc.c
2791 ++++ b/fs/nfs/callback_proc.c
2792 +@@ -213,9 +213,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
2793 + {
2794 + u32 oldseq, newseq;
2795 +
2796 +- /* Is the stateid still not initialised? */
2797 ++ /* Is the stateid not initialised? */
2798 + if (!pnfs_layout_is_valid(lo))
2799 +- return NFS4ERR_DELAY;
2800 ++ return NFS4ERR_NOMATCHING_LAYOUT;
2801 +
2802 + /* Mismatched stateid? */
2803 + if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
2804 +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
2805 +index 123c069429a7..57de914630bc 100644
2806 +--- a/fs/nfs/callback_xdr.c
2807 ++++ b/fs/nfs/callback_xdr.c
2808 +@@ -904,16 +904,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
2809 +
2810 + if (hdr_arg.minorversion == 0) {
2811 + cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
2812 +- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
2813 ++ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
2814 ++ if (cps.clp)
2815 ++ nfs_put_client(cps.clp);
2816 + goto out_invalidcred;
2817 ++ }
2818 + }
2819 +
2820 + cps.minorversion = hdr_arg.minorversion;
2821 + hdr_res.taglen = hdr_arg.taglen;
2822 + hdr_res.tag = hdr_arg.tag;
2823 +- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
2824 ++ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
2825 ++ if (cps.clp)
2826 ++ nfs_put_client(cps.clp);
2827 + return rpc_system_err;
2828 +-
2829 ++ }
2830 + while (status == 0 && nops != hdr_arg.nops) {
2831 + status = process_op(nops, rqstp, &xdr_in,
2832 + rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
2833 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2834 +index 9f0bb908e2b5..e41ef532c4ce 100644
2835 +--- a/include/linux/mm_types.h
2836 ++++ b/include/linux/mm_types.h
2837 +@@ -354,7 +354,7 @@ struct kioctx_table;
2838 + struct mm_struct {
2839 + struct vm_area_struct *mmap; /* list of VMAs */
2840 + struct rb_root mm_rb;
2841 +- u32 vmacache_seqnum; /* per-thread vmacache */
2842 ++ u64 vmacache_seqnum; /* per-thread vmacache */
2843 + #ifdef CONFIG_MMU
2844 + unsigned long (*get_unmapped_area) (struct file *filp,
2845 + unsigned long addr, unsigned long len,
2846 +diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
2847 +index 5fe87687664c..d7016dcb245e 100644
2848 +--- a/include/linux/mm_types_task.h
2849 ++++ b/include/linux/mm_types_task.h
2850 +@@ -32,7 +32,7 @@
2851 + #define VMACACHE_MASK (VMACACHE_SIZE - 1)
2852 +
2853 + struct vmacache {
2854 +- u32 seqnum;
2855 ++ u64 seqnum;
2856 + struct vm_area_struct *vmas[VMACACHE_SIZE];
2857 + };
2858 +
2859 +diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
2860 +index 7fd514f36e74..a4be6388a980 100644
2861 +--- a/include/linux/rhashtable.h
2862 ++++ b/include/linux/rhashtable.h
2863 +@@ -152,25 +152,25 @@ struct rhashtable_params {
2864 + /**
2865 + * struct rhashtable - Hash table handle
2866 + * @tbl: Bucket table
2867 +- * @nelems: Number of elements in table
2868 + * @key_len: Key length for hashfn
2869 +- * @p: Configuration parameters
2870 + * @max_elems: Maximum number of elements in table
2871 ++ * @p: Configuration parameters
2872 + * @rhlist: True if this is an rhltable
2873 + * @run_work: Deferred worker to expand/shrink asynchronously
2874 + * @mutex: Mutex to protect current/future table swapping
2875 + * @lock: Spin lock to protect walker list
2876 ++ * @nelems: Number of elements in table
2877 + */
2878 + struct rhashtable {
2879 + struct bucket_table __rcu *tbl;
2880 +- atomic_t nelems;
2881 + unsigned int key_len;
2882 +- struct rhashtable_params p;
2883 + unsigned int max_elems;
2884 ++ struct rhashtable_params p;
2885 + bool rhlist;
2886 + struct work_struct run_work;
2887 + struct mutex mutex;
2888 + spinlock_t lock;
2889 ++ atomic_t nelems;
2890 + };
2891 +
2892 + /**
2893 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2894 +index 6dd77767fd5b..f64e88444082 100644
2895 +--- a/include/linux/skbuff.h
2896 ++++ b/include/linux/skbuff.h
2897 +@@ -663,21 +663,26 @@ struct sk_buff {
2898 + struct sk_buff *prev;
2899 +
2900 + union {
2901 +- ktime_t tstamp;
2902 +- u64 skb_mstamp;
2903 ++ struct net_device *dev;
2904 ++ /* Some protocols might use this space to store information,
2905 ++ * while device pointer would be NULL.
2906 ++ * UDP receive path is one user.
2907 ++ */
2908 ++ unsigned long dev_scratch;
2909 + };
2910 + };
2911 +- struct rb_node rbnode; /* used in netem & tcp stack */
2912 ++ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
2913 ++ struct list_head list;
2914 + };
2915 +- struct sock *sk;
2916 +
2917 + union {
2918 +- struct net_device *dev;
2919 +- /* Some protocols might use this space to store information,
2920 +- * while device pointer would be NULL.
2921 +- * UDP receive path is one user.
2922 +- */
2923 +- unsigned long dev_scratch;
2924 ++ struct sock *sk;
2925 ++ int ip_defrag_offset;
2926 ++ };
2927 ++
2928 ++ union {
2929 ++ ktime_t tstamp;
2930 ++ u64 skb_mstamp;
2931 + };
2932 + /*
2933 + * This is the control buffer. It is free to use for every
2934 +@@ -2580,7 +2585,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2935 + kfree_skb(skb);
2936 + }
2937 +
2938 +-void skb_rbtree_purge(struct rb_root *root);
2939 ++unsigned int skb_rbtree_purge(struct rb_root *root);
2940 +
2941 + void *netdev_alloc_frag(unsigned int fragsz);
2942 +
2943 +@@ -3134,6 +3139,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
2944 + return skb->data;
2945 + }
2946 +
2947 ++int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
2948 + /**
2949 + * pskb_trim_rcsum - trim received skb and update checksum
2950 + * @skb: buffer to trim
2951 +@@ -3147,9 +3153,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2952 + {
2953 + if (likely(len >= skb->len))
2954 + return 0;
2955 +- if (skb->ip_summed == CHECKSUM_COMPLETE)
2956 +- skb->ip_summed = CHECKSUM_NONE;
2957 +- return __pskb_trim(skb, len);
2958 ++ return pskb_trim_rcsum_slow(skb, len);
2959 + }
2960 +
2961 + static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2962 +@@ -3169,6 +3173,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2963 +
2964 + #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
2965 +
2966 ++#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
2967 ++#define skb_rb_first(root) rb_to_skb(rb_first(root))
2968 ++#define skb_rb_last(root) rb_to_skb(rb_last(root))
2969 ++#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
2970 ++#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
2971 ++
2972 + #define skb_queue_walk(queue, skb) \
2973 + for (skb = (queue)->next; \
2974 + skb != (struct sk_buff *)(queue); \
2975 +@@ -3183,6 +3193,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2976 + for (; skb != (struct sk_buff *)(queue); \
2977 + skb = skb->next)
2978 +
2979 ++#define skb_rbtree_walk(skb, root) \
2980 ++ for (skb = skb_rb_first(root); skb != NULL; \
2981 ++ skb = skb_rb_next(skb))
2982 ++
2983 ++#define skb_rbtree_walk_from(skb) \
2984 ++ for (; skb != NULL; \
2985 ++ skb = skb_rb_next(skb))
2986 ++
2987 ++#define skb_rbtree_walk_from_safe(skb, tmp) \
2988 ++ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
2989 ++ skb = tmp)
2990 ++
2991 + #define skb_queue_walk_from_safe(queue, skb, tmp) \
2992 + for (tmp = skb->next; \
2993 + skb != (struct sk_buff *)(queue); \
2994 +diff --git a/include/linux/tpm.h b/include/linux/tpm.h
2995 +index 2a6c3d96b31f..7f7b29f86c59 100644
2996 +--- a/include/linux/tpm.h
2997 ++++ b/include/linux/tpm.h
2998 +@@ -48,6 +48,8 @@ struct tpm_class_ops {
2999 + u8 (*status) (struct tpm_chip *chip);
3000 + bool (*update_timeouts)(struct tpm_chip *chip,
3001 + unsigned long *timeout_cap);
3002 ++ int (*go_idle)(struct tpm_chip *chip);
3003 ++ int (*cmd_ready)(struct tpm_chip *chip);
3004 + int (*request_locality)(struct tpm_chip *chip, int loc);
3005 + int (*relinquish_locality)(struct tpm_chip *chip, int loc);
3006 + void (*clk_enable)(struct tpm_chip *chip, bool value);
3007 +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
3008 +index 5c7f010676a7..47a3441cf4c4 100644
3009 +--- a/include/linux/vm_event_item.h
3010 ++++ b/include/linux/vm_event_item.h
3011 +@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
3012 + #ifdef CONFIG_DEBUG_VM_VMACACHE
3013 + VMACACHE_FIND_CALLS,
3014 + VMACACHE_FIND_HITS,
3015 +- VMACACHE_FULL_FLUSHES,
3016 + #endif
3017 + #ifdef CONFIG_SWAP
3018 + SWAP_RA,
3019 +diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
3020 +index a5b3aa8d281f..a09b28f76460 100644
3021 +--- a/include/linux/vmacache.h
3022 ++++ b/include/linux/vmacache.h
3023 +@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
3024 + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
3025 + }
3026 +
3027 +-extern void vmacache_flush_all(struct mm_struct *mm);
3028 + extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
3029 + extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
3030 + unsigned long addr);
3031 +@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
3032 + static inline void vmacache_invalidate(struct mm_struct *mm)
3033 + {
3034 + mm->vmacache_seqnum++;
3035 +-
3036 +- /* deal with overflows */
3037 +- if (unlikely(mm->vmacache_seqnum == 0))
3038 +- vmacache_flush_all(mm);
3039 + }
3040 +
3041 + #endif /* __LINUX_VMACACHE_H */
3042 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
3043 +index a6e4edd8d4a2..335cf7851f12 100644
3044 +--- a/include/net/inet_frag.h
3045 ++++ b/include/net/inet_frag.h
3046 +@@ -2,14 +2,20 @@
3047 + #ifndef __NET_FRAG_H__
3048 + #define __NET_FRAG_H__
3049 +
3050 ++#include <linux/rhashtable.h>
3051 ++
3052 + struct netns_frags {
3053 +- /* Keep atomic mem on separate cachelines in structs that include it */
3054 +- atomic_t mem ____cacheline_aligned_in_smp;
3055 + /* sysctls */
3056 ++ long high_thresh;
3057 ++ long low_thresh;
3058 + int timeout;
3059 +- int high_thresh;
3060 +- int low_thresh;
3061 + int max_dist;
3062 ++ struct inet_frags *f;
3063 ++
3064 ++ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
3065 ++
3066 ++ /* Keep atomic mem on separate cachelines in structs that include it */
3067 ++ atomic_long_t mem ____cacheline_aligned_in_smp;
3068 + };
3069 +
3070 + /**
3071 +@@ -25,130 +31,115 @@ enum {
3072 + INET_FRAG_COMPLETE = BIT(2),
3073 + };
3074 +
3075 ++struct frag_v4_compare_key {
3076 ++ __be32 saddr;
3077 ++ __be32 daddr;
3078 ++ u32 user;
3079 ++ u32 vif;
3080 ++ __be16 id;
3081 ++ u16 protocol;
3082 ++};
3083 ++
3084 ++struct frag_v6_compare_key {
3085 ++ struct in6_addr saddr;
3086 ++ struct in6_addr daddr;
3087 ++ u32 user;
3088 ++ __be32 id;
3089 ++ u32 iif;
3090 ++};
3091 ++
3092 + /**
3093 + * struct inet_frag_queue - fragment queue
3094 + *
3095 +- * @lock: spinlock protecting the queue
3096 ++ * @node: rhash node
3097 ++ * @key: keys identifying this frag.
3098 + * @timer: queue expiration timer
3099 +- * @list: hash bucket list
3100 ++ * @lock: spinlock protecting this frag
3101 + * @refcnt: reference count of the queue
3102 + * @fragments: received fragments head
3103 ++ * @rb_fragments: received fragments rb-tree root
3104 + * @fragments_tail: received fragments tail
3105 ++ * @last_run_head: the head of the last "run". see ip_fragment.c
3106 + * @stamp: timestamp of the last received fragment
3107 + * @len: total length of the original datagram
3108 + * @meat: length of received fragments so far
3109 + * @flags: fragment queue flags
3110 + * @max_size: maximum received fragment size
3111 + * @net: namespace that this frag belongs to
3112 +- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
3113 ++ * @rcu: rcu head for freeing deferall
3114 + */
3115 + struct inet_frag_queue {
3116 +- spinlock_t lock;
3117 ++ struct rhash_head node;
3118 ++ union {
3119 ++ struct frag_v4_compare_key v4;
3120 ++ struct frag_v6_compare_key v6;
3121 ++ } key;
3122 + struct timer_list timer;
3123 +- struct hlist_node list;
3124 ++ spinlock_t lock;
3125 + refcount_t refcnt;
3126 +- struct sk_buff *fragments;
3127 ++ struct sk_buff *fragments; /* Used in IPv6. */
3128 ++ struct rb_root rb_fragments; /* Used in IPv4. */
3129 + struct sk_buff *fragments_tail;
3130 ++ struct sk_buff *last_run_head;
3131 + ktime_t stamp;
3132 + int len;
3133 + int meat;
3134 + __u8 flags;
3135 + u16 max_size;
3136 +- struct netns_frags *net;
3137 +- struct hlist_node list_evictor;
3138 +-};
3139 +-
3140 +-#define INETFRAGS_HASHSZ 1024
3141 +-
3142 +-/* averaged:
3143 +- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
3144 +- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
3145 +- * struct frag_queue))
3146 +- */
3147 +-#define INETFRAGS_MAXDEPTH 128
3148 +-
3149 +-struct inet_frag_bucket {
3150 +- struct hlist_head chain;
3151 +- spinlock_t chain_lock;
3152 ++ struct netns_frags *net;
3153 ++ struct rcu_head rcu;
3154 + };
3155 +
3156 + struct inet_frags {
3157 +- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
3158 +-
3159 +- struct work_struct frags_work;
3160 +- unsigned int next_bucket;
3161 +- unsigned long last_rebuild_jiffies;
3162 +- bool rebuild;
3163 +-
3164 +- /* The first call to hashfn is responsible to initialize
3165 +- * rnd. This is best done with net_get_random_once.
3166 +- *
3167 +- * rnd_seqlock is used to let hash insertion detect
3168 +- * when it needs to re-lookup the hash chain to use.
3169 +- */
3170 +- u32 rnd;
3171 +- seqlock_t rnd_seqlock;
3172 + unsigned int qsize;
3173 +
3174 +- unsigned int (*hashfn)(const struct inet_frag_queue *);
3175 +- bool (*match)(const struct inet_frag_queue *q,
3176 +- const void *arg);
3177 + void (*constructor)(struct inet_frag_queue *q,
3178 + const void *arg);
3179 + void (*destructor)(struct inet_frag_queue *);
3180 +- void (*frag_expire)(unsigned long data);
3181 ++ void (*frag_expire)(struct timer_list *t);
3182 + struct kmem_cache *frags_cachep;
3183 + const char *frags_cache_name;
3184 ++ struct rhashtable_params rhash_params;
3185 + };
3186 +
3187 + int inet_frags_init(struct inet_frags *);
3188 + void inet_frags_fini(struct inet_frags *);
3189 +
3190 +-static inline void inet_frags_init_net(struct netns_frags *nf)
3191 ++static inline int inet_frags_init_net(struct netns_frags *nf)
3192 + {
3193 +- atomic_set(&nf->mem, 0);
3194 ++ atomic_long_set(&nf->mem, 0);
3195 ++ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
3196 + }
3197 +-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
3198 ++void inet_frags_exit_net(struct netns_frags *nf);
3199 +
3200 +-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
3201 +-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
3202 +-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
3203 +- struct inet_frags *f, void *key, unsigned int hash);
3204 ++void inet_frag_kill(struct inet_frag_queue *q);
3205 ++void inet_frag_destroy(struct inet_frag_queue *q);
3206 ++struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
3207 +
3208 +-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
3209 +- const char *prefix);
3210 ++/* Free all skbs in the queue; return the sum of their truesizes. */
3211 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root);
3212 +
3213 +-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
3214 ++static inline void inet_frag_put(struct inet_frag_queue *q)
3215 + {
3216 + if (refcount_dec_and_test(&q->refcnt))
3217 +- inet_frag_destroy(q, f);
3218 +-}
3219 +-
3220 +-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
3221 +-{
3222 +- return !hlist_unhashed(&q->list_evictor);
3223 ++ inet_frag_destroy(q);
3224 + }
3225 +
3226 + /* Memory Tracking Functions. */
3227 +
3228 +-static inline int frag_mem_limit(struct netns_frags *nf)
3229 +-{
3230 +- return atomic_read(&nf->mem);
3231 +-}
3232 +-
3233 +-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
3234 ++static inline long frag_mem_limit(const struct netns_frags *nf)
3235 + {
3236 +- atomic_sub(i, &nf->mem);
3237 ++ return atomic_long_read(&nf->mem);
3238 + }
3239 +
3240 +-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
3241 ++static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
3242 + {
3243 +- atomic_add(i, &nf->mem);
3244 ++ atomic_long_sub(val, &nf->mem);
3245 + }
3246 +
3247 +-static inline int sum_frag_mem_limit(struct netns_frags *nf)
3248 ++static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
3249 + {
3250 +- return atomic_read(&nf->mem);
3251 ++ atomic_long_add(val, &nf->mem);
3252 + }
3253 +
3254 + /* RFC 3168 support :
3255 +diff --git a/include/net/ip.h b/include/net/ip.h
3256 +index 81da1123fc8e..7c430343176a 100644
3257 +--- a/include/net/ip.h
3258 ++++ b/include/net/ip.h
3259 +@@ -570,7 +570,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
3260 + return skb;
3261 + }
3262 + #endif
3263 +-int ip_frag_mem(struct net *net);
3264 +
3265 + /*
3266 + * Functions provided by ip_forward.c
3267 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
3268 +index f280c61e019a..fa87a62e9bd3 100644
3269 +--- a/include/net/ipv6.h
3270 ++++ b/include/net/ipv6.h
3271 +@@ -331,13 +331,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
3272 + idev->cnf.accept_ra;
3273 + }
3274 +
3275 +-#if IS_ENABLED(CONFIG_IPV6)
3276 +-static inline int ip6_frag_mem(struct net *net)
3277 +-{
3278 +- return sum_frag_mem_limit(&net->ipv6.frags);
3279 +-}
3280 +-#endif
3281 +-
3282 + #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
3283 + #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
3284 + #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
3285 +@@ -531,17 +524,8 @@ enum ip6_defrag_users {
3286 + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
3287 + };
3288 +
3289 +-struct ip6_create_arg {
3290 +- __be32 id;
3291 +- u32 user;
3292 +- const struct in6_addr *src;
3293 +- const struct in6_addr *dst;
3294 +- int iif;
3295 +- u8 ecn;
3296 +-};
3297 +-
3298 + void ip6_frag_init(struct inet_frag_queue *q, const void *a);
3299 +-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
3300 ++extern const struct rhashtable_params ip6_rhash_params;
3301 +
3302 + /*
3303 + * Equivalent of ipv4 struct ip
3304 +@@ -549,19 +533,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
3305 + struct frag_queue {
3306 + struct inet_frag_queue q;
3307 +
3308 +- __be32 id; /* fragment id */
3309 +- u32 user;
3310 +- struct in6_addr saddr;
3311 +- struct in6_addr daddr;
3312 +-
3313 + int iif;
3314 + unsigned int csum;
3315 + __u16 nhoffset;
3316 + u8 ecn;
3317 + };
3318 +
3319 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
3320 +- struct inet_frags *frags);
3321 ++void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
3322 +
3323 + static inline bool ipv6_addr_any(const struct in6_addr *a)
3324 + {
3325 +diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
3326 +index ac71559314e7..9eae13eefc49 100644
3327 +--- a/include/uapi/linux/ethtool.h
3328 ++++ b/include/uapi/linux/ethtool.h
3329 +@@ -898,13 +898,13 @@ struct ethtool_rx_flow_spec {
3330 + static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
3331 + {
3332 + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
3333 +-};
3334 ++}
3335 +
3336 + static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
3337 + {
3338 + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
3339 + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
3340 +-};
3341 ++}
3342 +
3343 + /**
3344 + * struct ethtool_rxnfc - command to get or set RX flow classification rules
3345 +diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
3346 +index 0d941cdd8e8c..f5d753e60836 100644
3347 +--- a/include/uapi/linux/snmp.h
3348 ++++ b/include/uapi/linux/snmp.h
3349 +@@ -56,6 +56,7 @@ enum
3350 + IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
3351 + IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
3352 + IPSTATS_MIB_CEPKTS, /* InCEPkts */
3353 ++ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
3354 + __IPSTATS_MIB_MAX
3355 + };
3356 +
3357 +diff --git a/kernel/cpu.c b/kernel/cpu.c
3358 +index 8f02f9b6e046..f3f389e33343 100644
3359 +--- a/kernel/cpu.c
3360 ++++ b/kernel/cpu.c
3361 +@@ -612,15 +612,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
3362 + bool bringup = st->bringup;
3363 + enum cpuhp_state state;
3364 +
3365 ++ if (WARN_ON_ONCE(!st->should_run))
3366 ++ return;
3367 ++
3368 + /*
3369 + * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
3370 + * that if we see ->should_run we also see the rest of the state.
3371 + */
3372 + smp_mb();
3373 +
3374 +- if (WARN_ON_ONCE(!st->should_run))
3375 +- return;
3376 +-
3377 + cpuhp_lock_acquire(bringup);
3378 +
3379 + if (st->single) {
3380 +@@ -932,7 +932,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3381 + ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
3382 + if (ret) {
3383 + st->target = prev_state;
3384 +- undo_cpu_down(cpu, st);
3385 ++ if (st->state < prev_state)
3386 ++ undo_cpu_down(cpu, st);
3387 + break;
3388 + }
3389 + }
3390 +@@ -985,7 +986,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
3391 + * to do the further cleanups.
3392 + */
3393 + ret = cpuhp_down_callbacks(cpu, st, target);
3394 +- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
3395 ++ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
3396 + cpuhp_reset_state(st, prev_state);
3397 + __cpuhp_kick_ap(st);
3398 + }
3399 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3400 +index 9fe525f410bf..f17c76a1a05f 100644
3401 +--- a/kernel/time/timer.c
3402 ++++ b/kernel/time/timer.c
3403 +@@ -1609,6 +1609,22 @@ static inline void __run_timers(struct timer_base *base)
3404 +
3405 + raw_spin_lock_irq(&base->lock);
3406 +
3407 ++ /*
3408 ++ * timer_base::must_forward_clk must be cleared before running
3409 ++ * timers so that any timer functions that call mod_timer() will
3410 ++ * not try to forward the base. Idle tracking / clock forwarding
3411 ++ * logic is only used with BASE_STD timers.
3412 ++ *
3413 ++ * The must_forward_clk flag is cleared unconditionally also for
3414 ++ * the deferrable base. The deferrable base is not affected by idle
3415 ++ * tracking and never forwarded, so clearing the flag is a NOOP.
3416 ++ *
3417 ++ * The fact that the deferrable base is never forwarded can cause
3418 ++ * large variations in granularity for deferrable timers, but they
3419 ++ * can be deferred for long periods due to idle anyway.
3420 ++ */
3421 ++ base->must_forward_clk = false;
3422 ++
3423 + while (time_after_eq(jiffies, base->clk)) {
3424 +
3425 + levels = collect_expired_timers(base, heads);
3426 +@@ -1628,19 +1644,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
3427 + {
3428 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
3429 +
3430 +- /*
3431 +- * must_forward_clk must be cleared before running timers so that any
3432 +- * timer functions that call mod_timer will not try to forward the
3433 +- * base. idle trcking / clock forwarding logic is only used with
3434 +- * BASE_STD timers.
3435 +- *
3436 +- * The deferrable base does not do idle tracking at all, so we do
3437 +- * not forward it. This can result in very large variations in
3438 +- * granularity for deferrable timers, but they can be deferred for
3439 +- * long periods due to idle.
3440 +- */
3441 +- base->must_forward_clk = false;
3442 +-
3443 + __run_timers(base);
3444 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
3445 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
3446 +diff --git a/lib/rhashtable.c b/lib/rhashtable.c
3447 +index 39215c724fc7..cebbcec877d7 100644
3448 +--- a/lib/rhashtable.c
3449 ++++ b/lib/rhashtable.c
3450 +@@ -364,6 +364,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
3451 + err = rhashtable_rehash_chain(ht, old_hash);
3452 + if (err)
3453 + return err;
3454 ++ cond_resched();
3455 + }
3456 +
3457 + /* Publish the new table pointer. */
3458 +@@ -1073,6 +1074,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
3459 + for (i = 0; i < tbl->size; i++) {
3460 + struct rhash_head *pos, *next;
3461 +
3462 ++ cond_resched();
3463 + for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
3464 + next = !rht_is_a_nulls(pos) ?
3465 + rht_dereference(pos->next, ht) : NULL;
3466 +diff --git a/mm/debug.c b/mm/debug.c
3467 +index 6726bec731c9..c55abc893fdc 100644
3468 +--- a/mm/debug.c
3469 ++++ b/mm/debug.c
3470 +@@ -100,7 +100,7 @@ EXPORT_SYMBOL(dump_vma);
3471 +
3472 + void dump_mm(const struct mm_struct *mm)
3473 + {
3474 +- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
3475 ++ pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
3476 + #ifdef CONFIG_MMU
3477 + "get_unmapped_area %p\n"
3478 + #endif
3479 +@@ -128,7 +128,7 @@ void dump_mm(const struct mm_struct *mm)
3480 + "tlb_flush_pending %d\n"
3481 + "def_flags: %#lx(%pGv)\n",
3482 +
3483 +- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
3484 ++ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
3485 + #ifdef CONFIG_MMU
3486 + mm->get_unmapped_area,
3487 + #endif
3488 +diff --git a/mm/vmacache.c b/mm/vmacache.c
3489 +index db7596eb6132..f1729617dc85 100644
3490 +--- a/mm/vmacache.c
3491 ++++ b/mm/vmacache.c
3492 +@@ -7,44 +7,6 @@
3493 + #include <linux/mm.h>
3494 + #include <linux/vmacache.h>
3495 +
3496 +-/*
3497 +- * Flush vma caches for threads that share a given mm.
3498 +- *
3499 +- * The operation is safe because the caller holds the mmap_sem
3500 +- * exclusively and other threads accessing the vma cache will
3501 +- * have mmap_sem held at least for read, so no extra locking
3502 +- * is required to maintain the vma cache.
3503 +- */
3504 +-void vmacache_flush_all(struct mm_struct *mm)
3505 +-{
3506 +- struct task_struct *g, *p;
3507 +-
3508 +- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
3509 +-
3510 +- /*
3511 +- * Single threaded tasks need not iterate the entire
3512 +- * list of process. We can avoid the flushing as well
3513 +- * since the mm's seqnum was increased and don't have
3514 +- * to worry about other threads' seqnum. Current's
3515 +- * flush will occur upon the next lookup.
3516 +- */
3517 +- if (atomic_read(&mm->mm_users) == 1)
3518 +- return;
3519 +-
3520 +- rcu_read_lock();
3521 +- for_each_process_thread(g, p) {
3522 +- /*
3523 +- * Only flush the vmacache pointers as the
3524 +- * mm seqnum is already set and curr's will
3525 +- * be set upon invalidation when the next
3526 +- * lookup is done.
3527 +- */
3528 +- if (mm == p->mm)
3529 +- vmacache_flush(p);
3530 +- }
3531 +- rcu_read_unlock();
3532 +-}
3533 +-
3534 + /*
3535 + * This task may be accessing a foreign mm via (for example)
3536 + * get_user_pages()->find_vma(). The vmacache is task-local and this
3537 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
3538 +index cef3754408d4..b21fcc838784 100644
3539 +--- a/net/bluetooth/hidp/core.c
3540 ++++ b/net/bluetooth/hidp/core.c
3541 +@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
3542 + hid->version = req->version;
3543 + hid->country = req->country;
3544 +
3545 +- strncpy(hid->name, req->name, sizeof(req->name) - 1);
3546 ++ strncpy(hid->name, req->name, sizeof(hid->name));
3547 +
3548 + snprintf(hid->phys, sizeof(hid->phys), "%pMR",
3549 + &l2cap_pi(session->ctrl_sock->sk)->chan->src);
3550 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3551 +index 2e5eeba97de9..168a3e8883d4 100644
3552 +--- a/net/core/skbuff.c
3553 ++++ b/net/core/skbuff.c
3554 +@@ -1839,6 +1839,20 @@ done:
3555 + }
3556 + EXPORT_SYMBOL(___pskb_trim);
3557 +
3558 ++/* Note : use pskb_trim_rcsum() instead of calling this directly
3559 ++ */
3560 ++int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
3561 ++{
3562 ++ if (skb->ip_summed == CHECKSUM_COMPLETE) {
3563 ++ int delta = skb->len - len;
3564 ++
3565 ++ skb->csum = csum_sub(skb->csum,
3566 ++ skb_checksum(skb, len, delta, 0));
3567 ++ }
3568 ++ return __pskb_trim(skb, len);
3569 ++}
3570 ++EXPORT_SYMBOL(pskb_trim_rcsum_slow);
3571 ++
3572 + /**
3573 + * __pskb_pull_tail - advance tail of skb header
3574 + * @skb: buffer to reallocate
3575 +@@ -2842,20 +2856,27 @@ EXPORT_SYMBOL(skb_queue_purge);
3576 + /**
3577 + * skb_rbtree_purge - empty a skb rbtree
3578 + * @root: root of the rbtree to empty
3579 ++ * Return value: the sum of truesizes of all purged skbs.
3580 + *
3581 + * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3582 + * the list and one reference dropped. This function does not take
3583 + * any lock. Synchronization should be handled by the caller (e.g., TCP
3584 + * out-of-order queue is protected by the socket lock).
3585 + */
3586 +-void skb_rbtree_purge(struct rb_root *root)
3587 ++unsigned int skb_rbtree_purge(struct rb_root *root)
3588 + {
3589 +- struct sk_buff *skb, *next;
3590 ++ struct rb_node *p = rb_first(root);
3591 ++ unsigned int sum = 0;
3592 +
3593 +- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
3594 +- kfree_skb(skb);
3595 ++ while (p) {
3596 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3597 +
3598 +- *root = RB_ROOT;
3599 ++ p = rb_next(p);
3600 ++ rb_erase(&skb->rbnode, root);
3601 ++ sum += skb->truesize;
3602 ++ kfree_skb(skb);
3603 ++ }
3604 ++ return sum;
3605 + }
3606 +
3607 + /**
3608 +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
3609 +index bae7d78aa068..fbeacbc2be5d 100644
3610 +--- a/net/dcb/dcbnl.c
3611 ++++ b/net/dcb/dcbnl.c
3612 +@@ -1765,7 +1765,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
3613 + if (itr->app.selector == app->selector &&
3614 + itr->app.protocol == app->protocol &&
3615 + itr->ifindex == ifindex &&
3616 +- (!prio || itr->app.priority == prio))
3617 ++ ((prio == -1) || itr->app.priority == prio))
3618 + return itr;
3619 + }
3620 +
3621 +@@ -1800,7 +1800,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
3622 + u8 prio = 0;
3623 +
3624 + spin_lock_bh(&dcb_lock);
3625 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
3626 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
3627 ++ if (itr)
3628 + prio = itr->app.priority;
3629 + spin_unlock_bh(&dcb_lock);
3630 +
3631 +@@ -1828,7 +1829,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
3632 +
3633 + spin_lock_bh(&dcb_lock);
3634 + /* Search for existing match and replace */
3635 +- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
3636 ++ itr = dcb_app_lookup(new, dev->ifindex, -1);
3637 ++ if (itr) {
3638 + if (new->priority)
3639 + itr->app.priority = new->priority;
3640 + else {
3641 +@@ -1861,7 +1863,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
3642 + u8 prio = 0;
3643 +
3644 + spin_lock_bh(&dcb_lock);
3645 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
3646 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
3647 ++ if (itr)
3648 + prio |= 1 << itr->app.priority;
3649 + spin_unlock_bh(&dcb_lock);
3650 +
3651 +diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
3652 +index d8de3bcfb103..b8d95cb71c25 100644
3653 +--- a/net/ieee802154/6lowpan/6lowpan_i.h
3654 ++++ b/net/ieee802154/6lowpan/6lowpan_i.h
3655 +@@ -17,37 +17,19 @@ typedef unsigned __bitwise lowpan_rx_result;
3656 + #define LOWPAN_DISPATCH_FRAG1 0xc0
3657 + #define LOWPAN_DISPATCH_FRAGN 0xe0
3658 +
3659 +-struct lowpan_create_arg {
3660 ++struct frag_lowpan_compare_key {
3661 + u16 tag;
3662 + u16 d_size;
3663 +- const struct ieee802154_addr *src;
3664 +- const struct ieee802154_addr *dst;
3665 ++ const struct ieee802154_addr src;
3666 ++ const struct ieee802154_addr dst;
3667 + };
3668 +
3669 +-/* Equivalent of ipv4 struct ip
3670 ++/* Equivalent of ipv4 struct ipq
3671 + */
3672 + struct lowpan_frag_queue {
3673 + struct inet_frag_queue q;
3674 +-
3675 +- u16 tag;
3676 +- u16 d_size;
3677 +- struct ieee802154_addr saddr;
3678 +- struct ieee802154_addr daddr;
3679 + };
3680 +
3681 +-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
3682 +-{
3683 +- switch (a->mode) {
3684 +- case IEEE802154_ADDR_LONG:
3685 +- return (((__force u64)a->extended_addr) >> 32) ^
3686 +- (((__force u64)a->extended_addr) & 0xffffffff);
3687 +- case IEEE802154_ADDR_SHORT:
3688 +- return (__force u32)(a->short_addr + (a->pan_id << 16));
3689 +- default:
3690 +- return 0;
3691 +- }
3692 +-}
3693 +-
3694 + int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
3695 + void lowpan_net_frag_exit(void);
3696 + int lowpan_net_frag_init(void);
3697 +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
3698 +index f85b08baff16..1790b65944b3 100644
3699 +--- a/net/ieee802154/6lowpan/reassembly.c
3700 ++++ b/net/ieee802154/6lowpan/reassembly.c
3701 +@@ -37,55 +37,24 @@ static struct inet_frags lowpan_frags;
3702 + static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
3703 + struct sk_buff *prev, struct net_device *ldev);
3704 +
3705 +-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
3706 +- const struct ieee802154_addr *saddr,
3707 +- const struct ieee802154_addr *daddr)
3708 +-{
3709 +- net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
3710 +- return jhash_3words(ieee802154_addr_hash(saddr),
3711 +- ieee802154_addr_hash(daddr),
3712 +- (__force u32)(tag + (d_size << 16)),
3713 +- lowpan_frags.rnd);
3714 +-}
3715 +-
3716 +-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
3717 +-{
3718 +- const struct lowpan_frag_queue *fq;
3719 +-
3720 +- fq = container_of(q, struct lowpan_frag_queue, q);
3721 +- return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
3722 +-}
3723 +-
3724 +-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
3725 +-{
3726 +- const struct lowpan_frag_queue *fq;
3727 +- const struct lowpan_create_arg *arg = a;
3728 +-
3729 +- fq = container_of(q, struct lowpan_frag_queue, q);
3730 +- return fq->tag == arg->tag && fq->d_size == arg->d_size &&
3731 +- ieee802154_addr_equal(&fq->saddr, arg->src) &&
3732 +- ieee802154_addr_equal(&fq->daddr, arg->dst);
3733 +-}
3734 +-
3735 + static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
3736 + {
3737 +- const struct lowpan_create_arg *arg = a;
3738 ++ const struct frag_lowpan_compare_key *key = a;
3739 + struct lowpan_frag_queue *fq;
3740 +
3741 + fq = container_of(q, struct lowpan_frag_queue, q);
3742 +
3743 +- fq->tag = arg->tag;
3744 +- fq->d_size = arg->d_size;
3745 +- fq->saddr = *arg->src;
3746 +- fq->daddr = *arg->dst;
3747 ++ BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
3748 ++ memcpy(&q->key, key, sizeof(*key));
3749 + }
3750 +
3751 +-static void lowpan_frag_expire(unsigned long data)
3752 ++static void lowpan_frag_expire(struct timer_list *t)
3753 + {
3754 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
3755 + struct frag_queue *fq;
3756 + struct net *net;
3757 +
3758 +- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
3759 ++ fq = container_of(frag, struct frag_queue, q);
3760 + net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
3761 +
3762 + spin_lock(&fq->q.lock);
3763 +@@ -93,10 +62,10 @@ static void lowpan_frag_expire(unsigned long data)
3764 + if (fq->q.flags & INET_FRAG_COMPLETE)
3765 + goto out;
3766 +
3767 +- inet_frag_kill(&fq->q, &lowpan_frags);
3768 ++ inet_frag_kill(&fq->q);
3769 + out:
3770 + spin_unlock(&fq->q.lock);
3771 +- inet_frag_put(&fq->q, &lowpan_frags);
3772 ++ inet_frag_put(&fq->q);
3773 + }
3774 +
3775 + static inline struct lowpan_frag_queue *
3776 +@@ -104,25 +73,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
3777 + const struct ieee802154_addr *src,
3778 + const struct ieee802154_addr *dst)
3779 + {
3780 +- struct inet_frag_queue *q;
3781 +- struct lowpan_create_arg arg;
3782 +- unsigned int hash;
3783 + struct netns_ieee802154_lowpan *ieee802154_lowpan =
3784 + net_ieee802154_lowpan(net);
3785 ++ struct frag_lowpan_compare_key key = {
3786 ++ .tag = cb->d_tag,
3787 ++ .d_size = cb->d_size,
3788 ++ .src = *src,
3789 ++ .dst = *dst,
3790 ++ };
3791 ++ struct inet_frag_queue *q;
3792 +
3793 +- arg.tag = cb->d_tag;
3794 +- arg.d_size = cb->d_size;
3795 +- arg.src = src;
3796 +- arg.dst = dst;
3797 +-
3798 +- hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
3799 +-
3800 +- q = inet_frag_find(&ieee802154_lowpan->frags,
3801 +- &lowpan_frags, &arg, hash);
3802 +- if (IS_ERR_OR_NULL(q)) {
3803 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
3804 ++ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
3805 ++ if (!q)
3806 + return NULL;
3807 +- }
3808 ++
3809 + return container_of(q, struct lowpan_frag_queue, q);
3810 + }
3811 +
3812 +@@ -229,7 +193,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
3813 + struct sk_buff *fp, *head = fq->q.fragments;
3814 + int sum_truesize;
3815 +
3816 +- inet_frag_kill(&fq->q, &lowpan_frags);
3817 ++ inet_frag_kill(&fq->q);
3818 +
3819 + /* Make the one we just received the head. */
3820 + if (prev) {
3821 +@@ -437,7 +401,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
3822 + ret = lowpan_frag_queue(fq, skb, frag_type);
3823 + spin_unlock(&fq->q.lock);
3824 +
3825 +- inet_frag_put(&fq->q, &lowpan_frags);
3826 ++ inet_frag_put(&fq->q);
3827 + return ret;
3828 + }
3829 +
3830 +@@ -447,24 +411,22 @@ err:
3831 + }
3832 +
3833 + #ifdef CONFIG_SYSCTL
3834 +-static int zero;
3835 +
3836 + static struct ctl_table lowpan_frags_ns_ctl_table[] = {
3837 + {
3838 + .procname = "6lowpanfrag_high_thresh",
3839 + .data = &init_net.ieee802154_lowpan.frags.high_thresh,
3840 +- .maxlen = sizeof(int),
3841 ++ .maxlen = sizeof(unsigned long),
3842 + .mode = 0644,
3843 +- .proc_handler = proc_dointvec_minmax,
3844 ++ .proc_handler = proc_doulongvec_minmax,
3845 + .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
3846 + },
3847 + {
3848 + .procname = "6lowpanfrag_low_thresh",
3849 + .data = &init_net.ieee802154_lowpan.frags.low_thresh,
3850 +- .maxlen = sizeof(int),
3851 ++ .maxlen = sizeof(unsigned long),
3852 + .mode = 0644,
3853 +- .proc_handler = proc_dointvec_minmax,
3854 +- .extra1 = &zero,
3855 ++ .proc_handler = proc_doulongvec_minmax,
3856 + .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
3857 + },
3858 + {
3859 +@@ -580,14 +542,20 @@ static int __net_init lowpan_frags_init_net(struct net *net)
3860 + {
3861 + struct netns_ieee802154_lowpan *ieee802154_lowpan =
3862 + net_ieee802154_lowpan(net);
3863 ++ int res;
3864 +
3865 + ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
3866 + ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
3867 + ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
3868 ++ ieee802154_lowpan->frags.f = &lowpan_frags;
3869 +
3870 +- inet_frags_init_net(&ieee802154_lowpan->frags);
3871 +-
3872 +- return lowpan_frags_ns_sysctl_register(net);
3873 ++ res = inet_frags_init_net(&ieee802154_lowpan->frags);
3874 ++ if (res < 0)
3875 ++ return res;
3876 ++ res = lowpan_frags_ns_sysctl_register(net);
3877 ++ if (res < 0)
3878 ++ inet_frags_exit_net(&ieee802154_lowpan->frags);
3879 ++ return res;
3880 + }
3881 +
3882 + static void __net_exit lowpan_frags_exit_net(struct net *net)
3883 +@@ -596,7 +564,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
3884 + net_ieee802154_lowpan(net);
3885 +
3886 + lowpan_frags_ns_sysctl_unregister(net);
3887 +- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
3888 ++ inet_frags_exit_net(&ieee802154_lowpan->frags);
3889 + }
3890 +
3891 + static struct pernet_operations lowpan_frags_ops = {
3892 +@@ -604,32 +572,63 @@ static struct pernet_operations lowpan_frags_ops = {
3893 + .exit = lowpan_frags_exit_net,
3894 + };
3895 +
3896 +-int __init lowpan_net_frag_init(void)
3897 ++static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
3898 + {
3899 +- int ret;
3900 ++ return jhash2(data,
3901 ++ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
3902 ++}
3903 +
3904 +- ret = lowpan_frags_sysctl_register();
3905 +- if (ret)
3906 +- return ret;
3907 ++static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
3908 ++{
3909 ++ const struct inet_frag_queue *fq = data;
3910 +
3911 +- ret = register_pernet_subsys(&lowpan_frags_ops);
3912 +- if (ret)
3913 +- goto err_pernet;
3914 ++ return jhash2((const u32 *)&fq->key,
3915 ++ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
3916 ++}
3917 ++
3918 ++static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
3919 ++{
3920 ++ const struct frag_lowpan_compare_key *key = arg->key;
3921 ++ const struct inet_frag_queue *fq = ptr;
3922 ++
3923 ++ return !!memcmp(&fq->key, key, sizeof(*key));
3924 ++}
3925 ++
3926 ++static const struct rhashtable_params lowpan_rhash_params = {
3927 ++ .head_offset = offsetof(struct inet_frag_queue, node),
3928 ++ .hashfn = lowpan_key_hashfn,
3929 ++ .obj_hashfn = lowpan_obj_hashfn,
3930 ++ .obj_cmpfn = lowpan_obj_cmpfn,
3931 ++ .automatic_shrinking = true,
3932 ++};
3933 ++
3934 ++int __init lowpan_net_frag_init(void)
3935 ++{
3936 ++ int ret;
3937 +
3938 +- lowpan_frags.hashfn = lowpan_hashfn;
3939 + lowpan_frags.constructor = lowpan_frag_init;
3940 + lowpan_frags.destructor = NULL;
3941 + lowpan_frags.qsize = sizeof(struct frag_queue);
3942 +- lowpan_frags.match = lowpan_frag_match;
3943 + lowpan_frags.frag_expire = lowpan_frag_expire;
3944 + lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
3945 ++ lowpan_frags.rhash_params = lowpan_rhash_params;
3946 + ret = inet_frags_init(&lowpan_frags);
3947 + if (ret)
3948 +- goto err_pernet;
3949 ++ goto out;
3950 +
3951 ++ ret = lowpan_frags_sysctl_register();
3952 ++ if (ret)
3953 ++ goto err_sysctl;
3954 ++
3955 ++ ret = register_pernet_subsys(&lowpan_frags_ops);
3956 ++ if (ret)
3957 ++ goto err_pernet;
3958 ++out:
3959 + return ret;
3960 + err_pernet:
3961 + lowpan_frags_sysctl_unregister();
3962 ++err_sysctl:
3963 ++ inet_frags_fini(&lowpan_frags);
3964 + return ret;
3965 + }
3966 +
3967 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3968 +index ba4454ecdf0f..f6764537148c 100644
3969 +--- a/net/ipv4/inet_fragment.c
3970 ++++ b/net/ipv4/inet_fragment.c
3971 +@@ -25,12 +25,6 @@
3972 + #include <net/inet_frag.h>
3973 + #include <net/inet_ecn.h>
3974 +
3975 +-#define INETFRAGS_EVICT_BUCKETS 128
3976 +-#define INETFRAGS_EVICT_MAX 512
3977 +-
3978 +-/* don't rebuild inetfrag table with new secret more often than this */
3979 +-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
3980 +-
3981 + /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
3982 + * Value : 0xff if frame should be dropped.
3983 + * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
3984 +@@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
3985 + };
3986 + EXPORT_SYMBOL(ip_frag_ecn_table);
3987 +
3988 +-static unsigned int
3989 +-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
3990 +-{
3991 +- return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
3992 +-}
3993 +-
3994 +-static bool inet_frag_may_rebuild(struct inet_frags *f)
3995 +-{
3996 +- return time_after(jiffies,
3997 +- f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
3998 +-}
3999 +-
4000 +-static void inet_frag_secret_rebuild(struct inet_frags *f)
4001 +-{
4002 +- int i;
4003 +-
4004 +- write_seqlock_bh(&f->rnd_seqlock);
4005 +-
4006 +- if (!inet_frag_may_rebuild(f))
4007 +- goto out;
4008 +-
4009 +- get_random_bytes(&f->rnd, sizeof(u32));
4010 +-
4011 +- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
4012 +- struct inet_frag_bucket *hb;
4013 +- struct inet_frag_queue *q;
4014 +- struct hlist_node *n;
4015 +-
4016 +- hb = &f->hash[i];
4017 +- spin_lock(&hb->chain_lock);
4018 +-
4019 +- hlist_for_each_entry_safe(q, n, &hb->chain, list) {
4020 +- unsigned int hval = inet_frag_hashfn(f, q);
4021 +-
4022 +- if (hval != i) {
4023 +- struct inet_frag_bucket *hb_dest;
4024 +-
4025 +- hlist_del(&q->list);
4026 +-
4027 +- /* Relink to new hash chain. */
4028 +- hb_dest = &f->hash[hval];
4029 +-
4030 +- /* This is the only place where we take
4031 +- * another chain_lock while already holding
4032 +- * one. As this will not run concurrently,
4033 +- * we cannot deadlock on hb_dest lock below, if its
4034 +- * already locked it will be released soon since
4035 +- * other caller cannot be waiting for hb lock
4036 +- * that we've taken above.
4037 +- */
4038 +- spin_lock_nested(&hb_dest->chain_lock,
4039 +- SINGLE_DEPTH_NESTING);
4040 +- hlist_add_head(&q->list, &hb_dest->chain);
4041 +- spin_unlock(&hb_dest->chain_lock);
4042 +- }
4043 +- }
4044 +- spin_unlock(&hb->chain_lock);
4045 +- }
4046 +-
4047 +- f->rebuild = false;
4048 +- f->last_rebuild_jiffies = jiffies;
4049 +-out:
4050 +- write_sequnlock_bh(&f->rnd_seqlock);
4051 +-}
4052 +-
4053 +-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
4054 +-{
4055 +- if (!hlist_unhashed(&q->list_evictor))
4056 +- return false;
4057 +-
4058 +- return q->net->low_thresh == 0 ||
4059 +- frag_mem_limit(q->net) >= q->net->low_thresh;
4060 +-}
4061 +-
4062 +-static unsigned int
4063 +-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
4064 +-{
4065 +- struct inet_frag_queue *fq;
4066 +- struct hlist_node *n;
4067 +- unsigned int evicted = 0;
4068 +- HLIST_HEAD(expired);
4069 +-
4070 +- spin_lock(&hb->chain_lock);
4071 +-
4072 +- hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
4073 +- if (!inet_fragq_should_evict(fq))
4074 +- continue;
4075 +-
4076 +- if (!del_timer(&fq->timer))
4077 +- continue;
4078 +-
4079 +- hlist_add_head(&fq->list_evictor, &expired);
4080 +- ++evicted;
4081 +- }
4082 +-
4083 +- spin_unlock(&hb->chain_lock);
4084 +-
4085 +- hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
4086 +- f->frag_expire((unsigned long) fq);
4087 +-
4088 +- return evicted;
4089 +-}
4090 +-
4091 +-static void inet_frag_worker(struct work_struct *work)
4092 +-{
4093 +- unsigned int budget = INETFRAGS_EVICT_BUCKETS;
4094 +- unsigned int i, evicted = 0;
4095 +- struct inet_frags *f;
4096 +-
4097 +- f = container_of(work, struct inet_frags, frags_work);
4098 +-
4099 +- BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
4100 +-
4101 +- local_bh_disable();
4102 +-
4103 +- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
4104 +- evicted += inet_evict_bucket(f, &f->hash[i]);
4105 +- i = (i + 1) & (INETFRAGS_HASHSZ - 1);
4106 +- if (evicted > INETFRAGS_EVICT_MAX)
4107 +- break;
4108 +- }
4109 +-
4110 +- f->next_bucket = i;
4111 +-
4112 +- local_bh_enable();
4113 +-
4114 +- if (f->rebuild && inet_frag_may_rebuild(f))
4115 +- inet_frag_secret_rebuild(f);
4116 +-}
4117 +-
4118 +-static void inet_frag_schedule_worker(struct inet_frags *f)
4119 +-{
4120 +- if (unlikely(!work_pending(&f->frags_work)))
4121 +- schedule_work(&f->frags_work);
4122 +-}
4123 +-
4124 + int inet_frags_init(struct inet_frags *f)
4125 + {
4126 +- int i;
4127 +-
4128 +- INIT_WORK(&f->frags_work, inet_frag_worker);
4129 +-
4130 +- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
4131 +- struct inet_frag_bucket *hb = &f->hash[i];
4132 +-
4133 +- spin_lock_init(&hb->chain_lock);
4134 +- INIT_HLIST_HEAD(&hb->chain);
4135 +- }
4136 +-
4137 +- seqlock_init(&f->rnd_seqlock);
4138 +- f->last_rebuild_jiffies = 0;
4139 + f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
4140 + NULL);
4141 + if (!f->frags_cachep)
4142 +@@ -214,83 +59,75 @@ EXPORT_SYMBOL(inet_frags_init);
4143 +
4144 + void inet_frags_fini(struct inet_frags *f)
4145 + {
4146 +- cancel_work_sync(&f->frags_work);
4147 ++ /* We must wait that all inet_frag_destroy_rcu() have completed. */
4148 ++ rcu_barrier();
4149 ++
4150 + kmem_cache_destroy(f->frags_cachep);
4151 ++ f->frags_cachep = NULL;
4152 + }
4153 + EXPORT_SYMBOL(inet_frags_fini);
4154 +
4155 +-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
4156 ++static void inet_frags_free_cb(void *ptr, void *arg)
4157 + {
4158 +- unsigned int seq;
4159 +- int i;
4160 +-
4161 +- nf->low_thresh = 0;
4162 ++ struct inet_frag_queue *fq = ptr;
4163 +
4164 +-evict_again:
4165 +- local_bh_disable();
4166 +- seq = read_seqbegin(&f->rnd_seqlock);
4167 +-
4168 +- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
4169 +- inet_evict_bucket(f, &f->hash[i]);
4170 +-
4171 +- local_bh_enable();
4172 +- cond_resched();
4173 +-
4174 +- if (read_seqretry(&f->rnd_seqlock, seq) ||
4175 +- sum_frag_mem_limit(nf))
4176 +- goto evict_again;
4177 +-}
4178 +-EXPORT_SYMBOL(inet_frags_exit_net);
4179 +-
4180 +-static struct inet_frag_bucket *
4181 +-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
4182 +-__acquires(hb->chain_lock)
4183 +-{
4184 +- struct inet_frag_bucket *hb;
4185 +- unsigned int seq, hash;
4186 +-
4187 +- restart:
4188 +- seq = read_seqbegin(&f->rnd_seqlock);
4189 +-
4190 +- hash = inet_frag_hashfn(f, fq);
4191 +- hb = &f->hash[hash];
4192 ++ /* If we can not cancel the timer, it means this frag_queue
4193 ++ * is already disappearing, we have nothing to do.
4194 ++ * Otherwise, we own a refcount until the end of this function.
4195 ++ */
4196 ++ if (!del_timer(&fq->timer))
4197 ++ return;
4198 +
4199 +- spin_lock(&hb->chain_lock);
4200 +- if (read_seqretry(&f->rnd_seqlock, seq)) {
4201 +- spin_unlock(&hb->chain_lock);
4202 +- goto restart;
4203 ++ spin_lock_bh(&fq->lock);
4204 ++ if (!(fq->flags & INET_FRAG_COMPLETE)) {
4205 ++ fq->flags |= INET_FRAG_COMPLETE;
4206 ++ refcount_dec(&fq->refcnt);
4207 + }
4208 ++ spin_unlock_bh(&fq->lock);
4209 +
4210 +- return hb;
4211 ++ inet_frag_put(fq);
4212 + }
4213 +
4214 +-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
4215 ++void inet_frags_exit_net(struct netns_frags *nf)
4216 + {
4217 +- struct inet_frag_bucket *hb;
4218 ++ nf->low_thresh = 0; /* prevent creation of new frags */
4219 +
4220 +- hb = get_frag_bucket_locked(fq, f);
4221 +- hlist_del(&fq->list);
4222 +- fq->flags |= INET_FRAG_COMPLETE;
4223 +- spin_unlock(&hb->chain_lock);
4224 ++ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
4225 + }
4226 ++EXPORT_SYMBOL(inet_frags_exit_net);
4227 +
4228 +-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
4229 ++void inet_frag_kill(struct inet_frag_queue *fq)
4230 + {
4231 + if (del_timer(&fq->timer))
4232 + refcount_dec(&fq->refcnt);
4233 +
4234 + if (!(fq->flags & INET_FRAG_COMPLETE)) {
4235 +- fq_unlink(fq, f);
4236 ++ struct netns_frags *nf = fq->net;
4237 ++
4238 ++ fq->flags |= INET_FRAG_COMPLETE;
4239 ++ rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
4240 + refcount_dec(&fq->refcnt);
4241 + }
4242 + }
4243 + EXPORT_SYMBOL(inet_frag_kill);
4244 +
4245 +-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
4246 ++static void inet_frag_destroy_rcu(struct rcu_head *head)
4247 ++{
4248 ++ struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
4249 ++ rcu);
4250 ++ struct inet_frags *f = q->net->f;
4251 ++
4252 ++ if (f->destructor)
4253 ++ f->destructor(q);
4254 ++ kmem_cache_free(f->frags_cachep, q);
4255 ++}
4256 ++
4257 ++void inet_frag_destroy(struct inet_frag_queue *q)
4258 + {
4259 + struct sk_buff *fp;
4260 + struct netns_frags *nf;
4261 + unsigned int sum, sum_truesize = 0;
4262 ++ struct inet_frags *f;
4263 +
4264 + WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
4265 + WARN_ON(del_timer(&q->timer) != 0);
4266 +@@ -298,64 +135,35 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
4267 + /* Release all fragment data. */
4268 + fp = q->fragments;
4269 + nf = q->net;
4270 +- while (fp) {
4271 +- struct sk_buff *xp = fp->next;
4272 +-
4273 +- sum_truesize += fp->truesize;
4274 +- kfree_skb(fp);
4275 +- fp = xp;
4276 ++ f = nf->f;
4277 ++ if (fp) {
4278 ++ do {
4279 ++ struct sk_buff *xp = fp->next;
4280 ++
4281 ++ sum_truesize += fp->truesize;
4282 ++ kfree_skb(fp);
4283 ++ fp = xp;
4284 ++ } while (fp);
4285 ++ } else {
4286 ++ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
4287 + }
4288 + sum = sum_truesize + f->qsize;
4289 +
4290 +- if (f->destructor)
4291 +- f->destructor(q);
4292 +- kmem_cache_free(f->frags_cachep, q);
4293 ++ call_rcu(&q->rcu, inet_frag_destroy_rcu);
4294 +
4295 + sub_frag_mem_limit(nf, sum);
4296 + }
4297 + EXPORT_SYMBOL(inet_frag_destroy);
4298 +
4299 +-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
4300 +- struct inet_frag_queue *qp_in,
4301 +- struct inet_frags *f,
4302 +- void *arg)
4303 +-{
4304 +- struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
4305 +- struct inet_frag_queue *qp;
4306 +-
4307 +-#ifdef CONFIG_SMP
4308 +- /* With SMP race we have to recheck hash table, because
4309 +- * such entry could have been created on other cpu before
4310 +- * we acquired hash bucket lock.
4311 +- */
4312 +- hlist_for_each_entry(qp, &hb->chain, list) {
4313 +- if (qp->net == nf && f->match(qp, arg)) {
4314 +- refcount_inc(&qp->refcnt);
4315 +- spin_unlock(&hb->chain_lock);
4316 +- qp_in->flags |= INET_FRAG_COMPLETE;
4317 +- inet_frag_put(qp_in, f);
4318 +- return qp;
4319 +- }
4320 +- }
4321 +-#endif
4322 +- qp = qp_in;
4323 +- if (!mod_timer(&qp->timer, jiffies + nf->timeout))
4324 +- refcount_inc(&qp->refcnt);
4325 +-
4326 +- refcount_inc(&qp->refcnt);
4327 +- hlist_add_head(&qp->list, &hb->chain);
4328 +-
4329 +- spin_unlock(&hb->chain_lock);
4330 +-
4331 +- return qp;
4332 +-}
4333 +-
4334 + static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
4335 + struct inet_frags *f,
4336 + void *arg)
4337 + {
4338 + struct inet_frag_queue *q;
4339 +
4340 ++ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
4341 ++ return NULL;
4342 ++
4343 + q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
4344 + if (!q)
4345 + return NULL;
4346 +@@ -364,77 +172,53 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
4347 + f->constructor(q, arg);
4348 + add_frag_mem_limit(nf, f->qsize);
4349 +
4350 +- setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
4351 ++ timer_setup(&q->timer, f->frag_expire, 0);
4352 + spin_lock_init(&q->lock);
4353 +- refcount_set(&q->refcnt, 1);
4354 ++ refcount_set(&q->refcnt, 3);
4355 +
4356 + return q;
4357 + }
4358 +
4359 + static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
4360 +- struct inet_frags *f,
4361 + void *arg)
4362 + {
4363 ++ struct inet_frags *f = nf->f;
4364 + struct inet_frag_queue *q;
4365 ++ int err;
4366 +
4367 + q = inet_frag_alloc(nf, f, arg);
4368 + if (!q)
4369 + return NULL;
4370 +
4371 +- return inet_frag_intern(nf, q, f, arg);
4372 +-}
4373 +-
4374 +-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
4375 +- struct inet_frags *f, void *key,
4376 +- unsigned int hash)
4377 +-{
4378 +- struct inet_frag_bucket *hb;
4379 +- struct inet_frag_queue *q;
4380 +- int depth = 0;
4381 ++ mod_timer(&q->timer, jiffies + nf->timeout);
4382 +
4383 +- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
4384 +- inet_frag_schedule_worker(f);
4385 ++ err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
4386 ++ f->rhash_params);
4387 ++ if (err < 0) {
4388 ++ q->flags |= INET_FRAG_COMPLETE;
4389 ++ inet_frag_kill(q);
4390 ++ inet_frag_destroy(q);
4391 + return NULL;
4392 + }
4393 ++ return q;
4394 ++}
4395 +
4396 +- if (frag_mem_limit(nf) > nf->low_thresh)
4397 +- inet_frag_schedule_worker(f);
4398 +-
4399 +- hash &= (INETFRAGS_HASHSZ - 1);
4400 +- hb = &f->hash[hash];
4401 +-
4402 +- spin_lock(&hb->chain_lock);
4403 +- hlist_for_each_entry(q, &hb->chain, list) {
4404 +- if (q->net == nf && f->match(q, key)) {
4405 +- refcount_inc(&q->refcnt);
4406 +- spin_unlock(&hb->chain_lock);
4407 +- return q;
4408 +- }
4409 +- depth++;
4410 +- }
4411 +- spin_unlock(&hb->chain_lock);
4412 ++/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
4413 ++struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
4414 ++{
4415 ++ struct inet_frag_queue *fq;
4416 +
4417 +- if (depth <= INETFRAGS_MAXDEPTH)
4418 +- return inet_frag_create(nf, f, key);
4419 ++ rcu_read_lock();
4420 +
4421 +- if (inet_frag_may_rebuild(f)) {
4422 +- if (!f->rebuild)
4423 +- f->rebuild = true;
4424 +- inet_frag_schedule_worker(f);
4425 ++ fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
4426 ++ if (fq) {
4427 ++ if (!refcount_inc_not_zero(&fq->refcnt))
4428 ++ fq = NULL;
4429 ++ rcu_read_unlock();
4430 ++ return fq;
4431 + }
4432 ++ rcu_read_unlock();
4433 +
4434 +- return ERR_PTR(-ENOBUFS);
4435 ++ return inet_frag_create(nf, key);
4436 + }
4437 + EXPORT_SYMBOL(inet_frag_find);
4438 +-
4439 +-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
4440 +- const char *prefix)
4441 +-{
4442 +- static const char msg[] = "inet_frag_find: Fragment hash bucket"
4443 +- " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
4444 +- ". Dropping fragment.\n";
4445 +-
4446 +- if (PTR_ERR(q) == -ENOBUFS)
4447 +- net_dbg_ratelimited("%s%s", prefix, msg);
4448 +-}
4449 +-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
4450 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
4451 +index 4cb1befc3949..e7227128df2c 100644
4452 +--- a/net/ipv4/ip_fragment.c
4453 ++++ b/net/ipv4/ip_fragment.c
4454 +@@ -57,27 +57,64 @@
4455 + */
4456 + static const char ip_frag_cache_name[] = "ip4-frags";
4457 +
4458 +-struct ipfrag_skb_cb
4459 +-{
4460 ++/* Use skb->cb to track consecutive/adjacent fragments coming at
4461 ++ * the end of the queue. Nodes in the rb-tree queue will
4462 ++ * contain "runs" of one or more adjacent fragments.
4463 ++ *
4464 ++ * Invariants:
4465 ++ * - next_frag is NULL at the tail of a "run";
4466 ++ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
4467 ++ */
4468 ++struct ipfrag_skb_cb {
4469 + struct inet_skb_parm h;
4470 +- int offset;
4471 ++ struct sk_buff *next_frag;
4472 ++ int frag_run_len;
4473 + };
4474 +
4475 +-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
4476 ++#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
4477 ++
4478 ++static void ip4_frag_init_run(struct sk_buff *skb)
4479 ++{
4480 ++ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
4481 ++
4482 ++ FRAG_CB(skb)->next_frag = NULL;
4483 ++ FRAG_CB(skb)->frag_run_len = skb->len;
4484 ++}
4485 ++
4486 ++/* Append skb to the last "run". */
4487 ++static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
4488 ++ struct sk_buff *skb)
4489 ++{
4490 ++ RB_CLEAR_NODE(&skb->rbnode);
4491 ++ FRAG_CB(skb)->next_frag = NULL;
4492 ++
4493 ++ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
4494 ++ FRAG_CB(q->fragments_tail)->next_frag = skb;
4495 ++ q->fragments_tail = skb;
4496 ++}
4497 ++
4498 ++/* Create a new "run" with the skb. */
4499 ++static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
4500 ++{
4501 ++ if (q->last_run_head)
4502 ++ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
4503 ++ &q->last_run_head->rbnode.rb_right);
4504 ++ else
4505 ++ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
4506 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
4507 ++
4508 ++ ip4_frag_init_run(skb);
4509 ++ q->fragments_tail = skb;
4510 ++ q->last_run_head = skb;
4511 ++}
4512 +
4513 + /* Describe an entry in the "incomplete datagrams" queue. */
4514 + struct ipq {
4515 + struct inet_frag_queue q;
4516 +
4517 +- u32 user;
4518 +- __be32 saddr;
4519 +- __be32 daddr;
4520 +- __be16 id;
4521 +- u8 protocol;
4522 + u8 ecn; /* RFC3168 support */
4523 + u16 max_df_size; /* largest frag with DF set seen */
4524 + int iif;
4525 +- int vif; /* L3 master device index */
4526 + unsigned int rid;
4527 + struct inet_peer *peer;
4528 + };
4529 +@@ -89,49 +126,9 @@ static u8 ip4_frag_ecn(u8 tos)
4530 +
4531 + static struct inet_frags ip4_frags;
4532 +
4533 +-int ip_frag_mem(struct net *net)
4534 +-{
4535 +- return sum_frag_mem_limit(&net->ipv4.frags);
4536 +-}
4537 +-
4538 +-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
4539 +- struct net_device *dev);
4540 +-
4541 +-struct ip4_create_arg {
4542 +- struct iphdr *iph;
4543 +- u32 user;
4544 +- int vif;
4545 +-};
4546 ++static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
4547 ++ struct sk_buff *prev_tail, struct net_device *dev);
4548 +
4549 +-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
4550 +-{
4551 +- net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
4552 +- return jhash_3words((__force u32)id << 16 | prot,
4553 +- (__force u32)saddr, (__force u32)daddr,
4554 +- ip4_frags.rnd);
4555 +-}
4556 +-
4557 +-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
4558 +-{
4559 +- const struct ipq *ipq;
4560 +-
4561 +- ipq = container_of(q, struct ipq, q);
4562 +- return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
4563 +-}
4564 +-
4565 +-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
4566 +-{
4567 +- const struct ipq *qp;
4568 +- const struct ip4_create_arg *arg = a;
4569 +-
4570 +- qp = container_of(q, struct ipq, q);
4571 +- return qp->id == arg->iph->id &&
4572 +- qp->saddr == arg->iph->saddr &&
4573 +- qp->daddr == arg->iph->daddr &&
4574 +- qp->protocol == arg->iph->protocol &&
4575 +- qp->user == arg->user &&
4576 +- qp->vif == arg->vif;
4577 +-}
4578 +
4579 + static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
4580 + {
4581 +@@ -140,17 +137,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
4582 + frags);
4583 + struct net *net = container_of(ipv4, struct net, ipv4);
4584 +
4585 +- const struct ip4_create_arg *arg = a;
4586 ++ const struct frag_v4_compare_key *key = a;
4587 +
4588 +- qp->protocol = arg->iph->protocol;
4589 +- qp->id = arg->iph->id;
4590 +- qp->ecn = ip4_frag_ecn(arg->iph->tos);
4591 +- qp->saddr = arg->iph->saddr;
4592 +- qp->daddr = arg->iph->daddr;
4593 +- qp->vif = arg->vif;
4594 +- qp->user = arg->user;
4595 ++ q->key.v4 = *key;
4596 ++ qp->ecn = 0;
4597 + qp->peer = q->net->max_dist ?
4598 +- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
4599 ++ inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
4600 + NULL;
4601 + }
4602 +
4603 +@@ -168,7 +160,7 @@ static void ip4_frag_free(struct inet_frag_queue *q)
4604 +
4605 + static void ipq_put(struct ipq *ipq)
4606 + {
4607 +- inet_frag_put(&ipq->q, &ip4_frags);
4608 ++ inet_frag_put(&ipq->q);
4609 + }
4610 +
4611 + /* Kill ipq entry. It is not destroyed immediately,
4612 +@@ -176,7 +168,7 @@ static void ipq_put(struct ipq *ipq)
4613 + */
4614 + static void ipq_kill(struct ipq *ipq)
4615 + {
4616 +- inet_frag_kill(&ipq->q, &ip4_frags);
4617 ++ inet_frag_kill(&ipq->q);
4618 + }
4619 +
4620 + static bool frag_expire_skip_icmp(u32 user)
4621 +@@ -191,12 +183,16 @@ static bool frag_expire_skip_icmp(u32 user)
4622 + /*
4623 + * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
4624 + */
4625 +-static void ip_expire(unsigned long arg)
4626 ++static void ip_expire(struct timer_list *t)
4627 + {
4628 +- struct ipq *qp;
4629 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
4630 ++ const struct iphdr *iph;
4631 ++ struct sk_buff *head = NULL;
4632 + struct net *net;
4633 ++ struct ipq *qp;
4634 ++ int err;
4635 +
4636 +- qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
4637 ++ qp = container_of(frag, struct ipq, q);
4638 + net = container_of(qp->q.net, struct net, ipv4.frags);
4639 +
4640 + rcu_read_lock();
4641 +@@ -207,51 +203,65 @@ static void ip_expire(unsigned long arg)
4642 +
4643 + ipq_kill(qp);
4644 + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
4645 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
4646 +
4647 +- if (!inet_frag_evicting(&qp->q)) {
4648 +- struct sk_buff *clone, *head = qp->q.fragments;
4649 +- const struct iphdr *iph;
4650 +- int err;
4651 +-
4652 +- __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
4653 ++ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
4654 ++ goto out;
4655 +
4656 +- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
4657 ++ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
4658 ++ * pull the head out of the tree in order to be able to
4659 ++ * deal with head->dev.
4660 ++ */
4661 ++ if (qp->q.fragments) {
4662 ++ head = qp->q.fragments;
4663 ++ qp->q.fragments = head->next;
4664 ++ } else {
4665 ++ head = skb_rb_first(&qp->q.rb_fragments);
4666 ++ if (!head)
4667 + goto out;
4668 ++ if (FRAG_CB(head)->next_frag)
4669 ++ rb_replace_node(&head->rbnode,
4670 ++ &FRAG_CB(head)->next_frag->rbnode,
4671 ++ &qp->q.rb_fragments);
4672 ++ else
4673 ++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
4674 ++ memset(&head->rbnode, 0, sizeof(head->rbnode));
4675 ++ barrier();
4676 ++ }
4677 ++ if (head == qp->q.fragments_tail)
4678 ++ qp->q.fragments_tail = NULL;
4679 +
4680 +- head->dev = dev_get_by_index_rcu(net, qp->iif);
4681 +- if (!head->dev)
4682 +- goto out;
4683 ++ sub_frag_mem_limit(qp->q.net, head->truesize);
4684 ++
4685 ++ head->dev = dev_get_by_index_rcu(net, qp->iif);
4686 ++ if (!head->dev)
4687 ++ goto out;
4688 +
4689 +
4690 +- /* skb has no dst, perform route lookup again */
4691 +- iph = ip_hdr(head);
4692 +- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
4693 ++ /* skb has no dst, perform route lookup again */
4694 ++ iph = ip_hdr(head);
4695 ++ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
4696 + iph->tos, head->dev);
4697 +- if (err)
4698 +- goto out;
4699 ++ if (err)
4700 ++ goto out;
4701 +
4702 +- /* Only an end host needs to send an ICMP
4703 +- * "Fragment Reassembly Timeout" message, per RFC792.
4704 +- */
4705 +- if (frag_expire_skip_icmp(qp->user) &&
4706 +- (skb_rtable(head)->rt_type != RTN_LOCAL))
4707 +- goto out;
4708 ++ /* Only an end host needs to send an ICMP
4709 ++ * "Fragment Reassembly Timeout" message, per RFC792.
4710 ++ */
4711 ++ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
4712 ++ (skb_rtable(head)->rt_type != RTN_LOCAL))
4713 ++ goto out;
4714 +
4715 +- clone = skb_clone(head, GFP_ATOMIC);
4716 ++ spin_unlock(&qp->q.lock);
4717 ++ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
4718 ++ goto out_rcu_unlock;
4719 +
4720 +- /* Send an ICMP "Fragment Reassembly Timeout" message. */
4721 +- if (clone) {
4722 +- spin_unlock(&qp->q.lock);
4723 +- icmp_send(clone, ICMP_TIME_EXCEEDED,
4724 +- ICMP_EXC_FRAGTIME, 0);
4725 +- consume_skb(clone);
4726 +- goto out_rcu_unlock;
4727 +- }
4728 +- }
4729 + out:
4730 + spin_unlock(&qp->q.lock);
4731 + out_rcu_unlock:
4732 + rcu_read_unlock();
4733 ++ if (head)
4734 ++ kfree_skb(head);
4735 + ipq_put(qp);
4736 + }
4737 +
4738 +@@ -261,21 +271,20 @@ out_rcu_unlock:
4739 + static struct ipq *ip_find(struct net *net, struct iphdr *iph,
4740 + u32 user, int vif)
4741 + {
4742 ++ struct frag_v4_compare_key key = {
4743 ++ .saddr = iph->saddr,
4744 ++ .daddr = iph->daddr,
4745 ++ .user = user,
4746 ++ .vif = vif,
4747 ++ .id = iph->id,
4748 ++ .protocol = iph->protocol,
4749 ++ };
4750 + struct inet_frag_queue *q;
4751 +- struct ip4_create_arg arg;
4752 +- unsigned int hash;
4753 +-
4754 +- arg.iph = iph;
4755 +- arg.user = user;
4756 +- arg.vif = vif;
4757 +
4758 +- hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
4759 +-
4760 +- q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
4761 +- if (IS_ERR_OR_NULL(q)) {
4762 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
4763 ++ q = inet_frag_find(&net->ipv4.frags, &key);
4764 ++ if (!q)
4765 + return NULL;
4766 +- }
4767 ++
4768 + return container_of(q, struct ipq, q);
4769 + }
4770 +
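
ip_find() now builds a frag_v4_compare_key on the stack and hands it straight to inet_frag_find(); the rhashtable parameters added further down hash that key word-by-word with jhash2() and compare it with memcmp(), so every byte of the key must be initialised and the struct laid out without padding. A rough userspace model of that pattern, using invented names and a deliberately weak hash in place of jhash2():

/* Toy model of the compare-key lookup pattern introduced above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct compare_key {			/* models frag_v4_compare_key */
	uint32_t saddr;
	uint32_t daddr;
	uint32_t user;
	uint32_t vif;
	uint16_t id;
	uint16_t protocol;
};

static uint32_t key_hash(const struct compare_key *key, uint32_t seed)
{
	uint32_t w[sizeof(*key) / sizeof(uint32_t)];
	uint32_t h = seed;
	size_t i;

	/* Same shape as jhash2(data, sizeof(key)/sizeof(u32), seed),
	 * with a much weaker mixing step for brevity. */
	memcpy(w, key, sizeof(w));
	for (i = 0; i < sizeof(w) / sizeof(w[0]); i++)
		h = h * 31 + w[i];
	return h;
}

static int key_equal(const struct compare_key *a, const struct compare_key *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;	/* like ip4_obj_cmpfn() */
}

int main(void)
{
	struct compare_key a = { .saddr = 0x0a000001, .daddr = 0x0a000002,
				 .user = 0, .vif = 0, .id = 0x1234,
				 .protocol = 17 };
	struct compare_key b = a;

	printf("hash  = 0x%08x\n", key_hash(&a, 0xdeadbeef));
	printf("equal = %d\n", key_equal(&a, &b));
	return 0;
}
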
4771 +@@ -295,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp)
4772 + end = atomic_inc_return(&peer->rid);
4773 + qp->rid = end;
4774 +
4775 +- rc = qp->q.fragments && (end - start) > max;
4776 ++ rc = qp->q.fragments_tail && (end - start) > max;
4777 +
4778 + if (rc) {
4779 + struct net *net;
4780 +@@ -309,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp)
4781 +
4782 + static int ip_frag_reinit(struct ipq *qp)
4783 + {
4784 +- struct sk_buff *fp;
4785 + unsigned int sum_truesize = 0;
4786 +
4787 + if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
4788 +@@ -317,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp)
4789 + return -ETIMEDOUT;
4790 + }
4791 +
4792 +- fp = qp->q.fragments;
4793 +- do {
4794 +- struct sk_buff *xp = fp->next;
4795 +-
4796 +- sum_truesize += fp->truesize;
4797 +- kfree_skb(fp);
4798 +- fp = xp;
4799 +- } while (fp);
4800 ++ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
4801 + sub_frag_mem_limit(qp->q.net, sum_truesize);
4802 +
4803 + qp->q.flags = 0;
4804 + qp->q.len = 0;
4805 + qp->q.meat = 0;
4806 + qp->q.fragments = NULL;
4807 ++ qp->q.rb_fragments = RB_ROOT;
4808 + qp->q.fragments_tail = NULL;
4809 ++ qp->q.last_run_head = NULL;
4810 + qp->iif = 0;
4811 + qp->ecn = 0;
4812 +
4813 +@@ -341,7 +344,9 @@ static int ip_frag_reinit(struct ipq *qp)
4814 + /* Add new segment to existing queue. */
4815 + static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
4816 + {
4817 +- struct sk_buff *prev, *next;
4818 ++ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
4819 ++ struct rb_node **rbn, *parent;
4820 ++ struct sk_buff *skb1, *prev_tail;
4821 + struct net_device *dev;
4822 + unsigned int fragsize;
4823 + int flags, offset;
4824 +@@ -404,99 +409,61 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
4825 + if (err)
4826 + goto err;
4827 +
4828 +- /* Find out which fragments are in front and at the back of us
4829 +- * in the chain of fragments so far. We must know where to put
4830 +- * this fragment, right?
4831 +- */
4832 +- prev = qp->q.fragments_tail;
4833 +- if (!prev || FRAG_CB(prev)->offset < offset) {
4834 +- next = NULL;
4835 +- goto found;
4836 +- }
4837 +- prev = NULL;
4838 +- for (next = qp->q.fragments; next != NULL; next = next->next) {
4839 +- if (FRAG_CB(next)->offset >= offset)
4840 +- break; /* bingo! */
4841 +- prev = next;
4842 +- }
4843 +-
4844 +-found:
4845 +- /* We found where to put this one. Check for overlap with
4846 +- * preceding fragment, and, if needed, align things so that
4847 +- * any overlaps are eliminated.
4848 ++ /* Note : skb->rbnode and skb->dev share the same location. */
4849 ++ dev = skb->dev;
4850 ++ /* Makes sure compiler wont do silly aliasing games */
4851 ++ barrier();
4852 ++
4853 ++ /* RFC5722, Section 4, amended by Errata ID : 3089
4854 ++ * When reassembling an IPv6 datagram, if
4855 ++ * one or more its constituent fragments is determined to be an
4856 ++ * overlapping fragment, the entire datagram (and any constituent
4857 ++ * fragments) MUST be silently discarded.
4858 ++ *
4859 ++ * We do the same here for IPv4 (and increment an snmp counter).
4860 + */
4861 +- if (prev) {
4862 +- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
4863 +
4864 +- if (i > 0) {
4865 +- offset += i;
4866 +- err = -EINVAL;
4867 +- if (end <= offset)
4868 +- goto err;
4869 +- err = -ENOMEM;
4870 +- if (!pskb_pull(skb, i))
4871 +- goto err;
4872 +- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
4873 +- skb->ip_summed = CHECKSUM_NONE;
4874 +- }
4875 +- }
4876 +-
4877 +- err = -ENOMEM;
4878 +-
4879 +- while (next && FRAG_CB(next)->offset < end) {
4880 +- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
4881 +-
4882 +- if (i < next->len) {
4883 +- int delta = -next->truesize;
4884 +-
4885 +- /* Eat head of the next overlapped fragment
4886 +- * and leave the loop. The next ones cannot overlap.
4887 +- */
4888 +- if (!pskb_pull(next, i))
4889 +- goto err;
4890 +- delta += next->truesize;
4891 +- if (delta)
4892 +- add_frag_mem_limit(qp->q.net, delta);
4893 +- FRAG_CB(next)->offset += i;
4894 +- qp->q.meat -= i;
4895 +- if (next->ip_summed != CHECKSUM_UNNECESSARY)
4896 +- next->ip_summed = CHECKSUM_NONE;
4897 +- break;
4898 +- } else {
4899 +- struct sk_buff *free_it = next;
4900 +-
4901 +- /* Old fragment is completely overridden with
4902 +- * new one drop it.
4903 +- */
4904 +- next = next->next;
4905 +-
4906 +- if (prev)
4907 +- prev->next = next;
4908 +- else
4909 +- qp->q.fragments = next;
4910 +-
4911 +- qp->q.meat -= free_it->len;
4912 +- sub_frag_mem_limit(qp->q.net, free_it->truesize);
4913 +- kfree_skb(free_it);
4914 +- }
4915 ++ /* Find out where to put this fragment. */
4916 ++ prev_tail = qp->q.fragments_tail;
4917 ++ if (!prev_tail)
4918 ++ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
4919 ++ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
4920 ++ /* This is the common case: skb goes to the end. */
4921 ++ /* Detect and discard overlaps. */
4922 ++ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
4923 ++ goto discard_qp;
4924 ++ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
4925 ++ ip4_frag_append_to_last_run(&qp->q, skb);
4926 ++ else
4927 ++ ip4_frag_create_run(&qp->q, skb);
4928 ++ } else {
4929 ++ /* Binary search. Note that skb can become the first fragment,
4930 ++ * but not the last (covered above).
4931 ++ */
4932 ++ rbn = &qp->q.rb_fragments.rb_node;
4933 ++ do {
4934 ++ parent = *rbn;
4935 ++ skb1 = rb_to_skb(parent);
4936 ++ if (end <= skb1->ip_defrag_offset)
4937 ++ rbn = &parent->rb_left;
4938 ++ else if (offset >= skb1->ip_defrag_offset +
4939 ++ FRAG_CB(skb1)->frag_run_len)
4940 ++ rbn = &parent->rb_right;
4941 ++ else /* Found an overlap with skb1. */
4942 ++ goto discard_qp;
4943 ++ } while (*rbn);
4944 ++ /* Here we have parent properly set, and rbn pointing to
4945 ++ * one of its NULL left/right children. Insert skb.
4946 ++ */
4947 ++ ip4_frag_init_run(skb);
4948 ++ rb_link_node(&skb->rbnode, parent, rbn);
4949 ++ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
4950 + }
4951 +
4952 +- FRAG_CB(skb)->offset = offset;
4953 +-
4954 +- /* Insert this fragment in the chain of fragments. */
4955 +- skb->next = next;
4956 +- if (!next)
4957 +- qp->q.fragments_tail = skb;
4958 +- if (prev)
4959 +- prev->next = skb;
4960 +- else
4961 +- qp->q.fragments = skb;
4962 +-
4963 +- dev = skb->dev;
4964 +- if (dev) {
4965 ++ if (dev)
4966 + qp->iif = dev->ifindex;
4967 +- skb->dev = NULL;
4968 +- }
4969 ++ skb->ip_defrag_offset = offset;
4970 ++
4971 + qp->q.stamp = skb->tstamp;
4972 + qp->q.meat += skb->len;
4973 + qp->ecn |= ecn;
4974 +@@ -518,7 +485,7 @@ found:
4975 + unsigned long orefdst = skb->_skb_refdst;
4976 +
4977 + skb->_skb_refdst = 0UL;
4978 +- err = ip_frag_reasm(qp, prev, dev);
4979 ++ err = ip_frag_reasm(qp, skb, prev_tail, dev);
4980 + skb->_skb_refdst = orefdst;
4981 + return err;
4982 + }
4983 +@@ -526,20 +493,24 @@ found:
4984 + skb_dst_drop(skb);
4985 + return -EINPROGRESS;
4986 +
4987 ++discard_qp:
4988 ++ inet_frag_kill(&qp->q);
4989 ++ err = -EINVAL;
4990 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
4991 + err:
4992 + kfree_skb(skb);
4993 + return err;
4994 + }
4995 +
4996 +-
4997 + /* Build a new IP datagram from all its fragments. */
4998 +-
4999 +-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5000 +- struct net_device *dev)
5001 ++static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
5002 ++ struct sk_buff *prev_tail, struct net_device *dev)
5003 + {
5004 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
5005 + struct iphdr *iph;
5006 +- struct sk_buff *fp, *head = qp->q.fragments;
5007 ++ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
5008 ++ struct sk_buff **nextp; /* To build frag_list. */
5009 ++ struct rb_node *rbn;
5010 + int len;
5011 + int ihlen;
5012 + int err;
5013 +@@ -553,26 +524,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5014 + goto out_fail;
5015 + }
5016 + /* Make the one we just received the head. */
5017 +- if (prev) {
5018 +- head = prev->next;
5019 +- fp = skb_clone(head, GFP_ATOMIC);
5020 ++ if (head != skb) {
5021 ++ fp = skb_clone(skb, GFP_ATOMIC);
5022 + if (!fp)
5023 + goto out_nomem;
5024 +-
5025 +- fp->next = head->next;
5026 +- if (!fp->next)
5027 ++ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
5028 ++ if (RB_EMPTY_NODE(&skb->rbnode))
5029 ++ FRAG_CB(prev_tail)->next_frag = fp;
5030 ++ else
5031 ++ rb_replace_node(&skb->rbnode, &fp->rbnode,
5032 ++ &qp->q.rb_fragments);
5033 ++ if (qp->q.fragments_tail == skb)
5034 + qp->q.fragments_tail = fp;
5035 +- prev->next = fp;
5036 +-
5037 +- skb_morph(head, qp->q.fragments);
5038 +- head->next = qp->q.fragments->next;
5039 +-
5040 +- consume_skb(qp->q.fragments);
5041 +- qp->q.fragments = head;
5042 ++ skb_morph(skb, head);
5043 ++ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
5044 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
5045 ++ &qp->q.rb_fragments);
5046 ++ consume_skb(head);
5047 ++ head = skb;
5048 + }
5049 +
5050 +- WARN_ON(!head);
5051 +- WARN_ON(FRAG_CB(head)->offset != 0);
5052 ++ WARN_ON(head->ip_defrag_offset != 0);
5053 +
5054 + /* Allocate a new buffer for the datagram. */
5055 + ihlen = ip_hdrlen(head);
5056 +@@ -596,35 +568,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5057 + clone = alloc_skb(0, GFP_ATOMIC);
5058 + if (!clone)
5059 + goto out_nomem;
5060 +- clone->next = head->next;
5061 +- head->next = clone;
5062 + skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
5063 + skb_frag_list_init(head);
5064 + for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
5065 + plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
5066 + clone->len = clone->data_len = head->data_len - plen;
5067 +- head->data_len -= clone->len;
5068 +- head->len -= clone->len;
5069 ++ head->truesize += clone->truesize;
5070 + clone->csum = 0;
5071 + clone->ip_summed = head->ip_summed;
5072 + add_frag_mem_limit(qp->q.net, clone->truesize);
5073 ++ skb_shinfo(head)->frag_list = clone;
5074 ++ nextp = &clone->next;
5075 ++ } else {
5076 ++ nextp = &skb_shinfo(head)->frag_list;
5077 + }
5078 +
5079 +- skb_shinfo(head)->frag_list = head->next;
5080 + skb_push(head, head->data - skb_network_header(head));
5081 +
5082 +- for (fp=head->next; fp; fp = fp->next) {
5083 +- head->data_len += fp->len;
5084 +- head->len += fp->len;
5085 +- if (head->ip_summed != fp->ip_summed)
5086 +- head->ip_summed = CHECKSUM_NONE;
5087 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
5088 +- head->csum = csum_add(head->csum, fp->csum);
5089 +- head->truesize += fp->truesize;
5090 ++ /* Traverse the tree in order, to build frag_list. */
5091 ++ fp = FRAG_CB(head)->next_frag;
5092 ++ rbn = rb_next(&head->rbnode);
5093 ++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
5094 ++ while (rbn || fp) {
5095 ++ /* fp points to the next sk_buff in the current run;
5096 ++ * rbn points to the next run.
5097 ++ */
5098 ++ /* Go through the current run. */
5099 ++ while (fp) {
5100 ++ *nextp = fp;
5101 ++ nextp = &fp->next;
5102 ++ fp->prev = NULL;
5103 ++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
5104 ++ fp->sk = NULL;
5105 ++ head->data_len += fp->len;
5106 ++ head->len += fp->len;
5107 ++ if (head->ip_summed != fp->ip_summed)
5108 ++ head->ip_summed = CHECKSUM_NONE;
5109 ++ else if (head->ip_summed == CHECKSUM_COMPLETE)
5110 ++ head->csum = csum_add(head->csum, fp->csum);
5111 ++ head->truesize += fp->truesize;
5112 ++ fp = FRAG_CB(fp)->next_frag;
5113 ++ }
5114 ++ /* Move to the next run. */
5115 ++ if (rbn) {
5116 ++ struct rb_node *rbnext = rb_next(rbn);
5117 ++
5118 ++ fp = rb_to_skb(rbn);
5119 ++ rb_erase(rbn, &qp->q.rb_fragments);
5120 ++ rbn = rbnext;
5121 ++ }
5122 + }
5123 + sub_frag_mem_limit(qp->q.net, head->truesize);
5124 +
5125 ++ *nextp = NULL;
5126 + head->next = NULL;
5127 ++ head->prev = NULL;
5128 + head->dev = dev;
5129 + head->tstamp = qp->q.stamp;
5130 + IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
5131 +@@ -652,7 +650,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5132 +
5133 + __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
5134 + qp->q.fragments = NULL;
5135 ++ qp->q.rb_fragments = RB_ROOT;
5136 + qp->q.fragments_tail = NULL;
5137 ++ qp->q.last_run_head = NULL;
5138 + return 0;
5139 +
5140 + out_nomem:
5141 +@@ -660,7 +660,7 @@ out_nomem:
5142 + err = -ENOMEM;
5143 + goto out_fail;
5144 + out_oversize:
5145 +- net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
5146 ++ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
5147 + out_fail:
5148 + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
5149 + return err;
5150 +@@ -734,25 +734,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
5151 + }
5152 + EXPORT_SYMBOL(ip_check_defrag);
5153 +
5154 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root)
5155 ++{
5156 ++ struct rb_node *p = rb_first(root);
5157 ++ unsigned int sum = 0;
5158 ++
5159 ++ while (p) {
5160 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
5161 ++
5162 ++ p = rb_next(p);
5163 ++ rb_erase(&skb->rbnode, root);
5164 ++ while (skb) {
5165 ++ struct sk_buff *next = FRAG_CB(skb)->next_frag;
5166 ++
5167 ++ sum += skb->truesize;
5168 ++ kfree_skb(skb);
5169 ++ skb = next;
5170 ++ }
5171 ++ }
5172 ++ return sum;
5173 ++}
5174 ++EXPORT_SYMBOL(inet_frag_rbtree_purge);
5175 ++
5176 + #ifdef CONFIG_SYSCTL
5177 +-static int zero;
5178 ++static int dist_min;
5179 +
5180 + static struct ctl_table ip4_frags_ns_ctl_table[] = {
5181 + {
5182 + .procname = "ipfrag_high_thresh",
5183 + .data = &init_net.ipv4.frags.high_thresh,
5184 +- .maxlen = sizeof(int),
5185 ++ .maxlen = sizeof(unsigned long),
5186 + .mode = 0644,
5187 +- .proc_handler = proc_dointvec_minmax,
5188 ++ .proc_handler = proc_doulongvec_minmax,
5189 + .extra1 = &init_net.ipv4.frags.low_thresh
5190 + },
5191 + {
5192 + .procname = "ipfrag_low_thresh",
5193 + .data = &init_net.ipv4.frags.low_thresh,
5194 +- .maxlen = sizeof(int),
5195 ++ .maxlen = sizeof(unsigned long),
5196 + .mode = 0644,
5197 +- .proc_handler = proc_dointvec_minmax,
5198 +- .extra1 = &zero,
5199 ++ .proc_handler = proc_doulongvec_minmax,
5200 + .extra2 = &init_net.ipv4.frags.high_thresh
5201 + },
5202 + {
5203 +@@ -768,7 +789,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
5204 + .maxlen = sizeof(int),
5205 + .mode = 0644,
5206 + .proc_handler = proc_dointvec_minmax,
5207 +- .extra1 = &zero
5208 ++ .extra1 = &dist_min,
5209 + },
5210 + { }
5211 + };
5212 +@@ -850,6 +871,8 @@ static void __init ip4_frags_ctl_register(void)
5213 +
5214 + static int __net_init ipv4_frags_init_net(struct net *net)
5215 + {
5216 ++ int res;
5217 ++
5218 + /* Fragment cache limits.
5219 + *
5220 + * The fragment memory accounting code, (tries to) account for
5221 +@@ -874,16 +897,21 @@ static int __net_init ipv4_frags_init_net(struct net *net)
5222 + net->ipv4.frags.timeout = IP_FRAG_TIME;
5223 +
5224 + net->ipv4.frags.max_dist = 64;
5225 +-
5226 +- inet_frags_init_net(&net->ipv4.frags);
5227 +-
5228 +- return ip4_frags_ns_ctl_register(net);
5229 ++ net->ipv4.frags.f = &ip4_frags;
5230 ++
5231 ++ res = inet_frags_init_net(&net->ipv4.frags);
5232 ++ if (res < 0)
5233 ++ return res;
5234 ++ res = ip4_frags_ns_ctl_register(net);
5235 ++ if (res < 0)
5236 ++ inet_frags_exit_net(&net->ipv4.frags);
5237 ++ return res;
5238 + }
5239 +
5240 + static void __net_exit ipv4_frags_exit_net(struct net *net)
5241 + {
5242 + ip4_frags_ns_ctl_unregister(net);
5243 +- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
5244 ++ inet_frags_exit_net(&net->ipv4.frags);
5245 + }
5246 +
5247 + static struct pernet_operations ip4_frags_ops = {
5248 +@@ -891,17 +919,49 @@ static struct pernet_operations ip4_frags_ops = {
5249 + .exit = ipv4_frags_exit_net,
5250 + };
5251 +
5252 ++
5253 ++static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
5254 ++{
5255 ++ return jhash2(data,
5256 ++ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
5257 ++}
5258 ++
5259 ++static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
5260 ++{
5261 ++ const struct inet_frag_queue *fq = data;
5262 ++
5263 ++ return jhash2((const u32 *)&fq->key.v4,
5264 ++ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
5265 ++}
5266 ++
5267 ++static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
5268 ++{
5269 ++ const struct frag_v4_compare_key *key = arg->key;
5270 ++ const struct inet_frag_queue *fq = ptr;
5271 ++
5272 ++ return !!memcmp(&fq->key, key, sizeof(*key));
5273 ++}
5274 ++
5275 ++static const struct rhashtable_params ip4_rhash_params = {
5276 ++ .head_offset = offsetof(struct inet_frag_queue, node),
5277 ++ .key_offset = offsetof(struct inet_frag_queue, key),
5278 ++ .key_len = sizeof(struct frag_v4_compare_key),
5279 ++ .hashfn = ip4_key_hashfn,
5280 ++ .obj_hashfn = ip4_obj_hashfn,
5281 ++ .obj_cmpfn = ip4_obj_cmpfn,
5282 ++ .automatic_shrinking = true,
5283 ++};
5284 ++
5285 + void __init ipfrag_init(void)
5286 + {
5287 +- ip4_frags_ctl_register();
5288 +- register_pernet_subsys(&ip4_frags_ops);
5289 +- ip4_frags.hashfn = ip4_hashfn;
5290 + ip4_frags.constructor = ip4_frag_init;
5291 + ip4_frags.destructor = ip4_frag_free;
5292 + ip4_frags.qsize = sizeof(struct ipq);
5293 +- ip4_frags.match = ip4_frag_match;
5294 + ip4_frags.frag_expire = ip_expire;
5295 + ip4_frags.frags_cache_name = ip_frag_cache_name;
5296 ++ ip4_frags.rhash_params = ip4_rhash_params;
5297 + if (inet_frags_init(&ip4_frags))
5298 + panic("IP: failed to allocate ip4_frags cache\n");
5299 ++ ip4_frags_ctl_register();
5300 ++ register_pernet_subsys(&ip4_frags_ops);
5301 + }
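
Taken together, the ip_fragment.c changes above replace the sorted skb list with non-overlapping runs kept in an rbtree, and any overlapping fragment now kills the whole queue (the RFC 5722 rule applied to IPv4, counted as ReasmOverlaps). The accept/reject decision can be modelled in userspace with a sorted array standing in for rb_fragments; all names below are invented and run coalescing is omitted.

/* Simplified model of the overlap-rejecting fragment bookkeeping above. */
#include <stdio.h>
#include <string.h>

#define MAX_RUNS 64

struct frag_run {
	unsigned int start;	/* offset of the first byte in the run */
	unsigned int end;	/* start + accumulated length */
};

static struct frag_run runs[MAX_RUNS];
static int nr_runs;

/* Return 0 if [start, end) was inserted, -1 if it overlaps an existing
 * run (the kernel then kills the whole queue and bumps ReasmOverlaps). */
static int insert_fragment(unsigned int start, unsigned int end)
{
	int i, pos = nr_runs;

	for (i = 0; i < nr_runs; i++) {
		if (end <= runs[i].start) {	/* fits entirely before run i */
			pos = i;
			break;
		}
		if (start < runs[i].end)	/* any shared byte -> overlap */
			return -1;
	}
	if (nr_runs == MAX_RUNS)
		return -1;
	memmove(&runs[pos + 1], &runs[pos], (nr_runs - pos) * sizeof(runs[0]));
	runs[pos].start = start;
	runs[pos].end = end;
	nr_runs++;
	return 0;
}

int main(void)
{
	printf("%d\n", insert_fragment(0, 1480));	/* 0: first fragment */
	printf("%d\n", insert_fragment(2960, 4440));	/* 0: new run after a gap */
	printf("%d\n", insert_fragment(1480, 2960));	/* 0: fills the gap */
	printf("%d\n", insert_fragment(1000, 2000));	/* -1: overlap, queue dropped */
	return 0;
}

The real code additionally coalesces a fragment that lands exactly at the tail of the last run into that run (ip4_frag_append_to_last_run), so in-order delivery keeps the tree to a single node.
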
5302 +diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
5303 +index 127153f1ed8a..3fbf688a1943 100644
5304 +--- a/net/ipv4/proc.c
5305 ++++ b/net/ipv4/proc.c
5306 +@@ -54,7 +54,6 @@
5307 + static int sockstat_seq_show(struct seq_file *seq, void *v)
5308 + {
5309 + struct net *net = seq->private;
5310 +- unsigned int frag_mem;
5311 + int orphans, sockets;
5312 +
5313 + orphans = percpu_counter_sum_positive(&tcp_orphan_count);
5314 +@@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
5315 + sock_prot_inuse_get(net, &udplite_prot));
5316 + seq_printf(seq, "RAW: inuse %d\n",
5317 + sock_prot_inuse_get(net, &raw_prot));
5318 +- frag_mem = ip_frag_mem(net);
5319 +- seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
5320 ++ seq_printf(seq, "FRAG: inuse %u memory %lu\n",
5321 ++ atomic_read(&net->ipv4.frags.rhashtable.nelems),
5322 ++ frag_mem_limit(&net->ipv4.frags));
5323 + return 0;
5324 + }
5325 +
5326 +@@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
5327 + SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
5328 + SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
5329 + SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
5330 ++ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
5331 + SNMP_MIB_SENTINEL
5332 + };
5333 +
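
After this change the FRAG line of /proc/net/sockstat reports the number of live queues straight from the rhashtable and the memory counter as an unsigned long. A small reader for that line, assuming exactly the format printed by the patched seq_printf():

/* Read the FRAG line emitted by the patched sockstat_seq_show(). */
#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned int inuse;
	unsigned long memory;
	FILE *f = fopen("/proc/net/sockstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "FRAG: inuse %u memory %lu",
			   &inuse, &memory) == 2)
			printf("frag queues: %u, frag memory: %lu bytes\n",
			       inuse, memory);
	}
	fclose(f);
	return 0;
}

The FRAG6 line in /proc/net/sockstat6 gets the same shape in the net/ipv6/proc.c hunk later in this patch.
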
5334 +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
5335 +index fbbeda647774..0567edb76522 100644
5336 +--- a/net/ipv4/tcp_fastopen.c
5337 ++++ b/net/ipv4/tcp_fastopen.c
5338 +@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
5339 + void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
5340 + {
5341 + struct tcp_sock *tp = tcp_sk(sk);
5342 +- struct rb_node *p;
5343 +- struct sk_buff *skb;
5344 + struct dst_entry *dst;
5345 ++ struct sk_buff *skb;
5346 +
5347 + if (!tp->syn_fastopen)
5348 + return;
5349 +
5350 + if (!tp->data_segs_in) {
5351 +- p = rb_first(&tp->out_of_order_queue);
5352 +- if (p && !rb_next(p)) {
5353 +- skb = rb_entry(p, struct sk_buff, rbnode);
5354 ++ skb = skb_rb_first(&tp->out_of_order_queue);
5355 ++ if (skb && !skb_rb_next(skb)) {
5356 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
5357 + tcp_fastopen_active_disable(sk);
5358 + return;
5359 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5360 +index bdabd748f4bc..991f382afc1b 100644
5361 +--- a/net/ipv4/tcp_input.c
5362 ++++ b/net/ipv4/tcp_input.c
5363 +@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
5364 +
5365 + p = rb_first(&tp->out_of_order_queue);
5366 + while (p) {
5367 +- skb = rb_entry(p, struct sk_buff, rbnode);
5368 ++ skb = rb_to_skb(p);
5369 + if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
5370 + break;
5371 +
5372 +@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
5373 + static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
5374 + {
5375 + struct tcp_sock *tp = tcp_sk(sk);
5376 +- struct rb_node **p, *q, *parent;
5377 ++ struct rb_node **p, *parent;
5378 + struct sk_buff *skb1;
5379 + u32 seq, end_seq;
5380 + bool fragstolen;
5381 +@@ -4503,7 +4503,7 @@ coalesce_done:
5382 + parent = NULL;
5383 + while (*p) {
5384 + parent = *p;
5385 +- skb1 = rb_entry(parent, struct sk_buff, rbnode);
5386 ++ skb1 = rb_to_skb(parent);
5387 + if (before(seq, TCP_SKB_CB(skb1)->seq)) {
5388 + p = &parent->rb_left;
5389 + continue;
5390 +@@ -4548,9 +4548,7 @@ insert:
5391 +
5392 + merge_right:
5393 + /* Remove other segments covered by skb. */
5394 +- while ((q = rb_next(&skb->rbnode)) != NULL) {
5395 +- skb1 = rb_entry(q, struct sk_buff, rbnode);
5396 +-
5397 ++ while ((skb1 = skb_rb_next(skb)) != NULL) {
5398 + if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
5399 + break;
5400 + if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5401 +@@ -4565,7 +4563,7 @@ merge_right:
5402 + tcp_drop(sk, skb1);
5403 + }
5404 + /* If there is no skb after us, we are the last_skb ! */
5405 +- if (!q)
5406 ++ if (!skb1)
5407 + tp->ooo_last_skb = skb;
5408 +
5409 + add_sack:
5410 +@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
5411 + if (list)
5412 + return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5413 +
5414 +- return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
5415 ++ return skb_rb_next(skb);
5416 + }
5417 +
5418 + static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5419 +@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
5420 +
5421 + while (*p) {
5422 + parent = *p;
5423 +- skb1 = rb_entry(parent, struct sk_buff, rbnode);
5424 ++ skb1 = rb_to_skb(parent);
5425 + if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
5426 + p = &parent->rb_left;
5427 + else
5428 +@@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
5429 + struct tcp_sock *tp = tcp_sk(sk);
5430 + u32 range_truesize, sum_tiny = 0;
5431 + struct sk_buff *skb, *head;
5432 +- struct rb_node *p;
5433 + u32 start, end;
5434 +
5435 +- p = rb_first(&tp->out_of_order_queue);
5436 +- skb = rb_entry_safe(p, struct sk_buff, rbnode);
5437 ++ skb = skb_rb_first(&tp->out_of_order_queue);
5438 + new_range:
5439 + if (!skb) {
5440 +- p = rb_last(&tp->out_of_order_queue);
5441 +- /* Note: This is possible p is NULL here. We do not
5442 +- * use rb_entry_safe(), as ooo_last_skb is valid only
5443 +- * if rbtree is not empty.
5444 +- */
5445 +- tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
5446 ++ tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5447 + return;
5448 + }
5449 + start = TCP_SKB_CB(skb)->seq;
5450 +@@ -4918,7 +4909,7 @@ new_range:
5451 + range_truesize = skb->truesize;
5452 +
5453 + for (head = skb;;) {
5454 +- skb = tcp_skb_next(skb, NULL);
5455 ++ skb = skb_rb_next(skb);
5456 +
5457 + /* Range is terminated when we see a gap or when
5458 + * we are at the queue end.
5459 +@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
5460 + prev = rb_prev(node);
5461 + rb_erase(node, &tp->out_of_order_queue);
5462 + goal -= rb_to_skb(node)->truesize;
5463 +- tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
5464 ++ tcp_drop(sk, rb_to_skb(node));
5465 + if (!prev || goal <= 0) {
5466 + sk_mem_reclaim(sk);
5467 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
5468 +@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
5469 + }
5470 + node = prev;
5471 + } while (node);
5472 +- tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
5473 ++ tp->ooo_last_skb = rb_to_skb(prev);
5474 +
5475 + /* Reset SACK state. A conforming SACK implementation will
5476 + * do the same at a timeout based retransmit. When a connection
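
The tcp_fastopen.c and tcp_input.c hunks above swap open-coded rb_entry()/rb_entry_safe() calls for the skb helpers (rb_to_skb, skb_rb_first, skb_rb_next, skb_rb_last) introduced earlier in this patch; the useful property is that a NULL node maps to a NULL skb instead of a bogus pointer. A toy illustration of such a NULL-propagating, typed container_of wrapper, with invented node and packet types:

/* Toy version of a typed, NULL-safe container_of accessor. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define container_of_safe(ptr, type, member) \
	((ptr) ? container_of(ptr, type, member) : (type *)NULL)

struct node {			/* stands in for struct rb_node */
	struct node *next;
};

struct packet {			/* stands in for struct sk_buff */
	int seq;
	struct node rbnode;	/* embedded node, as in sk_buff */
};

#define node_to_packet(n)	container_of_safe(n, struct packet, rbnode)

int main(void)
{
	struct packet p = { .seq = 7 };
	struct node *n = &p.rbnode;

	printf("%d\n", node_to_packet(n)->seq);		/* 7 */
	printf("%p\n", (void *)node_to_packet(NULL));	/* NULL, no crash */
	return 0;
}
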
5477 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
5478 +index ee33a6743f3b..2ed8536e10b6 100644
5479 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
5480 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
5481 +@@ -63,7 +63,6 @@ struct nf_ct_frag6_skb_cb
5482 + static struct inet_frags nf_frags;
5483 +
5484 + #ifdef CONFIG_SYSCTL
5485 +-static int zero;
5486 +
5487 + static struct ctl_table nf_ct_frag6_sysctl_table[] = {
5488 + {
5489 +@@ -76,18 +75,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
5490 + {
5491 + .procname = "nf_conntrack_frag6_low_thresh",
5492 + .data = &init_net.nf_frag.frags.low_thresh,
5493 +- .maxlen = sizeof(unsigned int),
5494 ++ .maxlen = sizeof(unsigned long),
5495 + .mode = 0644,
5496 +- .proc_handler = proc_dointvec_minmax,
5497 +- .extra1 = &zero,
5498 ++ .proc_handler = proc_doulongvec_minmax,
5499 + .extra2 = &init_net.nf_frag.frags.high_thresh
5500 + },
5501 + {
5502 + .procname = "nf_conntrack_frag6_high_thresh",
5503 + .data = &init_net.nf_frag.frags.high_thresh,
5504 +- .maxlen = sizeof(unsigned int),
5505 ++ .maxlen = sizeof(unsigned long),
5506 + .mode = 0644,
5507 +- .proc_handler = proc_dointvec_minmax,
5508 ++ .proc_handler = proc_doulongvec_minmax,
5509 + .extra1 = &init_net.nf_frag.frags.low_thresh
5510 + },
5511 + { }
5512 +@@ -152,59 +150,35 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
5513 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
5514 + }
5515 +
5516 +-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
5517 +- const struct in6_addr *daddr)
5518 +-{
5519 +- net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
5520 +- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
5521 +- (__force u32)id, nf_frags.rnd);
5522 +-}
5523 +-
5524 +-
5525 +-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
5526 +-{
5527 +- const struct frag_queue *nq;
5528 +-
5529 +- nq = container_of(q, struct frag_queue, q);
5530 +- return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
5531 +-}
5532 +-
5533 +-static void nf_ct_frag6_expire(unsigned long data)
5534 ++static void nf_ct_frag6_expire(struct timer_list *t)
5535 + {
5536 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
5537 + struct frag_queue *fq;
5538 + struct net *net;
5539 +
5540 +- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
5541 ++ fq = container_of(frag, struct frag_queue, q);
5542 + net = container_of(fq->q.net, struct net, nf_frag.frags);
5543 +
5544 +- ip6_expire_frag_queue(net, fq, &nf_frags);
5545 ++ ip6_expire_frag_queue(net, fq);
5546 + }
5547 +
5548 + /* Creation primitives. */
5549 +-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
5550 +- u32 user, struct in6_addr *src,
5551 +- struct in6_addr *dst, int iif, u8 ecn)
5552 ++static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
5553 ++ const struct ipv6hdr *hdr, int iif)
5554 + {
5555 ++ struct frag_v6_compare_key key = {
5556 ++ .id = id,
5557 ++ .saddr = hdr->saddr,
5558 ++ .daddr = hdr->daddr,
5559 ++ .user = user,
5560 ++ .iif = iif,
5561 ++ };
5562 + struct inet_frag_queue *q;
5563 +- struct ip6_create_arg arg;
5564 +- unsigned int hash;
5565 +-
5566 +- arg.id = id;
5567 +- arg.user = user;
5568 +- arg.src = src;
5569 +- arg.dst = dst;
5570 +- arg.iif = iif;
5571 +- arg.ecn = ecn;
5572 +-
5573 +- local_bh_disable();
5574 +- hash = nf_hash_frag(id, src, dst);
5575 +-
5576 +- q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
5577 +- local_bh_enable();
5578 +- if (IS_ERR_OR_NULL(q)) {
5579 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
5580 ++
5581 ++ q = inet_frag_find(&net->nf_frag.frags, &key);
5582 ++ if (!q)
5583 + return NULL;
5584 +- }
5585 ++
5586 + return container_of(q, struct frag_queue, q);
5587 + }
5588 +
5589 +@@ -263,7 +237,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
5590 + * this case. -DaveM
5591 + */
5592 + pr_debug("end of fragment not rounded to 8 bytes.\n");
5593 +- inet_frag_kill(&fq->q, &nf_frags);
5594 ++ inet_frag_kill(&fq->q);
5595 + return -EPROTO;
5596 + }
5597 + if (end > fq->q.len) {
5598 +@@ -356,7 +330,7 @@ found:
5599 + return 0;
5600 +
5601 + discard_fq:
5602 +- inet_frag_kill(&fq->q, &nf_frags);
5603 ++ inet_frag_kill(&fq->q);
5604 + err:
5605 + return -EINVAL;
5606 + }
5607 +@@ -378,7 +352,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5608 + int payload_len;
5609 + u8 ecn;
5610 +
5611 +- inet_frag_kill(&fq->q, &nf_frags);
5612 ++ inet_frag_kill(&fq->q);
5613 +
5614 + WARN_ON(head == NULL);
5615 + WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
5616 +@@ -479,6 +453,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5617 + else if (head->ip_summed == CHECKSUM_COMPLETE)
5618 + head->csum = csum_add(head->csum, fp->csum);
5619 + head->truesize += fp->truesize;
5620 ++ fp->sk = NULL;
5621 + }
5622 + sub_frag_mem_limit(fq->q.net, head->truesize);
5623 +
5624 +@@ -497,6 +472,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5625 + head->csum);
5626 +
5627 + fq->q.fragments = NULL;
5628 ++ fq->q.rb_fragments = RB_ROOT;
5629 + fq->q.fragments_tail = NULL;
5630 +
5631 + return true;
5632 +@@ -591,9 +567,13 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
5633 + hdr = ipv6_hdr(skb);
5634 + fhdr = (struct frag_hdr *)skb_transport_header(skb);
5635 +
5636 ++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
5637 ++ fhdr->frag_off & htons(IP6_MF))
5638 ++ return -EINVAL;
5639 ++
5640 + skb_orphan(skb);
5641 +- fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
5642 +- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
5643 ++ fq = fq_find(net, fhdr->identification, user, hdr,
5644 ++ skb->dev ? skb->dev->ifindex : 0);
5645 + if (fq == NULL) {
5646 + pr_debug("Can't find and can't create new queue\n");
5647 + return -ENOMEM;
5648 +@@ -623,25 +603,33 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
5649 +
5650 + out_unlock:
5651 + spin_unlock_bh(&fq->q.lock);
5652 +- inet_frag_put(&fq->q, &nf_frags);
5653 ++ inet_frag_put(&fq->q);
5654 + return ret;
5655 + }
5656 + EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
5657 +
5658 + static int nf_ct_net_init(struct net *net)
5659 + {
5660 ++ int res;
5661 ++
5662 + net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
5663 + net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
5664 + net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
5665 +- inet_frags_init_net(&net->nf_frag.frags);
5666 +-
5667 +- return nf_ct_frag6_sysctl_register(net);
5668 ++ net->nf_frag.frags.f = &nf_frags;
5669 ++
5670 ++ res = inet_frags_init_net(&net->nf_frag.frags);
5671 ++ if (res < 0)
5672 ++ return res;
5673 ++ res = nf_ct_frag6_sysctl_register(net);
5674 ++ if (res < 0)
5675 ++ inet_frags_exit_net(&net->nf_frag.frags);
5676 ++ return res;
5677 + }
5678 +
5679 + static void nf_ct_net_exit(struct net *net)
5680 + {
5681 + nf_ct_frags6_sysctl_unregister(net);
5682 +- inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
5683 ++ inet_frags_exit_net(&net->nf_frag.frags);
5684 + }
5685 +
5686 + static struct pernet_operations nf_ct_net_ops = {
5687 +@@ -653,13 +641,12 @@ int nf_ct_frag6_init(void)
5688 + {
5689 + int ret = 0;
5690 +
5691 +- nf_frags.hashfn = nf_hashfn;
5692 + nf_frags.constructor = ip6_frag_init;
5693 + nf_frags.destructor = NULL;
5694 + nf_frags.qsize = sizeof(struct frag_queue);
5695 +- nf_frags.match = ip6_frag_match;
5696 + nf_frags.frag_expire = nf_ct_frag6_expire;
5697 + nf_frags.frags_cache_name = nf_frags_cache_name;
5698 ++ nf_frags.rhash_params = ip6_rhash_params;
5699 + ret = inet_frags_init(&nf_frags);
5700 + if (ret)
5701 + goto out;
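
Besides the rhashtable conversion, nf_ct_frag6_gather() above gains an early check: a fragment that still has the More-Fragments bit set but carries less than the IPv6 minimum MTU is rejected outright (ipv6_frag_rcv() gets the same test further down). The check reduces to a two-condition predicate; a standalone model, using host byte order instead of the htons() handling in the kernel:

/* Model of the undersized non-final fragment check added above. */
#include <stdbool.h>
#include <stdio.h>

#define IPV6_MIN_MTU	1280
#define IP6_MF		0x0001

/* pkt_len models skb->len - skb_network_offset(skb). */
static bool frag_is_undersized(unsigned int pkt_len, unsigned int frag_off)
{
	return pkt_len < IPV6_MIN_MTU && (frag_off & IP6_MF);
}

int main(void)
{
	printf("%d\n", frag_is_undersized(1280, IP6_MF));	/* 0: big enough */
	printf("%d\n", frag_is_undersized(512, 0));		/* 0: last fragment may be short */
	printf("%d\n", frag_is_undersized(512, IP6_MF));	/* 1: reject (-EINVAL / fail_hdr) */
	return 0;
}
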
5702 +diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
5703 +index e88bcb8ff0fd..dc04c024986c 100644
5704 +--- a/net/ipv6/proc.c
5705 ++++ b/net/ipv6/proc.c
5706 +@@ -38,7 +38,6 @@
5707 + static int sockstat6_seq_show(struct seq_file *seq, void *v)
5708 + {
5709 + struct net *net = seq->private;
5710 +- unsigned int frag_mem = ip6_frag_mem(net);
5711 +
5712 + seq_printf(seq, "TCP6: inuse %d\n",
5713 + sock_prot_inuse_get(net, &tcpv6_prot));
5714 +@@ -48,7 +47,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
5715 + sock_prot_inuse_get(net, &udplitev6_prot));
5716 + seq_printf(seq, "RAW6: inuse %d\n",
5717 + sock_prot_inuse_get(net, &rawv6_prot));
5718 +- seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
5719 ++ seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
5720 ++ atomic_read(&net->ipv6.frags.rhashtable.nelems),
5721 ++ frag_mem_limit(&net->ipv6.frags));
5722 + return 0;
5723 + }
5724 +
5725 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
5726 +index 846012eae526..ede0061b6f5d 100644
5727 +--- a/net/ipv6/reassembly.c
5728 ++++ b/net/ipv6/reassembly.c
5729 +@@ -79,130 +79,93 @@ static struct inet_frags ip6_frags;
5730 + static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5731 + struct net_device *dev);
5732 +
5733 +-/*
5734 +- * callers should be careful not to use the hash value outside the ipfrag_lock
5735 +- * as doing so could race with ipfrag_hash_rnd being recalculated.
5736 +- */
5737 +-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
5738 +- const struct in6_addr *daddr)
5739 +-{
5740 +- net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
5741 +- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
5742 +- (__force u32)id, ip6_frags.rnd);
5743 +-}
5744 +-
5745 +-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
5746 +-{
5747 +- const struct frag_queue *fq;
5748 +-
5749 +- fq = container_of(q, struct frag_queue, q);
5750 +- return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
5751 +-}
5752 +-
5753 +-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
5754 +-{
5755 +- const struct frag_queue *fq;
5756 +- const struct ip6_create_arg *arg = a;
5757 +-
5758 +- fq = container_of(q, struct frag_queue, q);
5759 +- return fq->id == arg->id &&
5760 +- fq->user == arg->user &&
5761 +- ipv6_addr_equal(&fq->saddr, arg->src) &&
5762 +- ipv6_addr_equal(&fq->daddr, arg->dst) &&
5763 +- (arg->iif == fq->iif ||
5764 +- !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
5765 +- IPV6_ADDR_LINKLOCAL)));
5766 +-}
5767 +-EXPORT_SYMBOL(ip6_frag_match);
5768 +-
5769 + void ip6_frag_init(struct inet_frag_queue *q, const void *a)
5770 + {
5771 + struct frag_queue *fq = container_of(q, struct frag_queue, q);
5772 +- const struct ip6_create_arg *arg = a;
5773 ++ const struct frag_v6_compare_key *key = a;
5774 +
5775 +- fq->id = arg->id;
5776 +- fq->user = arg->user;
5777 +- fq->saddr = *arg->src;
5778 +- fq->daddr = *arg->dst;
5779 +- fq->ecn = arg->ecn;
5780 ++ q->key.v6 = *key;
5781 ++ fq->ecn = 0;
5782 + }
5783 + EXPORT_SYMBOL(ip6_frag_init);
5784 +
5785 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
5786 +- struct inet_frags *frags)
5787 ++void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
5788 + {
5789 + struct net_device *dev = NULL;
5790 ++ struct sk_buff *head;
5791 +
5792 ++ rcu_read_lock();
5793 + spin_lock(&fq->q.lock);
5794 +
5795 + if (fq->q.flags & INET_FRAG_COMPLETE)
5796 + goto out;
5797 +
5798 +- inet_frag_kill(&fq->q, frags);
5799 ++ inet_frag_kill(&fq->q);
5800 +
5801 +- rcu_read_lock();
5802 + dev = dev_get_by_index_rcu(net, fq->iif);
5803 + if (!dev)
5804 +- goto out_rcu_unlock;
5805 ++ goto out;
5806 +
5807 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
5808 +-
5809 +- if (inet_frag_evicting(&fq->q))
5810 +- goto out_rcu_unlock;
5811 +-
5812 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
5813 +
5814 + /* Don't send error if the first segment did not arrive. */
5815 +- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
5816 +- goto out_rcu_unlock;
5817 ++ head = fq->q.fragments;
5818 ++ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
5819 ++ goto out;
5820 +
5821 + /* But use as source device on which LAST ARRIVED
5822 + * segment was received. And do not use fq->dev
5823 + * pointer directly, device might already disappeared.
5824 + */
5825 +- fq->q.fragments->dev = dev;
5826 +- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
5827 +-out_rcu_unlock:
5828 +- rcu_read_unlock();
5829 ++ head->dev = dev;
5830 ++ skb_get(head);
5831 ++ spin_unlock(&fq->q.lock);
5832 ++
5833 ++ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
5834 ++ kfree_skb(head);
5835 ++ goto out_rcu_unlock;
5836 ++
5837 + out:
5838 + spin_unlock(&fq->q.lock);
5839 +- inet_frag_put(&fq->q, frags);
5840 ++out_rcu_unlock:
5841 ++ rcu_read_unlock();
5842 ++ inet_frag_put(&fq->q);
5843 + }
5844 + EXPORT_SYMBOL(ip6_expire_frag_queue);
5845 +
5846 +-static void ip6_frag_expire(unsigned long data)
5847 ++static void ip6_frag_expire(struct timer_list *t)
5848 + {
5849 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
5850 + struct frag_queue *fq;
5851 + struct net *net;
5852 +
5853 +- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
5854 ++ fq = container_of(frag, struct frag_queue, q);
5855 + net = container_of(fq->q.net, struct net, ipv6.frags);
5856 +
5857 +- ip6_expire_frag_queue(net, fq, &ip6_frags);
5858 ++ ip6_expire_frag_queue(net, fq);
5859 + }
5860 +
5861 + static struct frag_queue *
5862 +-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
5863 +- const struct in6_addr *dst, int iif, u8 ecn)
5864 ++fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
5865 + {
5866 ++ struct frag_v6_compare_key key = {
5867 ++ .id = id,
5868 ++ .saddr = hdr->saddr,
5869 ++ .daddr = hdr->daddr,
5870 ++ .user = IP6_DEFRAG_LOCAL_DELIVER,
5871 ++ .iif = iif,
5872 ++ };
5873 + struct inet_frag_queue *q;
5874 +- struct ip6_create_arg arg;
5875 +- unsigned int hash;
5876 +
5877 +- arg.id = id;
5878 +- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
5879 +- arg.src = src;
5880 +- arg.dst = dst;
5881 +- arg.iif = iif;
5882 +- arg.ecn = ecn;
5883 ++ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
5884 ++ IPV6_ADDR_LINKLOCAL)))
5885 ++ key.iif = 0;
5886 +
5887 +- hash = inet6_hash_frag(id, src, dst);
5888 +-
5889 +- q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
5890 +- if (IS_ERR_OR_NULL(q)) {
5891 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
5892 ++ q = inet_frag_find(&net->ipv6.frags, &key);
5893 ++ if (!q)
5894 + return NULL;
5895 +- }
5896 ++
5897 + return container_of(q, struct frag_queue, q);
5898 + }
5899 +
5900 +@@ -363,7 +326,7 @@ found:
5901 + return -1;
5902 +
5903 + discard_fq:
5904 +- inet_frag_kill(&fq->q, &ip6_frags);
5905 ++ inet_frag_kill(&fq->q);
5906 + err:
5907 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
5908 + IPSTATS_MIB_REASMFAILS);
5909 +@@ -390,7 +353,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5910 + int sum_truesize;
5911 + u8 ecn;
5912 +
5913 +- inet_frag_kill(&fq->q, &ip6_frags);
5914 ++ inet_frag_kill(&fq->q);
5915 +
5916 + ecn = ip_frag_ecn_table[fq->ecn];
5917 + if (unlikely(ecn == 0xff))
5918 +@@ -509,6 +472,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5919 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
5920 + rcu_read_unlock();
5921 + fq->q.fragments = NULL;
5922 ++ fq->q.rb_fragments = RB_ROOT;
5923 + fq->q.fragments_tail = NULL;
5924 + return 1;
5925 +
5926 +@@ -530,6 +494,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
5927 + struct frag_queue *fq;
5928 + const struct ipv6hdr *hdr = ipv6_hdr(skb);
5929 + struct net *net = dev_net(skb_dst(skb)->dev);
5930 ++ int iif;
5931 +
5932 + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
5933 + goto fail_hdr;
5934 +@@ -558,17 +523,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
5935 + return 1;
5936 + }
5937 +
5938 +- fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
5939 +- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
5940 ++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
5941 ++ fhdr->frag_off & htons(IP6_MF))
5942 ++ goto fail_hdr;
5943 ++
5944 ++ iif = skb->dev ? skb->dev->ifindex : 0;
5945 ++ fq = fq_find(net, fhdr->identification, hdr, iif);
5946 + if (fq) {
5947 + int ret;
5948 +
5949 + spin_lock(&fq->q.lock);
5950 +
5951 ++ fq->iif = iif;
5952 + ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
5953 +
5954 + spin_unlock(&fq->q.lock);
5955 +- inet_frag_put(&fq->q, &ip6_frags);
5956 ++ inet_frag_put(&fq->q);
5957 + return ret;
5958 + }
5959 +
5960 +@@ -589,24 +559,22 @@ static const struct inet6_protocol frag_protocol = {
5961 + };
5962 +
5963 + #ifdef CONFIG_SYSCTL
5964 +-static int zero;
5965 +
5966 + static struct ctl_table ip6_frags_ns_ctl_table[] = {
5967 + {
5968 + .procname = "ip6frag_high_thresh",
5969 + .data = &init_net.ipv6.frags.high_thresh,
5970 +- .maxlen = sizeof(int),
5971 ++ .maxlen = sizeof(unsigned long),
5972 + .mode = 0644,
5973 +- .proc_handler = proc_dointvec_minmax,
5974 ++ .proc_handler = proc_doulongvec_minmax,
5975 + .extra1 = &init_net.ipv6.frags.low_thresh
5976 + },
5977 + {
5978 + .procname = "ip6frag_low_thresh",
5979 + .data = &init_net.ipv6.frags.low_thresh,
5980 +- .maxlen = sizeof(int),
5981 ++ .maxlen = sizeof(unsigned long),
5982 + .mode = 0644,
5983 +- .proc_handler = proc_dointvec_minmax,
5984 +- .extra1 = &zero,
5985 ++ .proc_handler = proc_doulongvec_minmax,
5986 + .extra2 = &init_net.ipv6.frags.high_thresh
5987 + },
5988 + {
5989 +@@ -649,10 +617,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
5990 + table[1].data = &net->ipv6.frags.low_thresh;
5991 + table[1].extra2 = &net->ipv6.frags.high_thresh;
5992 + table[2].data = &net->ipv6.frags.timeout;
5993 +-
5994 +- /* Don't export sysctls to unprivileged users */
5995 +- if (net->user_ns != &init_user_ns)
5996 +- table[0].procname = NULL;
5997 + }
5998 +
5999 + hdr = register_net_sysctl(net, "net/ipv6", table);
6000 +@@ -714,19 +678,27 @@ static void ip6_frags_sysctl_unregister(void)
6001 +
6002 + static int __net_init ipv6_frags_init_net(struct net *net)
6003 + {
6004 ++ int res;
6005 ++
6006 + net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
6007 + net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
6008 + net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
6009 ++ net->ipv6.frags.f = &ip6_frags;
6010 +
6011 +- inet_frags_init_net(&net->ipv6.frags);
6012 ++ res = inet_frags_init_net(&net->ipv6.frags);
6013 ++ if (res < 0)
6014 ++ return res;
6015 +
6016 +- return ip6_frags_ns_sysctl_register(net);
6017 ++ res = ip6_frags_ns_sysctl_register(net);
6018 ++ if (res < 0)
6019 ++ inet_frags_exit_net(&net->ipv6.frags);
6020 ++ return res;
6021 + }
6022 +
6023 + static void __net_exit ipv6_frags_exit_net(struct net *net)
6024 + {
6025 + ip6_frags_ns_sysctl_unregister(net);
6026 +- inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
6027 ++ inet_frags_exit_net(&net->ipv6.frags);
6028 + }
6029 +
6030 + static struct pernet_operations ip6_frags_ops = {
6031 +@@ -734,14 +706,55 @@ static struct pernet_operations ip6_frags_ops = {
6032 + .exit = ipv6_frags_exit_net,
6033 + };
6034 +
6035 ++static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
6036 ++{
6037 ++ return jhash2(data,
6038 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
6039 ++}
6040 ++
6041 ++static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
6042 ++{
6043 ++ const struct inet_frag_queue *fq = data;
6044 ++
6045 ++ return jhash2((const u32 *)&fq->key.v6,
6046 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
6047 ++}
6048 ++
6049 ++static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
6050 ++{
6051 ++ const struct frag_v6_compare_key *key = arg->key;
6052 ++ const struct inet_frag_queue *fq = ptr;
6053 ++
6054 ++ return !!memcmp(&fq->key, key, sizeof(*key));
6055 ++}
6056 ++
6057 ++const struct rhashtable_params ip6_rhash_params = {
6058 ++ .head_offset = offsetof(struct inet_frag_queue, node),
6059 ++ .hashfn = ip6_key_hashfn,
6060 ++ .obj_hashfn = ip6_obj_hashfn,
6061 ++ .obj_cmpfn = ip6_obj_cmpfn,
6062 ++ .automatic_shrinking = true,
6063 ++};
6064 ++EXPORT_SYMBOL(ip6_rhash_params);
6065 ++
6066 + int __init ipv6_frag_init(void)
6067 + {
6068 + int ret;
6069 +
6070 +- ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6071 ++ ip6_frags.constructor = ip6_frag_init;
6072 ++ ip6_frags.destructor = NULL;
6073 ++ ip6_frags.qsize = sizeof(struct frag_queue);
6074 ++ ip6_frags.frag_expire = ip6_frag_expire;
6075 ++ ip6_frags.frags_cache_name = ip6_frag_cache_name;
6076 ++ ip6_frags.rhash_params = ip6_rhash_params;
6077 ++ ret = inet_frags_init(&ip6_frags);
6078 + if (ret)
6079 + goto out;
6080 +
6081 ++ ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6082 ++ if (ret)
6083 ++ goto err_protocol;
6084 ++
6085 + ret = ip6_frags_sysctl_register();
6086 + if (ret)
6087 + goto err_sysctl;
6088 +@@ -750,16 +763,6 @@ int __init ipv6_frag_init(void)
6089 + if (ret)
6090 + goto err_pernet;
6091 +
6092 +- ip6_frags.hashfn = ip6_hashfn;
6093 +- ip6_frags.constructor = ip6_frag_init;
6094 +- ip6_frags.destructor = NULL;
6095 +- ip6_frags.qsize = sizeof(struct frag_queue);
6096 +- ip6_frags.match = ip6_frag_match;
6097 +- ip6_frags.frag_expire = ip6_frag_expire;
6098 +- ip6_frags.frags_cache_name = ip6_frag_cache_name;
6099 +- ret = inet_frags_init(&ip6_frags);
6100 +- if (ret)
6101 +- goto err_pernet;
6102 + out:
6103 + return ret;
6104 +
6105 +@@ -767,6 +770,8 @@ err_pernet:
6106 + ip6_frags_sysctl_unregister();
6107 + err_sysctl:
6108 + inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6109 ++err_protocol:
6110 ++ inet_frags_fini(&ip6_frags);
6111 + goto out;
6112 + }
6113 +
6114 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
6115 +index 8c8df75dbead..2a2ab6bfe5d8 100644
6116 +--- a/net/sched/sch_netem.c
6117 ++++ b/net/sched/sch_netem.c
6118 +@@ -149,12 +149,6 @@ struct netem_skb_cb {
6119 + ktime_t tstamp_save;
6120 + };
6121 +
6122 +-
6123 +-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
6124 +-{
6125 +- return rb_entry(rb, struct sk_buff, rbnode);
6126 +-}
6127 +-
6128 + static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
6129 + {
6130 + /* we assume we can use skb next/prev/tstamp as storage for rb_node */
6131 +@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
6132 + struct rb_node *p;
6133 +
6134 + while ((p = rb_first(&q->t_root))) {
6135 +- struct sk_buff *skb = netem_rb_to_skb(p);
6136 ++ struct sk_buff *skb = rb_to_skb(p);
6137 +
6138 + rb_erase(p, &q->t_root);
6139 + rtnl_kfree_skbs(skb, skb);
6140 +@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
6141 + struct sk_buff *skb;
6142 +
6143 + parent = *p;
6144 +- skb = netem_rb_to_skb(parent);
6145 ++ skb = rb_to_skb(parent);
6146 + if (tnext >= netem_skb_cb(skb)->time_to_send)
6147 + p = &parent->rb_right;
6148 + else
6149 +@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
6150 + struct sk_buff *t_skb;
6151 + struct netem_skb_cb *t_last;
6152 +
6153 +- t_skb = netem_rb_to_skb(rb_last(&q->t_root));
6154 ++ t_skb = skb_rb_last(&q->t_root);
6155 + t_last = netem_skb_cb(t_skb);
6156 + if (!last ||
6157 + t_last->time_to_send > last->time_to_send) {
6158 +@@ -618,7 +612,7 @@ deliver:
6159 + if (p) {
6160 + psched_time_t time_to_send;
6161 +
6162 +- skb = netem_rb_to_skb(p);
6163 ++ skb = rb_to_skb(p);
6164 +
6165 + /* if more time remaining? */
6166 + time_to_send = netem_skb_cb(skb)->time_to_send;
6167 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
6168 +index 417abbb1f72c..8a027973f2ad 100644
6169 +--- a/sound/pci/hda/hda_codec.c
6170 ++++ b/sound/pci/hda/hda_codec.c
6171 +@@ -3923,7 +3923,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
6172 +
6173 + list_for_each_codec(codec, bus) {
6174 + /* FIXME: maybe a better way needed for forced reset */
6175 +- cancel_delayed_work_sync(&codec->jackpoll_work);
6176 ++ if (current_work() != &codec->jackpoll_work.work)
6177 ++ cancel_delayed_work_sync(&codec->jackpoll_work);
6178 + #ifdef CONFIG_PM
6179 + if (hda_codec_is_power_on(codec)) {
6180 + hda_call_codec_suspend(codec);
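
The hda_codec.c fix above skips cancel_delayed_work_sync() when the reset path is itself running from the jackpoll work item, since waiting synchronously for the work you are currently executing can never complete. A toy single-threaded model of that current_work() guard; every name in it is invented.

/* Model of the self-cancel guard: a synchronous cancel must not be
 * issued from inside the very work item being cancelled. */
#include <stdio.h>

struct work {
	void (*fn)(struct work *w);
};

static struct work *current_work;	/* what current_work() would report */

static void reset_codecs(struct work *jackpoll)
{
	/* Mirrors the patched loop: only issue the synchronous cancel
	 * when we are not running from that very work item. */
	if (current_work != jackpoll)
		puts("cancel_delayed_work_sync(): safe to wait");
	else
		puts("called from jackpoll itself: skip to avoid self-deadlock");
}

static void jackpoll_fn(struct work *w)
{
	reset_codecs(w);		/* bus reset triggered by the poll */
}

static void run_work(struct work *w)
{
	current_work = w;		/* the workqueue sets this */
	w->fn(w);
	current_work = NULL;
}

int main(void)
{
	struct work jackpoll = { .fn = jackpoll_fn };

	reset_codecs(&jackpoll);	/* from outside: cancel is fine */
	run_work(&jackpoll);		/* from inside: guard must trigger */
	return 0;
}
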
6181 +diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
6182 +index 3479a1bc7caa..fb76423022e8 100644
6183 +--- a/tools/perf/builtin-c2c.c
6184 ++++ b/tools/perf/builtin-c2c.c
6185 +@@ -2229,6 +2229,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
6186 + " s Togle full lenght of symbol and source line columns \n"
6187 + " q Return back to cacheline list \n";
6188 +
6189 ++ if (!he)
6190 ++ return 0;
6191 ++
6192 + /* Display compact version first. */
6193 + c2c.symbol_full = false;
6194 +
6195 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
6196 +index 55086389fc06..96f62dd7e3ed 100644
6197 +--- a/tools/perf/perf.h
6198 ++++ b/tools/perf/perf.h
6199 +@@ -24,7 +24,9 @@ static inline unsigned long long rdclock(void)
6200 + return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6201 + }
6202 +
6203 ++#ifndef MAX_NR_CPUS
6204 + #define MAX_NR_CPUS 1024
6205 ++#endif
6206 +
6207 + extern const char *input_name;
6208 + extern bool perf_host, perf_guest;
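
Wrapping MAX_NR_CPUS in #ifndef lets builds override the limit from the compiler command line instead of editing perf.h. A trivial demonstration of the same guard; compile it with something like cc -DMAX_NR_CPUS=4096 demo.c to see the override take effect.

/* Demo of a -D overridable compile-time limit, as in perf.h above. */
#include <stdio.h>

#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
#endif

int main(void)
{
	printf("MAX_NR_CPUS = %d\n", MAX_NR_CPUS);
	return 0;
}
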
6209 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
6210 +index 226a9245d1db..2227ee92d8e2 100644
6211 +--- a/tools/perf/util/evsel.c
6212 ++++ b/tools/perf/util/evsel.c
6213 +@@ -824,6 +824,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
6214 + }
6215 + }
6216 +
6217 ++static bool is_dummy_event(struct perf_evsel *evsel)
6218 ++{
6219 ++ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
6220 ++ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
6221 ++}
6222 ++
6223 + /*
6224 + * The enable_on_exec/disabled value strategy:
6225 + *
6226 +@@ -1054,6 +1060,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
6227 + else
6228 + perf_evsel__reset_sample_bit(evsel, PERIOD);
6229 + }
6230 ++
6231 ++ /*
6232 ++ * For initial_delay, a dummy event is added implicitly.
6233 ++ * The software event will trigger -EOPNOTSUPP error out,
6234 ++ * if BRANCH_STACK bit is set.
6235 ++ */
6236 ++ if (opts->initial_delay && is_dummy_event(evsel))
6237 ++ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
6238 + }
6239 +
6240 + static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
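
perf_evsel__config() above learns to recognise the dummy software event that perf record adds for --delay and to drop its branch-stack sample bit, because the kernel rejects PERF_SAMPLE_BRANCH_STACK on software events with -EOPNOTSUPP. The same test expressed directly against the uapi perf_event_attr; this sketch only needs the Linux uapi header, not perf's internal struct perf_evsel.

/* Standalone illustration of the dummy-event test and sample-bit fixup. */
#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_dummy_event(const struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_SOFTWARE &&
	       attr->config == PERF_COUNT_SW_DUMMY;
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;

	/* A software dummy event cannot sample branch stacks; clear the
	 * bit, as the patched perf_evsel__config() does for --delay. */
	if (is_dummy_event(&attr))
		attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;

	printf("branch stack requested: %s\n",
	       (attr.sample_type & PERF_SAMPLE_BRANCH_STACK) ? "yes" : "no");
	return 0;
}
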
6241 +diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
6242 +index b53596ad601b..2e7fd8227969 100644
6243 +--- a/tools/testing/nvdimm/pmem-dax.c
6244 ++++ b/tools/testing/nvdimm/pmem-dax.c
6245 +@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
6246 + if (get_nfit_res(pmem->phys_addr + offset)) {
6247 + struct page *page;
6248 +
6249 +- *kaddr = pmem->virt_addr + offset;
6250 ++ if (kaddr)
6251 ++ *kaddr = pmem->virt_addr + offset;
6252 + page = vmalloc_to_page(pmem->virt_addr + offset);
6253 +- *pfn = page_to_pfn_t(page);
6254 ++ if (pfn)
6255 ++ *pfn = page_to_pfn_t(page);
6256 + pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
6257 + __func__, pmem, pgoff, page_to_pfn(page));
6258 +
6259 + return 1;
6260 + }
6261 +
6262 +- *kaddr = pmem->virt_addr + offset;
6263 +- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
6264 ++ if (kaddr)
6265 ++ *kaddr = pmem->virt_addr + offset;
6266 ++ if (pfn)
6267 ++ *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
6268 +
6269 + /*
6270 + * If badblocks are present, limit known good range to the
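
The pmem-dax fix above makes both output pointers optional, so callers interested in only one of kaddr or pfn can pass NULL. The same guard pattern in plain C, with a fake translation standing in for the real device mapping:

/* Only store through out-parameters the caller actually supplied. */
#include <stdio.h>

static long direct_access(unsigned long phys, unsigned long offset,
			  void **kaddr, unsigned long *pfn)
{
	if (kaddr)
		*kaddr = (void *)(phys + offset);	/* fake mapping */
	if (pfn)
		*pfn = (phys + offset) >> 12;		/* fake 4K pfn */
	return 1;					/* one page available */
}

int main(void)
{
	unsigned long pfn;

	/* A caller that only cares about the pfn passes NULL for kaddr. */
	direct_access(0x100000, 0x2000, NULL, &pfn);
	printf("pfn = %lu\n", pfn);
	return 0;
}
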
6271 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
6272 +index 9167ee976314..041dbbb30ff0 100644
6273 +--- a/tools/testing/selftests/bpf/test_verifier.c
6274 ++++ b/tools/testing/selftests/bpf/test_verifier.c
6275 +@@ -5895,7 +5895,7 @@ static struct bpf_test tests[] = {
6276 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6277 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6278 + BPF_FUNC_map_lookup_elem),
6279 +- BPF_MOV64_REG(BPF_REG_0, 0),
6280 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6281 + BPF_EXIT_INSN(),
6282 + },
6283 + .fixup_map_in_map = { 3 },
6284 +@@ -5918,7 +5918,7 @@ static struct bpf_test tests[] = {
6285 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6286 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6287 + BPF_FUNC_map_lookup_elem),
6288 +- BPF_MOV64_REG(BPF_REG_0, 0),
6289 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6290 + BPF_EXIT_INSN(),
6291 + },
6292 + .fixup_map_in_map = { 3 },
6293 +@@ -5941,7 +5941,7 @@ static struct bpf_test tests[] = {
6294 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6295 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6296 + BPF_FUNC_map_lookup_elem),
6297 +- BPF_MOV64_REG(BPF_REG_0, 0),
6298 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6299 + BPF_EXIT_INSN(),
6300 + },
6301 + .fixup_map_in_map = { 3 },
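
The three verifier test fixes above replace BPF_MOV64_REG(BPF_REG_0, 0) with BPF_MOV64_IMM(BPF_REG_0, 0): since BPF_REG_0 is register number 0, the register form encodes a self-move that leaves the map-lookup result in r0, whereas the immediate form really clears r0 before BPF_EXIT_INSN(). A small model of the two encodings using simplified local constructors, not the real macros from the kernel's filter.h:

/* Simplified constructors showing the difference: the _REG form copies
 * a register, the _IMM form loads a constant. */
#include <stdint.h>
#include <stdio.h>

struct insn {
	uint8_t  code;
	uint8_t  dst_reg;
	uint8_t  src_reg;
	int32_t  imm;
};

#define BPF_ALU64	0x07
#define BPF_MOV		0xb0
#define BPF_X		0x08	/* operand is a register */
#define BPF_K		0x00	/* operand is an immediate */
#define BPF_REG_0	0

static struct insn mov64_reg(uint8_t dst, uint8_t src)
{
	return (struct insn){ .code = BPF_ALU64 | BPF_MOV | BPF_X,
			      .dst_reg = dst, .src_reg = src };
}

static struct insn mov64_imm(uint8_t dst, int32_t imm)
{
	return (struct insn){ .code = BPF_ALU64 | BPF_MOV | BPF_K,
			      .dst_reg = dst, .imm = imm };
}

int main(void)
{
	/* Buggy form: with BPF_REG_0 == 0 this is "r0 = r0", a no-op. */
	struct insn bad = mov64_reg(BPF_REG_0, 0);
	/* Fixed form: genuinely sets r0 = 0 before the exit instruction. */
	struct insn good = mov64_imm(BPF_REG_0, 0);

	printf("reg form: code=0x%02x dst=r%u src=r%u\n",
	       bad.code, bad.dst_reg, bad.src_reg);
	printf("imm form: code=0x%02x dst=r%u imm=%d\n",
	       good.code, good.dst_reg, good.imm);
	return 0;
}
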