From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 19 Sep 2018 22:40:25
Message-Id: 1537396797.4bc7ed8fb9c79688d522121323fe4d6f3fce9792.mpagano@gentoo
1 commit: 4bc7ed8fb9c79688d522121323fe4d6f3fce9792
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Wed Sep 19 22:39:57 2018 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Wed Sep 19 22:39:57 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4bc7ed8f
7
8 Linux patch 4.14.71
9
10 0000_README | 4 +
11 1070_linux-4.14.71.patch | 6264 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 6268 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 2e98e70..cc63ee7 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -323,6 +323,10 @@ Patch: 1069_linux-4.14.70.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.14.70
21
22 +Patch: 1070_linux-4.14.71.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.14.71
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1070_linux-4.14.71.patch b/1070_linux-4.14.71.patch
31 new file mode 100644
32 index 0000000..cdd7c12
33 --- /dev/null
34 +++ b/1070_linux-4.14.71.patch
35 @@ -0,0 +1,6264 @@
36 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
37 +index d499676890d8..a054b5ad410a 100644
38 +--- a/Documentation/networking/ip-sysctl.txt
39 ++++ b/Documentation/networking/ip-sysctl.txt
40 +@@ -133,14 +133,11 @@ min_adv_mss - INTEGER
41 +
42 + IP Fragmentation:
43 +
44 +-ipfrag_high_thresh - INTEGER
45 +- Maximum memory used to reassemble IP fragments. When
46 +- ipfrag_high_thresh bytes of memory is allocated for this purpose,
47 +- the fragment handler will toss packets until ipfrag_low_thresh
48 +- is reached. This also serves as a maximum limit to namespaces
49 +- different from the initial one.
50 +-
51 +-ipfrag_low_thresh - INTEGER
52 ++ipfrag_high_thresh - LONG INTEGER
53 ++ Maximum memory used to reassemble IP fragments.
54 ++
55 ++ipfrag_low_thresh - LONG INTEGER
56 ++ (Obsolete since linux-4.17)
57 + Maximum memory used to reassemble IP fragments before the kernel
58 + begins to remove incomplete fragment queues to free up resources.
59 + The kernel still accepts new fragments for defragmentation.
60 +diff --git a/Makefile b/Makefile
61 +index aa458afa7fa2..dd4eaeeb2050 100644
62 +--- a/Makefile
63 ++++ b/Makefile
64 +@@ -1,7 +1,7 @@
65 + # SPDX-License-Identifier: GPL-2.0
66 + VERSION = 4
67 + PATCHLEVEL = 14
68 +-SUBLEVEL = 70
69 ++SUBLEVEL = 71
70 + EXTRAVERSION =
71 + NAME = Petit Gorille
72 +
73 +diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
74 +index a8242362e551..ece78630d711 100644
75 +--- a/arch/arc/configs/axs101_defconfig
76 ++++ b/arch/arc/configs/axs101_defconfig
77 +@@ -1,5 +1,4 @@
78 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
79 +-# CONFIG_SWAP is not set
80 + CONFIG_SYSVIPC=y
81 + CONFIG_POSIX_MQUEUE=y
82 + # CONFIG_CROSS_MEMORY_ATTACH is not set
83 +diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
84 +index ef3c31cd7737..240c9251a7d4 100644
85 +--- a/arch/arc/configs/axs103_defconfig
86 ++++ b/arch/arc/configs/axs103_defconfig
87 +@@ -1,5 +1,4 @@
88 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
89 +-# CONFIG_SWAP is not set
90 + CONFIG_SYSVIPC=y
91 + CONFIG_POSIX_MQUEUE=y
92 + # CONFIG_CROSS_MEMORY_ATTACH is not set
93 +diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
94 +index 1757ac9cecbc..af54b96abee0 100644
95 +--- a/arch/arc/configs/axs103_smp_defconfig
96 ++++ b/arch/arc/configs/axs103_smp_defconfig
97 +@@ -1,5 +1,4 @@
98 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
99 +-# CONFIG_SWAP is not set
100 + CONFIG_SYSVIPC=y
101 + CONFIG_POSIX_MQUEUE=y
102 + # CONFIG_CROSS_MEMORY_ATTACH is not set
103 +diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
104 +index 8505db478904..1d92efb82c37 100644
105 +--- a/arch/mips/cavium-octeon/octeon-platform.c
106 ++++ b/arch/mips/cavium-octeon/octeon-platform.c
107 +@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
108 + return 0;
109 +
110 + pd = of_find_device_by_node(ehci_node);
111 ++ of_node_put(ehci_node);
112 + if (!pd)
113 + return 0;
114 +
115 +@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
116 + return 0;
117 +
118 + pd = of_find_device_by_node(ohci_node);
119 ++ of_node_put(ohci_node);
120 + if (!pd)
121 + return 0;
122 +
123 +diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
124 +index 5ba6fcc26fa7..94a78dbbc91f 100644
125 +--- a/arch/mips/generic/init.c
126 ++++ b/arch/mips/generic/init.c
127 +@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
128 + "mti,cpu-interrupt-controller");
129 + if (!cpu_has_veic && !intc_node)
130 + mips_cpu_irq_init();
131 ++ of_node_put(intc_node);
132 +
133 + irqchip_init();
134 + }
135 +diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
136 +index cea8ad864b3f..57b34257be2b 100644
137 +--- a/arch/mips/include/asm/io.h
138 ++++ b/arch/mips/include/asm/io.h
139 +@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
140 + /*
141 + * ISA I/O bus memory addresses are 1:1 with the physical address.
142 + */
143 +-static inline unsigned long isa_virt_to_bus(volatile void * address)
144 ++static inline unsigned long isa_virt_to_bus(volatile void *address)
145 + {
146 +- return (unsigned long)address - PAGE_OFFSET;
147 ++ return virt_to_phys(address);
148 + }
149 +
150 +-static inline void * isa_bus_to_virt(unsigned long address)
151 ++static inline void *isa_bus_to_virt(unsigned long address)
152 + {
153 +- return (void *)(address + PAGE_OFFSET);
154 ++ return phys_to_virt(address);
155 + }
156 +
157 + #define isa_page_to_bus page_to_phys
158 +diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
159 +index 019035d7225c..8f845f6e5f42 100644
160 +--- a/arch/mips/kernel/vdso.c
161 ++++ b/arch/mips/kernel/vdso.c
162 +@@ -13,6 +13,7 @@
163 + #include <linux/err.h>
164 + #include <linux/init.h>
165 + #include <linux/ioport.h>
166 ++#include <linux/kernel.h>
167 + #include <linux/mm.h>
168 + #include <linux/sched.h>
169 + #include <linux/slab.h>
170 +@@ -20,6 +21,7 @@
171 +
172 + #include <asm/abi.h>
173 + #include <asm/mips-cps.h>
174 ++#include <asm/page.h>
175 + #include <asm/vdso.h>
176 +
177 + /* Kernel-provided data used by the VDSO. */
178 +@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
179 + vvar_size = gic_size + PAGE_SIZE;
180 + size = vvar_size + image->size;
181 +
182 ++ /*
183 ++ * Find a region that's large enough for us to perform the
184 ++ * colour-matching alignment below.
185 ++ */
186 ++ if (cpu_has_dc_aliases)
187 ++ size += shm_align_mask + 1;
188 ++
189 + base = get_unmapped_area(NULL, 0, size, 0, 0);
190 + if (IS_ERR_VALUE(base)) {
191 + ret = base;
192 + goto out;
193 + }
194 +
195 ++ /*
196 ++ * If we suffer from dcache aliasing, ensure that the VDSO data page
197 ++ * mapping is coloured the same as the kernel's mapping of that memory.
198 ++ * This ensures that when the kernel updates the VDSO data userland
199 ++ * will observe it without requiring cache invalidations.
200 ++ */
201 ++ if (cpu_has_dc_aliases) {
202 ++ base = __ALIGN_MASK(base, shm_align_mask);
203 ++ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
204 ++ }
205 ++
206 + data_addr = base + gic_size;
207 + vdso_addr = data_addr + PAGE_SIZE;
208 +
209 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
210 +index e12dfa48b478..a5893b2cdc0e 100644
211 +--- a/arch/mips/mm/c-r4k.c
212 ++++ b/arch/mips/mm/c-r4k.c
213 +@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
214 + static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
215 + {
216 + /* Catch bad driver code */
217 +- BUG_ON(size == 0);
218 ++ if (WARN_ON(size == 0))
219 ++ return;
220 +
221 + preempt_disable();
222 + if (cpu_has_inclusive_pcaches) {
223 +@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
224 + static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
225 + {
226 + /* Catch bad driver code */
227 +- BUG_ON(size == 0);
228 ++ if (WARN_ON(size == 0))
229 ++ return;
230 +
231 + preempt_disable();
232 + if (cpu_has_inclusive_pcaches) {
233 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
234 +index 63f007f2de7e..4b95bdde22aa 100644
235 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
236 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
237 +@@ -427,8 +427,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
238 + int i;
239 +
240 + for (i = 0; i < npu->mmio_atsd_count; i++) {
241 +- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
242 +- return i;
243 ++ if (!test_bit(i, &npu->mmio_atsd_usage))
244 ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
245 ++ return i;
246 + }
247 +
248 + return -ENOSPC;
249 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
250 +index 4f1f5fc8139d..061906f98dc5 100644
251 +--- a/arch/s390/kvm/vsie.c
252 ++++ b/arch/s390/kvm/vsie.c
253 +@@ -170,7 +170,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
254 + return set_validity_icpt(scb_s, 0x0039U);
255 +
256 + /* copy only the wrapping keys */
257 +- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
258 ++ if (read_guest_real(vcpu, crycb_addr + 72,
259 ++ vsie_page->crycb.dea_wrapping_key_mask, 56))
260 + return set_validity_icpt(scb_s, 0x0035U);
261 +
262 + scb_s->ecb3 |= ecb3_flags;
263 +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
264 +index 48179928ff38..9d33dbf2489e 100644
265 +--- a/arch/x86/kernel/cpu/microcode/amd.c
266 ++++ b/arch/x86/kernel/cpu/microcode/amd.c
267 +@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
268 + struct microcode_amd *mc_amd;
269 + struct ucode_cpu_info *uci;
270 + struct ucode_patch *p;
271 ++ enum ucode_state ret;
272 + u32 rev, dummy;
273 +
274 + BUG_ON(raw_smp_processor_id() != cpu);
275 +@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
276 +
277 + /* need to apply patch? */
278 + if (rev >= mc_amd->hdr.patch_id) {
279 +- c->microcode = rev;
280 +- uci->cpu_sig.rev = rev;
281 +- return UCODE_OK;
282 ++ ret = UCODE_OK;
283 ++ goto out;
284 + }
285 +
286 + if (__apply_microcode_amd(mc_amd)) {
287 +@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
288 + cpu, mc_amd->hdr.patch_id);
289 + return UCODE_ERROR;
290 + }
291 +- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
292 +- mc_amd->hdr.patch_id);
293 +
294 +- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
295 +- c->microcode = mc_amd->hdr.patch_id;
296 ++ rev = mc_amd->hdr.patch_id;
297 ++ ret = UCODE_UPDATED;
298 ++
299 ++ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
300 +
301 +- return UCODE_UPDATED;
302 ++out:
303 ++ uci->cpu_sig.rev = rev;
304 ++ c->microcode = rev;
305 ++
306 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
307 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
308 ++ boot_cpu_data.microcode = rev;
309 ++
310 ++ return ret;
311 + }
312 +
313 + static int install_equiv_cpu_table(const u8 *buf)
314 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
315 +index 97ccf4c3b45b..16936a24795c 100644
316 +--- a/arch/x86/kernel/cpu/microcode/intel.c
317 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
318 +@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
319 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
320 + struct cpuinfo_x86 *c = &cpu_data(cpu);
321 + struct microcode_intel *mc;
322 ++ enum ucode_state ret;
323 + static int prev_rev;
324 + u32 rev;
325 +
326 +@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
327 + */
328 + rev = intel_get_microcode_revision();
329 + if (rev >= mc->hdr.rev) {
330 +- uci->cpu_sig.rev = rev;
331 +- c->microcode = rev;
332 +- return UCODE_OK;
333 ++ ret = UCODE_OK;
334 ++ goto out;
335 + }
336 +
337 + /*
338 +@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
339 + prev_rev = rev;
340 + }
341 +
342 ++ ret = UCODE_UPDATED;
343 ++
344 ++out:
345 + uci->cpu_sig.rev = rev;
346 +- c->microcode = rev;
347 ++ c->microcode = rev;
348 ++
349 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
350 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
351 ++ boot_cpu_data.microcode = rev;
352 +
353 +- return UCODE_UPDATED;
354 ++ return ret;
355 + }
356 +
357 + static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
358 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
359 +index 4e5a8e30cc4e..fd46d890296c 100644
360 +--- a/arch/x86/kvm/vmx.c
361 ++++ b/arch/x86/kvm/vmx.c
362 +@@ -6965,8 +6965,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
363 + if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
364 + return kvm_skip_emulated_instruction(vcpu);
365 + else
366 +- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
367 +- NULL, 0) == EMULATE_DONE;
368 ++ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
369 ++ EMULATE_DONE;
370 + }
371 +
372 + ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
373 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
374 +index c2faff548f59..794c35c4ca73 100644
375 +--- a/arch/x86/mm/fault.c
376 ++++ b/arch/x86/mm/fault.c
377 +@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
378 + if (!(address >= VMALLOC_START && address < VMALLOC_END))
379 + return -1;
380 +
381 +- WARN_ON_ONCE(in_nmi());
382 +-
383 + /*
384 + * Synchronize this task's top level page-table
385 + * with the 'reference' page table.
386 +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
387 +index 4b571f3ea009..afbbe5750a1f 100644
388 +--- a/block/bfq-cgroup.c
389 ++++ b/block/bfq-cgroup.c
390 +@@ -224,9 +224,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
391 +
392 + void bfqg_and_blkg_put(struct bfq_group *bfqg)
393 + {
394 +- bfqg_put(bfqg);
395 +-
396 + blkg_put(bfqg_to_blkg(bfqg));
397 ++
398 ++ bfqg_put(bfqg);
399 + }
400 +
401 + void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
402 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
403 +index 6714507aa6c7..3d2ab65d2dd1 100644
404 +--- a/block/blk-mq-tag.c
405 ++++ b/block/blk-mq-tag.c
406 +@@ -416,8 +416,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
407 + if (tdepth <= tags->nr_reserved_tags)
408 + return -EINVAL;
409 +
410 +- tdepth -= tags->nr_reserved_tags;
411 +-
412 + /*
413 + * If we are allowed to grow beyond the original size, allocate
414 + * a new set of tags before freeing the old one.
415 +@@ -437,7 +435,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
416 + if (tdepth > 16 * BLKDEV_MAX_RQ)
417 + return -EINVAL;
418 +
419 +- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
420 ++ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
421 ++ tags->nr_reserved_tags);
422 + if (!new)
423 + return -ENOMEM;
424 + ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
425 +@@ -454,7 +453,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
426 + * Don't need (or can't) update reserved tags here, they
427 + * remain static and should never need resizing.
428 + */
429 +- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
430 ++ sbitmap_queue_resize(&tags->bitmap_tags,
431 ++ tdepth - tags->nr_reserved_tags);
432 + }
433 +
434 + return 0;
435 +diff --git a/block/partitions/aix.c b/block/partitions/aix.c
436 +index 007f95eea0e1..903f3ed175d0 100644
437 +--- a/block/partitions/aix.c
438 ++++ b/block/partitions/aix.c
439 +@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
440 + u32 vgda_sector = 0;
441 + u32 vgda_len = 0;
442 + int numlvs = 0;
443 +- struct pvd *pvd;
444 ++ struct pvd *pvd = NULL;
445 + struct lv_info {
446 + unsigned short pps_per_lv;
447 + unsigned short pps_found;
448 +@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
449 + if (lvip[i].pps_per_lv)
450 + foundlvs += 1;
451 + }
452 ++ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
453 ++ pvd = alloc_pvd(state, vgda_sector + 17);
454 + }
455 + put_dev_sector(sect);
456 + }
457 +- pvd = alloc_pvd(state, vgda_sector + 17);
458 + if (pvd) {
459 + int numpps = be16_to_cpu(pvd->pp_count);
460 + int psn_part1 = be32_to_cpu(pvd->psn_part1);
461 +@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
462 + next_lp_ix += 1;
463 + }
464 + for (i = 0; i < state->limit; i += 1)
465 +- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
466 ++ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
467 ++ char tmp[sizeof(n[i].name) + 1]; // null char
468 ++
469 ++ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
470 + pr_warn("partition %s (%u pp's found) is "
471 + "not contiguous\n",
472 +- n[i].name, lvip[i].pps_found);
473 ++ tmp, lvip[i].pps_found);
474 ++ }
475 + kfree(pvd);
476 + }
477 + kfree(n);
478 +diff --git a/crypto/Makefile b/crypto/Makefile
479 +index adaf2c63baeb..56282e2d75ad 100644
480 +--- a/crypto/Makefile
481 ++++ b/crypto/Makefile
482 +@@ -98,7 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
483 + obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
484 + CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
485 + obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
486 +-CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
487 ++CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
488 + obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
489 + obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
490 + obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
491 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
492 +index 6cb148268676..58e4658f9dd6 100644
493 +--- a/drivers/android/binder_alloc.c
494 ++++ b/drivers/android/binder_alloc.c
495 +@@ -324,6 +324,34 @@ err_no_vma:
496 + return vma ? -ENOMEM : -ESRCH;
497 + }
498 +
499 ++static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
500 ++ struct vm_area_struct *vma)
501 ++{
502 ++ if (vma)
503 ++ alloc->vma_vm_mm = vma->vm_mm;
504 ++ /*
505 ++ * If we see alloc->vma is not NULL, buffer data structures set up
506 ++ * completely. Look at smp_rmb side binder_alloc_get_vma.
507 ++ * We also want to guarantee new alloc->vma_vm_mm is always visible
508 ++ * if alloc->vma is set.
509 ++ */
510 ++ smp_wmb();
511 ++ alloc->vma = vma;
512 ++}
513 ++
514 ++static inline struct vm_area_struct *binder_alloc_get_vma(
515 ++ struct binder_alloc *alloc)
516 ++{
517 ++ struct vm_area_struct *vma = NULL;
518 ++
519 ++ if (alloc->vma) {
520 ++ /* Look at description in binder_alloc_set_vma */
521 ++ smp_rmb();
522 ++ vma = alloc->vma;
523 ++ }
524 ++ return vma;
525 ++}
526 ++
527 + struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
528 + size_t data_size,
529 + size_t offsets_size,
530 +@@ -339,7 +367,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
531 + size_t size, data_offsets_size;
532 + int ret;
533 +
534 +- if (alloc->vma == NULL) {
535 ++ if (!binder_alloc_get_vma(alloc)) {
536 + pr_err("%d: binder_alloc_buf, no vma\n",
537 + alloc->pid);
538 + return ERR_PTR(-ESRCH);
539 +@@ -712,9 +740,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
540 + buffer->free = 1;
541 + binder_insert_free_buffer(alloc, buffer);
542 + alloc->free_async_space = alloc->buffer_size / 2;
543 +- barrier();
544 +- alloc->vma = vma;
545 +- alloc->vma_vm_mm = vma->vm_mm;
546 ++ binder_alloc_set_vma(alloc, vma);
547 + mmgrab(alloc->vma_vm_mm);
548 +
549 + return 0;
550 +@@ -741,10 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
551 + int buffers, page_count;
552 + struct binder_buffer *buffer;
553 +
554 +- BUG_ON(alloc->vma);
555 +-
556 + buffers = 0;
557 + mutex_lock(&alloc->mutex);
558 ++ BUG_ON(alloc->vma);
559 ++
560 + while ((n = rb_first(&alloc->allocated_buffers))) {
561 + buffer = rb_entry(n, struct binder_buffer, rb_node);
562 +
563 +@@ -886,7 +912,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
564 + */
565 + void binder_alloc_vma_close(struct binder_alloc *alloc)
566 + {
567 +- WRITE_ONCE(alloc->vma, NULL);
568 ++ binder_alloc_set_vma(alloc, NULL);
569 + }
570 +
571 + /**
572 +@@ -921,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
573 +
574 + index = page - alloc->pages;
575 + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
576 +- vma = alloc->vma;
577 ++ vma = binder_alloc_get_vma(alloc);
578 + if (vma) {
579 + if (!mmget_not_zero(alloc->vma_vm_mm))
580 + goto err_mmget;
581 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
582 +index bc562fd2b0a0..cda9a0b5bdaa 100644
583 +--- a/drivers/ata/libahci.c
584 ++++ b/drivers/ata/libahci.c
585 +@@ -2096,7 +2096,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
586 + struct ahci_host_priv *hpriv = ap->host->private_data;
587 + void __iomem *port_mmio = ahci_port_base(ap);
588 + struct ata_device *dev = ap->link.device;
589 +- u32 devslp, dm, dito, mdat, deto;
590 ++ u32 devslp, dm, dito, mdat, deto, dito_conf;
591 + int rc;
592 + unsigned int err_mask;
593 +
594 +@@ -2120,8 +2120,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
595 + return;
596 + }
597 +
598 +- /* device sleep was already enabled */
599 +- if (devslp & PORT_DEVSLP_ADSE)
600 ++ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
601 ++ dito = devslp_idle_timeout / (dm + 1);
602 ++ if (dito > 0x3ff)
603 ++ dito = 0x3ff;
604 ++
605 ++ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
606 ++
607 ++ /* device sleep was already enabled and same dito */
608 ++ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
609 + return;
610 +
611 + /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
612 +@@ -2129,11 +2136,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
613 + if (rc)
614 + return;
615 +
616 +- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
617 +- dito = devslp_idle_timeout / (dm + 1);
618 +- if (dito > 0x3ff)
619 +- dito = 0x3ff;
620 +-
621 + /* Use the nominal value 10 ms if the read MDAT is zero,
622 + * the nominal value of DETO is 20 ms.
623 + */
624 +@@ -2151,6 +2153,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
625 + deto = 20;
626 + }
627 +
628 ++ /* Make dito, mdat, deto bits to 0s */
629 ++ devslp &= ~GENMASK_ULL(24, 2);
630 + devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
631 + (mdat << PORT_DEVSLP_MDAT_OFFSET) |
632 + (deto << PORT_DEVSLP_DETO_OFFSET) |
633 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
634 +index 5e55d03d3d01..fe1414df0f33 100644
635 +--- a/drivers/block/nbd.c
636 ++++ b/drivers/block/nbd.c
637 +@@ -1228,6 +1228,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
638 + case NBD_SET_SOCK:
639 + return nbd_add_socket(nbd, arg, false);
640 + case NBD_SET_BLKSIZE:
641 ++ if (!arg || !is_power_of_2(arg) || arg < 512 ||
642 ++ arg > PAGE_SIZE)
643 ++ return -EINVAL;
644 + nbd_size_set(nbd, arg,
645 + div_s64(config->bytesize, arg));
646 + return 0;
647 +diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
648 +index 531a0915066b..11ec92e47455 100644
649 +--- a/drivers/block/pktcdvd.c
650 ++++ b/drivers/block/pktcdvd.c
651 +@@ -67,7 +67,7 @@
652 + #include <scsi/scsi.h>
653 + #include <linux/debugfs.h>
654 + #include <linux/device.h>
655 +-
656 ++#include <linux/nospec.h>
657 + #include <linux/uaccess.h>
658 +
659 + #define DRIVER_NAME "pktcdvd"
660 +@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
661 + {
662 + if (dev_minor >= MAX_WRITERS)
663 + return NULL;
664 ++
665 ++ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
666 + return pkt_devs[dev_minor];
667 + }
668 +
669 +diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
670 +index b33c8d6eb8c7..500d4d632e48 100644
671 +--- a/drivers/bluetooth/Kconfig
672 ++++ b/drivers/bluetooth/Kconfig
673 +@@ -146,6 +146,7 @@ config BT_HCIUART_LL
674 + config BT_HCIUART_3WIRE
675 + bool "Three-wire UART (H5) protocol support"
676 + depends on BT_HCIUART
677 ++ depends on BT_HCIUART_SERDEV
678 + help
679 + The HCI Three-wire UART Transport Layer makes it possible to
680 + user the Bluetooth HCI over a serial port interface. The HCI
681 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
682 +index 86b526b7d990..a2070ab86c82 100644
683 +--- a/drivers/char/tpm/tpm-interface.c
684 ++++ b/drivers/char/tpm/tpm-interface.c
685 +@@ -369,10 +369,13 @@ err_len:
686 + return -EINVAL;
687 + }
688 +
689 +-static int tpm_request_locality(struct tpm_chip *chip)
690 ++static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
691 + {
692 + int rc;
693 +
694 ++ if (flags & TPM_TRANSMIT_RAW)
695 ++ return 0;
696 ++
697 + if (!chip->ops->request_locality)
698 + return 0;
699 +
700 +@@ -385,10 +388,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
701 + return 0;
702 + }
703 +
704 +-static void tpm_relinquish_locality(struct tpm_chip *chip)
705 ++static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
706 + {
707 + int rc;
708 +
709 ++ if (flags & TPM_TRANSMIT_RAW)
710 ++ return;
711 ++
712 + if (!chip->ops->relinquish_locality)
713 + return;
714 +
715 +@@ -399,6 +405,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
716 + chip->locality = -1;
717 + }
718 +
719 ++static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
720 ++{
721 ++ if (flags & TPM_TRANSMIT_RAW)
722 ++ return 0;
723 ++
724 ++ if (!chip->ops->cmd_ready)
725 ++ return 0;
726 ++
727 ++ return chip->ops->cmd_ready(chip);
728 ++}
729 ++
730 ++static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
731 ++{
732 ++ if (flags & TPM_TRANSMIT_RAW)
733 ++ return 0;
734 ++
735 ++ if (!chip->ops->go_idle)
736 ++ return 0;
737 ++
738 ++ return chip->ops->go_idle(chip);
739 ++}
740 ++
741 + static ssize_t tpm_try_transmit(struct tpm_chip *chip,
742 + struct tpm_space *space,
743 + u8 *buf, size_t bufsiz,
744 +@@ -449,14 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
745 + /* Store the decision as chip->locality will be changed. */
746 + need_locality = chip->locality == -1;
747 +
748 +- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
749 +- rc = tpm_request_locality(chip);
750 ++ if (need_locality) {
751 ++ rc = tpm_request_locality(chip, flags);
752 + if (rc < 0)
753 + goto out_no_locality;
754 + }
755 +
756 +- if (chip->dev.parent)
757 +- pm_runtime_get_sync(chip->dev.parent);
758 ++ rc = tpm_cmd_ready(chip, flags);
759 ++ if (rc)
760 ++ goto out;
761 +
762 + rc = tpm2_prepare_space(chip, space, ordinal, buf);
763 + if (rc)
764 +@@ -516,13 +545,16 @@ out_recv:
765 + }
766 +
767 + rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
768 ++ if (rc)
769 ++ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
770 +
771 + out:
772 +- if (chip->dev.parent)
773 +- pm_runtime_put_sync(chip->dev.parent);
774 ++ rc = tpm_go_idle(chip, flags);
775 ++ if (rc)
776 ++ goto out;
777 +
778 + if (need_locality)
779 +- tpm_relinquish_locality(chip);
780 ++ tpm_relinquish_locality(chip, flags);
781 +
782 + out_no_locality:
783 + if (chip->ops->clk_enable != NULL)
784 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
785 +index b83b30a3eea5..4bb9b4aa9b49 100644
786 +--- a/drivers/char/tpm/tpm.h
787 ++++ b/drivers/char/tpm/tpm.h
788 +@@ -511,9 +511,17 @@ extern const struct file_operations tpm_fops;
789 + extern const struct file_operations tpmrm_fops;
790 + extern struct idr dev_nums_idr;
791 +
792 ++/**
793 ++ * enum tpm_transmit_flags
794 ++ *
795 ++ * @TPM_TRANSMIT_UNLOCKED: used to lock sequence of tpm_transmit calls.
796 ++ * @TPM_TRANSMIT_RAW: prevent recursive calls into setup steps
797 ++ * (go idle, locality,..). Always use with UNLOCKED
798 ++ * as it will fail on double locking.
799 ++ */
800 + enum tpm_transmit_flags {
801 +- TPM_TRANSMIT_UNLOCKED = BIT(0),
802 +- TPM_TRANSMIT_RAW = BIT(1),
803 ++ TPM_TRANSMIT_UNLOCKED = BIT(0),
804 ++ TPM_TRANSMIT_RAW = BIT(1),
805 + };
806 +
807 + ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
808 +diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
809 +index d26ea7513226..dabb2ae4e779 100644
810 +--- a/drivers/char/tpm/tpm2-space.c
811 ++++ b/drivers/char/tpm/tpm2-space.c
812 +@@ -39,7 +39,8 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
813 + for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
814 + if (space->session_tbl[i])
815 + tpm2_flush_context_cmd(chip, space->session_tbl[i],
816 +- TPM_TRANSMIT_UNLOCKED);
817 ++ TPM_TRANSMIT_UNLOCKED |
818 ++ TPM_TRANSMIT_RAW);
819 + }
820 + }
821 +
822 +@@ -84,7 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
823 + tpm_buf_append(&tbuf, &buf[*offset], body_size);
824 +
825 + rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
826 +- TPM_TRANSMIT_UNLOCKED, NULL);
827 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
828 + if (rc < 0) {
829 + dev_warn(&chip->dev, "%s: failed with a system error %d\n",
830 + __func__, rc);
831 +@@ -133,7 +134,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
832 + tpm_buf_append_u32(&tbuf, handle);
833 +
834 + rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
835 +- TPM_TRANSMIT_UNLOCKED, NULL);
836 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
837 + if (rc < 0) {
838 + dev_warn(&chip->dev, "%s: failed with a system error %d\n",
839 + __func__, rc);
840 +@@ -170,7 +171,8 @@ static void tpm2_flush_space(struct tpm_chip *chip)
841 + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
842 + if (space->context_tbl[i] && ~space->context_tbl[i])
843 + tpm2_flush_context_cmd(chip, space->context_tbl[i],
844 +- TPM_TRANSMIT_UNLOCKED);
845 ++ TPM_TRANSMIT_UNLOCKED |
846 ++ TPM_TRANSMIT_RAW);
847 +
848 + tpm2_flush_sessions(chip, space);
849 + }
850 +@@ -377,7 +379,8 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
851 +
852 + return 0;
853 + out_no_slots:
854 +- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
855 ++ tpm2_flush_context_cmd(chip, phandle,
856 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW);
857 + dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
858 + phandle);
859 + return -ENOMEM;
860 +@@ -465,7 +468,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
861 + return rc;
862 +
863 + tpm2_flush_context_cmd(chip, space->context_tbl[i],
864 +- TPM_TRANSMIT_UNLOCKED);
865 ++ TPM_TRANSMIT_UNLOCKED |
866 ++ TPM_TRANSMIT_RAW);
867 + space->context_tbl[i] = ~0;
868 + }
869 +
870 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
871 +index bb756ad7897e..5c7ce5aaaf6f 100644
872 +--- a/drivers/char/tpm/tpm_crb.c
873 ++++ b/drivers/char/tpm/tpm_crb.c
874 +@@ -137,7 +137,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
875 + }
876 +
877 + /**
878 +- * crb_go_idle - request tpm crb device to go the idle state
879 ++ * __crb_go_idle - request tpm crb device to go the idle state
880 + *
881 + * @dev: crb device
882 + * @priv: crb private data
883 +@@ -151,7 +151,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
884 + *
885 + * Return: 0 always
886 + */
887 +-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
888 ++static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
889 + {
890 + if ((priv->flags & CRB_FL_ACPI_START) ||
891 + (priv->flags & CRB_FL_CRB_SMC_START))
892 +@@ -166,11 +166,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
893 + dev_warn(dev, "goIdle timed out\n");
894 + return -ETIME;
895 + }
896 ++
897 + return 0;
898 + }
899 +
900 ++static int crb_go_idle(struct tpm_chip *chip)
901 ++{
902 ++ struct device *dev = &chip->dev;
903 ++ struct crb_priv *priv = dev_get_drvdata(dev);
904 ++
905 ++ return __crb_go_idle(dev, priv);
906 ++}
907 ++
908 + /**
909 +- * crb_cmd_ready - request tpm crb device to enter ready state
910 ++ * __crb_cmd_ready - request tpm crb device to enter ready state
911 + *
912 + * @dev: crb device
913 + * @priv: crb private data
914 +@@ -183,7 +192,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
915 + *
916 + * Return: 0 on success -ETIME on timeout;
917 + */
918 +-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
919 ++static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
920 + {
921 + if ((priv->flags & CRB_FL_ACPI_START) ||
922 + (priv->flags & CRB_FL_CRB_SMC_START))
923 +@@ -201,6 +210,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
924 + return 0;
925 + }
926 +
927 ++static int crb_cmd_ready(struct tpm_chip *chip)
928 ++{
929 ++ struct device *dev = &chip->dev;
930 ++ struct crb_priv *priv = dev_get_drvdata(dev);
931 ++
932 ++ return __crb_cmd_ready(dev, priv);
933 ++}
934 ++
935 + static int __crb_request_locality(struct device *dev,
936 + struct crb_priv *priv, int loc)
937 + {
938 +@@ -393,6 +410,8 @@ static const struct tpm_class_ops tpm_crb = {
939 + .send = crb_send,
940 + .cancel = crb_cancel,
941 + .req_canceled = crb_req_canceled,
942 ++ .go_idle = crb_go_idle,
943 ++ .cmd_ready = crb_cmd_ready,
944 + .request_locality = crb_request_locality,
945 + .relinquish_locality = crb_relinquish_locality,
946 + .req_complete_mask = CRB_DRV_STS_COMPLETE,
947 +@@ -508,7 +527,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
948 + * PTT HW bug w/a: wake up the device to access
949 + * possibly not retained registers.
950 + */
951 +- ret = crb_cmd_ready(dev, priv);
952 ++ ret = __crb_cmd_ready(dev, priv);
953 + if (ret)
954 + return ret;
955 +
956 +@@ -553,7 +572,7 @@ out:
957 + if (!ret)
958 + priv->cmd_size = cmd_size;
959 +
960 +- crb_go_idle(dev, priv);
961 ++ __crb_go_idle(dev, priv);
962 +
963 + __crb_relinquish_locality(dev, priv, 0);
964 +
965 +@@ -624,32 +643,7 @@ static int crb_acpi_add(struct acpi_device *device)
966 + chip->acpi_dev_handle = device->handle;
967 + chip->flags = TPM_CHIP_FLAG_TPM2;
968 +
969 +- rc = __crb_request_locality(dev, priv, 0);
970 +- if (rc)
971 +- return rc;
972 +-
973 +- rc = crb_cmd_ready(dev, priv);
974 +- if (rc)
975 +- goto out;
976 +-
977 +- pm_runtime_get_noresume(dev);
978 +- pm_runtime_set_active(dev);
979 +- pm_runtime_enable(dev);
980 +-
981 +- rc = tpm_chip_register(chip);
982 +- if (rc) {
983 +- crb_go_idle(dev, priv);
984 +- pm_runtime_put_noidle(dev);
985 +- pm_runtime_disable(dev);
986 +- goto out;
987 +- }
988 +-
989 +- pm_runtime_put_sync(dev);
990 +-
991 +-out:
992 +- __crb_relinquish_locality(dev, priv, 0);
993 +-
994 +- return rc;
995 ++ return tpm_chip_register(chip);
996 + }
997 +
998 + static int crb_acpi_remove(struct acpi_device *device)
999 +@@ -659,52 +653,11 @@ static int crb_acpi_remove(struct acpi_device *device)
1000 +
1001 + tpm_chip_unregister(chip);
1002 +
1003 +- pm_runtime_disable(dev);
1004 +-
1005 + return 0;
1006 + }
1007 +
1008 +-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
1009 +-{
1010 +- struct tpm_chip *chip = dev_get_drvdata(dev);
1011 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
1012 +-
1013 +- return crb_go_idle(dev, priv);
1014 +-}
1015 +-
1016 +-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
1017 +-{
1018 +- struct tpm_chip *chip = dev_get_drvdata(dev);
1019 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
1020 +-
1021 +- return crb_cmd_ready(dev, priv);
1022 +-}
1023 +-
1024 +-static int __maybe_unused crb_pm_suspend(struct device *dev)
1025 +-{
1026 +- int ret;
1027 +-
1028 +- ret = tpm_pm_suspend(dev);
1029 +- if (ret)
1030 +- return ret;
1031 +-
1032 +- return crb_pm_runtime_suspend(dev);
1033 +-}
1034 +-
1035 +-static int __maybe_unused crb_pm_resume(struct device *dev)
1036 +-{
1037 +- int ret;
1038 +-
1039 +- ret = crb_pm_runtime_resume(dev);
1040 +- if (ret)
1041 +- return ret;
1042 +-
1043 +- return tpm_pm_resume(dev);
1044 +-}
1045 +-
1046 + static const struct dev_pm_ops crb_pm = {
1047 +- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
1048 +- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
1049 ++ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
1050 + };
1051 +
1052 + static const struct acpi_device_id crb_device_ids[] = {
1053 +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
1054 +index d5b44cadac56..c619e76ce827 100644
1055 +--- a/drivers/char/tpm/tpm_i2c_infineon.c
1056 ++++ b/drivers/char/tpm/tpm_i2c_infineon.c
1057 +@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1058 + /* Lock the adapter for the duration of the whole sequence. */
1059 + if (!tpm_dev.client->adapter->algo->master_xfer)
1060 + return -EOPNOTSUPP;
1061 +- i2c_lock_adapter(tpm_dev.client->adapter);
1062 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1063 +
1064 + if (tpm_dev.chip_type == SLB9645) {
1065 + /* use a combined read for newer chips
1066 +@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1067 + }
1068 +
1069 + out:
1070 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1071 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1072 + /* take care of 'guard time' */
1073 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1074 +
1075 +@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1076 +
1077 + if (!tpm_dev.client->adapter->algo->master_xfer)
1078 + return -EOPNOTSUPP;
1079 +- i2c_lock_adapter(tpm_dev.client->adapter);
1080 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1081 +
1082 + /* prepend the 'register address' to the buffer */
1083 + tpm_dev.buf[0] = addr;
1084 +@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1085 + usleep_range(sleep_low, sleep_hi);
1086 + }
1087 +
1088 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1089 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1090 + /* take care of 'guard time' */
1091 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1092 +
1093 +diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1094 +index 8ab0bd8445f6..b00388fc41c8 100644
1095 +--- a/drivers/char/tpm/tpm_tis_spi.c
1096 ++++ b/drivers/char/tpm/tpm_tis_spi.c
1097 +@@ -188,6 +188,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
1098 + static int tpm_tis_spi_probe(struct spi_device *dev)
1099 + {
1100 + struct tpm_tis_spi_phy *phy;
1101 ++ int irq;
1102 +
1103 + phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
1104 + GFP_KERNEL);
1105 +@@ -200,7 +201,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
1106 + if (!phy->iobuf)
1107 + return -ENOMEM;
1108 +
1109 +- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
1110 ++ /* If the SPI device has an IRQ then use that */
1111 ++ if (dev->irq > 0)
1112 ++ irq = dev->irq;
1113 ++ else
1114 ++ irq = -1;
1115 ++
1116 ++ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
1117 + NULL);
1118 + }
1119 +
1120 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
1121 +index e4b40f2b4627..9c0f7cf920af 100644
1122 +--- a/drivers/firmware/google/vpd.c
1123 ++++ b/drivers/firmware/google/vpd.c
1124 +@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
1125 + sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
1126 + kfree(sec->raw_name);
1127 + memunmap(sec->baseaddr);
1128 ++ sec->enabled = false;
1129 + }
1130 +
1131 + return 0;
1132 +@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
1133 + ret = vpd_section_init("rw", &rw_vpd,
1134 + physaddr + sizeof(struct vpd_cbmem) +
1135 + header.ro_size, header.rw_size);
1136 +- if (ret)
1137 ++ if (ret) {
1138 ++ vpd_section_destroy(&ro_vpd);
1139 + return ret;
1140 ++ }
1141 + }
1142 +
1143 + return 0;
1144 +diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
1145 +index 4b80e996d976..1022fe8d09c7 100644
1146 +--- a/drivers/gpio/gpio-ml-ioh.c
1147 ++++ b/drivers/gpio/gpio-ml-ioh.c
1148 +@@ -497,9 +497,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
1149 + return 0;
1150 +
1151 + err_gpiochip_add:
1152 ++ chip = chip_save;
1153 + while (--i >= 0) {
1154 +- chip--;
1155 + gpiochip_remove(&chip->gpio);
1156 ++ chip++;
1157 + }
1158 + kfree(chip_save);
1159 +
1160 +diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
1161 +index fbaf974277df..1eb857e2f62f 100644
1162 +--- a/drivers/gpio/gpio-tegra.c
1163 ++++ b/drivers/gpio/gpio-tegra.c
1164 +@@ -728,4 +728,4 @@ static int __init tegra_gpio_init(void)
1165 + {
1166 + return platform_driver_register(&tegra_gpio_driver);
1167 + }
1168 +-postcore_initcall(tegra_gpio_init);
1169 ++subsys_initcall(tegra_gpio_init);
1170 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1171 +index be813b2738c1..2e706f1abe64 100644
1172 +--- a/drivers/gpu/drm/i915/i915_reg.h
1173 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1174 +@@ -8462,6 +8462,7 @@ enum skl_power_gate {
1175 + #define TRANS_MSA_10_BPC (2<<5)
1176 + #define TRANS_MSA_12_BPC (3<<5)
1177 + #define TRANS_MSA_16_BPC (4<<5)
1178 ++#define TRANS_MSA_CEA_RANGE (1<<3)
1179 +
1180 + /* LCPLL Control */
1181 + #define LCPLL_CTL _MMIO(0x130040)
1182 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
1183 +index 5e5fe03b638c..3a4a581345c4 100644
1184 +--- a/drivers/gpu/drm/i915/intel_ddi.c
1185 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
1186 +@@ -1396,6 +1396,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
1187 + WARN_ON(transcoder_is_dsi(cpu_transcoder));
1188 +
1189 + temp = TRANS_MSA_SYNC_CLK;
1190 ++
1191 ++ if (crtc_state->limited_color_range)
1192 ++ temp |= TRANS_MSA_CEA_RANGE;
1193 ++
1194 + switch (crtc_state->pipe_bpp) {
1195 + case 18:
1196 + temp |= TRANS_MSA_6_BPC;
1197 +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
1198 +index 658fa2d3e40c..2c8411b8d050 100644
1199 +--- a/drivers/gpu/ipu-v3/ipu-common.c
1200 ++++ b/drivers/gpu/ipu-v3/ipu-common.c
1201 +@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
1202 + return -ENODEV;
1203 +
1204 + ipu->id = of_alias_get_id(np, "ipu");
1205 ++ if (ipu->id < 0)
1206 ++ ipu->id = 0;
1207 +
1208 + if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
1209 + IS_ENABLED(CONFIG_DRM)) {
1210 +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
1211 +index 8267439dd1ee..d8101cd28dfa 100644
1212 +--- a/drivers/hv/hv.c
1213 ++++ b/drivers/hv/hv.c
1214 +@@ -196,6 +196,10 @@ int hv_synic_alloc(void)
1215 +
1216 + return 0;
1217 + err:
1218 ++ /*
1219 ++ * Any memory allocations that succeeded will be freed when
1220 ++ * the caller cleans up by calling hv_synic_free()
1221 ++ */
1222 + return -ENOMEM;
1223 + }
1224 +
1225 +@@ -208,12 +212,10 @@ void hv_synic_free(void)
1226 + struct hv_per_cpu_context *hv_cpu
1227 + = per_cpu_ptr(hv_context.cpu_context, cpu);
1228 +
1229 +- if (hv_cpu->synic_event_page)
1230 +- free_page((unsigned long)hv_cpu->synic_event_page);
1231 +- if (hv_cpu->synic_message_page)
1232 +- free_page((unsigned long)hv_cpu->synic_message_page);
1233 +- if (hv_cpu->post_msg_page)
1234 +- free_page((unsigned long)hv_cpu->post_msg_page);
1235 ++ kfree(hv_cpu->clk_evt);
1236 ++ free_page((unsigned long)hv_cpu->synic_event_page);
1237 ++ free_page((unsigned long)hv_cpu->synic_message_page);
1238 ++ free_page((unsigned long)hv_cpu->post_msg_page);
1239 + }
1240 +
1241 + kfree(hv_context.hv_numa_map);
1242 +diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
1243 +index 284f8670dbeb..2feae9a421e6 100644
1244 +--- a/drivers/i2c/busses/i2c-aspeed.c
1245 ++++ b/drivers/i2c/busses/i2c-aspeed.c
1246 +@@ -859,7 +859,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
1247 + if (!match)
1248 + bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
1249 + else
1250 +- bus->get_clk_reg_val = match->data;
1251 ++ bus->get_clk_reg_val = (u32 (*)(u32))match->data;
1252 +
1253 + /* Initialize the I2C adapter */
1254 + spin_lock_init(&bus->lock);
1255 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1256 +index ba8df2fde1b2..67cbd9f61acc 100644
1257 +--- a/drivers/i2c/busses/i2c-i801.c
1258 ++++ b/drivers/i2c/busses/i2c-i801.c
1259 +@@ -138,6 +138,7 @@
1260 +
1261 + #define SBREG_BAR 0x10
1262 + #define SBREG_SMBCTRL 0xc6000c
1263 ++#define SBREG_SMBCTRL_DNV 0xcf000c
1264 +
1265 + /* Host status bits for SMBPCISTS */
1266 + #define SMBPCISTS_INTS BIT(3)
1267 +@@ -1395,7 +1396,11 @@ static void i801_add_tco(struct i801_priv *priv)
1268 + spin_unlock(&p2sb_spinlock);
1269 +
1270 + res = &tco_res[ICH_RES_MEM_OFF];
1271 +- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1272 ++ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
1273 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
1274 ++ else
1275 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1276 ++
1277 + res->end = res->start + 3;
1278 + res->flags = IORESOURCE_MEM;
1279 +
1280 +diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
1281 +index ae6ed254e01d..732d6c456a6f 100644
1282 +--- a/drivers/i2c/busses/i2c-xiic.c
1283 ++++ b/drivers/i2c/busses/i2c-xiic.c
1284 +@@ -538,6 +538,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1285 + {
1286 + u8 rx_watermark;
1287 + struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
1288 ++ unsigned long flags;
1289 +
1290 + /* Clear and enable Rx full interrupt. */
1291 + xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
1292 +@@ -553,6 +554,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1293 + rx_watermark = IIC_RX_FIFO_DEPTH;
1294 + xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
1295 +
1296 ++ local_irq_save(flags);
1297 + if (!(msg->flags & I2C_M_NOSTART))
1298 + /* write the address */
1299 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1300 +@@ -563,6 +565,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1301 +
1302 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1303 + msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
1304 ++ local_irq_restore(flags);
1305 ++
1306 + if (i2c->nmsgs == 1)
1307 + /* very last, enable bus not busy as well */
1308 + xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
1309 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1310 +index 79843a3ca9dc..752dbc388c27 100644
1311 +--- a/drivers/infiniband/core/cma.c
1312 ++++ b/drivers/infiniband/core/cma.c
1313 +@@ -1459,9 +1459,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
1314 + (addr->src_addr.ss_family == AF_IB ||
1315 + cma_protocol_roce_dev_port(id->device, port_num));
1316 +
1317 +- return !addr->dev_addr.bound_dev_if ||
1318 +- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1319 +- addr->dev_addr.bound_dev_if == net_dev->ifindex);
1320 ++ /*
1321 ++ * Net namespaces must match, and if the listner is listening
1322 ++ * on a specific netdevice than netdevice must match as well.
1323 ++ */
1324 ++ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1325 ++ (!!addr->dev_addr.bound_dev_if ==
1326 ++ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1327 ++ return true;
1328 ++ else
1329 ++ return false;
1330 + }
1331 +
1332 + static struct rdma_id_private *cma_find_listener(
1333 +diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1334 +index fc149ea64be7..59aaac43db91 100644
1335 +--- a/drivers/input/touchscreen/atmel_mxt_ts.c
1336 ++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1337 +@@ -1647,10 +1647,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
1338 + break;
1339 + case MXT_TOUCH_MULTI_T9:
1340 + data->multitouch = MXT_TOUCH_MULTI_T9;
1341 ++ /* Only handle messages from first T9 instance */
1342 + data->T9_reportid_min = min_id;
1343 +- data->T9_reportid_max = max_id;
1344 +- data->num_touchids = object->num_report_ids
1345 +- * mxt_obj_instances(object);
1346 ++ data->T9_reportid_max = min_id +
1347 ++ object->num_report_ids - 1;
1348 ++ data->num_touchids = object->num_report_ids;
1349 + break;
1350 + case MXT_SPT_MESSAGECOUNT_T44:
1351 + data->T44_address = object->start_address;
1352 +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
1353 +index 195d6e93ac71..5d0ba5f644c4 100644
1354 +--- a/drivers/iommu/ipmmu-vmsa.c
1355 ++++ b/drivers/iommu/ipmmu-vmsa.c
1356 +@@ -54,7 +54,7 @@ struct ipmmu_vmsa_domain {
1357 + struct io_pgtable_ops *iop;
1358 +
1359 + unsigned int context_id;
1360 +- spinlock_t lock; /* Protects mappings */
1361 ++ struct mutex mutex; /* Protects mappings */
1362 + };
1363 +
1364 + struct ipmmu_vmsa_iommu_priv {
1365 +@@ -523,7 +523,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
1366 + if (!domain)
1367 + return NULL;
1368 +
1369 +- spin_lock_init(&domain->lock);
1370 ++ mutex_init(&domain->mutex);
1371 +
1372 + return &domain->io_domain;
1373 + }
1374 +@@ -548,7 +548,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
1375 + struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1376 + struct ipmmu_vmsa_device *mmu = priv->mmu;
1377 + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
1378 +- unsigned long flags;
1379 + unsigned int i;
1380 + int ret = 0;
1381 +
1382 +@@ -557,7 +556,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
1383 + return -ENXIO;
1384 + }
1385 +
1386 +- spin_lock_irqsave(&domain->lock, flags);
1387 ++ mutex_lock(&domain->mutex);
1388 +
1389 + if (!domain->mmu) {
1390 + /* The domain hasn't been used yet, initialize it. */
1391 +@@ -574,7 +573,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
1392 + } else
1393 + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
1394 +
1395 +- spin_unlock_irqrestore(&domain->lock, flags);
1396 ++ mutex_unlock(&domain->mutex);
1397 +
1398 + if (ret < 0)
1399 + return ret;
1400 +diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
1401 +index c4c2b3b85ebc..f6e040fcad9a 100644
1402 +--- a/drivers/macintosh/via-pmu.c
1403 ++++ b/drivers/macintosh/via-pmu.c
1404 +@@ -532,8 +532,9 @@ init_pmu(void)
1405 + int timeout;
1406 + struct adb_request req;
1407 +
1408 +- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
1409 +- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
1410 ++ /* Negate TREQ. Set TACK to input and TREQ to output. */
1411 ++ out_8(&via[B], in_8(&via[B]) | TREQ);
1412 ++ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
1413 +
1414 + pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
1415 + timeout = 100000;
1416 +@@ -1455,8 +1456,8 @@ pmu_sr_intr(void)
1417 + struct adb_request *req;
1418 + int bite = 0;
1419 +
1420 +- if (via[B] & TREQ) {
1421 +- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
1422 ++ if (in_8(&via[B]) & TREQ) {
1423 ++ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
1424 + out_8(&via[IFR], SR_INT);
1425 + return NULL;
1426 + }
1427 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
1428 +index 71c3507df9a0..a4b7c2698096 100644
1429 +--- a/drivers/md/dm-cache-target.c
1430 ++++ b/drivers/md/dm-cache-target.c
1431 +@@ -2330,7 +2330,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1432 + {0, 2, "Invalid number of cache feature arguments"},
1433 + };
1434 +
1435 +- int r;
1436 ++ int r, mode_ctr = 0;
1437 + unsigned argc;
1438 + const char *arg;
1439 + struct cache_features *cf = &ca->features;
1440 +@@ -2344,14 +2344,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1441 + while (argc--) {
1442 + arg = dm_shift_arg(as);
1443 +
1444 +- if (!strcasecmp(arg, "writeback"))
1445 ++ if (!strcasecmp(arg, "writeback")) {
1446 + cf->io_mode = CM_IO_WRITEBACK;
1447 ++ mode_ctr++;
1448 ++ }
1449 +
1450 +- else if (!strcasecmp(arg, "writethrough"))
1451 ++ else if (!strcasecmp(arg, "writethrough")) {
1452 + cf->io_mode = CM_IO_WRITETHROUGH;
1453 ++ mode_ctr++;
1454 ++ }
1455 +
1456 +- else if (!strcasecmp(arg, "passthrough"))
1457 ++ else if (!strcasecmp(arg, "passthrough")) {
1458 + cf->io_mode = CM_IO_PASSTHROUGH;
1459 ++ mode_ctr++;
1460 ++ }
1461 +
1462 + else if (!strcasecmp(arg, "metadata2"))
1463 + cf->metadata_version = 2;
1464 +@@ -2362,6 +2368,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1465 + }
1466 + }
1467 +
1468 ++ if (mode_ctr > 1) {
1469 ++ *error = "Duplicate cache io_mode features requested";
1470 ++ return -EINVAL;
1471 ++ }
1472 ++
1473 + return 0;
1474 + }
1475 +
1476 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1477 +index 07ca2fd10189..5018fb2352c2 100644
1478 +--- a/drivers/md/raid5.c
1479 ++++ b/drivers/md/raid5.c
1480 +@@ -4516,6 +4516,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
1481 + s->failed++;
1482 + if (rdev && !test_bit(Faulty, &rdev->flags))
1483 + do_recovery = 1;
1484 ++ else if (!rdev) {
1485 ++ rdev = rcu_dereference(
1486 ++ conf->disks[i].replacement);
1487 ++ if (rdev && !test_bit(Faulty, &rdev->flags))
1488 ++ do_recovery = 1;
1489 ++ }
1490 + }
1491 +
1492 + if (test_bit(R5_InJournal, &dev->flags))
1493 +diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
1494 +index 2ab8d83e5576..fcfe658a4328 100644
1495 +--- a/drivers/media/dvb-frontends/helene.c
1496 ++++ b/drivers/media/dvb-frontends/helene.c
1497 +@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
1498 + helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
1499 +
1500 + /* 0x81 - 0x94 */
1501 +- data[0] = 0x18; /* xtal 24 MHz */
1502 ++ if (priv->xtal == SONY_HELENE_XTAL_16000)
1503 ++ data[0] = 0x10; /* xtal 16 MHz */
1504 ++ else
1505 ++ data[0] = 0x18; /* xtal 24 MHz */
1506 + data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
1507 + data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
1508 + data[3] = 0x80; /* REFOUT signal output 500mVpp */
1509 +diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
1510 +index 56fe4e5b396e..4a65861433d6 100644
1511 +--- a/drivers/media/platform/davinci/vpif_display.c
1512 ++++ b/drivers/media/platform/davinci/vpif_display.c
1513 +@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
1514 + return err;
1515 + }
1516 +
1517 ++static void free_vpif_objs(void)
1518 ++{
1519 ++ int i;
1520 ++
1521 ++ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
1522 ++ kfree(vpif_obj.dev[i]);
1523 ++}
1524 ++
1525 + static int vpif_async_bound(struct v4l2_async_notifier *notifier,
1526 + struct v4l2_subdev *subdev,
1527 + struct v4l2_async_subdev *asd)
1528 +@@ -1250,11 +1258,6 @@ static __init int vpif_probe(struct platform_device *pdev)
1529 + return -EINVAL;
1530 + }
1531 +
1532 +- if (!pdev->dev.platform_data) {
1533 +- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
1534 +- return -EINVAL;
1535 +- }
1536 +-
1537 + vpif_dev = &pdev->dev;
1538 + err = initialize_vpif();
1539 +
1540 +@@ -1266,7 +1269,7 @@ static __init int vpif_probe(struct platform_device *pdev)
1541 + err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
1542 + if (err) {
1543 + v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
1544 +- return err;
1545 ++ goto vpif_free;
1546 + }
1547 +
1548 + while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
1549 +@@ -1309,7 +1312,10 @@ static __init int vpif_probe(struct platform_device *pdev)
1550 + if (vpif_obj.sd[i])
1551 + vpif_obj.sd[i]->grp_id = 1 << i;
1552 + }
1553 +- vpif_probe_complete();
1554 ++ err = vpif_probe_complete();
1555 ++ if (err) {
1556 ++ goto probe_subdev_out;
1557 ++ }
1558 + } else {
1559 + vpif_obj.notifier.subdevs = vpif_obj.config->asd;
1560 + vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
1561 +@@ -1330,6 +1336,8 @@ probe_subdev_out:
1562 + kfree(vpif_obj.sd);
1563 + vpif_unregister:
1564 + v4l2_device_unregister(&vpif_obj.v4l2_dev);
1565 ++vpif_free:
1566 ++ free_vpif_objs();
1567 +
1568 + return err;
1569 + }
1570 +@@ -1351,8 +1359,8 @@ static int vpif_remove(struct platform_device *device)
1571 + ch = vpif_obj.dev[i];
1572 + /* Unregister video device */
1573 + video_unregister_device(&ch->video_dev);
1574 +- kfree(vpif_obj.dev[i]);
1575 + }
1576 ++ free_vpif_objs();
1577 +
1578 + return 0;
1579 + }
1580 +diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
1581 +index 64df82817de3..4882ee25bd75 100644
1582 +--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
1583 ++++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
1584 +@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1585 + !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
1586 + return -ENOLINK;
1587 +
1588 +- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
1589 +- data_type;
1590 +-
1591 + if (tg->enabled) {
1592 + /* Config Test Generator */
1593 + struct v4l2_mbus_framefmt *f =
1594 +@@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1595 + writel_relaxed(val, csid->base +
1596 + CAMSS_CSID_TG_DT_n_CGG_0(0));
1597 +
1598 ++ dt = csid_get_fmt_entry(
1599 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
1600 ++
1601 + /* 5:0 data type */
1602 + val = dt;
1603 + writel_relaxed(val, csid->base +
1604 +@@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1605 + val = tg->payload_mode;
1606 + writel_relaxed(val, csid->base +
1607 + CAMSS_CSID_TG_DT_n_CGG_2(0));
1608 ++
1609 ++ df = csid_get_fmt_entry(
1610 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
1611 + } else {
1612 + struct csid_phy_config *phy = &csid->phy;
1613 +
1614 +@@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1615 +
1616 + writel_relaxed(val,
1617 + csid->base + CAMSS_CSID_CORE_CTRL_1);
1618 ++
1619 ++ dt = csid_get_fmt_entry(
1620 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
1621 ++ df = csid_get_fmt_entry(
1622 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
1623 + }
1624 +
1625 + /* Config LUT */
1626 +
1627 + dt_shift = (cid % 4) * 8;
1628 +- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
1629 +- decode_format;
1630 +
1631 + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
1632 + val &= ~(0xff << dt_shift);
1633 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1634 +index 8e9531f7f83f..9942932ecbf9 100644
1635 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
1636 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1637 +@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
1638 + static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
1639 + {
1640 + struct s5p_mfc_dev *dev = ctx->dev;
1641 +- struct s5p_mfc_buf *dst_buf, *src_buf;
1642 +- size_t dec_y_addr;
1643 ++ struct s5p_mfc_buf *dst_buf, *src_buf;
1644 ++ u32 dec_y_addr;
1645 + unsigned int frame_type;
1646 +
1647 + /* Make sure we actually have a new frame before continuing. */
1648 + frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
1649 + if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
1650 + return;
1651 +- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
1652 ++ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
1653 +
1654 + /* Copy timestamp / timecode from decoded src to dst and set
1655 + appropriate flags. */
1656 + src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1657 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
1658 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
1659 +- == dec_y_addr) {
1660 +- dst_buf->b->timecode =
1661 +- src_buf->b->timecode;
1662 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
1663 ++
1664 ++ if (addr == dec_y_addr) {
1665 ++ dst_buf->b->timecode = src_buf->b->timecode;
1666 + dst_buf->b->vb2_buf.timestamp =
1667 + src_buf->b->vb2_buf.timestamp;
1668 + dst_buf->b->flags &=
1669 +@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
1670 + {
1671 + struct s5p_mfc_dev *dev = ctx->dev;
1672 + struct s5p_mfc_buf *dst_buf;
1673 +- size_t dspl_y_addr;
1674 ++ u32 dspl_y_addr;
1675 + unsigned int frame_type;
1676 +
1677 +- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
1678 ++ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
1679 + if (IS_MFCV6_PLUS(dev))
1680 + frame_type = s5p_mfc_hw_call(dev->mfc_ops,
1681 + get_disp_frame_type, ctx);
1682 +@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
1683 + /* The MFC returns address of the buffer, now we have to
1684 + * check which videobuf does it correspond to */
1685 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
1686 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
1687 ++
1688 + /* Check if this is the buffer we're looking for */
1689 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
1690 +- == dspl_y_addr) {
1691 ++ if (addr == dspl_y_addr) {
1692 + list_del(&dst_buf->list);
1693 + ctx->dst_queue_cnt--;
1694 + dst_buf->b->sequence = ctx->sequence;
1695 +diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
1696 +index b421329b21fa..3d09e1c87921 100644
1697 +--- a/drivers/media/usb/dvb-usb/dw2102.c
1698 ++++ b/drivers/media/usb/dvb-usb/dw2102.c
1699 +@@ -2103,14 +2103,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
1700 + }
1701 + };
1702 +
1703 +-static struct dvb_usb_device_properties *p1100;
1704 + static const struct dvb_usb_device_description d1100 = {
1705 + "Prof 1100 USB ",
1706 + {&dw2102_table[PROF_1100], NULL},
1707 + {NULL},
1708 + };
1709 +
1710 +-static struct dvb_usb_device_properties *s660;
1711 + static const struct dvb_usb_device_description d660 = {
1712 + "TeVii S660 USB",
1713 + {&dw2102_table[TEVII_S660], NULL},
1714 +@@ -2129,14 +2127,12 @@ static const struct dvb_usb_device_description d480_2 = {
1715 + {NULL},
1716 + };
1717 +
1718 +-static struct dvb_usb_device_properties *p7500;
1719 + static const struct dvb_usb_device_description d7500 = {
1720 + "Prof 7500 USB DVB-S2",
1721 + {&dw2102_table[PROF_7500], NULL},
1722 + {NULL},
1723 + };
1724 +
1725 +-static struct dvb_usb_device_properties *s421;
1726 + static const struct dvb_usb_device_description d421 = {
1727 + "TeVii S421 PCI",
1728 + {&dw2102_table[TEVII_S421], NULL},
1729 +@@ -2336,6 +2332,11 @@ static int dw2102_probe(struct usb_interface *intf,
1730 + const struct usb_device_id *id)
1731 + {
1732 + int retval = -ENOMEM;
1733 ++ struct dvb_usb_device_properties *p1100;
1734 ++ struct dvb_usb_device_properties *s660;
1735 ++ struct dvb_usb_device_properties *p7500;
1736 ++ struct dvb_usb_device_properties *s421;
1737 ++
1738 + p1100 = kmemdup(&s6x0_properties,
1739 + sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
1740 + if (!p1100)
1741 +@@ -2404,8 +2405,16 @@ static int dw2102_probe(struct usb_interface *intf,
1742 + 0 == dvb_usb_device_init(intf, &t220_properties,
1743 + THIS_MODULE, NULL, adapter_nr) ||
1744 + 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
1745 +- THIS_MODULE, NULL, adapter_nr))
1746 ++ THIS_MODULE, NULL, adapter_nr)) {
1747 ++
1748 ++ /* clean up copied properties */
1749 ++ kfree(s421);
1750 ++ kfree(p7500);
1751 ++ kfree(s660);
1752 ++ kfree(p1100);
1753 ++
1754 + return 0;
1755 ++ }
1756 +
1757 + retval = -ENODEV;
1758 + kfree(s421);
1759 +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1760 +index 0f3fab47fe48..7dc1cbcd2fb8 100644
1761 +--- a/drivers/mfd/ti_am335x_tscadc.c
1762 ++++ b/drivers/mfd/ti_am335x_tscadc.c
1763 +@@ -210,14 +210,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1764 + * The TSC_ADC_SS controller design assumes the OCP clock is
1765 + * at least 6x faster than the ADC clock.
1766 + */
1767 +- clk = clk_get(&pdev->dev, "adc_tsc_fck");
1768 ++ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
1769 + if (IS_ERR(clk)) {
1770 + dev_err(&pdev->dev, "failed to get TSC fck\n");
1771 + err = PTR_ERR(clk);
1772 + goto err_disable_clk;
1773 + }
1774 + clock_rate = clk_get_rate(clk);
1775 +- clk_put(clk);
1776 + tscadc->clk_div = clock_rate / ADC_CLK;
1777 +
1778 + /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
1779 +diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
1780 +index ddc9e4b08b5c..56efa9d18a9a 100644
1781 +--- a/drivers/misc/mic/scif/scif_api.c
1782 ++++ b/drivers/misc/mic/scif/scif_api.c
1783 +@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
1784 + goto scif_bind_exit;
1785 + }
1786 + } else {
1787 +- pn = scif_get_new_port();
1788 +- if (!pn) {
1789 +- ret = -ENOSPC;
1790 ++ ret = scif_get_new_port();
1791 ++ if (ret < 0)
1792 + goto scif_bind_exit;
1793 +- }
1794 ++ pn = ret;
1795 + }
1796 +
1797 + ep->state = SCIFEP_BOUND;
1798 +@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
1799 + err = -EISCONN;
1800 + break;
1801 + case SCIFEP_UNBOUND:
1802 +- ep->port.port = scif_get_new_port();
1803 +- if (!ep->port.port) {
1804 +- err = -ENOSPC;
1805 +- } else {
1806 +- ep->port.node = scif_info.nodeid;
1807 +- ep->conn_async_state = ASYNC_CONN_IDLE;
1808 +- }
1809 ++ err = scif_get_new_port();
1810 ++ if (err < 0)
1811 ++ break;
1812 ++ ep->port.port = err;
1813 ++ ep->port.node = scif_info.nodeid;
1814 ++ ep->conn_async_state = ASYNC_CONN_IDLE;
1815 + /* Fall through */
1816 + case SCIFEP_BOUND:
1817 + /*
1818 +diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
1819 +index b77aacafc3fc..dda3ed72d05b 100644
1820 +--- a/drivers/misc/ti-st/st_kim.c
1821 ++++ b/drivers/misc/ti-st/st_kim.c
1822 +@@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
1823 + err = gpio_request(kim_gdata->nshutdown, "kim");
1824 + if (unlikely(err)) {
1825 + pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
1826 +- return err;
1827 ++ goto err_sysfs_group;
1828 + }
1829 +
1830 + /* Configure nShutdown GPIO as output=0 */
1831 + err = gpio_direction_output(kim_gdata->nshutdown, 0);
1832 + if (unlikely(err)) {
1833 + pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
1834 +- return err;
1835 ++ goto err_sysfs_group;
1836 + }
1837 + /* get reference of pdev for request_firmware
1838 + */
1839 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1840 +index 23a6986d512b..a8f74d9bba4f 100644
1841 +--- a/drivers/mtd/ubi/wl.c
1842 ++++ b/drivers/mtd/ubi/wl.c
1843 +@@ -1615,8 +1615,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1844 + cond_resched();
1845 +
1846 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1847 +- if (!e)
1848 ++ if (!e) {
1849 ++ err = -ENOMEM;
1850 + goto out_free;
1851 ++ }
1852 +
1853 + e->pnum = aeb->pnum;
1854 + e->ec = aeb->ec;
1855 +@@ -1635,8 +1637,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1856 + cond_resched();
1857 +
1858 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1859 +- if (!e)
1860 ++ if (!e) {
1861 ++ err = -ENOMEM;
1862 + goto out_free;
1863 ++ }
1864 +
1865 + e->pnum = aeb->pnum;
1866 + e->ec = aeb->ec;
1867 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1868 +index 3deaa3413313..074a5b79d691 100644
1869 +--- a/drivers/net/ethernet/marvell/mvneta.c
1870 ++++ b/drivers/net/ethernet/marvell/mvneta.c
1871 +@@ -3195,7 +3195,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
1872 +
1873 + on_each_cpu(mvneta_percpu_enable, pp, true);
1874 + mvneta_start_dev(pp);
1875 +- mvneta_port_up(pp);
1876 +
1877 + netdev_update_features(dev);
1878 +
1879 +diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
1880 +index 0c5b68e7da51..9b3167054843 100644
1881 +--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
1882 ++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
1883 +@@ -22,7 +22,7 @@
1884 + #include <linux/mdio-mux.h>
1885 + #include <linux/delay.h>
1886 +
1887 +-#define MDIO_PARAM_OFFSET 0x00
1888 ++#define MDIO_PARAM_OFFSET 0x23c
1889 + #define MDIO_PARAM_MIIM_CYCLE 29
1890 + #define MDIO_PARAM_INTERNAL_SEL 25
1891 + #define MDIO_PARAM_BUS_ID 22
1892 +@@ -30,20 +30,22 @@
1893 + #define MDIO_PARAM_PHY_ID 16
1894 + #define MDIO_PARAM_PHY_DATA 0
1895 +
1896 +-#define MDIO_READ_OFFSET 0x04
1897 ++#define MDIO_READ_OFFSET 0x240
1898 + #define MDIO_READ_DATA_MASK 0xffff
1899 +-#define MDIO_ADDR_OFFSET 0x08
1900 ++#define MDIO_ADDR_OFFSET 0x244
1901 +
1902 +-#define MDIO_CTRL_OFFSET 0x0C
1903 ++#define MDIO_CTRL_OFFSET 0x248
1904 + #define MDIO_CTRL_WRITE_OP 0x1
1905 + #define MDIO_CTRL_READ_OP 0x2
1906 +
1907 +-#define MDIO_STAT_OFFSET 0x10
1908 ++#define MDIO_STAT_OFFSET 0x24c
1909 + #define MDIO_STAT_DONE 1
1910 +
1911 + #define BUS_MAX_ADDR 32
1912 + #define EXT_BUS_START_ADDR 16
1913 +
1914 ++#define MDIO_REG_ADDR_SPACE_SIZE 0x250
1915 ++
1916 + struct iproc_mdiomux_desc {
1917 + void *mux_handle;
1918 + void __iomem *base;
1919 +@@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
1920 + md->dev = &pdev->dev;
1921 +
1922 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1923 ++ if (res->start & 0xfff) {
1924 ++ /* For backward compatibility in case the
1925 ++ * base address is specified with an offset.
1926 ++ */
1927 ++ dev_info(&pdev->dev, "fix base address in dt-blob\n");
1928 ++ res->start &= ~0xfff;
1929 ++ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
1930 ++ }
1931 + md->base = devm_ioremap_resource(&pdev->dev, res);
1932 + if (IS_ERR(md->base)) {
1933 + dev_err(&pdev->dev, "failed to ioremap register\n");
1934 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1935 +index cb17ffadfc30..e0baea2dfd3c 100644
1936 +--- a/drivers/net/tun.c
1937 ++++ b/drivers/net/tun.c
1938 +@@ -534,14 +534,6 @@ static void tun_queue_purge(struct tun_file *tfile)
1939 + skb_queue_purge(&tfile->sk.sk_error_queue);
1940 + }
1941 +
1942 +-static void tun_cleanup_tx_array(struct tun_file *tfile)
1943 +-{
1944 +- if (tfile->tx_array.ring.queue) {
1945 +- skb_array_cleanup(&tfile->tx_array);
1946 +- memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
1947 +- }
1948 +-}
1949 +-
1950 + static void __tun_detach(struct tun_file *tfile, bool clean)
1951 + {
1952 + struct tun_file *ntfile;
1953 +@@ -583,7 +575,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
1954 + tun->dev->reg_state == NETREG_REGISTERED)
1955 + unregister_netdevice(tun->dev);
1956 + }
1957 +- tun_cleanup_tx_array(tfile);
1958 ++ skb_array_cleanup(&tfile->tx_array);
1959 + sock_put(&tfile->sk);
1960 + }
1961 + }
1962 +@@ -623,13 +615,11 @@ static void tun_detach_all(struct net_device *dev)
1963 + /* Drop read queue */
1964 + tun_queue_purge(tfile);
1965 + sock_put(&tfile->sk);
1966 +- tun_cleanup_tx_array(tfile);
1967 + }
1968 + list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
1969 + tun_enable_queue(tfile);
1970 + tun_queue_purge(tfile);
1971 + sock_put(&tfile->sk);
1972 +- tun_cleanup_tx_array(tfile);
1973 + }
1974 + BUG_ON(tun->numdisabled != 0);
1975 +
1976 +@@ -675,7 +665,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
1977 + }
1978 +
1979 + if (!tfile->detached &&
1980 +- skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
1981 ++ skb_array_resize(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
1982 + err = -ENOMEM;
1983 + goto out;
1984 + }
1985 +@@ -2624,6 +2614,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1986 + &tun_proto, 0);
1987 + if (!tfile)
1988 + return -ENOMEM;
1989 ++ if (skb_array_init(&tfile->tx_array, 0, GFP_KERNEL)) {
1990 ++ sk_free(&tfile->sk);
1991 ++ return -ENOMEM;
1992 ++ }
1993 ++
1994 + RCU_INIT_POINTER(tfile->tun, NULL);
1995 + tfile->flags = 0;
1996 + tfile->ifindex = 0;
1997 +@@ -2644,8 +2639,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1998 +
1999 + sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2000 +
2001 +- memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
2002 +-
2003 + return 0;
2004 + }
2005 +
2006 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2007 +index 52ebed1f55a1..6fa9c223ff93 100644
2008 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2009 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2010 +@@ -3074,6 +3074,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2011 + passive = channel->flags & IEEE80211_CHAN_NO_IR;
2012 + ch->passive = passive;
2013 +
2014 ++ /* the firmware is ignoring the "radar" flag of the
2015 ++ * channel and is scanning actively using Probe Requests
2016 ++ * on "Radar detection"/DFS channels which are not
2017 ++ * marked as "available"
2018 ++ */
2019 ++ ch->passive |= ch->chan_radar;
2020 ++
2021 + ch->freq = channel->center_freq;
2022 + ch->band_center_freq1 = channel->center_freq;
2023 + ch->min_power = 0;
2024 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2025 +index 7616c1c4bbd3..baec856af90f 100644
2026 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2027 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2028 +@@ -1451,6 +1451,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
2029 + cfg->keep_alive_pattern_size = __cpu_to_le32(0);
2030 + cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
2031 + cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
2032 ++ cfg->wmi_send_separate = __cpu_to_le32(0);
2033 ++ cfg->num_ocb_vdevs = __cpu_to_le32(0);
2034 ++ cfg->num_ocb_channels = __cpu_to_le32(0);
2035 ++ cfg->num_ocb_schedules = __cpu_to_le32(0);
2036 ++ cfg->host_capab = __cpu_to_le32(0);
2037 +
2038 + ath10k_wmi_put_host_mem_chunks(ar, chunks);
2039 +
2040 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2041 +index 22cf011e839a..e75bba0bbf67 100644
2042 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2043 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2044 +@@ -1228,6 +1228,11 @@ struct wmi_tlv_resource_config {
2045 + __le32 keep_alive_pattern_size;
2046 + __le32 max_tdls_concurrent_sleep_sta;
2047 + __le32 max_tdls_concurrent_buffer_sta;
2048 ++ __le32 wmi_send_separate;
2049 ++ __le32 num_ocb_vdevs;
2050 ++ __le32 num_ocb_channels;
2051 ++ __le32 num_ocb_schedules;
2052 ++ __le32 host_capab;
2053 + } __packed;
2054 +
2055 + struct wmi_tlv_init_cmd {
2056 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2057 +index 8c5c2dd8fa7f..a7f506eb7b36 100644
2058 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2059 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2060 +@@ -2915,16 +2915,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2061 + struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2062 + struct ieee80211_channel *channel;
2063 + int chan_pwr, new_pwr;
2064 ++ u16 ctl = NO_CTL;
2065 +
2066 + if (!chan)
2067 + return;
2068 +
2069 ++ if (!test)
2070 ++ ctl = ath9k_regd_get_ctl(reg, chan);
2071 ++
2072 + channel = chan->chan;
2073 + chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
2074 + new_pwr = min_t(int, chan_pwr, reg->power_limit);
2075 +
2076 +- ah->eep_ops->set_txpower(ah, chan,
2077 +- ath9k_regd_get_ctl(reg, chan),
2078 ++ ah->eep_ops->set_txpower(ah, chan, ctl,
2079 + get_antenna_gain(ah, chan), new_pwr, test);
2080 + }
2081 +
2082 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2083 +index d8b041f48ca8..fa64c1cc94ae 100644
2084 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2085 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2086 +@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
2087 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2088 + struct ieee80211_sta *sta = info->status.status_driver_data[0];
2089 +
2090 +- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
2091 ++ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
2092 ++ IEEE80211_TX_STATUS_EOSP)) {
2093 + ieee80211_tx_status(hw, skb);
2094 + return;
2095 + }
2096 +diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
2097 +index 0f15696195f8..078a4940bc5c 100644
2098 +--- a/drivers/net/wireless/ti/wlcore/rx.c
2099 ++++ b/drivers/net/wireless/ti/wlcore/rx.c
2100 +@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
2101 + static void wl1271_rx_status(struct wl1271 *wl,
2102 + struct wl1271_rx_descriptor *desc,
2103 + struct ieee80211_rx_status *status,
2104 +- u8 beacon)
2105 ++ u8 beacon, u8 probe_rsp)
2106 + {
2107 + memset(status, 0, sizeof(struct ieee80211_rx_status));
2108 +
2109 +@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
2110 + }
2111 + }
2112 +
2113 ++ if (beacon || probe_rsp)
2114 ++ status->boottime_ns = ktime_get_boot_ns();
2115 ++
2116 + if (beacon)
2117 + wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
2118 + status->band);
2119 +@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
2120 + if (ieee80211_is_data_present(hdr->frame_control))
2121 + is_data = 1;
2122 +
2123 +- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
2124 ++ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
2125 ++ ieee80211_is_probe_resp(hdr->frame_control));
2126 + wlcore_hw_set_rx_csum(wl, desc, skb);
2127 +
2128 + seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
2129 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
2130 +index af81b2dec42e..620f5b995a12 100644
2131 +--- a/drivers/pci/switch/switchtec.c
2132 ++++ b/drivers/pci/switch/switchtec.c
2133 +@@ -24,6 +24,8 @@
2134 + #include <linux/cdev.h>
2135 + #include <linux/wait.h>
2136 +
2137 ++#include <linux/nospec.h>
2138 ++
2139 + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
2140 + MODULE_VERSION("0.1");
2141 + MODULE_LICENSE("GPL");
2142 +@@ -1173,6 +1175,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
2143 + default:
2144 + if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
2145 + return -EINVAL;
2146 ++ p.port = array_index_nospec(p.port,
2147 ++ ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
2148 + p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
2149 + break;
2150 + }
2151 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
2152 +index 6e472691d8ee..17f2c5a505b2 100644
2153 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
2154 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
2155 +@@ -389,7 +389,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
2156 + const char *name;
2157 + int i, ret;
2158 +
2159 +- if (group > pctldev->num_groups)
2160 ++ if (group >= pctldev->num_groups)
2161 + return;
2162 +
2163 + seq_printf(s, "\n");
2164 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
2165 +index 433af328d981..b78f42abff2f 100644
2166 +--- a/drivers/pinctrl/pinctrl-amd.c
2167 ++++ b/drivers/pinctrl/pinctrl-amd.c
2168 +@@ -530,7 +530,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
2169 + /* Each status bit covers four pins */
2170 + for (i = 0; i < 4; i++) {
2171 + regval = readl(regs + i);
2172 +- if (!(regval & PIN_IRQ_PENDING))
2173 ++ if (!(regval & PIN_IRQ_PENDING) ||
2174 ++ !(regval & BIT(INTERRUPT_MASK_OFF)))
2175 + continue;
2176 + irq = irq_find_mapping(gc->irqdomain, irqnr + i);
2177 + generic_handle_irq(irq);
2178 +diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
2179 +index dffa3aab7178..cec4c3223044 100644
2180 +--- a/drivers/rpmsg/rpmsg_core.c
2181 ++++ b/drivers/rpmsg/rpmsg_core.c
2182 +@@ -23,6 +23,7 @@
2183 + #include <linux/module.h>
2184 + #include <linux/rpmsg.h>
2185 + #include <linux/of_device.h>
2186 ++#include <linux/pm_domain.h>
2187 + #include <linux/slab.h>
2188 +
2189 + #include "rpmsg_internal.h"
2190 +@@ -418,6 +419,10 @@ static int rpmsg_dev_probe(struct device *dev)
2191 + struct rpmsg_endpoint *ept = NULL;
2192 + int err;
2193 +
2194 ++ err = dev_pm_domain_attach(dev, true);
2195 ++ if (err)
2196 ++ goto out;
2197 ++
2198 + if (rpdrv->callback) {
2199 + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
2200 + chinfo.src = rpdev->src;
2201 +@@ -459,6 +464,8 @@ static int rpmsg_dev_remove(struct device *dev)
2202 +
2203 + rpdrv->remove(rpdev);
2204 +
2205 ++ dev_pm_domain_detach(dev, true);
2206 ++
2207 + if (rpdev->ept)
2208 + rpmsg_destroy_ept(rpdev->ept);
2209 +
2210 +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
2211 +index a1388842e17e..dd342207095a 100644
2212 +--- a/drivers/scsi/3w-9xxx.c
2213 ++++ b/drivers/scsi/3w-9xxx.c
2214 +@@ -2042,6 +2042,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2215 +
2216 + if (twa_initialize_device_extension(tw_dev)) {
2217 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2218 ++ retval = -ENOMEM;
2219 + goto out_free_device_extension;
2220 + }
2221 +
2222 +@@ -2064,6 +2065,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2223 + tw_dev->base_addr = ioremap(mem_addr, mem_len);
2224 + if (!tw_dev->base_addr) {
2225 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2226 ++ retval = -ENOMEM;
2227 + goto out_release_mem_region;
2228 + }
2229 +
2230 +@@ -2071,8 +2073,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2231 + TW_DISABLE_INTERRUPTS(tw_dev);
2232 +
2233 + /* Initialize the card */
2234 +- if (twa_reset_sequence(tw_dev, 0))
2235 ++ if (twa_reset_sequence(tw_dev, 0)) {
2236 ++ retval = -ENOMEM;
2237 + goto out_iounmap;
2238 ++ }
2239 +
2240 + /* Set host specific parameters */
2241 + if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2242 +diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
2243 +index b150e131b2e7..aa317d6909e8 100644
2244 +--- a/drivers/scsi/3w-sas.c
2245 ++++ b/drivers/scsi/3w-sas.c
2246 +@@ -1597,6 +1597,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2247 +
2248 + if (twl_initialize_device_extension(tw_dev)) {
2249 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
2250 ++ retval = -ENOMEM;
2251 + goto out_free_device_extension;
2252 + }
2253 +
2254 +@@ -1611,6 +1612,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2255 + tw_dev->base_addr = pci_iomap(pdev, 1, 0);
2256 + if (!tw_dev->base_addr) {
2257 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
2258 ++ retval = -ENOMEM;
2259 + goto out_release_mem_region;
2260 + }
2261 +
2262 +@@ -1620,6 +1622,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2263 + /* Initialize the card */
2264 + if (twl_reset_sequence(tw_dev, 0)) {
2265 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
2266 ++ retval = -ENOMEM;
2267 + goto out_iounmap;
2268 + }
2269 +
2270 +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
2271 +index f6179e3d6953..961ea6f7def8 100644
2272 +--- a/drivers/scsi/3w-xxxx.c
2273 ++++ b/drivers/scsi/3w-xxxx.c
2274 +@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2275 +
2276 + if (tw_initialize_device_extension(tw_dev)) {
2277 + printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
2278 ++ retval = -ENOMEM;
2279 + goto out_free_device_extension;
2280 + }
2281 +
2282 +@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2283 + tw_dev->base_addr = pci_resource_start(pdev, 0);
2284 + if (!tw_dev->base_addr) {
2285 + printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
2286 ++ retval = -ENOMEM;
2287 + goto out_release_mem_region;
2288 + }
2289 +
2290 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
2291 +index 8eb3f96fe068..bc61cc8bc6f0 100644
2292 +--- a/drivers/scsi/lpfc/lpfc.h
2293 ++++ b/drivers/scsi/lpfc/lpfc.h
2294 +@@ -676,7 +676,7 @@ struct lpfc_hba {
2295 + #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
2296 + #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
2297 + #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
2298 +-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
2299 ++#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
2300 +
2301 + uint32_t hba_flag; /* hba generic flags */
2302 + #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
2303 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2304 +index e6d51135d105..0d0be7d8b9d6 100644
2305 +--- a/drivers/target/target_core_transport.c
2306 ++++ b/drivers/target/target_core_transport.c
2307 +@@ -317,6 +317,7 @@ void __transport_register_session(
2308 + {
2309 + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
2310 + unsigned char buf[PR_REG_ISID_LEN];
2311 ++ unsigned long flags;
2312 +
2313 + se_sess->se_tpg = se_tpg;
2314 + se_sess->fabric_sess_ptr = fabric_sess_ptr;
2315 +@@ -353,7 +354,7 @@ void __transport_register_session(
2316 + se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
2317 + }
2318 +
2319 +- spin_lock_irq(&se_nacl->nacl_sess_lock);
2320 ++ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
2321 + /*
2322 + * The se_nacl->nacl_sess pointer will be set to the
2323 + * last active I_T Nexus for each struct se_node_acl.
2324 +@@ -362,7 +363,7 @@ void __transport_register_session(
2325 +
2326 + list_add_tail(&se_sess->sess_acl_list,
2327 + &se_nacl->acl_sess_list);
2328 +- spin_unlock_irq(&se_nacl->nacl_sess_lock);
2329 ++ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
2330 + }
2331 + list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
2332 +
2333 +diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
2334 +index 20d79a6007d5..070733ca94d5 100644
2335 +--- a/drivers/tty/rocket.c
2336 ++++ b/drivers/tty/rocket.c
2337 +@@ -1894,7 +1894,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
2338 + ByteIO_t UPCIRingInd = 0;
2339 +
2340 + if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
2341 +- pci_enable_device(dev))
2342 ++ pci_enable_device(dev) || i >= NUM_BOARDS)
2343 + return 0;
2344 +
2345 + rcktpt_io_addr[i] = pci_resource_start(dev, 0);
2346 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
2347 +index ff04b7f8549f..41784798c789 100644
2348 +--- a/drivers/uio/uio.c
2349 ++++ b/drivers/uio/uio.c
2350 +@@ -841,8 +841,6 @@ int __uio_register_device(struct module *owner,
2351 + if (ret)
2352 + goto err_uio_dev_add_attributes;
2353 +
2354 +- info->uio_dev = idev;
2355 +-
2356 + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
2357 + /*
2358 + * Note that we deliberately don't use devm_request_irq
2359 +@@ -858,6 +856,7 @@ int __uio_register_device(struct module *owner,
2360 + goto err_request_irq;
2361 + }
2362 +
2363 ++ info->uio_dev = idev;
2364 + return 0;
2365 +
2366 + err_request_irq:
2367 +diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
2368 +index 4737615f0eaa..ce696d6c4641 100644
2369 +--- a/fs/autofs4/autofs_i.h
2370 ++++ b/fs/autofs4/autofs_i.h
2371 +@@ -26,6 +26,7 @@
2372 + #include <linux/list.h>
2373 + #include <linux/completion.h>
2374 + #include <asm/current.h>
2375 ++#include <linux/magic.h>
2376 +
2377 + /* This is the range of ioctl() numbers we claim as ours */
2378 + #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
2379 +@@ -124,7 +125,8 @@ struct autofs_sb_info {
2380 +
2381 + static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
2382 + {
2383 +- return (struct autofs_sb_info *)(sb->s_fs_info);
2384 ++ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
2385 ++ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
2386 + }
2387 +
2388 + static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
2389 +diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
2390 +index 09e7d68dff02..3c7e727612fa 100644
2391 +--- a/fs/autofs4/inode.c
2392 ++++ b/fs/autofs4/inode.c
2393 +@@ -14,7 +14,6 @@
2394 + #include <linux/pagemap.h>
2395 + #include <linux/parser.h>
2396 + #include <linux/bitops.h>
2397 +-#include <linux/magic.h>
2398 + #include "autofs_i.h"
2399 + #include <linux/module.h>
2400 +
2401 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2402 +index 7303ba108112..a507c0d25354 100644
2403 +--- a/fs/btrfs/ioctl.c
2404 ++++ b/fs/btrfs/ioctl.c
2405 +@@ -3158,6 +3158,25 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2406 +
2407 + same_lock_start = min_t(u64, loff, dst_loff);
2408 + same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
2409 ++ } else {
2410 ++ /*
2411 ++ * If the source and destination inodes are different, the
2412 ++ * source's range end offset matches the source's i_size, that
2413 ++ * i_size is not a multiple of the sector size, and the
2414 ++ * destination range does not go past the destination's i_size,
2415 ++ * we must round down the length to the nearest sector size
2416 ++ * multiple. If we don't do this adjustment we end replacing
2417 ++ * with zeroes the bytes in the range that starts at the
2418 ++ * deduplication range's end offset and ends at the next sector
2419 ++ * size multiple.
2420 ++ */
2421 ++ if (loff + olen == i_size_read(src) &&
2422 ++ dst_loff + len < i_size_read(dst)) {
2423 ++ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
2424 ++
2425 ++ len = round_down(i_size_read(src), sz) - loff;
2426 ++ olen = len;
2427 ++ }
2428 + }
2429 +
2430 + /* don't make the dst file partly checksummed */
2431 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2432 +index caf9cf91b825..2cd0b3053439 100644
2433 +--- a/fs/cifs/inode.c
2434 ++++ b/fs/cifs/inode.c
2435 +@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
2436 + oparms.cifs_sb = cifs_sb;
2437 + oparms.desired_access = GENERIC_READ;
2438 + oparms.create_options = CREATE_NOT_DIR;
2439 ++ if (backup_cred(cifs_sb))
2440 ++ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
2441 + oparms.disposition = FILE_OPEN;
2442 + oparms.path = path;
2443 + oparms.fid = &fid;
2444 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2445 +index e9f246fe9d80..759cbbf7b1af 100644
2446 +--- a/fs/cifs/smb2ops.c
2447 ++++ b/fs/cifs/smb2ops.c
2448 +@@ -385,7 +385,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
2449 + oparms.tcon = tcon;
2450 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2451 + oparms.disposition = FILE_OPEN;
2452 +- oparms.create_options = 0;
2453 ++ if (backup_cred(cifs_sb))
2454 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2455 ++ else
2456 ++ oparms.create_options = 0;
2457 + oparms.fid = &fid;
2458 + oparms.reconnect = false;
2459 +
2460 +@@ -534,7 +537,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
2461 + oparms.tcon = tcon;
2462 + oparms.desired_access = FILE_READ_EA;
2463 + oparms.disposition = FILE_OPEN;
2464 +- oparms.create_options = 0;
2465 ++ if (backup_cred(cifs_sb))
2466 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2467 ++ else
2468 ++ oparms.create_options = 0;
2469 + oparms.fid = &fid;
2470 + oparms.reconnect = false;
2471 +
2472 +@@ -613,7 +619,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
2473 + oparms.tcon = tcon;
2474 + oparms.desired_access = FILE_WRITE_EA;
2475 + oparms.disposition = FILE_OPEN;
2476 +- oparms.create_options = 0;
2477 ++ if (backup_cred(cifs_sb))
2478 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2479 ++ else
2480 ++ oparms.create_options = 0;
2481 + oparms.fid = &fid;
2482 + oparms.reconnect = false;
2483 +
2484 +@@ -1215,7 +1224,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2485 + oparms.tcon = tcon;
2486 + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2487 + oparms.disposition = FILE_OPEN;
2488 +- oparms.create_options = 0;
2489 ++ if (backup_cred(cifs_sb))
2490 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2491 ++ else
2492 ++ oparms.create_options = 0;
2493 + oparms.fid = fid;
2494 + oparms.reconnect = false;
2495 +
2496 +@@ -1491,7 +1503,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2497 + oparms.tcon = tcon;
2498 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2499 + oparms.disposition = FILE_OPEN;
2500 +- oparms.create_options = 0;
2501 ++ if (backup_cred(cifs_sb))
2502 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2503 ++ else
2504 ++ oparms.create_options = 0;
2505 + oparms.fid = &fid;
2506 + oparms.reconnect = false;
2507 +
2508 +@@ -3200,7 +3215,7 @@ struct smb_version_values smb21_values = {
2509 + struct smb_version_values smb3any_values = {
2510 + .version_string = SMB3ANY_VERSION_STRING,
2511 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2512 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2513 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2514 + .large_lock_type = 0,
2515 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2516 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2517 +@@ -3220,7 +3235,7 @@ struct smb_version_values smb3any_values = {
2518 + struct smb_version_values smbdefault_values = {
2519 + .version_string = SMBDEFAULT_VERSION_STRING,
2520 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2521 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2522 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2523 + .large_lock_type = 0,
2524 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2525 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2526 +@@ -3240,7 +3255,7 @@ struct smb_version_values smbdefault_values = {
2527 + struct smb_version_values smb30_values = {
2528 + .version_string = SMB30_VERSION_STRING,
2529 + .protocol_id = SMB30_PROT_ID,
2530 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2531 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2532 + .large_lock_type = 0,
2533 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2534 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2535 +@@ -3260,7 +3275,7 @@ struct smb_version_values smb30_values = {
2536 + struct smb_version_values smb302_values = {
2537 + .version_string = SMB302_VERSION_STRING,
2538 + .protocol_id = SMB302_PROT_ID,
2539 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2540 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2541 + .large_lock_type = 0,
2542 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2543 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2544 +@@ -3281,7 +3296,7 @@ struct smb_version_values smb302_values = {
2545 + struct smb_version_values smb311_values = {
2546 + .version_string = SMB311_VERSION_STRING,
2547 + .protocol_id = SMB311_PROT_ID,
2548 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2549 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2550 + .large_lock_type = 0,
2551 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2552 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2553 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2554 +index 58842b36481d..078ec705a5cc 100644
2555 +--- a/fs/cifs/smb2pdu.c
2556 ++++ b/fs/cifs/smb2pdu.c
2557 +@@ -1816,6 +1816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2558 + if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
2559 + *oplock == SMB2_OPLOCK_LEVEL_NONE)
2560 + req->RequestedOplockLevel = *oplock;
2561 ++ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2562 ++ (oparms->create_options & CREATE_NOT_FILE))
2563 ++ req->RequestedOplockLevel = *oplock; /* no srv lease support */
2564 + else {
2565 + rc = add_lease_context(server, iov, &n_iov, oplock);
2566 + if (rc) {
2567 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2568 +index 3b34004a71c1..54f8520ad7a2 100644
2569 +--- a/fs/f2fs/f2fs.h
2570 ++++ b/fs/f2fs/f2fs.h
2571 +@@ -1766,8 +1766,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2572 + pgoff_t index, bool for_write)
2573 + {
2574 + #ifdef CONFIG_F2FS_FAULT_INJECTION
2575 +- struct page *page = find_lock_page(mapping, index);
2576 ++ struct page *page;
2577 +
2578 ++ if (!for_write)
2579 ++ page = find_get_page_flags(mapping, index,
2580 ++ FGP_LOCK | FGP_ACCESSED);
2581 ++ else
2582 ++ page = find_lock_page(mapping, index);
2583 + if (page)
2584 + return page;
2585 +
2586 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2587 +index 87e654c53c31..6f589730782d 100644
2588 +--- a/fs/f2fs/file.c
2589 ++++ b/fs/f2fs/file.c
2590 +@@ -1803,7 +1803,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2591 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2592 + struct super_block *sb = sbi->sb;
2593 + __u32 in;
2594 +- int ret;
2595 ++ int ret = 0;
2596 +
2597 + if (!capable(CAP_SYS_ADMIN))
2598 + return -EPERM;
2599 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
2600 +index f2f897cd23c9..f22884418e92 100644
2601 +--- a/fs/f2fs/gc.c
2602 ++++ b/fs/f2fs/gc.c
2603 +@@ -958,7 +958,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
2604 + goto next;
2605 +
2606 + sum = page_address(sum_page);
2607 +- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
2608 ++ if (type != GET_SUM_TYPE((&sum->footer))) {
2609 ++ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
2610 ++ "type [%d, %d] in SSA and SIT",
2611 ++ segno, type, GET_SUM_TYPE((&sum->footer)));
2612 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
2613 ++ goto next;
2614 ++ }
2615 +
2616 + /*
2617 + * this is to avoid deadlock:
2618 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
2619 +index 8322e4e7bb3f..888a9dc13677 100644
2620 +--- a/fs/f2fs/inline.c
2621 ++++ b/fs/f2fs/inline.c
2622 +@@ -128,6 +128,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
2623 + if (err)
2624 + return err;
2625 +
2626 ++ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
2627 ++ f2fs_put_dnode(dn);
2628 ++ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
2629 ++ f2fs_msg(fio.sbi->sb, KERN_WARNING,
2630 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2631 ++ "run fsck to fix.",
2632 ++ __func__, dn->inode->i_ino, dn->data_blkaddr);
2633 ++ return -EINVAL;
2634 ++ }
2635 ++
2636 + f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
2637 +
2638 + read_inline_data(page, dn->inode_page);
2639 +@@ -365,6 +375,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
2640 + if (err)
2641 + goto out;
2642 +
2643 ++ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
2644 ++ f2fs_put_dnode(&dn);
2645 ++ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
2646 ++ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
2647 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2648 ++ "run fsck to fix.",
2649 ++ __func__, dir->i_ino, dn.data_blkaddr);
2650 ++ err = -EINVAL;
2651 ++ goto out;
2652 ++ }
2653 ++
2654 + f2fs_wait_on_page_writeback(page, DATA, true);
2655 + zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE);
2656 +
2657 +@@ -481,6 +502,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
2658 + return 0;
2659 + recover:
2660 + lock_page(ipage);
2661 ++ f2fs_wait_on_page_writeback(ipage, NODE, true);
2662 + memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
2663 + f2fs_i_depth_write(dir, 0);
2664 + f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
2665 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2666 +index f623da26159f..712505ec5de4 100644
2667 +--- a/fs/f2fs/node.c
2668 ++++ b/fs/f2fs/node.c
2669 +@@ -1610,7 +1610,9 @@ next_step:
2670 + !is_cold_node(page)))
2671 + continue;
2672 + lock_node:
2673 +- if (!trylock_page(page))
2674 ++ if (wbc->sync_mode == WB_SYNC_ALL)
2675 ++ lock_page(page);
2676 ++ else if (!trylock_page(page))
2677 + continue;
2678 +
2679 + if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
2680 +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
2681 +index 39ada30889b6..4dfb5080098f 100644
2682 +--- a/fs/f2fs/segment.h
2683 ++++ b/fs/f2fs/segment.h
2684 +@@ -414,6 +414,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
2685 + if (test_and_clear_bit(segno, free_i->free_segmap)) {
2686 + free_i->free_segments++;
2687 +
2688 ++ if (IS_CURSEC(sbi, secno))
2689 ++ goto skip_free;
2690 + next = find_next_bit(free_i->free_segmap,
2691 + start_segno + sbi->segs_per_sec, start_segno);
2692 + if (next >= start_segno + sbi->segs_per_sec) {
2693 +@@ -421,6 +423,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
2694 + free_i->free_sections++;
2695 + }
2696 + }
2697 ++skip_free:
2698 + spin_unlock(&free_i->segmap_lock);
2699 + }
2700 +
2701 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2702 +index 400c00058bad..eae35909fa51 100644
2703 +--- a/fs/f2fs/super.c
2704 ++++ b/fs/f2fs/super.c
2705 +@@ -1883,12 +1883,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2706 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2707 + unsigned int ovp_segments, reserved_segments;
2708 + unsigned int main_segs, blocks_per_seg;
2709 ++ unsigned int sit_segs, nat_segs;
2710 ++ unsigned int sit_bitmap_size, nat_bitmap_size;
2711 ++ unsigned int log_blocks_per_seg;
2712 + int i;
2713 +
2714 + total = le32_to_cpu(raw_super->segment_count);
2715 + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2716 +- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
2717 +- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
2718 ++ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
2719 ++ fsmeta += sit_segs;
2720 ++ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
2721 ++ fsmeta += nat_segs;
2722 + fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2723 + fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
2724 +
2725 +@@ -1919,6 +1924,18 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2726 + return 1;
2727 + }
2728 +
2729 ++ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2730 ++ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2731 ++ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2732 ++
2733 ++ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2734 ++ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2735 ++ f2fs_msg(sbi->sb, KERN_ERR,
2736 ++ "Wrong bitmap size: sit: %u, nat:%u",
2737 ++ sit_bitmap_size, nat_bitmap_size);
2738 ++ return 1;
2739 ++ }
2740 ++
2741 + if (unlikely(f2fs_cp_error(sbi))) {
2742 + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
2743 + return 1;
2744 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
2745 +index e2c258f717cd..93af9d7dfcdc 100644
2746 +--- a/fs/f2fs/sysfs.c
2747 ++++ b/fs/f2fs/sysfs.c
2748 +@@ -9,6 +9,7 @@
2749 + * it under the terms of the GNU General Public License version 2 as
2750 + * published by the Free Software Foundation.
2751 + */
2752 ++#include <linux/compiler.h>
2753 + #include <linux/proc_fs.h>
2754 + #include <linux/f2fs_fs.h>
2755 + #include <linux/seq_file.h>
2756 +@@ -381,7 +382,8 @@ static struct kobject f2fs_feat = {
2757 + .kset = &f2fs_kset,
2758 + };
2759 +
2760 +-static int segment_info_seq_show(struct seq_file *seq, void *offset)
2761 ++static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
2762 ++ void *offset)
2763 + {
2764 + struct super_block *sb = seq->private;
2765 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
2766 +@@ -408,7 +410,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
2767 + return 0;
2768 + }
2769 +
2770 +-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
2771 ++static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
2772 ++ void *offset)
2773 + {
2774 + struct super_block *sb = seq->private;
2775 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
2776 +@@ -432,7 +435,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
2777 + return 0;
2778 + }
2779 +
2780 +-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
2781 ++static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
2782 ++ void *offset)
2783 + {
2784 + struct super_block *sb = seq->private;
2785 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
2786 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2787 +index 2c3f398995f6..b8d55da2f04d 100644
2788 +--- a/fs/nfs/callback_proc.c
2789 ++++ b/fs/nfs/callback_proc.c
2790 +@@ -213,9 +213,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
2791 + {
2792 + u32 oldseq, newseq;
2793 +
2794 +- /* Is the stateid still not initialised? */
2795 ++ /* Is the stateid not initialised? */
2796 + if (!pnfs_layout_is_valid(lo))
2797 +- return NFS4ERR_DELAY;
2798 ++ return NFS4ERR_NOMATCHING_LAYOUT;
2799 +
2800 + /* Mismatched stateid? */
2801 + if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
2802 +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
2803 +index 123c069429a7..57de914630bc 100644
2804 +--- a/fs/nfs/callback_xdr.c
2805 ++++ b/fs/nfs/callback_xdr.c
2806 +@@ -904,16 +904,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
2807 +
2808 + if (hdr_arg.minorversion == 0) {
2809 + cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
2810 +- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
2811 ++ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
2812 ++ if (cps.clp)
2813 ++ nfs_put_client(cps.clp);
2814 + goto out_invalidcred;
2815 ++ }
2816 + }
2817 +
2818 + cps.minorversion = hdr_arg.minorversion;
2819 + hdr_res.taglen = hdr_arg.taglen;
2820 + hdr_res.tag = hdr_arg.tag;
2821 +- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
2822 ++ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
2823 ++ if (cps.clp)
2824 ++ nfs_put_client(cps.clp);
2825 + return rpc_system_err;
2826 +-
2827 ++ }
2828 + while (status == 0 && nops != hdr_arg.nops) {
2829 + status = process_op(nops, rqstp, &xdr_in,
2830 + rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
2831 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2832 +index 9f0bb908e2b5..e41ef532c4ce 100644
2833 +--- a/include/linux/mm_types.h
2834 ++++ b/include/linux/mm_types.h
2835 +@@ -354,7 +354,7 @@ struct kioctx_table;
2836 + struct mm_struct {
2837 + struct vm_area_struct *mmap; /* list of VMAs */
2838 + struct rb_root mm_rb;
2839 +- u32 vmacache_seqnum; /* per-thread vmacache */
2840 ++ u64 vmacache_seqnum; /* per-thread vmacache */
2841 + #ifdef CONFIG_MMU
2842 + unsigned long (*get_unmapped_area) (struct file *filp,
2843 + unsigned long addr, unsigned long len,
2844 +diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
2845 +index 5fe87687664c..d7016dcb245e 100644
2846 +--- a/include/linux/mm_types_task.h
2847 ++++ b/include/linux/mm_types_task.h
2848 +@@ -32,7 +32,7 @@
2849 + #define VMACACHE_MASK (VMACACHE_SIZE - 1)
2850 +
2851 + struct vmacache {
2852 +- u32 seqnum;
2853 ++ u64 seqnum;
2854 + struct vm_area_struct *vmas[VMACACHE_SIZE];
2855 + };
2856 +
2857 +diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
2858 +index 7fd514f36e74..a4be6388a980 100644
2859 +--- a/include/linux/rhashtable.h
2860 ++++ b/include/linux/rhashtable.h
2861 +@@ -152,25 +152,25 @@ struct rhashtable_params {
2862 + /**
2863 + * struct rhashtable - Hash table handle
2864 + * @tbl: Bucket table
2865 +- * @nelems: Number of elements in table
2866 + * @key_len: Key length for hashfn
2867 +- * @p: Configuration parameters
2868 + * @max_elems: Maximum number of elements in table
2869 ++ * @p: Configuration parameters
2870 + * @rhlist: True if this is an rhltable
2871 + * @run_work: Deferred worker to expand/shrink asynchronously
2872 + * @mutex: Mutex to protect current/future table swapping
2873 + * @lock: Spin lock to protect walker list
2874 ++ * @nelems: Number of elements in table
2875 + */
2876 + struct rhashtable {
2877 + struct bucket_table __rcu *tbl;
2878 +- atomic_t nelems;
2879 + unsigned int key_len;
2880 +- struct rhashtable_params p;
2881 + unsigned int max_elems;
2882 ++ struct rhashtable_params p;
2883 + bool rhlist;
2884 + struct work_struct run_work;
2885 + struct mutex mutex;
2886 + spinlock_t lock;
2887 ++ atomic_t nelems;
2888 + };
2889 +
2890 + /**
2891 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2892 +index 6dd77767fd5b..f64e88444082 100644
2893 +--- a/include/linux/skbuff.h
2894 ++++ b/include/linux/skbuff.h
2895 +@@ -663,21 +663,26 @@ struct sk_buff {
2896 + struct sk_buff *prev;
2897 +
2898 + union {
2899 +- ktime_t tstamp;
2900 +- u64 skb_mstamp;
2901 ++ struct net_device *dev;
2902 ++ /* Some protocols might use this space to store information,
2903 ++ * while device pointer would be NULL.
2904 ++ * UDP receive path is one user.
2905 ++ */
2906 ++ unsigned long dev_scratch;
2907 + };
2908 + };
2909 +- struct rb_node rbnode; /* used in netem & tcp stack */
2910 ++ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
2911 ++ struct list_head list;
2912 + };
2913 +- struct sock *sk;
2914 +
2915 + union {
2916 +- struct net_device *dev;
2917 +- /* Some protocols might use this space to store information,
2918 +- * while device pointer would be NULL.
2919 +- * UDP receive path is one user.
2920 +- */
2921 +- unsigned long dev_scratch;
2922 ++ struct sock *sk;
2923 ++ int ip_defrag_offset;
2924 ++ };
2925 ++
2926 ++ union {
2927 ++ ktime_t tstamp;
2928 ++ u64 skb_mstamp;
2929 + };
2930 + /*
2931 + * This is the control buffer. It is free to use for every
2932 +@@ -2580,7 +2585,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2933 + kfree_skb(skb);
2934 + }
2935 +
2936 +-void skb_rbtree_purge(struct rb_root *root);
2937 ++unsigned int skb_rbtree_purge(struct rb_root *root);
2938 +
2939 + void *netdev_alloc_frag(unsigned int fragsz);
2940 +
2941 +@@ -3134,6 +3139,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
2942 + return skb->data;
2943 + }
2944 +
2945 ++int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
2946 + /**
2947 + * pskb_trim_rcsum - trim received skb and update checksum
2948 + * @skb: buffer to trim
2949 +@@ -3147,9 +3153,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2950 + {
2951 + if (likely(len >= skb->len))
2952 + return 0;
2953 +- if (skb->ip_summed == CHECKSUM_COMPLETE)
2954 +- skb->ip_summed = CHECKSUM_NONE;
2955 +- return __pskb_trim(skb, len);
2956 ++ return pskb_trim_rcsum_slow(skb, len);
2957 + }
2958 +
2959 + static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2960 +@@ -3169,6 +3173,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2961 +
2962 + #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
2963 +
2964 ++#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
2965 ++#define skb_rb_first(root) rb_to_skb(rb_first(root))
2966 ++#define skb_rb_last(root) rb_to_skb(rb_last(root))
2967 ++#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
2968 ++#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
2969 ++
2970 + #define skb_queue_walk(queue, skb) \
2971 + for (skb = (queue)->next; \
2972 + skb != (struct sk_buff *)(queue); \
2973 +@@ -3183,6 +3193,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2974 + for (; skb != (struct sk_buff *)(queue); \
2975 + skb = skb->next)
2976 +
2977 ++#define skb_rbtree_walk(skb, root) \
2978 ++ for (skb = skb_rb_first(root); skb != NULL; \
2979 ++ skb = skb_rb_next(skb))
2980 ++
2981 ++#define skb_rbtree_walk_from(skb) \
2982 ++ for (; skb != NULL; \
2983 ++ skb = skb_rb_next(skb))
2984 ++
2985 ++#define skb_rbtree_walk_from_safe(skb, tmp) \
2986 ++ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
2987 ++ skb = tmp)
2988 ++
2989 + #define skb_queue_walk_from_safe(queue, skb, tmp) \
2990 + for (tmp = skb->next; \
2991 + skb != (struct sk_buff *)(queue); \
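A minimal sketch (not part of the patch) of how the skb_rb_*() accessors and skb_rbtree_walk() macro added above are meant to be used; the frag_tree root below is a hypothetical queue:

	struct rb_root frag_tree = RB_ROOT;	/* hypothetical rb-tree of skbs */
	struct sk_buff *skb;

	skb_rbtree_walk(skb, &frag_tree) {
		/* fragments are visited in increasing rb-tree key order */
		pr_debug("frag len=%u truesize=%u\n", skb->len, skb->truesize);
	}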
2992 +diff --git a/include/linux/tpm.h b/include/linux/tpm.h
2993 +index 2a6c3d96b31f..7f7b29f86c59 100644
2994 +--- a/include/linux/tpm.h
2995 ++++ b/include/linux/tpm.h
2996 +@@ -48,6 +48,8 @@ struct tpm_class_ops {
2997 + u8 (*status) (struct tpm_chip *chip);
2998 + bool (*update_timeouts)(struct tpm_chip *chip,
2999 + unsigned long *timeout_cap);
3000 ++ int (*go_idle)(struct tpm_chip *chip);
3001 ++ int (*cmd_ready)(struct tpm_chip *chip);
3002 + int (*request_locality)(struct tpm_chip *chip, int loc);
3003 + int (*relinquish_locality)(struct tpm_chip *chip, int loc);
3004 + void (*clk_enable)(struct tpm_chip *chip, bool value);
3005 +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
3006 +index 5c7f010676a7..47a3441cf4c4 100644
3007 +--- a/include/linux/vm_event_item.h
3008 ++++ b/include/linux/vm_event_item.h
3009 +@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
3010 + #ifdef CONFIG_DEBUG_VM_VMACACHE
3011 + VMACACHE_FIND_CALLS,
3012 + VMACACHE_FIND_HITS,
3013 +- VMACACHE_FULL_FLUSHES,
3014 + #endif
3015 + #ifdef CONFIG_SWAP
3016 + SWAP_RA,
3017 +diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
3018 +index a5b3aa8d281f..a09b28f76460 100644
3019 +--- a/include/linux/vmacache.h
3020 ++++ b/include/linux/vmacache.h
3021 +@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
3022 + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
3023 + }
3024 +
3025 +-extern void vmacache_flush_all(struct mm_struct *mm);
3026 + extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
3027 + extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
3028 + unsigned long addr);
3029 +@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
3030 + static inline void vmacache_invalidate(struct mm_struct *mm)
3031 + {
3032 + mm->vmacache_seqnum++;
3033 +-
3034 +- /* deal with overflows */
3035 +- if (unlikely(mm->vmacache_seqnum == 0))
3036 +- vmacache_flush_all(mm);
3037 + }
3038 +
3039 + #endif /* __LINUX_VMACACHE_H */
3040 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
3041 +index a6e4edd8d4a2..335cf7851f12 100644
3042 +--- a/include/net/inet_frag.h
3043 ++++ b/include/net/inet_frag.h
3044 +@@ -2,14 +2,20 @@
3045 + #ifndef __NET_FRAG_H__
3046 + #define __NET_FRAG_H__
3047 +
3048 ++#include <linux/rhashtable.h>
3049 ++
3050 + struct netns_frags {
3051 +- /* Keep atomic mem on separate cachelines in structs that include it */
3052 +- atomic_t mem ____cacheline_aligned_in_smp;
3053 + /* sysctls */
3054 ++ long high_thresh;
3055 ++ long low_thresh;
3056 + int timeout;
3057 +- int high_thresh;
3058 +- int low_thresh;
3059 + int max_dist;
3060 ++ struct inet_frags *f;
3061 ++
3062 ++ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
3063 ++
3064 ++ /* Keep atomic mem on separate cachelines in structs that include it */
3065 ++ atomic_long_t mem ____cacheline_aligned_in_smp;
3066 + };
3067 +
3068 + /**
3069 +@@ -25,130 +31,115 @@ enum {
3070 + INET_FRAG_COMPLETE = BIT(2),
3071 + };
3072 +
3073 ++struct frag_v4_compare_key {
3074 ++ __be32 saddr;
3075 ++ __be32 daddr;
3076 ++ u32 user;
3077 ++ u32 vif;
3078 ++ __be16 id;
3079 ++ u16 protocol;
3080 ++};
3081 ++
3082 ++struct frag_v6_compare_key {
3083 ++ struct in6_addr saddr;
3084 ++ struct in6_addr daddr;
3085 ++ u32 user;
3086 ++ __be32 id;
3087 ++ u32 iif;
3088 ++};
3089 ++
3090 + /**
3091 + * struct inet_frag_queue - fragment queue
3092 + *
3093 +- * @lock: spinlock protecting the queue
3094 ++ * @node: rhash node
3095 ++ * @key: keys identifying this frag.
3096 + * @timer: queue expiration timer
3097 +- * @list: hash bucket list
3098 ++ * @lock: spinlock protecting this frag
3099 + * @refcnt: reference count of the queue
3100 + * @fragments: received fragments head
3101 ++ * @rb_fragments: received fragments rb-tree root
3102 + * @fragments_tail: received fragments tail
3103 ++ * @last_run_head: the head of the last "run". see ip_fragment.c
3104 + * @stamp: timestamp of the last received fragment
3105 + * @len: total length of the original datagram
3106 + * @meat: length of received fragments so far
3107 + * @flags: fragment queue flags
3108 + * @max_size: maximum received fragment size
3109 + * @net: namespace that this frag belongs to
3110 +- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
3111 ++ * @rcu: rcu head for freeing deferall
3112 + */
3113 + struct inet_frag_queue {
3114 +- spinlock_t lock;
3115 ++ struct rhash_head node;
3116 ++ union {
3117 ++ struct frag_v4_compare_key v4;
3118 ++ struct frag_v6_compare_key v6;
3119 ++ } key;
3120 + struct timer_list timer;
3121 +- struct hlist_node list;
3122 ++ spinlock_t lock;
3123 + refcount_t refcnt;
3124 +- struct sk_buff *fragments;
3125 ++ struct sk_buff *fragments; /* Used in IPv6. */
3126 ++ struct rb_root rb_fragments; /* Used in IPv4. */
3127 + struct sk_buff *fragments_tail;
3128 ++ struct sk_buff *last_run_head;
3129 + ktime_t stamp;
3130 + int len;
3131 + int meat;
3132 + __u8 flags;
3133 + u16 max_size;
3134 +- struct netns_frags *net;
3135 +- struct hlist_node list_evictor;
3136 +-};
3137 +-
3138 +-#define INETFRAGS_HASHSZ 1024
3139 +-
3140 +-/* averaged:
3141 +- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
3142 +- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
3143 +- * struct frag_queue))
3144 +- */
3145 +-#define INETFRAGS_MAXDEPTH 128
3146 +-
3147 +-struct inet_frag_bucket {
3148 +- struct hlist_head chain;
3149 +- spinlock_t chain_lock;
3150 ++ struct netns_frags *net;
3151 ++ struct rcu_head rcu;
3152 + };
3153 +
3154 + struct inet_frags {
3155 +- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
3156 +-
3157 +- struct work_struct frags_work;
3158 +- unsigned int next_bucket;
3159 +- unsigned long last_rebuild_jiffies;
3160 +- bool rebuild;
3161 +-
3162 +- /* The first call to hashfn is responsible to initialize
3163 +- * rnd. This is best done with net_get_random_once.
3164 +- *
3165 +- * rnd_seqlock is used to let hash insertion detect
3166 +- * when it needs to re-lookup the hash chain to use.
3167 +- */
3168 +- u32 rnd;
3169 +- seqlock_t rnd_seqlock;
3170 + unsigned int qsize;
3171 +
3172 +- unsigned int (*hashfn)(const struct inet_frag_queue *);
3173 +- bool (*match)(const struct inet_frag_queue *q,
3174 +- const void *arg);
3175 + void (*constructor)(struct inet_frag_queue *q,
3176 + const void *arg);
3177 + void (*destructor)(struct inet_frag_queue *);
3178 +- void (*frag_expire)(unsigned long data);
3179 ++ void (*frag_expire)(struct timer_list *t);
3180 + struct kmem_cache *frags_cachep;
3181 + const char *frags_cache_name;
3182 ++ struct rhashtable_params rhash_params;
3183 + };
3184 +
3185 + int inet_frags_init(struct inet_frags *);
3186 + void inet_frags_fini(struct inet_frags *);
3187 +
3188 +-static inline void inet_frags_init_net(struct netns_frags *nf)
3189 ++static inline int inet_frags_init_net(struct netns_frags *nf)
3190 + {
3191 +- atomic_set(&nf->mem, 0);
3192 ++ atomic_long_set(&nf->mem, 0);
3193 ++ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
3194 + }
3195 +-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
3196 ++void inet_frags_exit_net(struct netns_frags *nf);
3197 +
3198 +-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
3199 +-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
3200 +-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
3201 +- struct inet_frags *f, void *key, unsigned int hash);
3202 ++void inet_frag_kill(struct inet_frag_queue *q);
3203 ++void inet_frag_destroy(struct inet_frag_queue *q);
3204 ++struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
3205 +
3206 +-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
3207 +- const char *prefix);
3208 ++/* Free all skbs in the queue; return the sum of their truesizes. */
3209 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root);
3210 +
3211 +-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
3212 ++static inline void inet_frag_put(struct inet_frag_queue *q)
3213 + {
3214 + if (refcount_dec_and_test(&q->refcnt))
3215 +- inet_frag_destroy(q, f);
3216 +-}
3217 +-
3218 +-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
3219 +-{
3220 +- return !hlist_unhashed(&q->list_evictor);
3221 ++ inet_frag_destroy(q);
3222 + }
3223 +
3224 + /* Memory Tracking Functions. */
3225 +
3226 +-static inline int frag_mem_limit(struct netns_frags *nf)
3227 +-{
3228 +- return atomic_read(&nf->mem);
3229 +-}
3230 +-
3231 +-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
3232 ++static inline long frag_mem_limit(const struct netns_frags *nf)
3233 + {
3234 +- atomic_sub(i, &nf->mem);
3235 ++ return atomic_long_read(&nf->mem);
3236 + }
3237 +
3238 +-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
3239 ++static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
3240 + {
3241 +- atomic_add(i, &nf->mem);
3242 ++ atomic_long_sub(val, &nf->mem);
3243 + }
3244 +
3245 +-static inline int sum_frag_mem_limit(struct netns_frags *nf)
3246 ++static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
3247 + {
3248 +- return atomic_read(&nf->mem);
3249 ++ atomic_long_add(val, &nf->mem);
3250 + }
3251 +
3252 + /* RFC 3168 support :
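A minimal sketch (not part of the patch, names hypothetical) of the per-namespace setup the reworked API above expects: nf->f must point at the protocol's struct inet_frags before inet_frags_init_net() runs, because that call now performs the rhashtable_init() using nf->f->rhash_params:

	static int my_frags_init_net(struct netns_frags *nf,
				     struct inet_frags *my_frags)
	{
		nf->high_thresh = 4 * 1024 * 1024;	/* bytes; now a long */
		nf->low_thresh  = 3 * 1024 * 1024;
		nf->timeout     = 30 * HZ;
		nf->f           = my_frags;		/* supplies rhash_params */

		return inet_frags_init_net(nf);	/* zeroes mem, inits rhashtable */
	}

This mirrors the lowpan_frags_init_net() change later in this patch.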
3253 +diff --git a/include/net/ip.h b/include/net/ip.h
3254 +index 81da1123fc8e..7c430343176a 100644
3255 +--- a/include/net/ip.h
3256 ++++ b/include/net/ip.h
3257 +@@ -570,7 +570,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
3258 + return skb;
3259 + }
3260 + #endif
3261 +-int ip_frag_mem(struct net *net);
3262 +
3263 + /*
3264 + * Functions provided by ip_forward.c
3265 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
3266 +index f280c61e019a..fa87a62e9bd3 100644
3267 +--- a/include/net/ipv6.h
3268 ++++ b/include/net/ipv6.h
3269 +@@ -331,13 +331,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
3270 + idev->cnf.accept_ra;
3271 + }
3272 +
3273 +-#if IS_ENABLED(CONFIG_IPV6)
3274 +-static inline int ip6_frag_mem(struct net *net)
3275 +-{
3276 +- return sum_frag_mem_limit(&net->ipv6.frags);
3277 +-}
3278 +-#endif
3279 +-
3280 + #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
3281 + #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
3282 + #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
3283 +@@ -531,17 +524,8 @@ enum ip6_defrag_users {
3284 + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
3285 + };
3286 +
3287 +-struct ip6_create_arg {
3288 +- __be32 id;
3289 +- u32 user;
3290 +- const struct in6_addr *src;
3291 +- const struct in6_addr *dst;
3292 +- int iif;
3293 +- u8 ecn;
3294 +-};
3295 +-
3296 + void ip6_frag_init(struct inet_frag_queue *q, const void *a);
3297 +-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
3298 ++extern const struct rhashtable_params ip6_rhash_params;
3299 +
3300 + /*
3301 + * Equivalent of ipv4 struct ip
3302 +@@ -549,19 +533,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
3303 + struct frag_queue {
3304 + struct inet_frag_queue q;
3305 +
3306 +- __be32 id; /* fragment id */
3307 +- u32 user;
3308 +- struct in6_addr saddr;
3309 +- struct in6_addr daddr;
3310 +-
3311 + int iif;
3312 + unsigned int csum;
3313 + __u16 nhoffset;
3314 + u8 ecn;
3315 + };
3316 +
3317 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
3318 +- struct inet_frags *frags);
3319 ++void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
3320 +
3321 + static inline bool ipv6_addr_any(const struct in6_addr *a)
3322 + {
3323 +diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
3324 +index ac71559314e7..9eae13eefc49 100644
3325 +--- a/include/uapi/linux/ethtool.h
3326 ++++ b/include/uapi/linux/ethtool.h
3327 +@@ -898,13 +898,13 @@ struct ethtool_rx_flow_spec {
3328 + static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
3329 + {
3330 + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
3331 +-};
3332 ++}
3333 +
3334 + static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
3335 + {
3336 + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
3337 + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
3338 +-};
3339 ++}
3340 +
3341 + /**
3342 + * struct ethtool_rxnfc - command to get or set RX flow classification rules
3343 +diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
3344 +index 0d941cdd8e8c..f5d753e60836 100644
3345 +--- a/include/uapi/linux/snmp.h
3346 ++++ b/include/uapi/linux/snmp.h
3347 +@@ -56,6 +56,7 @@ enum
3348 + IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
3349 + IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
3350 + IPSTATS_MIB_CEPKTS, /* InCEPkts */
3351 ++ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
3352 + __IPSTATS_MIB_MAX
3353 + };
3354 +
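The new ReasmOverlaps counter is incremented by the IPv4 reassembly changes later in this patch when an overlapping fragment causes the whole queue to be dropped; a one-line sketch of the (assumed) call site:

	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);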
3355 +diff --git a/kernel/cpu.c b/kernel/cpu.c
3356 +index 8f02f9b6e046..f3f389e33343 100644
3357 +--- a/kernel/cpu.c
3358 ++++ b/kernel/cpu.c
3359 +@@ -612,15 +612,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
3360 + bool bringup = st->bringup;
3361 + enum cpuhp_state state;
3362 +
3363 ++ if (WARN_ON_ONCE(!st->should_run))
3364 ++ return;
3365 ++
3366 + /*
3367 + * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
3368 + * that if we see ->should_run we also see the rest of the state.
3369 + */
3370 + smp_mb();
3371 +
3372 +- if (WARN_ON_ONCE(!st->should_run))
3373 +- return;
3374 +-
3375 + cpuhp_lock_acquire(bringup);
3376 +
3377 + if (st->single) {
3378 +@@ -932,7 +932,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3379 + ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
3380 + if (ret) {
3381 + st->target = prev_state;
3382 +- undo_cpu_down(cpu, st);
3383 ++ if (st->state < prev_state)
3384 ++ undo_cpu_down(cpu, st);
3385 + break;
3386 + }
3387 + }
3388 +@@ -985,7 +986,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
3389 + * to do the further cleanups.
3390 + */
3391 + ret = cpuhp_down_callbacks(cpu, st, target);
3392 +- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
3393 ++ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
3394 + cpuhp_reset_state(st, prev_state);
3395 + __cpuhp_kick_ap(st);
3396 + }
3397 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3398 +index 9fe525f410bf..f17c76a1a05f 100644
3399 +--- a/kernel/time/timer.c
3400 ++++ b/kernel/time/timer.c
3401 +@@ -1609,6 +1609,22 @@ static inline void __run_timers(struct timer_base *base)
3402 +
3403 + raw_spin_lock_irq(&base->lock);
3404 +
3405 ++ /*
3406 ++ * timer_base::must_forward_clk must be cleared before running
3407 ++ * timers so that any timer functions that call mod_timer() will
3408 ++ * not try to forward the base. Idle tracking / clock forwarding
3409 ++ * logic is only used with BASE_STD timers.
3410 ++ *
3411 ++ * The must_forward_clk flag is cleared unconditionally also for
3412 ++ * the deferrable base. The deferrable base is not affected by idle
3413 ++ * tracking and never forwarded, so clearing the flag is a NOOP.
3414 ++ *
3415 ++ * The fact that the deferrable base is never forwarded can cause
3416 ++ * large variations in granularity for deferrable timers, but they
3417 ++ * can be deferred for long periods due to idle anyway.
3418 ++ */
3419 ++ base->must_forward_clk = false;
3420 ++
3421 + while (time_after_eq(jiffies, base->clk)) {
3422 +
3423 + levels = collect_expired_timers(base, heads);
3424 +@@ -1628,19 +1644,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
3425 + {
3426 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
3427 +
3428 +- /*
3429 +- * must_forward_clk must be cleared before running timers so that any
3430 +- * timer functions that call mod_timer will not try to forward the
3431 +- * base. idle trcking / clock forwarding logic is only used with
3432 +- * BASE_STD timers.
3433 +- *
3434 +- * The deferrable base does not do idle tracking at all, so we do
3435 +- * not forward it. This can result in very large variations in
3436 +- * granularity for deferrable timers, but they can be deferred for
3437 +- * long periods due to idle.
3438 +- */
3439 +- base->must_forward_clk = false;
3440 +-
3441 + __run_timers(base);
3442 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
3443 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
3444 +diff --git a/lib/rhashtable.c b/lib/rhashtable.c
3445 +index 39215c724fc7..cebbcec877d7 100644
3446 +--- a/lib/rhashtable.c
3447 ++++ b/lib/rhashtable.c
3448 +@@ -364,6 +364,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
3449 + err = rhashtable_rehash_chain(ht, old_hash);
3450 + if (err)
3451 + return err;
3452 ++ cond_resched();
3453 + }
3454 +
3455 + /* Publish the new table pointer. */
3456 +@@ -1073,6 +1074,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
3457 + for (i = 0; i < tbl->size; i++) {
3458 + struct rhash_head *pos, *next;
3459 +
3460 ++ cond_resched();
3461 + for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
3462 + next = !rht_is_a_nulls(pos) ?
3463 + rht_dereference(pos->next, ht) : NULL;
3464 +diff --git a/mm/debug.c b/mm/debug.c
3465 +index 6726bec731c9..c55abc893fdc 100644
3466 +--- a/mm/debug.c
3467 ++++ b/mm/debug.c
3468 +@@ -100,7 +100,7 @@ EXPORT_SYMBOL(dump_vma);
3469 +
3470 + void dump_mm(const struct mm_struct *mm)
3471 + {
3472 +- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
3473 ++ pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
3474 + #ifdef CONFIG_MMU
3475 + "get_unmapped_area %p\n"
3476 + #endif
3477 +@@ -128,7 +128,7 @@ void dump_mm(const struct mm_struct *mm)
3478 + "tlb_flush_pending %d\n"
3479 + "def_flags: %#lx(%pGv)\n",
3480 +
3481 +- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
3482 ++ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
3483 + #ifdef CONFIG_MMU
3484 + mm->get_unmapped_area,
3485 + #endif
3486 +diff --git a/mm/vmacache.c b/mm/vmacache.c
3487 +index db7596eb6132..f1729617dc85 100644
3488 +--- a/mm/vmacache.c
3489 ++++ b/mm/vmacache.c
3490 +@@ -7,44 +7,6 @@
3491 + #include <linux/mm.h>
3492 + #include <linux/vmacache.h>
3493 +
3494 +-/*
3495 +- * Flush vma caches for threads that share a given mm.
3496 +- *
3497 +- * The operation is safe because the caller holds the mmap_sem
3498 +- * exclusively and other threads accessing the vma cache will
3499 +- * have mmap_sem held at least for read, so no extra locking
3500 +- * is required to maintain the vma cache.
3501 +- */
3502 +-void vmacache_flush_all(struct mm_struct *mm)
3503 +-{
3504 +- struct task_struct *g, *p;
3505 +-
3506 +- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
3507 +-
3508 +- /*
3509 +- * Single threaded tasks need not iterate the entire
3510 +- * list of process. We can avoid the flushing as well
3511 +- * since the mm's seqnum was increased and don't have
3512 +- * to worry about other threads' seqnum. Current's
3513 +- * flush will occur upon the next lookup.
3514 +- */
3515 +- if (atomic_read(&mm->mm_users) == 1)
3516 +- return;
3517 +-
3518 +- rcu_read_lock();
3519 +- for_each_process_thread(g, p) {
3520 +- /*
3521 +- * Only flush the vmacache pointers as the
3522 +- * mm seqnum is already set and curr's will
3523 +- * be set upon invalidation when the next
3524 +- * lookup is done.
3525 +- */
3526 +- if (mm == p->mm)
3527 +- vmacache_flush(p);
3528 +- }
3529 +- rcu_read_unlock();
3530 +-}
3531 +-
3532 + /*
3533 + * This task may be accessing a foreign mm via (for example)
3534 + * get_user_pages()->find_vma(). The vmacache is task-local and this
3535 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
3536 +index cef3754408d4..b21fcc838784 100644
3537 +--- a/net/bluetooth/hidp/core.c
3538 ++++ b/net/bluetooth/hidp/core.c
3539 +@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
3540 + hid->version = req->version;
3541 + hid->country = req->country;
3542 +
3543 +- strncpy(hid->name, req->name, sizeof(req->name) - 1);
3544 ++ strncpy(hid->name, req->name, sizeof(hid->name));
3545 +
3546 + snprintf(hid->phys, sizeof(hid->phys), "%pMR",
3547 + &l2cap_pi(session->ctrl_sock->sk)->chan->src);
3548 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3549 +index 2e5eeba97de9..168a3e8883d4 100644
3550 +--- a/net/core/skbuff.c
3551 ++++ b/net/core/skbuff.c
3552 +@@ -1839,6 +1839,20 @@ done:
3553 + }
3554 + EXPORT_SYMBOL(___pskb_trim);
3555 +
3556 ++/* Note : use pskb_trim_rcsum() instead of calling this directly
3557 ++ */
3558 ++int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
3559 ++{
3560 ++ if (skb->ip_summed == CHECKSUM_COMPLETE) {
3561 ++ int delta = skb->len - len;
3562 ++
3563 ++ skb->csum = csum_sub(skb->csum,
3564 ++ skb_checksum(skb, len, delta, 0));
3565 ++ }
3566 ++ return __pskb_trim(skb, len);
3567 ++}
3568 ++EXPORT_SYMBOL(pskb_trim_rcsum_slow);
3569 ++
3570 + /**
3571 + * __pskb_pull_tail - advance tail of skb header
3572 + * @skb: buffer to reallocate
3573 +@@ -2842,20 +2856,27 @@ EXPORT_SYMBOL(skb_queue_purge);
3574 + /**
3575 + * skb_rbtree_purge - empty a skb rbtree
3576 + * @root: root of the rbtree to empty
3577 ++ * Return value: the sum of truesizes of all purged skbs.
3578 + *
3579 + * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3580 + * the list and one reference dropped. This function does not take
3581 + * any lock. Synchronization should be handled by the caller (e.g., TCP
3582 + * out-of-order queue is protected by the socket lock).
3583 + */
3584 +-void skb_rbtree_purge(struct rb_root *root)
3585 ++unsigned int skb_rbtree_purge(struct rb_root *root)
3586 + {
3587 +- struct sk_buff *skb, *next;
3588 ++ struct rb_node *p = rb_first(root);
3589 ++ unsigned int sum = 0;
3590 +
3591 +- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
3592 +- kfree_skb(skb);
3593 ++ while (p) {
3594 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3595 +
3596 +- *root = RB_ROOT;
3597 ++ p = rb_next(p);
3598 ++ rb_erase(&skb->rbnode, root);
3599 ++ sum += skb->truesize;
3600 ++ kfree_skb(skb);
3601 ++ }
3602 ++ return sum;
3603 + }
3604 +
3605 + /**
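A minimal sketch (not part of the patch, helper name hypothetical) of the unchanged caller-side contract: pskb_trim_rcsum() remains the entry point, but the slow path above now repairs a CHECKSUM_COMPLETE checksum by subtracting the checksum of the trimmed tail instead of downgrading to CHECKSUM_NONE:

	static int my_trim_to_len(struct sk_buff *skb, unsigned int len)
	{
		if (pskb_trim_rcsum(skb, len))
			return -ENOMEM;
		/* skb->csum is still valid for CHECKSUM_COMPLETE skbs */
		return 0;
	}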
3606 +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
3607 +index bae7d78aa068..fbeacbc2be5d 100644
3608 +--- a/net/dcb/dcbnl.c
3609 ++++ b/net/dcb/dcbnl.c
3610 +@@ -1765,7 +1765,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
3611 + if (itr->app.selector == app->selector &&
3612 + itr->app.protocol == app->protocol &&
3613 + itr->ifindex == ifindex &&
3614 +- (!prio || itr->app.priority == prio))
3615 ++ ((prio == -1) || itr->app.priority == prio))
3616 + return itr;
3617 + }
3618 +
3619 +@@ -1800,7 +1800,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
3620 + u8 prio = 0;
3621 +
3622 + spin_lock_bh(&dcb_lock);
3623 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
3624 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
3625 ++ if (itr)
3626 + prio = itr->app.priority;
3627 + spin_unlock_bh(&dcb_lock);
3628 +
3629 +@@ -1828,7 +1829,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
3630 +
3631 + spin_lock_bh(&dcb_lock);
3632 + /* Search for existing match and replace */
3633 +- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
3634 ++ itr = dcb_app_lookup(new, dev->ifindex, -1);
3635 ++ if (itr) {
3636 + if (new->priority)
3637 + itr->app.priority = new->priority;
3638 + else {
3639 +@@ -1861,7 +1863,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
3640 + u8 prio = 0;
3641 +
3642 + spin_lock_bh(&dcb_lock);
3643 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
3644 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
3645 ++ if (itr)
3646 + prio |= 1 << itr->app.priority;
3647 + spin_unlock_bh(&dcb_lock);
3648 +
3649 +diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
3650 +index d8de3bcfb103..b8d95cb71c25 100644
3651 +--- a/net/ieee802154/6lowpan/6lowpan_i.h
3652 ++++ b/net/ieee802154/6lowpan/6lowpan_i.h
3653 +@@ -17,37 +17,19 @@ typedef unsigned __bitwise lowpan_rx_result;
3654 + #define LOWPAN_DISPATCH_FRAG1 0xc0
3655 + #define LOWPAN_DISPATCH_FRAGN 0xe0
3656 +
3657 +-struct lowpan_create_arg {
3658 ++struct frag_lowpan_compare_key {
3659 + u16 tag;
3660 + u16 d_size;
3661 +- const struct ieee802154_addr *src;
3662 +- const struct ieee802154_addr *dst;
3663 ++ const struct ieee802154_addr src;
3664 ++ const struct ieee802154_addr dst;
3665 + };
3666 +
3667 +-/* Equivalent of ipv4 struct ip
3668 ++/* Equivalent of ipv4 struct ipq
3669 + */
3670 + struct lowpan_frag_queue {
3671 + struct inet_frag_queue q;
3672 +-
3673 +- u16 tag;
3674 +- u16 d_size;
3675 +- struct ieee802154_addr saddr;
3676 +- struct ieee802154_addr daddr;
3677 + };
3678 +
3679 +-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
3680 +-{
3681 +- switch (a->mode) {
3682 +- case IEEE802154_ADDR_LONG:
3683 +- return (((__force u64)a->extended_addr) >> 32) ^
3684 +- (((__force u64)a->extended_addr) & 0xffffffff);
3685 +- case IEEE802154_ADDR_SHORT:
3686 +- return (__force u32)(a->short_addr + (a->pan_id << 16));
3687 +- default:
3688 +- return 0;
3689 +- }
3690 +-}
3691 +-
3692 + int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
3693 + void lowpan_net_frag_exit(void);
3694 + int lowpan_net_frag_init(void);
3695 +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
3696 +index f85b08baff16..1790b65944b3 100644
3697 +--- a/net/ieee802154/6lowpan/reassembly.c
3698 ++++ b/net/ieee802154/6lowpan/reassembly.c
3699 +@@ -37,55 +37,24 @@ static struct inet_frags lowpan_frags;
3700 + static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
3701 + struct sk_buff *prev, struct net_device *ldev);
3702 +
3703 +-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
3704 +- const struct ieee802154_addr *saddr,
3705 +- const struct ieee802154_addr *daddr)
3706 +-{
3707 +- net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
3708 +- return jhash_3words(ieee802154_addr_hash(saddr),
3709 +- ieee802154_addr_hash(daddr),
3710 +- (__force u32)(tag + (d_size << 16)),
3711 +- lowpan_frags.rnd);
3712 +-}
3713 +-
3714 +-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
3715 +-{
3716 +- const struct lowpan_frag_queue *fq;
3717 +-
3718 +- fq = container_of(q, struct lowpan_frag_queue, q);
3719 +- return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
3720 +-}
3721 +-
3722 +-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
3723 +-{
3724 +- const struct lowpan_frag_queue *fq;
3725 +- const struct lowpan_create_arg *arg = a;
3726 +-
3727 +- fq = container_of(q, struct lowpan_frag_queue, q);
3728 +- return fq->tag == arg->tag && fq->d_size == arg->d_size &&
3729 +- ieee802154_addr_equal(&fq->saddr, arg->src) &&
3730 +- ieee802154_addr_equal(&fq->daddr, arg->dst);
3731 +-}
3732 +-
3733 + static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
3734 + {
3735 +- const struct lowpan_create_arg *arg = a;
3736 ++ const struct frag_lowpan_compare_key *key = a;
3737 + struct lowpan_frag_queue *fq;
3738 +
3739 + fq = container_of(q, struct lowpan_frag_queue, q);
3740 +
3741 +- fq->tag = arg->tag;
3742 +- fq->d_size = arg->d_size;
3743 +- fq->saddr = *arg->src;
3744 +- fq->daddr = *arg->dst;
3745 ++ BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
3746 ++ memcpy(&q->key, key, sizeof(*key));
3747 + }
3748 +
3749 +-static void lowpan_frag_expire(unsigned long data)
3750 ++static void lowpan_frag_expire(struct timer_list *t)
3751 + {
3752 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
3753 + struct frag_queue *fq;
3754 + struct net *net;
3755 +
3756 +- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
3757 ++ fq = container_of(frag, struct frag_queue, q);
3758 + net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
3759 +
3760 + spin_lock(&fq->q.lock);
3761 +@@ -93,10 +62,10 @@ static void lowpan_frag_expire(unsigned long data)
3762 + if (fq->q.flags & INET_FRAG_COMPLETE)
3763 + goto out;
3764 +
3765 +- inet_frag_kill(&fq->q, &lowpan_frags);
3766 ++ inet_frag_kill(&fq->q);
3767 + out:
3768 + spin_unlock(&fq->q.lock);
3769 +- inet_frag_put(&fq->q, &lowpan_frags);
3770 ++ inet_frag_put(&fq->q);
3771 + }
3772 +
3773 + static inline struct lowpan_frag_queue *
3774 +@@ -104,25 +73,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
3775 + const struct ieee802154_addr *src,
3776 + const struct ieee802154_addr *dst)
3777 + {
3778 +- struct inet_frag_queue *q;
3779 +- struct lowpan_create_arg arg;
3780 +- unsigned int hash;
3781 + struct netns_ieee802154_lowpan *ieee802154_lowpan =
3782 + net_ieee802154_lowpan(net);
3783 ++ struct frag_lowpan_compare_key key = {
3784 ++ .tag = cb->d_tag,
3785 ++ .d_size = cb->d_size,
3786 ++ .src = *src,
3787 ++ .dst = *dst,
3788 ++ };
3789 ++ struct inet_frag_queue *q;
3790 +
3791 +- arg.tag = cb->d_tag;
3792 +- arg.d_size = cb->d_size;
3793 +- arg.src = src;
3794 +- arg.dst = dst;
3795 +-
3796 +- hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
3797 +-
3798 +- q = inet_frag_find(&ieee802154_lowpan->frags,
3799 +- &lowpan_frags, &arg, hash);
3800 +- if (IS_ERR_OR_NULL(q)) {
3801 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
3802 ++ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
3803 ++ if (!q)
3804 + return NULL;
3805 +- }
3806 ++
3807 + return container_of(q, struct lowpan_frag_queue, q);
3808 + }
3809 +
3810 +@@ -229,7 +193,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
3811 + struct sk_buff *fp, *head = fq->q.fragments;
3812 + int sum_truesize;
3813 +
3814 +- inet_frag_kill(&fq->q, &lowpan_frags);
3815 ++ inet_frag_kill(&fq->q);
3816 +
3817 + /* Make the one we just received the head. */
3818 + if (prev) {
3819 +@@ -437,7 +401,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
3820 + ret = lowpan_frag_queue(fq, skb, frag_type);
3821 + spin_unlock(&fq->q.lock);
3822 +
3823 +- inet_frag_put(&fq->q, &lowpan_frags);
3824 ++ inet_frag_put(&fq->q);
3825 + return ret;
3826 + }
3827 +
3828 +@@ -447,24 +411,22 @@ err:
3829 + }
3830 +
3831 + #ifdef CONFIG_SYSCTL
3832 +-static int zero;
3833 +
3834 + static struct ctl_table lowpan_frags_ns_ctl_table[] = {
3835 + {
3836 + .procname = "6lowpanfrag_high_thresh",
3837 + .data = &init_net.ieee802154_lowpan.frags.high_thresh,
3838 +- .maxlen = sizeof(int),
3839 ++ .maxlen = sizeof(unsigned long),
3840 + .mode = 0644,
3841 +- .proc_handler = proc_dointvec_minmax,
3842 ++ .proc_handler = proc_doulongvec_minmax,
3843 + .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
3844 + },
3845 + {
3846 + .procname = "6lowpanfrag_low_thresh",
3847 + .data = &init_net.ieee802154_lowpan.frags.low_thresh,
3848 +- .maxlen = sizeof(int),
3849 ++ .maxlen = sizeof(unsigned long),
3850 + .mode = 0644,
3851 +- .proc_handler = proc_dointvec_minmax,
3852 +- .extra1 = &zero,
3853 ++ .proc_handler = proc_doulongvec_minmax,
3854 + .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
3855 + },
3856 + {
3857 +@@ -580,14 +542,20 @@ static int __net_init lowpan_frags_init_net(struct net *net)
3858 + {
3859 + struct netns_ieee802154_lowpan *ieee802154_lowpan =
3860 + net_ieee802154_lowpan(net);
3861 ++ int res;
3862 +
3863 + ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
3864 + ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
3865 + ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
3866 ++ ieee802154_lowpan->frags.f = &lowpan_frags;
3867 +
3868 +- inet_frags_init_net(&ieee802154_lowpan->frags);
3869 +-
3870 +- return lowpan_frags_ns_sysctl_register(net);
3871 ++ res = inet_frags_init_net(&ieee802154_lowpan->frags);
3872 ++ if (res < 0)
3873 ++ return res;
3874 ++ res = lowpan_frags_ns_sysctl_register(net);
3875 ++ if (res < 0)
3876 ++ inet_frags_exit_net(&ieee802154_lowpan->frags);
3877 ++ return res;
3878 + }
3879 +
3880 + static void __net_exit lowpan_frags_exit_net(struct net *net)
3881 +@@ -596,7 +564,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
3882 + net_ieee802154_lowpan(net);
3883 +
3884 + lowpan_frags_ns_sysctl_unregister(net);
3885 +- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
3886 ++ inet_frags_exit_net(&ieee802154_lowpan->frags);
3887 + }
3888 +
3889 + static struct pernet_operations lowpan_frags_ops = {
3890 +@@ -604,32 +572,63 @@ static struct pernet_operations lowpan_frags_ops = {
3891 + .exit = lowpan_frags_exit_net,
3892 + };
3893 +
3894 +-int __init lowpan_net_frag_init(void)
3895 ++static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
3896 + {
3897 +- int ret;
3898 ++ return jhash2(data,
3899 ++ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
3900 ++}
3901 +
3902 +- ret = lowpan_frags_sysctl_register();
3903 +- if (ret)
3904 +- return ret;
3905 ++static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
3906 ++{
3907 ++ const struct inet_frag_queue *fq = data;
3908 +
3909 +- ret = register_pernet_subsys(&lowpan_frags_ops);
3910 +- if (ret)
3911 +- goto err_pernet;
3912 ++ return jhash2((const u32 *)&fq->key,
3913 ++ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
3914 ++}
3915 ++
3916 ++static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
3917 ++{
3918 ++ const struct frag_lowpan_compare_key *key = arg->key;
3919 ++ const struct inet_frag_queue *fq = ptr;
3920 ++
3921 ++ return !!memcmp(&fq->key, key, sizeof(*key));
3922 ++}
3923 ++
3924 ++static const struct rhashtable_params lowpan_rhash_params = {
3925 ++ .head_offset = offsetof(struct inet_frag_queue, node),
3926 ++ .hashfn = lowpan_key_hashfn,
3927 ++ .obj_hashfn = lowpan_obj_hashfn,
3928 ++ .obj_cmpfn = lowpan_obj_cmpfn,
3929 ++ .automatic_shrinking = true,
3930 ++};
3931 ++
3932 ++int __init lowpan_net_frag_init(void)
3933 ++{
3934 ++ int ret;
3935 +
3936 +- lowpan_frags.hashfn = lowpan_hashfn;
3937 + lowpan_frags.constructor = lowpan_frag_init;
3938 + lowpan_frags.destructor = NULL;
3939 + lowpan_frags.qsize = sizeof(struct frag_queue);
3940 +- lowpan_frags.match = lowpan_frag_match;
3941 + lowpan_frags.frag_expire = lowpan_frag_expire;
3942 + lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
3943 ++ lowpan_frags.rhash_params = lowpan_rhash_params;
3944 + ret = inet_frags_init(&lowpan_frags);
3945 + if (ret)
3946 +- goto err_pernet;
3947 ++ goto out;
3948 +
3949 ++ ret = lowpan_frags_sysctl_register();
3950 ++ if (ret)
3951 ++ goto err_sysctl;
3952 ++
3953 ++ ret = register_pernet_subsys(&lowpan_frags_ops);
3954 ++ if (ret)
3955 ++ goto err_pernet;
3956 ++out:
3957 + return ret;
3958 + err_pernet:
3959 + lowpan_frags_sysctl_unregister();
3960 ++err_sysctl:
3961 ++ inet_frags_fini(&lowpan_frags);
3962 + return ret;
3963 + }
3964 +
3965 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3966 +index ba4454ecdf0f..f6764537148c 100644
3967 +--- a/net/ipv4/inet_fragment.c
3968 ++++ b/net/ipv4/inet_fragment.c
3969 +@@ -25,12 +25,6 @@
3970 + #include <net/inet_frag.h>
3971 + #include <net/inet_ecn.h>
3972 +
3973 +-#define INETFRAGS_EVICT_BUCKETS 128
3974 +-#define INETFRAGS_EVICT_MAX 512
3975 +-
3976 +-/* don't rebuild inetfrag table with new secret more often than this */
3977 +-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
3978 +-
3979 + /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
3980 + * Value : 0xff if frame should be dropped.
3981 + * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
3982 +@@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
3983 + };
3984 + EXPORT_SYMBOL(ip_frag_ecn_table);
3985 +
3986 +-static unsigned int
3987 +-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
3988 +-{
3989 +- return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
3990 +-}
3991 +-
3992 +-static bool inet_frag_may_rebuild(struct inet_frags *f)
3993 +-{
3994 +- return time_after(jiffies,
3995 +- f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
3996 +-}
3997 +-
3998 +-static void inet_frag_secret_rebuild(struct inet_frags *f)
3999 +-{
4000 +- int i;
4001 +-
4002 +- write_seqlock_bh(&f->rnd_seqlock);
4003 +-
4004 +- if (!inet_frag_may_rebuild(f))
4005 +- goto out;
4006 +-
4007 +- get_random_bytes(&f->rnd, sizeof(u32));
4008 +-
4009 +- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
4010 +- struct inet_frag_bucket *hb;
4011 +- struct inet_frag_queue *q;
4012 +- struct hlist_node *n;
4013 +-
4014 +- hb = &f->hash[i];
4015 +- spin_lock(&hb->chain_lock);
4016 +-
4017 +- hlist_for_each_entry_safe(q, n, &hb->chain, list) {
4018 +- unsigned int hval = inet_frag_hashfn(f, q);
4019 +-
4020 +- if (hval != i) {
4021 +- struct inet_frag_bucket *hb_dest;
4022 +-
4023 +- hlist_del(&q->list);
4024 +-
4025 +- /* Relink to new hash chain. */
4026 +- hb_dest = &f->hash[hval];
4027 +-
4028 +- /* This is the only place where we take
4029 +- * another chain_lock while already holding
4030 +- * one. As this will not run concurrently,
4031 +- * we cannot deadlock on hb_dest lock below, if its
4032 +- * already locked it will be released soon since
4033 +- * other caller cannot be waiting for hb lock
4034 +- * that we've taken above.
4035 +- */
4036 +- spin_lock_nested(&hb_dest->chain_lock,
4037 +- SINGLE_DEPTH_NESTING);
4038 +- hlist_add_head(&q->list, &hb_dest->chain);
4039 +- spin_unlock(&hb_dest->chain_lock);
4040 +- }
4041 +- }
4042 +- spin_unlock(&hb->chain_lock);
4043 +- }
4044 +-
4045 +- f->rebuild = false;
4046 +- f->last_rebuild_jiffies = jiffies;
4047 +-out:
4048 +- write_sequnlock_bh(&f->rnd_seqlock);
4049 +-}
4050 +-
4051 +-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
4052 +-{
4053 +- if (!hlist_unhashed(&q->list_evictor))
4054 +- return false;
4055 +-
4056 +- return q->net->low_thresh == 0 ||
4057 +- frag_mem_limit(q->net) >= q->net->low_thresh;
4058 +-}
4059 +-
4060 +-static unsigned int
4061 +-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
4062 +-{
4063 +- struct inet_frag_queue *fq;
4064 +- struct hlist_node *n;
4065 +- unsigned int evicted = 0;
4066 +- HLIST_HEAD(expired);
4067 +-
4068 +- spin_lock(&hb->chain_lock);
4069 +-
4070 +- hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
4071 +- if (!inet_fragq_should_evict(fq))
4072 +- continue;
4073 +-
4074 +- if (!del_timer(&fq->timer))
4075 +- continue;
4076 +-
4077 +- hlist_add_head(&fq->list_evictor, &expired);
4078 +- ++evicted;
4079 +- }
4080 +-
4081 +- spin_unlock(&hb->chain_lock);
4082 +-
4083 +- hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
4084 +- f->frag_expire((unsigned long) fq);
4085 +-
4086 +- return evicted;
4087 +-}
4088 +-
4089 +-static void inet_frag_worker(struct work_struct *work)
4090 +-{
4091 +- unsigned int budget = INETFRAGS_EVICT_BUCKETS;
4092 +- unsigned int i, evicted = 0;
4093 +- struct inet_frags *f;
4094 +-
4095 +- f = container_of(work, struct inet_frags, frags_work);
4096 +-
4097 +- BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
4098 +-
4099 +- local_bh_disable();
4100 +-
4101 +- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
4102 +- evicted += inet_evict_bucket(f, &f->hash[i]);
4103 +- i = (i + 1) & (INETFRAGS_HASHSZ - 1);
4104 +- if (evicted > INETFRAGS_EVICT_MAX)
4105 +- break;
4106 +- }
4107 +-
4108 +- f->next_bucket = i;
4109 +-
4110 +- local_bh_enable();
4111 +-
4112 +- if (f->rebuild && inet_frag_may_rebuild(f))
4113 +- inet_frag_secret_rebuild(f);
4114 +-}
4115 +-
4116 +-static void inet_frag_schedule_worker(struct inet_frags *f)
4117 +-{
4118 +- if (unlikely(!work_pending(&f->frags_work)))
4119 +- schedule_work(&f->frags_work);
4120 +-}
4121 +-
4122 + int inet_frags_init(struct inet_frags *f)
4123 + {
4124 +- int i;
4125 +-
4126 +- INIT_WORK(&f->frags_work, inet_frag_worker);
4127 +-
4128 +- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
4129 +- struct inet_frag_bucket *hb = &f->hash[i];
4130 +-
4131 +- spin_lock_init(&hb->chain_lock);
4132 +- INIT_HLIST_HEAD(&hb->chain);
4133 +- }
4134 +-
4135 +- seqlock_init(&f->rnd_seqlock);
4136 +- f->last_rebuild_jiffies = 0;
4137 + f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
4138 + NULL);
4139 + if (!f->frags_cachep)
4140 +@@ -214,83 +59,75 @@ EXPORT_SYMBOL(inet_frags_init);
4141 +
4142 + void inet_frags_fini(struct inet_frags *f)
4143 + {
4144 +- cancel_work_sync(&f->frags_work);
4145 ++ /* We must wait that all inet_frag_destroy_rcu() have completed. */
4146 ++ rcu_barrier();
4147 ++
4148 + kmem_cache_destroy(f->frags_cachep);
4149 ++ f->frags_cachep = NULL;
4150 + }
4151 + EXPORT_SYMBOL(inet_frags_fini);
4152 +
4153 +-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
4154 ++static void inet_frags_free_cb(void *ptr, void *arg)
4155 + {
4156 +- unsigned int seq;
4157 +- int i;
4158 +-
4159 +- nf->low_thresh = 0;
4160 ++ struct inet_frag_queue *fq = ptr;
4161 +
4162 +-evict_again:
4163 +- local_bh_disable();
4164 +- seq = read_seqbegin(&f->rnd_seqlock);
4165 +-
4166 +- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
4167 +- inet_evict_bucket(f, &f->hash[i]);
4168 +-
4169 +- local_bh_enable();
4170 +- cond_resched();
4171 +-
4172 +- if (read_seqretry(&f->rnd_seqlock, seq) ||
4173 +- sum_frag_mem_limit(nf))
4174 +- goto evict_again;
4175 +-}
4176 +-EXPORT_SYMBOL(inet_frags_exit_net);
4177 +-
4178 +-static struct inet_frag_bucket *
4179 +-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
4180 +-__acquires(hb->chain_lock)
4181 +-{
4182 +- struct inet_frag_bucket *hb;
4183 +- unsigned int seq, hash;
4184 +-
4185 +- restart:
4186 +- seq = read_seqbegin(&f->rnd_seqlock);
4187 +-
4188 +- hash = inet_frag_hashfn(f, fq);
4189 +- hb = &f->hash[hash];
4190 ++ /* If we can not cancel the timer, it means this frag_queue
4191 ++ * is already disappearing, we have nothing to do.
4192 ++ * Otherwise, we own a refcount until the end of this function.
4193 ++ */
4194 ++ if (!del_timer(&fq->timer))
4195 ++ return;
4196 +
4197 +- spin_lock(&hb->chain_lock);
4198 +- if (read_seqretry(&f->rnd_seqlock, seq)) {
4199 +- spin_unlock(&hb->chain_lock);
4200 +- goto restart;
4201 ++ spin_lock_bh(&fq->lock);
4202 ++ if (!(fq->flags & INET_FRAG_COMPLETE)) {
4203 ++ fq->flags |= INET_FRAG_COMPLETE;
4204 ++ refcount_dec(&fq->refcnt);
4205 + }
4206 ++ spin_unlock_bh(&fq->lock);
4207 +
4208 +- return hb;
4209 ++ inet_frag_put(fq);
4210 + }
4211 +
4212 +-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
4213 ++void inet_frags_exit_net(struct netns_frags *nf)
4214 + {
4215 +- struct inet_frag_bucket *hb;
4216 ++ nf->low_thresh = 0; /* prevent creation of new frags */
4217 +
4218 +- hb = get_frag_bucket_locked(fq, f);
4219 +- hlist_del(&fq->list);
4220 +- fq->flags |= INET_FRAG_COMPLETE;
4221 +- spin_unlock(&hb->chain_lock);
4222 ++ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
4223 + }
4224 ++EXPORT_SYMBOL(inet_frags_exit_net);
4225 +
4226 +-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
4227 ++void inet_frag_kill(struct inet_frag_queue *fq)
4228 + {
4229 + if (del_timer(&fq->timer))
4230 + refcount_dec(&fq->refcnt);
4231 +
4232 + if (!(fq->flags & INET_FRAG_COMPLETE)) {
4233 +- fq_unlink(fq, f);
4234 ++ struct netns_frags *nf = fq->net;
4235 ++
4236 ++ fq->flags |= INET_FRAG_COMPLETE;
4237 ++ rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
4238 + refcount_dec(&fq->refcnt);
4239 + }
4240 + }
4241 + EXPORT_SYMBOL(inet_frag_kill);
4242 +
4243 +-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
4244 ++static void inet_frag_destroy_rcu(struct rcu_head *head)
4245 ++{
4246 ++ struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
4247 ++ rcu);
4248 ++ struct inet_frags *f = q->net->f;
4249 ++
4250 ++ if (f->destructor)
4251 ++ f->destructor(q);
4252 ++ kmem_cache_free(f->frags_cachep, q);
4253 ++}
4254 ++
4255 ++void inet_frag_destroy(struct inet_frag_queue *q)
4256 + {
4257 + struct sk_buff *fp;
4258 + struct netns_frags *nf;
4259 + unsigned int sum, sum_truesize = 0;
4260 ++ struct inet_frags *f;
4261 +
4262 + WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
4263 + WARN_ON(del_timer(&q->timer) != 0);
4264 +@@ -298,64 +135,35 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
4265 + /* Release all fragment data. */
4266 + fp = q->fragments;
4267 + nf = q->net;
4268 +- while (fp) {
4269 +- struct sk_buff *xp = fp->next;
4270 +-
4271 +- sum_truesize += fp->truesize;
4272 +- kfree_skb(fp);
4273 +- fp = xp;
4274 ++ f = nf->f;
4275 ++ if (fp) {
4276 ++ do {
4277 ++ struct sk_buff *xp = fp->next;
4278 ++
4279 ++ sum_truesize += fp->truesize;
4280 ++ kfree_skb(fp);
4281 ++ fp = xp;
4282 ++ } while (fp);
4283 ++ } else {
4284 ++ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
4285 + }
4286 + sum = sum_truesize + f->qsize;
4287 +
4288 +- if (f->destructor)
4289 +- f->destructor(q);
4290 +- kmem_cache_free(f->frags_cachep, q);
4291 ++ call_rcu(&q->rcu, inet_frag_destroy_rcu);
4292 +
4293 + sub_frag_mem_limit(nf, sum);
4294 + }
4295 + EXPORT_SYMBOL(inet_frag_destroy);
4296 +
4297 +-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
4298 +- struct inet_frag_queue *qp_in,
4299 +- struct inet_frags *f,
4300 +- void *arg)
4301 +-{
4302 +- struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
4303 +- struct inet_frag_queue *qp;
4304 +-
4305 +-#ifdef CONFIG_SMP
4306 +- /* With SMP race we have to recheck hash table, because
4307 +- * such entry could have been created on other cpu before
4308 +- * we acquired hash bucket lock.
4309 +- */
4310 +- hlist_for_each_entry(qp, &hb->chain, list) {
4311 +- if (qp->net == nf && f->match(qp, arg)) {
4312 +- refcount_inc(&qp->refcnt);
4313 +- spin_unlock(&hb->chain_lock);
4314 +- qp_in->flags |= INET_FRAG_COMPLETE;
4315 +- inet_frag_put(qp_in, f);
4316 +- return qp;
4317 +- }
4318 +- }
4319 +-#endif
4320 +- qp = qp_in;
4321 +- if (!mod_timer(&qp->timer, jiffies + nf->timeout))
4322 +- refcount_inc(&qp->refcnt);
4323 +-
4324 +- refcount_inc(&qp->refcnt);
4325 +- hlist_add_head(&qp->list, &hb->chain);
4326 +-
4327 +- spin_unlock(&hb->chain_lock);
4328 +-
4329 +- return qp;
4330 +-}
4331 +-
4332 + static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
4333 + struct inet_frags *f,
4334 + void *arg)
4335 + {
4336 + struct inet_frag_queue *q;
4337 +
4338 ++ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
4339 ++ return NULL;
4340 ++
4341 + q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
4342 + if (!q)
4343 + return NULL;
4344 +@@ -364,77 +172,53 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
4345 + f->constructor(q, arg);
4346 + add_frag_mem_limit(nf, f->qsize);
4347 +
4348 +- setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
4349 ++ timer_setup(&q->timer, f->frag_expire, 0);
4350 + spin_lock_init(&q->lock);
4351 +- refcount_set(&q->refcnt, 1);
4352 ++ refcount_set(&q->refcnt, 3);
4353 +
4354 + return q;
4355 + }
4356 +
4357 + static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
4358 +- struct inet_frags *f,
4359 + void *arg)
4360 + {
4361 ++ struct inet_frags *f = nf->f;
4362 + struct inet_frag_queue *q;
4363 ++ int err;
4364 +
4365 + q = inet_frag_alloc(nf, f, arg);
4366 + if (!q)
4367 + return NULL;
4368 +
4369 +- return inet_frag_intern(nf, q, f, arg);
4370 +-}
4371 +-
4372 +-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
4373 +- struct inet_frags *f, void *key,
4374 +- unsigned int hash)
4375 +-{
4376 +- struct inet_frag_bucket *hb;
4377 +- struct inet_frag_queue *q;
4378 +- int depth = 0;
4379 ++ mod_timer(&q->timer, jiffies + nf->timeout);
4380 +
4381 +- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
4382 +- inet_frag_schedule_worker(f);
4383 ++ err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
4384 ++ f->rhash_params);
4385 ++ if (err < 0) {
4386 ++ q->flags |= INET_FRAG_COMPLETE;
4387 ++ inet_frag_kill(q);
4388 ++ inet_frag_destroy(q);
4389 + return NULL;
4390 + }
4391 ++ return q;
4392 ++}
4393 +
4394 +- if (frag_mem_limit(nf) > nf->low_thresh)
4395 +- inet_frag_schedule_worker(f);
4396 +-
4397 +- hash &= (INETFRAGS_HASHSZ - 1);
4398 +- hb = &f->hash[hash];
4399 +-
4400 +- spin_lock(&hb->chain_lock);
4401 +- hlist_for_each_entry(q, &hb->chain, list) {
4402 +- if (q->net == nf && f->match(q, key)) {
4403 +- refcount_inc(&q->refcnt);
4404 +- spin_unlock(&hb->chain_lock);
4405 +- return q;
4406 +- }
4407 +- depth++;
4408 +- }
4409 +- spin_unlock(&hb->chain_lock);
4410 ++/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
4411 ++struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
4412 ++{
4413 ++ struct inet_frag_queue *fq;
4414 +
4415 +- if (depth <= INETFRAGS_MAXDEPTH)
4416 +- return inet_frag_create(nf, f, key);
4417 ++ rcu_read_lock();
4418 +
4419 +- if (inet_frag_may_rebuild(f)) {
4420 +- if (!f->rebuild)
4421 +- f->rebuild = true;
4422 +- inet_frag_schedule_worker(f);
4423 ++ fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
4424 ++ if (fq) {
4425 ++ if (!refcount_inc_not_zero(&fq->refcnt))
4426 ++ fq = NULL;
4427 ++ rcu_read_unlock();
4428 ++ return fq;
4429 + }
4430 ++ rcu_read_unlock();
4431 +
4432 +- return ERR_PTR(-ENOBUFS);
4433 ++ return inet_frag_create(nf, key);
4434 + }
4435 + EXPORT_SYMBOL(inet_frag_find);
4436 +-
4437 +-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
4438 +- const char *prefix)
4439 +-{
4440 +- static const char msg[] = "inet_frag_find: Fragment hash bucket"
4441 +- " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
4442 +- ". Dropping fragment.\n";
4443 +-
4444 +- if (PTR_ERR(q) == -ENOBUFS)
4445 +- net_dbg_ratelimited("%s%s", prefix, msg);
4446 +-}
4447 +-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
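A minimal sketch (not part of the patch) of the new lookup contract: inet_frag_find() now takes only the key, returns NULL rather than ERR_PTR() when frag memory is above high_thresh or allocation fails, and hands back a reference the caller must drop with inet_frag_put():

	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);	/* struct frag_v4_compare_key */
	if (!q)
		return -ENOMEM;
	/* ... add the fragment under q->lock ... */
	inet_frag_put(q);				/* drop the lookup reference */

The refcount_set(&q->refcnt, 3) in inet_frag_alloc() appears to cover the rhashtable entry, the pending timer and this caller reference.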
4448 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
4449 +index 4cb1befc3949..e7227128df2c 100644
4450 +--- a/net/ipv4/ip_fragment.c
4451 ++++ b/net/ipv4/ip_fragment.c
4452 +@@ -57,27 +57,64 @@
4453 + */
4454 + static const char ip_frag_cache_name[] = "ip4-frags";
4455 +
4456 +-struct ipfrag_skb_cb
4457 +-{
4458 ++/* Use skb->cb to track consecutive/adjacent fragments coming at
4459 ++ * the end of the queue. Nodes in the rb-tree queue will
4460 ++ * contain "runs" of one or more adjacent fragments.
4461 ++ *
4462 ++ * Invariants:
4463 ++ * - next_frag is NULL at the tail of a "run";
4464 ++ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
4465 ++ */
4466 ++struct ipfrag_skb_cb {
4467 + struct inet_skb_parm h;
4468 +- int offset;
4469 ++ struct sk_buff *next_frag;
4470 ++ int frag_run_len;
4471 + };
4472 +
4473 +-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
4474 ++#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
4475 ++
4476 ++static void ip4_frag_init_run(struct sk_buff *skb)
4477 ++{
4478 ++ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
4479 ++
4480 ++ FRAG_CB(skb)->next_frag = NULL;
4481 ++ FRAG_CB(skb)->frag_run_len = skb->len;
4482 ++}
4483 ++
4484 ++/* Append skb to the last "run". */
4485 ++static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
4486 ++ struct sk_buff *skb)
4487 ++{
4488 ++ RB_CLEAR_NODE(&skb->rbnode);
4489 ++ FRAG_CB(skb)->next_frag = NULL;
4490 ++
4491 ++ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
4492 ++ FRAG_CB(q->fragments_tail)->next_frag = skb;
4493 ++ q->fragments_tail = skb;
4494 ++}
4495 ++
4496 ++/* Create a new "run" with the skb. */
4497 ++static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
4498 ++{
4499 ++ if (q->last_run_head)
4500 ++ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
4501 ++ &q->last_run_head->rbnode.rb_right);
4502 ++ else
4503 ++ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
4504 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
4505 ++
4506 ++ ip4_frag_init_run(skb);
4507 ++ q->fragments_tail = skb;
4508 ++ q->last_run_head = skb;
4509 ++}
4510 +
4511 + /* Describe an entry in the "incomplete datagrams" queue. */
4512 + struct ipq {
4513 + struct inet_frag_queue q;
4514 +
4515 +- u32 user;
4516 +- __be32 saddr;
4517 +- __be32 daddr;
4518 +- __be16 id;
4519 +- u8 protocol;
4520 + u8 ecn; /* RFC3168 support */
4521 + u16 max_df_size; /* largest frag with DF set seen */
4522 + int iif;
4523 +- int vif; /* L3 master device index */
4524 + unsigned int rid;
4525 + struct inet_peer *peer;
4526 + };
4527 +@@ -89,49 +126,9 @@ static u8 ip4_frag_ecn(u8 tos)
4528 +
4529 + static struct inet_frags ip4_frags;
4530 +
4531 +-int ip_frag_mem(struct net *net)
4532 +-{
4533 +- return sum_frag_mem_limit(&net->ipv4.frags);
4534 +-}
4535 +-
4536 +-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
4537 +- struct net_device *dev);
4538 +-
4539 +-struct ip4_create_arg {
4540 +- struct iphdr *iph;
4541 +- u32 user;
4542 +- int vif;
4543 +-};
4544 ++static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
4545 ++ struct sk_buff *prev_tail, struct net_device *dev);
4546 +
4547 +-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
4548 +-{
4549 +- net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
4550 +- return jhash_3words((__force u32)id << 16 | prot,
4551 +- (__force u32)saddr, (__force u32)daddr,
4552 +- ip4_frags.rnd);
4553 +-}
4554 +-
4555 +-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
4556 +-{
4557 +- const struct ipq *ipq;
4558 +-
4559 +- ipq = container_of(q, struct ipq, q);
4560 +- return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
4561 +-}
4562 +-
4563 +-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
4564 +-{
4565 +- const struct ipq *qp;
4566 +- const struct ip4_create_arg *arg = a;
4567 +-
4568 +- qp = container_of(q, struct ipq, q);
4569 +- return qp->id == arg->iph->id &&
4570 +- qp->saddr == arg->iph->saddr &&
4571 +- qp->daddr == arg->iph->daddr &&
4572 +- qp->protocol == arg->iph->protocol &&
4573 +- qp->user == arg->user &&
4574 +- qp->vif == arg->vif;
4575 +-}
4576 +
4577 + static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
4578 + {
4579 +@@ -140,17 +137,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
4580 + frags);
4581 + struct net *net = container_of(ipv4, struct net, ipv4);
4582 +
4583 +- const struct ip4_create_arg *arg = a;
4584 ++ const struct frag_v4_compare_key *key = a;
4585 +
4586 +- qp->protocol = arg->iph->protocol;
4587 +- qp->id = arg->iph->id;
4588 +- qp->ecn = ip4_frag_ecn(arg->iph->tos);
4589 +- qp->saddr = arg->iph->saddr;
4590 +- qp->daddr = arg->iph->daddr;
4591 +- qp->vif = arg->vif;
4592 +- qp->user = arg->user;
4593 ++ q->key.v4 = *key;
4594 ++ qp->ecn = 0;
4595 + qp->peer = q->net->max_dist ?
4596 +- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
4597 ++ inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
4598 + NULL;
4599 + }
4600 +
4601 +@@ -168,7 +160,7 @@ static void ip4_frag_free(struct inet_frag_queue *q)
4602 +
4603 + static void ipq_put(struct ipq *ipq)
4604 + {
4605 +- inet_frag_put(&ipq->q, &ip4_frags);
4606 ++ inet_frag_put(&ipq->q);
4607 + }
4608 +
4609 + /* Kill ipq entry. It is not destroyed immediately,
4610 +@@ -176,7 +168,7 @@ static void ipq_put(struct ipq *ipq)
4611 + */
4612 + static void ipq_kill(struct ipq *ipq)
4613 + {
4614 +- inet_frag_kill(&ipq->q, &ip4_frags);
4615 ++ inet_frag_kill(&ipq->q);
4616 + }
4617 +
4618 + static bool frag_expire_skip_icmp(u32 user)
4619 +@@ -191,12 +183,16 @@ static bool frag_expire_skip_icmp(u32 user)
4620 + /*
4621 + * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
4622 + */
4623 +-static void ip_expire(unsigned long arg)
4624 ++static void ip_expire(struct timer_list *t)
4625 + {
4626 +- struct ipq *qp;
4627 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
4628 ++ const struct iphdr *iph;
4629 ++ struct sk_buff *head = NULL;
4630 + struct net *net;
4631 ++ struct ipq *qp;
4632 ++ int err;
4633 +
4634 +- qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
4635 ++ qp = container_of(frag, struct ipq, q);
4636 + net = container_of(qp->q.net, struct net, ipv4.frags);
4637 +
4638 + rcu_read_lock();
4639 +@@ -207,51 +203,65 @@ static void ip_expire(unsigned long arg)
4640 +
4641 + ipq_kill(qp);
4642 + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
4643 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
4644 +
4645 +- if (!inet_frag_evicting(&qp->q)) {
4646 +- struct sk_buff *clone, *head = qp->q.fragments;
4647 +- const struct iphdr *iph;
4648 +- int err;
4649 +-
4650 +- __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
4651 ++ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
4652 ++ goto out;
4653 +
4654 +- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
4655 ++ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
4656 ++ * pull the head out of the tree in order to be able to
4657 ++ * deal with head->dev.
4658 ++ */
4659 ++ if (qp->q.fragments) {
4660 ++ head = qp->q.fragments;
4661 ++ qp->q.fragments = head->next;
4662 ++ } else {
4663 ++ head = skb_rb_first(&qp->q.rb_fragments);
4664 ++ if (!head)
4665 + goto out;
4666 ++ if (FRAG_CB(head)->next_frag)
4667 ++ rb_replace_node(&head->rbnode,
4668 ++ &FRAG_CB(head)->next_frag->rbnode,
4669 ++ &qp->q.rb_fragments);
4670 ++ else
4671 ++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
4672 ++ memset(&head->rbnode, 0, sizeof(head->rbnode));
4673 ++ barrier();
4674 ++ }
4675 ++ if (head == qp->q.fragments_tail)
4676 ++ qp->q.fragments_tail = NULL;
4677 +
4678 +- head->dev = dev_get_by_index_rcu(net, qp->iif);
4679 +- if (!head->dev)
4680 +- goto out;
4681 ++ sub_frag_mem_limit(qp->q.net, head->truesize);
4682 ++
4683 ++ head->dev = dev_get_by_index_rcu(net, qp->iif);
4684 ++ if (!head->dev)
4685 ++ goto out;
4686 +
4687 +
4688 +- /* skb has no dst, perform route lookup again */
4689 +- iph = ip_hdr(head);
4690 +- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
4691 ++ /* skb has no dst, perform route lookup again */
4692 ++ iph = ip_hdr(head);
4693 ++ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
4694 + iph->tos, head->dev);
4695 +- if (err)
4696 +- goto out;
4697 ++ if (err)
4698 ++ goto out;
4699 +
4700 +- /* Only an end host needs to send an ICMP
4701 +- * "Fragment Reassembly Timeout" message, per RFC792.
4702 +- */
4703 +- if (frag_expire_skip_icmp(qp->user) &&
4704 +- (skb_rtable(head)->rt_type != RTN_LOCAL))
4705 +- goto out;
4706 ++ /* Only an end host needs to send an ICMP
4707 ++ * "Fragment Reassembly Timeout" message, per RFC792.
4708 ++ */
4709 ++ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
4710 ++ (skb_rtable(head)->rt_type != RTN_LOCAL))
4711 ++ goto out;
4712 +
4713 +- clone = skb_clone(head, GFP_ATOMIC);
4714 ++ spin_unlock(&qp->q.lock);
4715 ++ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
4716 ++ goto out_rcu_unlock;
4717 +
4718 +- /* Send an ICMP "Fragment Reassembly Timeout" message. */
4719 +- if (clone) {
4720 +- spin_unlock(&qp->q.lock);
4721 +- icmp_send(clone, ICMP_TIME_EXCEEDED,
4722 +- ICMP_EXC_FRAGTIME, 0);
4723 +- consume_skb(clone);
4724 +- goto out_rcu_unlock;
4725 +- }
4726 +- }
4727 + out:
4728 + spin_unlock(&qp->q.lock);
4729 + out_rcu_unlock:
4730 + rcu_read_unlock();
4731 ++ if (head)
4732 ++ kfree_skb(head);
4733 + ipq_put(qp);
4734 + }
4735 +
4736 +@@ -261,21 +271,20 @@ out_rcu_unlock:
4737 + static struct ipq *ip_find(struct net *net, struct iphdr *iph,
4738 + u32 user, int vif)
4739 + {
4740 ++ struct frag_v4_compare_key key = {
4741 ++ .saddr = iph->saddr,
4742 ++ .daddr = iph->daddr,
4743 ++ .user = user,
4744 ++ .vif = vif,
4745 ++ .id = iph->id,
4746 ++ .protocol = iph->protocol,
4747 ++ };
4748 + struct inet_frag_queue *q;
4749 +- struct ip4_create_arg arg;
4750 +- unsigned int hash;
4751 +-
4752 +- arg.iph = iph;
4753 +- arg.user = user;
4754 +- arg.vif = vif;
4755 +
4756 +- hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
4757 +-
4758 +- q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
4759 +- if (IS_ERR_OR_NULL(q)) {
4760 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
4761 ++ q = inet_frag_find(&net->ipv4.frags, &key);
4762 ++ if (!q)
4763 + return NULL;
4764 +- }
4765 ++
4766 + return container_of(q, struct ipq, q);
4767 + }
4768 +
4769 +@@ -295,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp)
4770 + end = atomic_inc_return(&peer->rid);
4771 + qp->rid = end;
4772 +
4773 +- rc = qp->q.fragments && (end - start) > max;
4774 ++ rc = qp->q.fragments_tail && (end - start) > max;
4775 +
4776 + if (rc) {
4777 + struct net *net;
4778 +@@ -309,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp)
4779 +
4780 + static int ip_frag_reinit(struct ipq *qp)
4781 + {
4782 +- struct sk_buff *fp;
4783 + unsigned int sum_truesize = 0;
4784 +
4785 + if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
4786 +@@ -317,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp)
4787 + return -ETIMEDOUT;
4788 + }
4789 +
4790 +- fp = qp->q.fragments;
4791 +- do {
4792 +- struct sk_buff *xp = fp->next;
4793 +-
4794 +- sum_truesize += fp->truesize;
4795 +- kfree_skb(fp);
4796 +- fp = xp;
4797 +- } while (fp);
4798 ++ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
4799 + sub_frag_mem_limit(qp->q.net, sum_truesize);
4800 +
4801 + qp->q.flags = 0;
4802 + qp->q.len = 0;
4803 + qp->q.meat = 0;
4804 + qp->q.fragments = NULL;
4805 ++ qp->q.rb_fragments = RB_ROOT;
4806 + qp->q.fragments_tail = NULL;
4807 ++ qp->q.last_run_head = NULL;
4808 + qp->iif = 0;
4809 + qp->ecn = 0;
4810 +
4811 +@@ -341,7 +344,9 @@ static int ip_frag_reinit(struct ipq *qp)
4812 + /* Add new segment to existing queue. */
4813 + static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
4814 + {
4815 +- struct sk_buff *prev, *next;
4816 ++ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
4817 ++ struct rb_node **rbn, *parent;
4818 ++ struct sk_buff *skb1, *prev_tail;
4819 + struct net_device *dev;
4820 + unsigned int fragsize;
4821 + int flags, offset;
4822 +@@ -404,99 +409,61 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
4823 + if (err)
4824 + goto err;
4825 +
4826 +- /* Find out which fragments are in front and at the back of us
4827 +- * in the chain of fragments so far. We must know where to put
4828 +- * this fragment, right?
4829 +- */
4830 +- prev = qp->q.fragments_tail;
4831 +- if (!prev || FRAG_CB(prev)->offset < offset) {
4832 +- next = NULL;
4833 +- goto found;
4834 +- }
4835 +- prev = NULL;
4836 +- for (next = qp->q.fragments; next != NULL; next = next->next) {
4837 +- if (FRAG_CB(next)->offset >= offset)
4838 +- break; /* bingo! */
4839 +- prev = next;
4840 +- }
4841 +-
4842 +-found:
4843 +- /* We found where to put this one. Check for overlap with
4844 +- * preceding fragment, and, if needed, align things so that
4845 +- * any overlaps are eliminated.
4846 ++ /* Note : skb->rbnode and skb->dev share the same location. */
4847 ++ dev = skb->dev;
4848 ++ /* Makes sure compiler wont do silly aliasing games */
4849 ++ barrier();
4850 ++
4851 ++ /* RFC5722, Section 4, amended by Errata ID : 3089
4852 ++ * When reassembling an IPv6 datagram, if
4853 ++ * one or more its constituent fragments is determined to be an
4854 ++ * overlapping fragment, the entire datagram (and any constituent
4855 ++ * fragments) MUST be silently discarded.
4856 ++ *
4857 ++ * We do the same here for IPv4 (and increment an snmp counter).
4858 + */
4859 +- if (prev) {
4860 +- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
4861 +
4862 +- if (i > 0) {
4863 +- offset += i;
4864 +- err = -EINVAL;
4865 +- if (end <= offset)
4866 +- goto err;
4867 +- err = -ENOMEM;
4868 +- if (!pskb_pull(skb, i))
4869 +- goto err;
4870 +- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
4871 +- skb->ip_summed = CHECKSUM_NONE;
4872 +- }
4873 +- }
4874 +-
4875 +- err = -ENOMEM;
4876 +-
4877 +- while (next && FRAG_CB(next)->offset < end) {
4878 +- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
4879 +-
4880 +- if (i < next->len) {
4881 +- int delta = -next->truesize;
4882 +-
4883 +- /* Eat head of the next overlapped fragment
4884 +- * and leave the loop. The next ones cannot overlap.
4885 +- */
4886 +- if (!pskb_pull(next, i))
4887 +- goto err;
4888 +- delta += next->truesize;
4889 +- if (delta)
4890 +- add_frag_mem_limit(qp->q.net, delta);
4891 +- FRAG_CB(next)->offset += i;
4892 +- qp->q.meat -= i;
4893 +- if (next->ip_summed != CHECKSUM_UNNECESSARY)
4894 +- next->ip_summed = CHECKSUM_NONE;
4895 +- break;
4896 +- } else {
4897 +- struct sk_buff *free_it = next;
4898 +-
4899 +- /* Old fragment is completely overridden with
4900 +- * new one drop it.
4901 +- */
4902 +- next = next->next;
4903 +-
4904 +- if (prev)
4905 +- prev->next = next;
4906 +- else
4907 +- qp->q.fragments = next;
4908 +-
4909 +- qp->q.meat -= free_it->len;
4910 +- sub_frag_mem_limit(qp->q.net, free_it->truesize);
4911 +- kfree_skb(free_it);
4912 +- }
4913 ++ /* Find out where to put this fragment. */
4914 ++ prev_tail = qp->q.fragments_tail;
4915 ++ if (!prev_tail)
4916 ++ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
4917 ++ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
4918 ++ /* This is the common case: skb goes to the end. */
4919 ++ /* Detect and discard overlaps. */
4920 ++ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
4921 ++ goto discard_qp;
4922 ++ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
4923 ++ ip4_frag_append_to_last_run(&qp->q, skb);
4924 ++ else
4925 ++ ip4_frag_create_run(&qp->q, skb);
4926 ++ } else {
4927 ++ /* Binary search. Note that skb can become the first fragment,
4928 ++ * but not the last (covered above).
4929 ++ */
4930 ++ rbn = &qp->q.rb_fragments.rb_node;
4931 ++ do {
4932 ++ parent = *rbn;
4933 ++ skb1 = rb_to_skb(parent);
4934 ++ if (end <= skb1->ip_defrag_offset)
4935 ++ rbn = &parent->rb_left;
4936 ++ else if (offset >= skb1->ip_defrag_offset +
4937 ++ FRAG_CB(skb1)->frag_run_len)
4938 ++ rbn = &parent->rb_right;
4939 ++ else /* Found an overlap with skb1. */
4940 ++ goto discard_qp;
4941 ++ } while (*rbn);
4942 ++ /* Here we have parent properly set, and rbn pointing to
4943 ++ * one of its NULL left/right children. Insert skb.
4944 ++ */
4945 ++ ip4_frag_init_run(skb);
4946 ++ rb_link_node(&skb->rbnode, parent, rbn);
4947 ++ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
4948 + }
4949 +
4950 +- FRAG_CB(skb)->offset = offset;
4951 +-
4952 +- /* Insert this fragment in the chain of fragments. */
4953 +- skb->next = next;
4954 +- if (!next)
4955 +- qp->q.fragments_tail = skb;
4956 +- if (prev)
4957 +- prev->next = skb;
4958 +- else
4959 +- qp->q.fragments = skb;
4960 +-
4961 +- dev = skb->dev;
4962 +- if (dev) {
4963 ++ if (dev)
4964 + qp->iif = dev->ifindex;
4965 +- skb->dev = NULL;
4966 +- }
4967 ++ skb->ip_defrag_offset = offset;
4968 ++
4969 + qp->q.stamp = skb->tstamp;
4970 + qp->q.meat += skb->len;
4971 + qp->ecn |= ecn;
4972 +@@ -518,7 +485,7 @@ found:
4973 + unsigned long orefdst = skb->_skb_refdst;
4974 +
4975 + skb->_skb_refdst = 0UL;
4976 +- err = ip_frag_reasm(qp, prev, dev);
4977 ++ err = ip_frag_reasm(qp, skb, prev_tail, dev);
4978 + skb->_skb_refdst = orefdst;
4979 + return err;
4980 + }
4981 +@@ -526,20 +493,24 @@ found:
4982 + skb_dst_drop(skb);
4983 + return -EINPROGRESS;
4984 +
4985 ++discard_qp:
4986 ++ inet_frag_kill(&qp->q);
4987 ++ err = -EINVAL;
4988 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
4989 + err:
4990 + kfree_skb(skb);
4991 + return err;
4992 + }
4993 +
4994 +-
4995 + /* Build a new IP datagram from all its fragments. */
4996 +-
4997 +-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
4998 +- struct net_device *dev)
4999 ++static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
5000 ++ struct sk_buff *prev_tail, struct net_device *dev)
5001 + {
5002 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
5003 + struct iphdr *iph;
5004 +- struct sk_buff *fp, *head = qp->q.fragments;
5005 ++ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
5006 ++ struct sk_buff **nextp; /* To build frag_list. */
5007 ++ struct rb_node *rbn;
5008 + int len;
5009 + int ihlen;
5010 + int err;
5011 +@@ -553,26 +524,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5012 + goto out_fail;
5013 + }
5014 + /* Make the one we just received the head. */
5015 +- if (prev) {
5016 +- head = prev->next;
5017 +- fp = skb_clone(head, GFP_ATOMIC);
5018 ++ if (head != skb) {
5019 ++ fp = skb_clone(skb, GFP_ATOMIC);
5020 + if (!fp)
5021 + goto out_nomem;
5022 +-
5023 +- fp->next = head->next;
5024 +- if (!fp->next)
5025 ++ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
5026 ++ if (RB_EMPTY_NODE(&skb->rbnode))
5027 ++ FRAG_CB(prev_tail)->next_frag = fp;
5028 ++ else
5029 ++ rb_replace_node(&skb->rbnode, &fp->rbnode,
5030 ++ &qp->q.rb_fragments);
5031 ++ if (qp->q.fragments_tail == skb)
5032 + qp->q.fragments_tail = fp;
5033 +- prev->next = fp;
5034 +-
5035 +- skb_morph(head, qp->q.fragments);
5036 +- head->next = qp->q.fragments->next;
5037 +-
5038 +- consume_skb(qp->q.fragments);
5039 +- qp->q.fragments = head;
5040 ++ skb_morph(skb, head);
5041 ++ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
5042 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
5043 ++ &qp->q.rb_fragments);
5044 ++ consume_skb(head);
5045 ++ head = skb;
5046 + }
5047 +
5048 +- WARN_ON(!head);
5049 +- WARN_ON(FRAG_CB(head)->offset != 0);
5050 ++ WARN_ON(head->ip_defrag_offset != 0);
5051 +
5052 + /* Allocate a new buffer for the datagram. */
5053 + ihlen = ip_hdrlen(head);
5054 +@@ -596,35 +568,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5055 + clone = alloc_skb(0, GFP_ATOMIC);
5056 + if (!clone)
5057 + goto out_nomem;
5058 +- clone->next = head->next;
5059 +- head->next = clone;
5060 + skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
5061 + skb_frag_list_init(head);
5062 + for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
5063 + plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
5064 + clone->len = clone->data_len = head->data_len - plen;
5065 +- head->data_len -= clone->len;
5066 +- head->len -= clone->len;
5067 ++ head->truesize += clone->truesize;
5068 + clone->csum = 0;
5069 + clone->ip_summed = head->ip_summed;
5070 + add_frag_mem_limit(qp->q.net, clone->truesize);
5071 ++ skb_shinfo(head)->frag_list = clone;
5072 ++ nextp = &clone->next;
5073 ++ } else {
5074 ++ nextp = &skb_shinfo(head)->frag_list;
5075 + }
5076 +
5077 +- skb_shinfo(head)->frag_list = head->next;
5078 + skb_push(head, head->data - skb_network_header(head));
5079 +
5080 +- for (fp=head->next; fp; fp = fp->next) {
5081 +- head->data_len += fp->len;
5082 +- head->len += fp->len;
5083 +- if (head->ip_summed != fp->ip_summed)
5084 +- head->ip_summed = CHECKSUM_NONE;
5085 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
5086 +- head->csum = csum_add(head->csum, fp->csum);
5087 +- head->truesize += fp->truesize;
5088 ++ /* Traverse the tree in order, to build frag_list. */
5089 ++ fp = FRAG_CB(head)->next_frag;
5090 ++ rbn = rb_next(&head->rbnode);
5091 ++ rb_erase(&head->rbnode, &qp->q.rb_fragments);
5092 ++ while (rbn || fp) {
5093 ++ /* fp points to the next sk_buff in the current run;
5094 ++ * rbn points to the next run.
5095 ++ */
5096 ++ /* Go through the current run. */
5097 ++ while (fp) {
5098 ++ *nextp = fp;
5099 ++ nextp = &fp->next;
5100 ++ fp->prev = NULL;
5101 ++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
5102 ++ fp->sk = NULL;
5103 ++ head->data_len += fp->len;
5104 ++ head->len += fp->len;
5105 ++ if (head->ip_summed != fp->ip_summed)
5106 ++ head->ip_summed = CHECKSUM_NONE;
5107 ++ else if (head->ip_summed == CHECKSUM_COMPLETE)
5108 ++ head->csum = csum_add(head->csum, fp->csum);
5109 ++ head->truesize += fp->truesize;
5110 ++ fp = FRAG_CB(fp)->next_frag;
5111 ++ }
5112 ++ /* Move to the next run. */
5113 ++ if (rbn) {
5114 ++ struct rb_node *rbnext = rb_next(rbn);
5115 ++
5116 ++ fp = rb_to_skb(rbn);
5117 ++ rb_erase(rbn, &qp->q.rb_fragments);
5118 ++ rbn = rbnext;
5119 ++ }
5120 + }
5121 + sub_frag_mem_limit(qp->q.net, head->truesize);
5122 +
5123 ++ *nextp = NULL;
5124 + head->next = NULL;
5125 ++ head->prev = NULL;
5126 + head->dev = dev;
5127 + head->tstamp = qp->q.stamp;
5128 + IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
5129 +@@ -652,7 +650,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5130 +
5131 + __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
5132 + qp->q.fragments = NULL;
5133 ++ qp->q.rb_fragments = RB_ROOT;
5134 + qp->q.fragments_tail = NULL;
5135 ++ qp->q.last_run_head = NULL;
5136 + return 0;
5137 +
5138 + out_nomem:
5139 +@@ -660,7 +660,7 @@ out_nomem:
5140 + err = -ENOMEM;
5141 + goto out_fail;
5142 + out_oversize:
5143 +- net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
5144 ++ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
5145 + out_fail:
5146 + __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
5147 + return err;
5148 +@@ -734,25 +734,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
5149 + }
5150 + EXPORT_SYMBOL(ip_check_defrag);
5151 +
5152 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root)
5153 ++{
5154 ++ struct rb_node *p = rb_first(root);
5155 ++ unsigned int sum = 0;
5156 ++
5157 ++ while (p) {
5158 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
5159 ++
5160 ++ p = rb_next(p);
5161 ++ rb_erase(&skb->rbnode, root);
5162 ++ while (skb) {
5163 ++ struct sk_buff *next = FRAG_CB(skb)->next_frag;
5164 ++
5165 ++ sum += skb->truesize;
5166 ++ kfree_skb(skb);
5167 ++ skb = next;
5168 ++ }
5169 ++ }
5170 ++ return sum;
5171 ++}
5172 ++EXPORT_SYMBOL(inet_frag_rbtree_purge);
5173 ++
5174 + #ifdef CONFIG_SYSCTL
5175 +-static int zero;
5176 ++static int dist_min;
5177 +
5178 + static struct ctl_table ip4_frags_ns_ctl_table[] = {
5179 + {
5180 + .procname = "ipfrag_high_thresh",
5181 + .data = &init_net.ipv4.frags.high_thresh,
5182 +- .maxlen = sizeof(int),
5183 ++ .maxlen = sizeof(unsigned long),
5184 + .mode = 0644,
5185 +- .proc_handler = proc_dointvec_minmax,
5186 ++ .proc_handler = proc_doulongvec_minmax,
5187 + .extra1 = &init_net.ipv4.frags.low_thresh
5188 + },
5189 + {
5190 + .procname = "ipfrag_low_thresh",
5191 + .data = &init_net.ipv4.frags.low_thresh,
5192 +- .maxlen = sizeof(int),
5193 ++ .maxlen = sizeof(unsigned long),
5194 + .mode = 0644,
5195 +- .proc_handler = proc_dointvec_minmax,
5196 +- .extra1 = &zero,
5197 ++ .proc_handler = proc_doulongvec_minmax,
5198 + .extra2 = &init_net.ipv4.frags.high_thresh
5199 + },
5200 + {
5201 +@@ -768,7 +789,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
5202 + .maxlen = sizeof(int),
5203 + .mode = 0644,
5204 + .proc_handler = proc_dointvec_minmax,
5205 +- .extra1 = &zero
5206 ++ .extra1 = &dist_min,
5207 + },
5208 + { }
5209 + };
5210 +@@ -850,6 +871,8 @@ static void __init ip4_frags_ctl_register(void)
5211 +
5212 + static int __net_init ipv4_frags_init_net(struct net *net)
5213 + {
5214 ++ int res;
5215 ++
5216 + /* Fragment cache limits.
5217 + *
5218 + * The fragment memory accounting code, (tries to) account for
5219 +@@ -874,16 +897,21 @@ static int __net_init ipv4_frags_init_net(struct net *net)
5220 + net->ipv4.frags.timeout = IP_FRAG_TIME;
5221 +
5222 + net->ipv4.frags.max_dist = 64;
5223 +-
5224 +- inet_frags_init_net(&net->ipv4.frags);
5225 +-
5226 +- return ip4_frags_ns_ctl_register(net);
5227 ++ net->ipv4.frags.f = &ip4_frags;
5228 ++
5229 ++ res = inet_frags_init_net(&net->ipv4.frags);
5230 ++ if (res < 0)
5231 ++ return res;
5232 ++ res = ip4_frags_ns_ctl_register(net);
5233 ++ if (res < 0)
5234 ++ inet_frags_exit_net(&net->ipv4.frags);
5235 ++ return res;
5236 + }
5237 +
5238 + static void __net_exit ipv4_frags_exit_net(struct net *net)
5239 + {
5240 + ip4_frags_ns_ctl_unregister(net);
5241 +- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
5242 ++ inet_frags_exit_net(&net->ipv4.frags);
5243 + }
5244 +
5245 + static struct pernet_operations ip4_frags_ops = {
5246 +@@ -891,17 +919,49 @@ static struct pernet_operations ip4_frags_ops = {
5247 + .exit = ipv4_frags_exit_net,
5248 + };
5249 +
5250 ++
5251 ++static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
5252 ++{
5253 ++ return jhash2(data,
5254 ++ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
5255 ++}
5256 ++
5257 ++static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
5258 ++{
5259 ++ const struct inet_frag_queue *fq = data;
5260 ++
5261 ++ return jhash2((const u32 *)&fq->key.v4,
5262 ++ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
5263 ++}
5264 ++
5265 ++static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
5266 ++{
5267 ++ const struct frag_v4_compare_key *key = arg->key;
5268 ++ const struct inet_frag_queue *fq = ptr;
5269 ++
5270 ++ return !!memcmp(&fq->key, key, sizeof(*key));
5271 ++}
5272 ++
5273 ++static const struct rhashtable_params ip4_rhash_params = {
5274 ++ .head_offset = offsetof(struct inet_frag_queue, node),
5275 ++ .key_offset = offsetof(struct inet_frag_queue, key),
5276 ++ .key_len = sizeof(struct frag_v4_compare_key),
5277 ++ .hashfn = ip4_key_hashfn,
5278 ++ .obj_hashfn = ip4_obj_hashfn,
5279 ++ .obj_cmpfn = ip4_obj_cmpfn,
5280 ++ .automatic_shrinking = true,
5281 ++};
5282 ++
5283 + void __init ipfrag_init(void)
5284 + {
5285 +- ip4_frags_ctl_register();
5286 +- register_pernet_subsys(&ip4_frags_ops);
5287 +- ip4_frags.hashfn = ip4_hashfn;
5288 + ip4_frags.constructor = ip4_frag_init;
5289 + ip4_frags.destructor = ip4_frag_free;
5290 + ip4_frags.qsize = sizeof(struct ipq);
5291 +- ip4_frags.match = ip4_frag_match;
5292 + ip4_frags.frag_expire = ip_expire;
5293 + ip4_frags.frags_cache_name = ip_frag_cache_name;
5294 ++ ip4_frags.rhash_params = ip4_rhash_params;
5295 + if (inet_frags_init(&ip4_frags))
5296 + panic("IP: failed to allocate ip4_frags cache\n");
5297 ++ ip4_frags_ctl_register();
5298 ++ register_pernet_subsys(&ip4_frags_ops);
5299 + }
5300 +diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
5301 +index 127153f1ed8a..3fbf688a1943 100644
5302 +--- a/net/ipv4/proc.c
5303 ++++ b/net/ipv4/proc.c
5304 +@@ -54,7 +54,6 @@
5305 + static int sockstat_seq_show(struct seq_file *seq, void *v)
5306 + {
5307 + struct net *net = seq->private;
5308 +- unsigned int frag_mem;
5309 + int orphans, sockets;
5310 +
5311 + orphans = percpu_counter_sum_positive(&tcp_orphan_count);
5312 +@@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
5313 + sock_prot_inuse_get(net, &udplite_prot));
5314 + seq_printf(seq, "RAW: inuse %d\n",
5315 + sock_prot_inuse_get(net, &raw_prot));
5316 +- frag_mem = ip_frag_mem(net);
5317 +- seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
5318 ++ seq_printf(seq, "FRAG: inuse %u memory %lu\n",
5319 ++ atomic_read(&net->ipv4.frags.rhashtable.nelems),
5320 ++ frag_mem_limit(&net->ipv4.frags));
5321 + return 0;
5322 + }
5323 +
5324 +@@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
5325 + SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
5326 + SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
5327 + SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
5328 ++ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
5329 + SNMP_MIB_SENTINEL
5330 + };
5331 +
5332 +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
5333 +index fbbeda647774..0567edb76522 100644
5334 +--- a/net/ipv4/tcp_fastopen.c
5335 ++++ b/net/ipv4/tcp_fastopen.c
5336 +@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
5337 + void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
5338 + {
5339 + struct tcp_sock *tp = tcp_sk(sk);
5340 +- struct rb_node *p;
5341 +- struct sk_buff *skb;
5342 + struct dst_entry *dst;
5343 ++ struct sk_buff *skb;
5344 +
5345 + if (!tp->syn_fastopen)
5346 + return;
5347 +
5348 + if (!tp->data_segs_in) {
5349 +- p = rb_first(&tp->out_of_order_queue);
5350 +- if (p && !rb_next(p)) {
5351 +- skb = rb_entry(p, struct sk_buff, rbnode);
5352 ++ skb = skb_rb_first(&tp->out_of_order_queue);
5353 ++ if (skb && !skb_rb_next(skb)) {
5354 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
5355 + tcp_fastopen_active_disable(sk);
5356 + return;
5357 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5358 +index bdabd748f4bc..991f382afc1b 100644
5359 +--- a/net/ipv4/tcp_input.c
5360 ++++ b/net/ipv4/tcp_input.c
5361 +@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
5362 +
5363 + p = rb_first(&tp->out_of_order_queue);
5364 + while (p) {
5365 +- skb = rb_entry(p, struct sk_buff, rbnode);
5366 ++ skb = rb_to_skb(p);
5367 + if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
5368 + break;
5369 +
5370 +@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
5371 + static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
5372 + {
5373 + struct tcp_sock *tp = tcp_sk(sk);
5374 +- struct rb_node **p, *q, *parent;
5375 ++ struct rb_node **p, *parent;
5376 + struct sk_buff *skb1;
5377 + u32 seq, end_seq;
5378 + bool fragstolen;
5379 +@@ -4503,7 +4503,7 @@ coalesce_done:
5380 + parent = NULL;
5381 + while (*p) {
5382 + parent = *p;
5383 +- skb1 = rb_entry(parent, struct sk_buff, rbnode);
5384 ++ skb1 = rb_to_skb(parent);
5385 + if (before(seq, TCP_SKB_CB(skb1)->seq)) {
5386 + p = &parent->rb_left;
5387 + continue;
5388 +@@ -4548,9 +4548,7 @@ insert:
5389 +
5390 + merge_right:
5391 + /* Remove other segments covered by skb. */
5392 +- while ((q = rb_next(&skb->rbnode)) != NULL) {
5393 +- skb1 = rb_entry(q, struct sk_buff, rbnode);
5394 +-
5395 ++ while ((skb1 = skb_rb_next(skb)) != NULL) {
5396 + if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
5397 + break;
5398 + if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5399 +@@ -4565,7 +4563,7 @@ merge_right:
5400 + tcp_drop(sk, skb1);
5401 + }
5402 + /* If there is no skb after us, we are the last_skb ! */
5403 +- if (!q)
5404 ++ if (!skb1)
5405 + tp->ooo_last_skb = skb;
5406 +
5407 + add_sack:
5408 +@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
5409 + if (list)
5410 + return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5411 +
5412 +- return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
5413 ++ return skb_rb_next(skb);
5414 + }
5415 +
5416 + static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5417 +@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
5418 +
5419 + while (*p) {
5420 + parent = *p;
5421 +- skb1 = rb_entry(parent, struct sk_buff, rbnode);
5422 ++ skb1 = rb_to_skb(parent);
5423 + if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
5424 + p = &parent->rb_left;
5425 + else
5426 +@@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
5427 + struct tcp_sock *tp = tcp_sk(sk);
5428 + u32 range_truesize, sum_tiny = 0;
5429 + struct sk_buff *skb, *head;
5430 +- struct rb_node *p;
5431 + u32 start, end;
5432 +
5433 +- p = rb_first(&tp->out_of_order_queue);
5434 +- skb = rb_entry_safe(p, struct sk_buff, rbnode);
5435 ++ skb = skb_rb_first(&tp->out_of_order_queue);
5436 + new_range:
5437 + if (!skb) {
5438 +- p = rb_last(&tp->out_of_order_queue);
5439 +- /* Note: This is possible p is NULL here. We do not
5440 +- * use rb_entry_safe(), as ooo_last_skb is valid only
5441 +- * if rbtree is not empty.
5442 +- */
5443 +- tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
5444 ++ tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5445 + return;
5446 + }
5447 + start = TCP_SKB_CB(skb)->seq;
5448 +@@ -4918,7 +4909,7 @@ new_range:
5449 + range_truesize = skb->truesize;
5450 +
5451 + for (head = skb;;) {
5452 +- skb = tcp_skb_next(skb, NULL);
5453 ++ skb = skb_rb_next(skb);
5454 +
5455 + /* Range is terminated when we see a gap or when
5456 + * we are at the queue end.
5457 +@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
5458 + prev = rb_prev(node);
5459 + rb_erase(node, &tp->out_of_order_queue);
5460 + goal -= rb_to_skb(node)->truesize;
5461 +- tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
5462 ++ tcp_drop(sk, rb_to_skb(node));
5463 + if (!prev || goal <= 0) {
5464 + sk_mem_reclaim(sk);
5465 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
5466 +@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
5467 + }
5468 + node = prev;
5469 + } while (node);
5470 +- tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
5471 ++ tp->ooo_last_skb = rb_to_skb(prev);
5472 +
5473 + /* Reset SACK state. A conforming SACK implementation will
5474 + * do the same at a timeout based retransmit. When a connection
5475 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
5476 +index ee33a6743f3b..2ed8536e10b6 100644
5477 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
5478 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
5479 +@@ -63,7 +63,6 @@ struct nf_ct_frag6_skb_cb
5480 + static struct inet_frags nf_frags;
5481 +
5482 + #ifdef CONFIG_SYSCTL
5483 +-static int zero;
5484 +
5485 + static struct ctl_table nf_ct_frag6_sysctl_table[] = {
5486 + {
5487 +@@ -76,18 +75,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
5488 + {
5489 + .procname = "nf_conntrack_frag6_low_thresh",
5490 + .data = &init_net.nf_frag.frags.low_thresh,
5491 +- .maxlen = sizeof(unsigned int),
5492 ++ .maxlen = sizeof(unsigned long),
5493 + .mode = 0644,
5494 +- .proc_handler = proc_dointvec_minmax,
5495 +- .extra1 = &zero,
5496 ++ .proc_handler = proc_doulongvec_minmax,
5497 + .extra2 = &init_net.nf_frag.frags.high_thresh
5498 + },
5499 + {
5500 + .procname = "nf_conntrack_frag6_high_thresh",
5501 + .data = &init_net.nf_frag.frags.high_thresh,
5502 +- .maxlen = sizeof(unsigned int),
5503 ++ .maxlen = sizeof(unsigned long),
5504 + .mode = 0644,
5505 +- .proc_handler = proc_dointvec_minmax,
5506 ++ .proc_handler = proc_doulongvec_minmax,
5507 + .extra1 = &init_net.nf_frag.frags.low_thresh
5508 + },
5509 + { }
5510 +@@ -152,59 +150,35 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
5511 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
5512 + }
5513 +
5514 +-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
5515 +- const struct in6_addr *daddr)
5516 +-{
5517 +- net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
5518 +- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
5519 +- (__force u32)id, nf_frags.rnd);
5520 +-}
5521 +-
5522 +-
5523 +-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
5524 +-{
5525 +- const struct frag_queue *nq;
5526 +-
5527 +- nq = container_of(q, struct frag_queue, q);
5528 +- return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
5529 +-}
5530 +-
5531 +-static void nf_ct_frag6_expire(unsigned long data)
5532 ++static void nf_ct_frag6_expire(struct timer_list *t)
5533 + {
5534 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
5535 + struct frag_queue *fq;
5536 + struct net *net;
5537 +
5538 +- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
5539 ++ fq = container_of(frag, struct frag_queue, q);
5540 + net = container_of(fq->q.net, struct net, nf_frag.frags);
5541 +
5542 +- ip6_expire_frag_queue(net, fq, &nf_frags);
5543 ++ ip6_expire_frag_queue(net, fq);
5544 + }
5545 +
5546 + /* Creation primitives. */
5547 +-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
5548 +- u32 user, struct in6_addr *src,
5549 +- struct in6_addr *dst, int iif, u8 ecn)
5550 ++static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
5551 ++ const struct ipv6hdr *hdr, int iif)
5552 + {
5553 ++ struct frag_v6_compare_key key = {
5554 ++ .id = id,
5555 ++ .saddr = hdr->saddr,
5556 ++ .daddr = hdr->daddr,
5557 ++ .user = user,
5558 ++ .iif = iif,
5559 ++ };
5560 + struct inet_frag_queue *q;
5561 +- struct ip6_create_arg arg;
5562 +- unsigned int hash;
5563 +-
5564 +- arg.id = id;
5565 +- arg.user = user;
5566 +- arg.src = src;
5567 +- arg.dst = dst;
5568 +- arg.iif = iif;
5569 +- arg.ecn = ecn;
5570 +-
5571 +- local_bh_disable();
5572 +- hash = nf_hash_frag(id, src, dst);
5573 +-
5574 +- q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
5575 +- local_bh_enable();
5576 +- if (IS_ERR_OR_NULL(q)) {
5577 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
5578 ++
5579 ++ q = inet_frag_find(&net->nf_frag.frags, &key);
5580 ++ if (!q)
5581 + return NULL;
5582 +- }
5583 ++
5584 + return container_of(q, struct frag_queue, q);
5585 + }
5586 +
5587 +@@ -263,7 +237,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
5588 + * this case. -DaveM
5589 + */
5590 + pr_debug("end of fragment not rounded to 8 bytes.\n");
5591 +- inet_frag_kill(&fq->q, &nf_frags);
5592 ++ inet_frag_kill(&fq->q);
5593 + return -EPROTO;
5594 + }
5595 + if (end > fq->q.len) {
5596 +@@ -356,7 +330,7 @@ found:
5597 + return 0;
5598 +
5599 + discard_fq:
5600 +- inet_frag_kill(&fq->q, &nf_frags);
5601 ++ inet_frag_kill(&fq->q);
5602 + err:
5603 + return -EINVAL;
5604 + }
5605 +@@ -378,7 +352,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5606 + int payload_len;
5607 + u8 ecn;
5608 +
5609 +- inet_frag_kill(&fq->q, &nf_frags);
5610 ++ inet_frag_kill(&fq->q);
5611 +
5612 + WARN_ON(head == NULL);
5613 + WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
5614 +@@ -479,6 +453,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5615 + else if (head->ip_summed == CHECKSUM_COMPLETE)
5616 + head->csum = csum_add(head->csum, fp->csum);
5617 + head->truesize += fp->truesize;
5618 ++ fp->sk = NULL;
5619 + }
5620 + sub_frag_mem_limit(fq->q.net, head->truesize);
5621 +
5622 +@@ -497,6 +472,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5623 + head->csum);
5624 +
5625 + fq->q.fragments = NULL;
5626 ++ fq->q.rb_fragments = RB_ROOT;
5627 + fq->q.fragments_tail = NULL;
5628 +
5629 + return true;
5630 +@@ -591,9 +567,13 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
5631 + hdr = ipv6_hdr(skb);
5632 + fhdr = (struct frag_hdr *)skb_transport_header(skb);
5633 +
5634 ++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
5635 ++ fhdr->frag_off & htons(IP6_MF))
5636 ++ return -EINVAL;
5637 ++
5638 + skb_orphan(skb);
5639 +- fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
5640 +- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
5641 ++ fq = fq_find(net, fhdr->identification, user, hdr,
5642 ++ skb->dev ? skb->dev->ifindex : 0);
5643 + if (fq == NULL) {
5644 + pr_debug("Can't find and can't create new queue\n");
5645 + return -ENOMEM;
5646 +@@ -623,25 +603,33 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
5647 +
5648 + out_unlock:
5649 + spin_unlock_bh(&fq->q.lock);
5650 +- inet_frag_put(&fq->q, &nf_frags);
5651 ++ inet_frag_put(&fq->q);
5652 + return ret;
5653 + }
5654 + EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
5655 +
5656 + static int nf_ct_net_init(struct net *net)
5657 + {
5658 ++ int res;
5659 ++
5660 + net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
5661 + net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
5662 + net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
5663 +- inet_frags_init_net(&net->nf_frag.frags);
5664 +-
5665 +- return nf_ct_frag6_sysctl_register(net);
5666 ++ net->nf_frag.frags.f = &nf_frags;
5667 ++
5668 ++ res = inet_frags_init_net(&net->nf_frag.frags);
5669 ++ if (res < 0)
5670 ++ return res;
5671 ++ res = nf_ct_frag6_sysctl_register(net);
5672 ++ if (res < 0)
5673 ++ inet_frags_exit_net(&net->nf_frag.frags);
5674 ++ return res;
5675 + }
5676 +
5677 + static void nf_ct_net_exit(struct net *net)
5678 + {
5679 + nf_ct_frags6_sysctl_unregister(net);
5680 +- inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
5681 ++ inet_frags_exit_net(&net->nf_frag.frags);
5682 + }
5683 +
5684 + static struct pernet_operations nf_ct_net_ops = {
5685 +@@ -653,13 +641,12 @@ int nf_ct_frag6_init(void)
5686 + {
5687 + int ret = 0;
5688 +
5689 +- nf_frags.hashfn = nf_hashfn;
5690 + nf_frags.constructor = ip6_frag_init;
5691 + nf_frags.destructor = NULL;
5692 + nf_frags.qsize = sizeof(struct frag_queue);
5693 +- nf_frags.match = ip6_frag_match;
5694 + nf_frags.frag_expire = nf_ct_frag6_expire;
5695 + nf_frags.frags_cache_name = nf_frags_cache_name;
5696 ++ nf_frags.rhash_params = ip6_rhash_params;
5697 + ret = inet_frags_init(&nf_frags);
5698 + if (ret)
5699 + goto out;
5700 +diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
5701 +index e88bcb8ff0fd..dc04c024986c 100644
5702 +--- a/net/ipv6/proc.c
5703 ++++ b/net/ipv6/proc.c
5704 +@@ -38,7 +38,6 @@
5705 + static int sockstat6_seq_show(struct seq_file *seq, void *v)
5706 + {
5707 + struct net *net = seq->private;
5708 +- unsigned int frag_mem = ip6_frag_mem(net);
5709 +
5710 + seq_printf(seq, "TCP6: inuse %d\n",
5711 + sock_prot_inuse_get(net, &tcpv6_prot));
5712 +@@ -48,7 +47,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
5713 + sock_prot_inuse_get(net, &udplitev6_prot));
5714 + seq_printf(seq, "RAW6: inuse %d\n",
5715 + sock_prot_inuse_get(net, &rawv6_prot));
5716 +- seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
5717 ++ seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
5718 ++ atomic_read(&net->ipv6.frags.rhashtable.nelems),
5719 ++ frag_mem_limit(&net->ipv6.frags));
5720 + return 0;
5721 + }
5722 +
5723 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
5724 +index 846012eae526..ede0061b6f5d 100644
5725 +--- a/net/ipv6/reassembly.c
5726 ++++ b/net/ipv6/reassembly.c
5727 +@@ -79,130 +79,93 @@ static struct inet_frags ip6_frags;
5728 + static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5729 + struct net_device *dev);
5730 +
5731 +-/*
5732 +- * callers should be careful not to use the hash value outside the ipfrag_lock
5733 +- * as doing so could race with ipfrag_hash_rnd being recalculated.
5734 +- */
5735 +-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
5736 +- const struct in6_addr *daddr)
5737 +-{
5738 +- net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
5739 +- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
5740 +- (__force u32)id, ip6_frags.rnd);
5741 +-}
5742 +-
5743 +-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
5744 +-{
5745 +- const struct frag_queue *fq;
5746 +-
5747 +- fq = container_of(q, struct frag_queue, q);
5748 +- return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
5749 +-}
5750 +-
5751 +-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
5752 +-{
5753 +- const struct frag_queue *fq;
5754 +- const struct ip6_create_arg *arg = a;
5755 +-
5756 +- fq = container_of(q, struct frag_queue, q);
5757 +- return fq->id == arg->id &&
5758 +- fq->user == arg->user &&
5759 +- ipv6_addr_equal(&fq->saddr, arg->src) &&
5760 +- ipv6_addr_equal(&fq->daddr, arg->dst) &&
5761 +- (arg->iif == fq->iif ||
5762 +- !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
5763 +- IPV6_ADDR_LINKLOCAL)));
5764 +-}
5765 +-EXPORT_SYMBOL(ip6_frag_match);
5766 +-
5767 + void ip6_frag_init(struct inet_frag_queue *q, const void *a)
5768 + {
5769 + struct frag_queue *fq = container_of(q, struct frag_queue, q);
5770 +- const struct ip6_create_arg *arg = a;
5771 ++ const struct frag_v6_compare_key *key = a;
5772 +
5773 +- fq->id = arg->id;
5774 +- fq->user = arg->user;
5775 +- fq->saddr = *arg->src;
5776 +- fq->daddr = *arg->dst;
5777 +- fq->ecn = arg->ecn;
5778 ++ q->key.v6 = *key;
5779 ++ fq->ecn = 0;
5780 + }
5781 + EXPORT_SYMBOL(ip6_frag_init);
5782 +
5783 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
5784 +- struct inet_frags *frags)
5785 ++void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
5786 + {
5787 + struct net_device *dev = NULL;
5788 ++ struct sk_buff *head;
5789 +
5790 ++ rcu_read_lock();
5791 + spin_lock(&fq->q.lock);
5792 +
5793 + if (fq->q.flags & INET_FRAG_COMPLETE)
5794 + goto out;
5795 +
5796 +- inet_frag_kill(&fq->q, frags);
5797 ++ inet_frag_kill(&fq->q);
5798 +
5799 +- rcu_read_lock();
5800 + dev = dev_get_by_index_rcu(net, fq->iif);
5801 + if (!dev)
5802 +- goto out_rcu_unlock;
5803 ++ goto out;
5804 +
5805 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
5806 +-
5807 +- if (inet_frag_evicting(&fq->q))
5808 +- goto out_rcu_unlock;
5809 +-
5810 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
5811 +
5812 + /* Don't send error if the first segment did not arrive. */
5813 +- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
5814 +- goto out_rcu_unlock;
5815 ++ head = fq->q.fragments;
5816 ++ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
5817 ++ goto out;
5818 +
5819 + /* But use as source device on which LAST ARRIVED
5820 + * segment was received. And do not use fq->dev
5821 + * pointer directly, device might already disappeared.
5822 + */
5823 +- fq->q.fragments->dev = dev;
5824 +- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
5825 +-out_rcu_unlock:
5826 +- rcu_read_unlock();
5827 ++ head->dev = dev;
5828 ++ skb_get(head);
5829 ++ spin_unlock(&fq->q.lock);
5830 ++
5831 ++ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
5832 ++ kfree_skb(head);
5833 ++ goto out_rcu_unlock;
5834 ++
5835 + out:
5836 + spin_unlock(&fq->q.lock);
5837 +- inet_frag_put(&fq->q, frags);
5838 ++out_rcu_unlock:
5839 ++ rcu_read_unlock();
5840 ++ inet_frag_put(&fq->q);
5841 + }
5842 + EXPORT_SYMBOL(ip6_expire_frag_queue);
5843 +
5844 +-static void ip6_frag_expire(unsigned long data)
5845 ++static void ip6_frag_expire(struct timer_list *t)
5846 + {
5847 ++ struct inet_frag_queue *frag = from_timer(frag, t, timer);
5848 + struct frag_queue *fq;
5849 + struct net *net;
5850 +
5851 +- fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
5852 ++ fq = container_of(frag, struct frag_queue, q);
5853 + net = container_of(fq->q.net, struct net, ipv6.frags);
5854 +
5855 +- ip6_expire_frag_queue(net, fq, &ip6_frags);
5856 ++ ip6_expire_frag_queue(net, fq);
5857 + }
5858 +
5859 + static struct frag_queue *
5860 +-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
5861 +- const struct in6_addr *dst, int iif, u8 ecn)
5862 ++fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
5863 + {
5864 ++ struct frag_v6_compare_key key = {
5865 ++ .id = id,
5866 ++ .saddr = hdr->saddr,
5867 ++ .daddr = hdr->daddr,
5868 ++ .user = IP6_DEFRAG_LOCAL_DELIVER,
5869 ++ .iif = iif,
5870 ++ };
5871 + struct inet_frag_queue *q;
5872 +- struct ip6_create_arg arg;
5873 +- unsigned int hash;
5874 +
5875 +- arg.id = id;
5876 +- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
5877 +- arg.src = src;
5878 +- arg.dst = dst;
5879 +- arg.iif = iif;
5880 +- arg.ecn = ecn;
5881 ++ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
5882 ++ IPV6_ADDR_LINKLOCAL)))
5883 ++ key.iif = 0;
5884 +
5885 +- hash = inet6_hash_frag(id, src, dst);
5886 +-
5887 +- q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
5888 +- if (IS_ERR_OR_NULL(q)) {
5889 +- inet_frag_maybe_warn_overflow(q, pr_fmt());
5890 ++ q = inet_frag_find(&net->ipv6.frags, &key);
5891 ++ if (!q)
5892 + return NULL;
5893 +- }
5894 ++
5895 + return container_of(q, struct frag_queue, q);
5896 + }
5897 +
5898 +@@ -363,7 +326,7 @@ found:
5899 + return -1;
5900 +
5901 + discard_fq:
5902 +- inet_frag_kill(&fq->q, &ip6_frags);
5903 ++ inet_frag_kill(&fq->q);
5904 + err:
5905 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
5906 + IPSTATS_MIB_REASMFAILS);
5907 +@@ -390,7 +353,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5908 + int sum_truesize;
5909 + u8 ecn;
5910 +
5911 +- inet_frag_kill(&fq->q, &ip6_frags);
5912 ++ inet_frag_kill(&fq->q);
5913 +
5914 + ecn = ip_frag_ecn_table[fq->ecn];
5915 + if (unlikely(ecn == 0xff))
5916 +@@ -509,6 +472,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5917 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
5918 + rcu_read_unlock();
5919 + fq->q.fragments = NULL;
5920 ++ fq->q.rb_fragments = RB_ROOT;
5921 + fq->q.fragments_tail = NULL;
5922 + return 1;
5923 +
5924 +@@ -530,6 +494,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
5925 + struct frag_queue *fq;
5926 + const struct ipv6hdr *hdr = ipv6_hdr(skb);
5927 + struct net *net = dev_net(skb_dst(skb)->dev);
5928 ++ int iif;
5929 +
5930 + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
5931 + goto fail_hdr;
5932 +@@ -558,17 +523,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
5933 + return 1;
5934 + }
5935 +
5936 +- fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
5937 +- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
5938 ++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
5939 ++ fhdr->frag_off & htons(IP6_MF))
5940 ++ goto fail_hdr;
5941 ++
5942 ++ iif = skb->dev ? skb->dev->ifindex : 0;
5943 ++ fq = fq_find(net, fhdr->identification, hdr, iif);
5944 + if (fq) {
5945 + int ret;
5946 +
5947 + spin_lock(&fq->q.lock);
5948 +
5949 ++ fq->iif = iif;
5950 + ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
5951 +
5952 + spin_unlock(&fq->q.lock);
5953 +- inet_frag_put(&fq->q, &ip6_frags);
5954 ++ inet_frag_put(&fq->q);
5955 + return ret;
5956 + }
5957 +
5958 +@@ -589,24 +559,22 @@ static const struct inet6_protocol frag_protocol = {
5959 + };
5960 +
5961 + #ifdef CONFIG_SYSCTL
5962 +-static int zero;
5963 +
5964 + static struct ctl_table ip6_frags_ns_ctl_table[] = {
5965 + {
5966 + .procname = "ip6frag_high_thresh",
5967 + .data = &init_net.ipv6.frags.high_thresh,
5968 +- .maxlen = sizeof(int),
5969 ++ .maxlen = sizeof(unsigned long),
5970 + .mode = 0644,
5971 +- .proc_handler = proc_dointvec_minmax,
5972 ++ .proc_handler = proc_doulongvec_minmax,
5973 + .extra1 = &init_net.ipv6.frags.low_thresh
5974 + },
5975 + {
5976 + .procname = "ip6frag_low_thresh",
5977 + .data = &init_net.ipv6.frags.low_thresh,
5978 +- .maxlen = sizeof(int),
5979 ++ .maxlen = sizeof(unsigned long),
5980 + .mode = 0644,
5981 +- .proc_handler = proc_dointvec_minmax,
5982 +- .extra1 = &zero,
5983 ++ .proc_handler = proc_doulongvec_minmax,
5984 + .extra2 = &init_net.ipv6.frags.high_thresh
5985 + },
5986 + {
5987 +@@ -649,10 +617,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
5988 + table[1].data = &net->ipv6.frags.low_thresh;
5989 + table[1].extra2 = &net->ipv6.frags.high_thresh;
5990 + table[2].data = &net->ipv6.frags.timeout;
5991 +-
5992 +- /* Don't export sysctls to unprivileged users */
5993 +- if (net->user_ns != &init_user_ns)
5994 +- table[0].procname = NULL;
5995 + }
5996 +
5997 + hdr = register_net_sysctl(net, "net/ipv6", table);
5998 +@@ -714,19 +678,27 @@ static void ip6_frags_sysctl_unregister(void)
5999 +
6000 + static int __net_init ipv6_frags_init_net(struct net *net)
6001 + {
6002 ++ int res;
6003 ++
6004 + net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
6005 + net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
6006 + net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
6007 ++ net->ipv6.frags.f = &ip6_frags;
6008 +
6009 +- inet_frags_init_net(&net->ipv6.frags);
6010 ++ res = inet_frags_init_net(&net->ipv6.frags);
6011 ++ if (res < 0)
6012 ++ return res;
6013 +
6014 +- return ip6_frags_ns_sysctl_register(net);
6015 ++ res = ip6_frags_ns_sysctl_register(net);
6016 ++ if (res < 0)
6017 ++ inet_frags_exit_net(&net->ipv6.frags);
6018 ++ return res;
6019 + }
6020 +
6021 + static void __net_exit ipv6_frags_exit_net(struct net *net)
6022 + {
6023 + ip6_frags_ns_sysctl_unregister(net);
6024 +- inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
6025 ++ inet_frags_exit_net(&net->ipv6.frags);
6026 + }
6027 +
6028 + static struct pernet_operations ip6_frags_ops = {
6029 +@@ -734,14 +706,55 @@ static struct pernet_operations ip6_frags_ops = {
6030 + .exit = ipv6_frags_exit_net,
6031 + };
6032 +
6033 ++static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
6034 ++{
6035 ++ return jhash2(data,
6036 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
6037 ++}
6038 ++
6039 ++static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
6040 ++{
6041 ++ const struct inet_frag_queue *fq = data;
6042 ++
6043 ++ return jhash2((const u32 *)&fq->key.v6,
6044 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
6045 ++}
6046 ++
6047 ++static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
6048 ++{
6049 ++ const struct frag_v6_compare_key *key = arg->key;
6050 ++ const struct inet_frag_queue *fq = ptr;
6051 ++
6052 ++ return !!memcmp(&fq->key, key, sizeof(*key));
6053 ++}
6054 ++
6055 ++const struct rhashtable_params ip6_rhash_params = {
6056 ++ .head_offset = offsetof(struct inet_frag_queue, node),
6057 ++ .hashfn = ip6_key_hashfn,
6058 ++ .obj_hashfn = ip6_obj_hashfn,
6059 ++ .obj_cmpfn = ip6_obj_cmpfn,
6060 ++ .automatic_shrinking = true,
6061 ++};
6062 ++EXPORT_SYMBOL(ip6_rhash_params);
6063 ++
6064 + int __init ipv6_frag_init(void)
6065 + {
6066 + int ret;
6067 +
6068 +- ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6069 ++ ip6_frags.constructor = ip6_frag_init;
6070 ++ ip6_frags.destructor = NULL;
6071 ++ ip6_frags.qsize = sizeof(struct frag_queue);
6072 ++ ip6_frags.frag_expire = ip6_frag_expire;
6073 ++ ip6_frags.frags_cache_name = ip6_frag_cache_name;
6074 ++ ip6_frags.rhash_params = ip6_rhash_params;
6075 ++ ret = inet_frags_init(&ip6_frags);
6076 + if (ret)
6077 + goto out;
6078 +
6079 ++ ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6080 ++ if (ret)
6081 ++ goto err_protocol;
6082 ++
6083 + ret = ip6_frags_sysctl_register();
6084 + if (ret)
6085 + goto err_sysctl;
6086 +@@ -750,16 +763,6 @@ int __init ipv6_frag_init(void)
6087 + if (ret)
6088 + goto err_pernet;
6089 +
6090 +- ip6_frags.hashfn = ip6_hashfn;
6091 +- ip6_frags.constructor = ip6_frag_init;
6092 +- ip6_frags.destructor = NULL;
6093 +- ip6_frags.qsize = sizeof(struct frag_queue);
6094 +- ip6_frags.match = ip6_frag_match;
6095 +- ip6_frags.frag_expire = ip6_frag_expire;
6096 +- ip6_frags.frags_cache_name = ip6_frag_cache_name;
6097 +- ret = inet_frags_init(&ip6_frags);
6098 +- if (ret)
6099 +- goto err_pernet;
6100 + out:
6101 + return ret;
6102 +
6103 +@@ -767,6 +770,8 @@ err_pernet:
6104 + ip6_frags_sysctl_unregister();
6105 + err_sysctl:
6106 + inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6107 ++err_protocol:
6108 ++ inet_frags_fini(&ip6_frags);
6109 + goto out;
6110 + }
6111 +
6112 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
6113 +index 8c8df75dbead..2a2ab6bfe5d8 100644
6114 +--- a/net/sched/sch_netem.c
6115 ++++ b/net/sched/sch_netem.c
6116 +@@ -149,12 +149,6 @@ struct netem_skb_cb {
6117 + ktime_t tstamp_save;
6118 + };
6119 +
6120 +-
6121 +-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
6122 +-{
6123 +- return rb_entry(rb, struct sk_buff, rbnode);
6124 +-}
6125 +-
6126 + static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
6127 + {
6128 + /* we assume we can use skb next/prev/tstamp as storage for rb_node */
6129 +@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
6130 + struct rb_node *p;
6131 +
6132 + while ((p = rb_first(&q->t_root))) {
6133 +- struct sk_buff *skb = netem_rb_to_skb(p);
6134 ++ struct sk_buff *skb = rb_to_skb(p);
6135 +
6136 + rb_erase(p, &q->t_root);
6137 + rtnl_kfree_skbs(skb, skb);
6138 +@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
6139 + struct sk_buff *skb;
6140 +
6141 + parent = *p;
6142 +- skb = netem_rb_to_skb(parent);
6143 ++ skb = rb_to_skb(parent);
6144 + if (tnext >= netem_skb_cb(skb)->time_to_send)
6145 + p = &parent->rb_right;
6146 + else
6147 +@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
6148 + struct sk_buff *t_skb;
6149 + struct netem_skb_cb *t_last;
6150 +
6151 +- t_skb = netem_rb_to_skb(rb_last(&q->t_root));
6152 ++ t_skb = skb_rb_last(&q->t_root);
6153 + t_last = netem_skb_cb(t_skb);
6154 + if (!last ||
6155 + t_last->time_to_send > last->time_to_send) {
6156 +@@ -618,7 +612,7 @@ deliver:
6157 + if (p) {
6158 + psched_time_t time_to_send;
6159 +
6160 +- skb = netem_rb_to_skb(p);
6161 ++ skb = rb_to_skb(p);
6162 +
6163 + /* if more time remaining? */
6164 + time_to_send = netem_skb_cb(skb)->time_to_send;
6165 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
6166 +index 417abbb1f72c..8a027973f2ad 100644
6167 +--- a/sound/pci/hda/hda_codec.c
6168 ++++ b/sound/pci/hda/hda_codec.c
6169 +@@ -3923,7 +3923,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
6170 +
6171 + list_for_each_codec(codec, bus) {
6172 + /* FIXME: maybe a better way needed for forced reset */
6173 +- cancel_delayed_work_sync(&codec->jackpoll_work);
6174 ++ if (current_work() != &codec->jackpoll_work.work)
6175 ++ cancel_delayed_work_sync(&codec->jackpoll_work);
6176 + #ifdef CONFIG_PM
6177 + if (hda_codec_is_power_on(codec)) {
6178 + hda_call_codec_suspend(codec);
6179 +diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
6180 +index 3479a1bc7caa..fb76423022e8 100644
6181 +--- a/tools/perf/builtin-c2c.c
6182 ++++ b/tools/perf/builtin-c2c.c
6183 +@@ -2229,6 +2229,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
6184 + " s Togle full lenght of symbol and source line columns \n"
6185 + " q Return back to cacheline list \n";
6186 +
6187 ++ if (!he)
6188 ++ return 0;
6189 ++
6190 + /* Display compact version first. */
6191 + c2c.symbol_full = false;
6192 +
6193 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
6194 +index 55086389fc06..96f62dd7e3ed 100644
6195 +--- a/tools/perf/perf.h
6196 ++++ b/tools/perf/perf.h
6197 +@@ -24,7 +24,9 @@ static inline unsigned long long rdclock(void)
6198 + return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6199 + }
6200 +
6201 ++#ifndef MAX_NR_CPUS
6202 + #define MAX_NR_CPUS 1024
6203 ++#endif
6204 +
6205 + extern const char *input_name;
6206 + extern bool perf_host, perf_guest;
6207 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
6208 +index 226a9245d1db..2227ee92d8e2 100644
6209 +--- a/tools/perf/util/evsel.c
6210 ++++ b/tools/perf/util/evsel.c
6211 +@@ -824,6 +824,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
6212 + }
6213 + }
6214 +
6215 ++static bool is_dummy_event(struct perf_evsel *evsel)
6216 ++{
6217 ++ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
6218 ++ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
6219 ++}
6220 ++
6221 + /*
6222 + * The enable_on_exec/disabled value strategy:
6223 + *
6224 +@@ -1054,6 +1060,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
6225 + else
6226 + perf_evsel__reset_sample_bit(evsel, PERIOD);
6227 + }
6228 ++
6229 ++ /*
6230 ++ * For initial_delay, a dummy event is added implicitly.
6231 ++ * The software event will trigger -EOPNOTSUPP error out,
6232 ++ * if BRANCH_STACK bit is set.
6233 ++ */
6234 ++ if (opts->initial_delay && is_dummy_event(evsel))
6235 ++ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
6236 + }
6237 +
6238 + static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
6239 +diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
6240 +index b53596ad601b..2e7fd8227969 100644
6241 +--- a/tools/testing/nvdimm/pmem-dax.c
6242 ++++ b/tools/testing/nvdimm/pmem-dax.c
6243 +@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
6244 + if (get_nfit_res(pmem->phys_addr + offset)) {
6245 + struct page *page;
6246 +
6247 +- *kaddr = pmem->virt_addr + offset;
6248 ++ if (kaddr)
6249 ++ *kaddr = pmem->virt_addr + offset;
6250 + page = vmalloc_to_page(pmem->virt_addr + offset);
6251 +- *pfn = page_to_pfn_t(page);
6252 ++ if (pfn)
6253 ++ *pfn = page_to_pfn_t(page);
6254 + pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
6255 + __func__, pmem, pgoff, page_to_pfn(page));
6256 +
6257 + return 1;
6258 + }
6259 +
6260 +- *kaddr = pmem->virt_addr + offset;
6261 +- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
6262 ++ if (kaddr)
6263 ++ *kaddr = pmem->virt_addr + offset;
6264 ++ if (pfn)
6265 ++ *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
6266 +
6267 + /*
6268 + * If badblocks are present, limit known good range to the
6269 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
6270 +index 9167ee976314..041dbbb30ff0 100644
6271 +--- a/tools/testing/selftests/bpf/test_verifier.c
6272 ++++ b/tools/testing/selftests/bpf/test_verifier.c
6273 +@@ -5895,7 +5895,7 @@ static struct bpf_test tests[] = {
6274 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6275 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6276 + BPF_FUNC_map_lookup_elem),
6277 +- BPF_MOV64_REG(BPF_REG_0, 0),
6278 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6279 + BPF_EXIT_INSN(),
6280 + },
6281 + .fixup_map_in_map = { 3 },
6282 +@@ -5918,7 +5918,7 @@ static struct bpf_test tests[] = {
6283 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6284 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6285 + BPF_FUNC_map_lookup_elem),
6286 +- BPF_MOV64_REG(BPF_REG_0, 0),
6287 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6288 + BPF_EXIT_INSN(),
6289 + },
6290 + .fixup_map_in_map = { 3 },
6291 +@@ -5941,7 +5941,7 @@ static struct bpf_test tests[] = {
6292 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6293 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6294 + BPF_FUNC_map_lookup_elem),
6295 +- BPF_MOV64_REG(BPF_REG_0, 0),
6296 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6297 + BPF_EXIT_INSN(),
6298 + },
6299 + .fixup_map_in_map = { 3 },