Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 30 Aug 2017 10:06:47
Message-Id: 1504087593.30030103dd1826abf4c3dbb3f9c6ba981a93dbfa.mpagano@gentoo
commit: 30030103dd1826abf4c3dbb3f9c6ba981a93dbfa
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 30 10:06:33 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 30 10:06:33 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=30030103

Linux patch 4.9.46

 0000_README | 4 +
 1045_linux-4.9.46.patch | 3114 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3118 insertions(+)

diff --git a/0000_README b/0000_README
index e142b57..9af1e8a 100644
--- a/0000_README
+++ b/0000_README
@@ -223,6 +223,10 @@ Patch: 1044_linux-4.9.45.patch
 From: http://www.kernel.org
 Desc: Linux 4.9.45
 
+Patch: 1045_linux-4.9.46.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.46
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1045_linux-4.9.46.patch b/1045_linux-4.9.46.patch
new file mode 100644
index 0000000..72e724f
--- /dev/null
+++ b/1045_linux-4.9.46.patch
@@ -0,0 +1,3114 @@
+diff --git a/Makefile b/Makefile
+index ccd6d91f616e..846ef1b57a02 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index b3410ff6a62d..4fd6272e6c01 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -89,7 +89,9 @@ extern unsigned long perip_base, perip_end;
+ #define ARC_REG_SLC_FLUSH 0x904
+ #define ARC_REG_SLC_INVALIDATE 0x905
+ #define ARC_REG_SLC_RGN_START 0x914
++#define ARC_REG_SLC_RGN_START1 0x915
+ #define ARC_REG_SLC_RGN_END 0x916
++#define ARC_REG_SLC_RGN_END1 0x917
+
+ /* Bit val in SLC_CONTROL */
+ #define SLC_CTRL_IM 0x040
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index 8147583c4434..bbdfeb31dee6 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -562,6 +562,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ unsigned int ctrl;
++ phys_addr_t end;
+
+ spin_lock_irqsave(&lock, flags);
+
+@@ -591,8 +592,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+ * END needs to be setup before START (latter triggers the operation)
+ * END can't be same as START, so add (l2_line_sz - 1) to sz
+ */
+- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
++ end = paddr + sz + l2_line_sz - 1;
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
++
++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
++
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
++
++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 0012f0353fd6..fe208b70b8b1 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -75,9 +75,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct task_struct *tsk)
+ {
+ /* Mark this context has been used on the new CPU */
+- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
++ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
++ /*
++ * This full barrier orders the store to the cpumask above vs
++ * a subsequent operation which allows this CPU to begin loading
++ * translations for next.
++ *
++ * When using the radix MMU that operation is the load of the
++ * MMU context id, which is then moved to SPRN_PID.
++ *
++ * For the hash MMU it is either the first load from slb_cache
++ * in switch_slb(), and/or the store of paca->mm_ctx_id in
++ * copy_mm_to_paca().
++ *
++ * On the read side the barrier is in pte_xchg(), which orders
++ * the store to the PTE vs the load of mm_cpumask.
++ */
++ smp_mb();
++ }
++
+ /* 32-bit keeps track of the current PGDIR in the thread struct */
+ #ifdef CONFIG_PPC32
+ tsk->thread.pgdir = next->pgd;
+diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
+index 49c0a5a80efa..68e087e807f8 100644
+--- a/arch/powerpc/include/asm/pgtable-be-types.h
++++ b/arch/powerpc/include/asm/pgtable-be-types.h
+@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ unsigned long *p = (unsigned long *)ptep;
+ __be64 prev;
+
++ /* See comment in switch_mm_irqs_off() */
+ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
+ (__force unsigned long)pte_raw(new));
+
+diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
+index e7f4f3e0fcde..41e9d0a6cbeb 100644
+--- a/arch/powerpc/include/asm/pgtable-types.h
++++ b/arch/powerpc/include/asm/pgtable-types.h
+@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ {
+ unsigned long *p = (unsigned long *)ptep;
+
++ /* See comment in switch_mm_irqs_off() */
+ return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+ }
+ #endif
+diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
+index 05c98bb853cf..2f04ad1ea01c 100644
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
+ "srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [code] "d" (code), [addr] "a" (addr)
+- : "memory", "cc");
++ : "3", "memory", "cc");
+ return cc;
+ }
+
+@@ -422,7 +422,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
++ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+@@ -430,6 +430,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ goto out;
+ }
+
++ if (addr & ~PAGE_MASK)
++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
+ /*
+ * If the page has not yet been faulted in, we want to do that
+ * now and not after all the expensive calculations.
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 06981cc716b6..d04111a5c615 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -1240,8 +1240,6 @@ static int pci_sun4v_probe(struct platform_device *op)
+ * ATU group, but ATU hcalls won't be available.
+ */
+ hv_atu = false;
+- pr_err(PFX "Could not register hvapi ATU err=%d\n",
+- err);
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 970c1de3b86e..4c1b7ea18541 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
+
+ static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
+ {
+- return rapl_pmus->pmus[topology_logical_package_id(cpu)];
++ unsigned int pkgid = topology_logical_package_id(cpu);
++
++ /*
++ * The unsigned check also catches the '-1' return value for non
++ * existent mappings in the topology map.
++ */
++ return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
+ }
+
+ static inline u64 rapl_read_counter(struct perf_event *event)
+@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
+
+ /* must be done before validate_group */
+ pmu = cpu_to_rapl_pmu(event->cpu);
++ if (!pmu)
++ return -EINVAL;
+ event->cpu = pmu->cpu;
+ event->pmu_private = pmu;
+ event->hw.event_base = msr;
+@@ -585,6 +593,19 @@ static int rapl_cpu_online(unsigned int cpu)
+ struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
+ int target;
+
++ if (!pmu) {
++ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
++ if (!pmu)
++ return -ENOMEM;
++
++ raw_spin_lock_init(&pmu->lock);
++ INIT_LIST_HEAD(&pmu->active_list);
++ pmu->pmu = &rapl_pmus->pmu;
++ pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
++ rapl_hrtimer_init(pmu);
++
++ rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
++ }
+ /*
+ * Check if there is an online cpu in the package which collects rapl
+ * events already.
+@@ -598,27 +619,6 @@ static int rapl_cpu_online(unsigned int cpu)
+ return 0;
+ }
+
+-static int rapl_cpu_prepare(unsigned int cpu)
+-{
+- struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
+-
+- if (pmu)
+- return 0;
+-
+- pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+- if (!pmu)
+- return -ENOMEM;
+-
+- raw_spin_lock_init(&pmu->lock);
+- INIT_LIST_HEAD(&pmu->active_list);
+- pmu->pmu = &rapl_pmus->pmu;
+- pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+- pmu->cpu = -1;
+- rapl_hrtimer_init(pmu);
+- rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+- return 0;
+-}
+-
+ static int rapl_check_hw_unit(bool apply_quirk)
+ {
+ u64 msr_rapl_power_unit_bits;
+@@ -804,28 +804,21 @@ static int __init rapl_pmu_init(void)
+ * Install callbacks. Core will call them for each online cpu.
+ */
+
+- ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+- rapl_cpu_prepare, NULL);
+- if (ret)
+- goto out;
+-
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
+ "AP_PERF_X86_RAPL_ONLINE",
+ rapl_cpu_online, rapl_cpu_offline);
+ if (ret)
+- goto out1;
++ goto out;
+
+ ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
+ if (ret)
+- goto out2;
++ goto out1;
+
+ rapl_advertise();
+ return 0;
+
+-out2:
+- cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+ out1:
+- cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
++ cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+ out:
+ pr_warn("Initialization failed (%d), disabled\n", ret);
+ cleanup_rapl_pmus();
+@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
+ static void __exit intel_rapl_exit(void)
+ {
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+- cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
+ perf_pmu_unregister(&rapl_pmus->pmu);
+ cleanup_rapl_pmus();
+ }
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 8e0a9fe86de4..f9dd22469388 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ mm->context.execute_only_pkey = -1;
+ }
+ #endif
+- init_new_context_ldt(tsk, mm);
+-
+- return 0;
++ return init_new_context_ldt(tsk, mm);
+ }
+ static inline void destroy_context(struct mm_struct *mm)
+ {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 649d8f2c1e40..91af75e37306 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -456,7 +456,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* PKU is not yet implemented for shadow paging. */
+- if (!tdp_enabled)
++ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+ entry->ecx &= ~F(PKU);
+ } else {
+ entry->ebx = 0;
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index e53bef6cf53c..0375c6024062 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -1072,6 +1072,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
+ if (list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+ mutex_unlock(&ghes_list_mutex);
++ synchronize_rcu();
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ ghes_nmi_remove(ghes);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 79152dbc5528..51874695a730 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1728,7 +1728,7 @@ int __init acpi_ec_dsdt_probe(void)
+ * functioning ECDT EC first in order to handle the events.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=115021
+ */
+-int __init acpi_ec_ecdt_start(void)
++static int __init acpi_ec_ecdt_start(void)
+ {
+ acpi_handle handle;
+
+@@ -1959,20 +1959,17 @@ static inline void acpi_ec_query_exit(void)
+ int __init acpi_ec_init(void)
+ {
+ int result;
++ int ecdt_fail, dsdt_fail;
+
+ /* register workqueue for _Qxx evaluations */
+ result = acpi_ec_query_init();
+ if (result)
+- goto err_exit;
+- /* Now register the driver for the EC */
+- result = acpi_bus_register_driver(&acpi_ec_driver);
+- if (result)
+- goto err_exit;
++ return result;
+
+-err_exit:
+- if (result)
+- acpi_ec_query_exit();
+- return result;
++ /* Drivers must be started after acpi_ec_query_init() */
++ ecdt_fail = acpi_ec_ecdt_start();
++ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
++ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
+ }
+
+ /* EC driver currently not unloadable */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 219b90bc0922..08b3ca0ead69 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
+ int acpi_ec_init(void);
+ int acpi_ec_ecdt_probe(void);
+ int acpi_ec_dsdt_probe(void);
+-int acpi_ec_ecdt_start(void);
+ void acpi_ec_block_transactions(void);
+ void acpi_ec_unblock_transactions(void);
+ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
+index 6d7ce6e12aaa..5e18ccf5ab57 100644
+--- a/drivers/acpi/ioapic.c
++++ b/drivers/acpi/ioapic.c
+@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
+ struct resource *res = data;
+ struct resource_win win;
+
++ /*
++ * We might assign this to 'res' later, make sure all pointers are
++ * cleared before the resource is added to the global list
++ */
++ memset(&win, 0, sizeof(win));
++
+ res->flags = 0;
+ if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
+ return AE_OK;
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index dd3786acba89..cf725d581cae 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2051,7 +2051,6 @@ int __init acpi_scan_init(void)
+
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+- acpi_ec_ecdt_start();
+
+ acpi_scan_initialized = true;
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 3c71b982bf2a..15009b2b33c7 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1724,8 +1724,12 @@ static void binder_transaction(struct binder_proc *proc,
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+- if (target_wait)
+- wake_up_interruptible(target_wait);
++ if (target_wait) {
++ if (reply || !(t->flags & TF_ONE_WAY))
++ wake_up_interruptible_sync(target_wait);
++ else
++ wake_up_interruptible(target_wait);
++ }
+ return;
+
+ err_get_unused_fd_failed:
+@@ -2760,10 +2764,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ /*pr_info("binder_ioctl: %d:%d %x %lx\n",
+ proc->pid, current->pid, cmd, arg);*/
+
+- if (unlikely(current->mm != proc->vma_vm_mm)) {
+- pr_err("current mm mismatch proc mm\n");
+- return -EINVAL;
+- }
+ trace_binder_ioctl(cmd, arg);
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+@@ -2875,7 +2875,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+- if (proc->tsk != current)
++ if (proc->tsk != current->group_leader)
+ return -EINVAL;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+@@ -2976,9 +2976,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+- get_task_struct(current);
+- proc->tsk = current;
+- proc->vma_vm_mm = current->mm;
++ get_task_struct(current->group_leader);
++ proc->tsk = current->group_leader;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index 4e19bde4bbff..34adde169a78 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -1386,6 +1386,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ if (config->funcs->atomic_check)
+ ret = config->funcs->atomic_check(state->dev, state);
+
++ if (ret)
++ return ret;
++
+ if (!state->allow_modeset) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+@@ -1396,7 +1399,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ }
+ }
+
+- return ret;
++ return 0;
+ }
+ EXPORT_SYMBOL(drm_atomic_check_only);
+
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 465bacd0a630..48e99ab525c3 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
++ if (dev->driver->gem_close_object)
++ dev->driver->gem_close_object(obj, file_priv);
++
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
+
+- if (dev->driver->gem_close_object)
+- dev->driver->gem_close_object(obj, file_priv);
+-
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 7316fc7fa0bd..a2ec6d8796a0 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
+
+ /* Signal polarities */
+- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
+- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
++ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
++ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
+ | DSMR_DIPM_DISP | DSMR_CSPM;
+ rcar_du_crtc_write(rcrtc, DSMR, value);
+
+@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ mode->crtc_vsync_start - 1);
+ rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
+
+- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
++ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
+ rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
+ }
+
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+index cfc302c65b0b..c58602b638e4 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+@@ -453,13 +453,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
+ }
+
+ ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
+- of_node_put(encoder);
+- of_node_put(connector);
+-
+ if (ret && ret != -EPROBE_DEFER)
+ dev_warn(rcdu->dev,
+- "failed to initialize encoder %s (%d), skipping\n",
+- encoder->full_name, ret);
++ "failed to initialize encoder %s on output %u (%d), skipping\n",
++ of_node_full_name(encoder), output, ret);
++
++ of_node_put(encoder);
++ of_node_put(connector);
+
+ return ret;
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0b42a12171f3..b42d95f09c68 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -319,7 +319,7 @@ static void dw_i2c_plat_complete(struct device *dev)
+ #endif
+
+ #ifdef CONFIG_PM
+-static int dw_i2c_plat_suspend(struct device *dev)
++static int dw_i2c_plat_runtime_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+@@ -343,11 +343,21 @@ static int dw_i2c_plat_resume(struct device *dev)
+ return 0;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++static int dw_i2c_plat_suspend(struct device *dev)
++{
++ pm_runtime_resume(dev);
++ return dw_i2c_plat_runtime_suspend(dev);
++}
++#endif
++
+ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
+ .prepare = dw_i2c_plat_prepare,
+ .complete = dw_i2c_plat_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
++ dw_i2c_plat_resume,
++ NULL)
+ };
+
+ #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 60829340a82e..b60e5d87c257 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ s32 poll_value = 0;
+
+ if (state) {
+- if (!atomic_read(&st->user_requested_state))
+- return 0;
+ if (sensor_hub_device_open(st->hsdev))
+ return -EIO;
+
+@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ &report_val);
+ }
+
++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
++ st->pdev->name, state_val, report_val);
++
+ sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ st->power_state.index,
+ sizeof(state_val), &state_val);
+@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ ret = pm_runtime_get_sync(&st->pdev->dev);
+ else {
+ pm_runtime_mark_last_busy(&st->pdev->dev);
++ pm_runtime_use_autosuspend(&st->pdev->dev);
+ ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+ }
+ if (ret < 0) {
+@@ -201,8 +203,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ /* Default to 3 seconds, but can be changed from sysfs */
+ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
+ 3000);
+- pm_runtime_use_autosuspend(&attrb->pdev->dev);
+-
+ return ret;
+ error_unreg_trigger:
+ iio_trigger_unregister(trig);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index 8cf84d3488b2..12898424d838 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(12500),
+- .accel_max_scale = 5,
++ .accel_max_scale = 10,
+ },
+ [ADIS16485] = {
+ .channels = adis16485_channels,
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 518e8a7bd5f9..f26807c75be4 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1212,14 +1212,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+
+ case SS4_PACKET_ID_TWO:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
+ } else {
+- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
+ }
+ f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
+@@ -1236,16 +1246,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+
+ case SS4_PACKET_ID_MULTI:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
++ }
++
+ f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
+ } else {
+- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
+ no_data_y = SS4_MFPACKET_NO_AY;
+@@ -2535,8 +2556,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
+
+ memset(otp, 0, sizeof(otp));
+
+- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
+- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
++ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
++ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
+ return -1;
+
+ alps_update_device_area_ss4_v2(otp, priv);
+diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
+index dbfd26073e1a..793123717145 100644
+--- a/drivers/input/mouse/alps.h
++++ b/drivers/input/mouse/alps.h
+@@ -91,6 +91,10 @@ enum SS4_PACKET_ID {
+ ((_b[1 + _i * 3] << 5) & 0x1F00) \
+ )
+
++#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
++ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
++ )
++
+ #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
+ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
+ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
+@@ -100,6 +104,10 @@ enum SS4_PACKET_ID {
+ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
+ )
+
++#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
++ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
++ )
++
+ #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
+ )
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 98d4e515587a..681dce15fbc8 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1234,6 +1234,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
++ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0605", 0 },
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 354d47ecd66a..ce6ff9b301bb 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
+ if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ return -1;
+
+- if (param[0] != TP_MAGIC_IDENT)
++ /* add new TP ID. */
++ if (!(param[0] & TP_MAGIC_IDENT))
+ return -1;
+
+ if (firmware_id)
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 5617ed3a7d7a..88055755f82e 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -21,8 +21,9 @@
+ #define TP_COMMAND 0xE2 /* Commands start with this */
+
+ #define TP_READ_ID 0xE1 /* Sent for device identification */
+-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
+ /* by the firmware ID */
++ /* Firmware ID includes 0x1, 0x2, 0x3 */
+
+
+ /*
+diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
+index c9f386213e9e..410c39c62dc7 100644
+--- a/drivers/leds/trigger/ledtrig-heartbeat.c
++++ b/drivers/leds/trigger/ledtrig-heartbeat.c
+@@ -19,7 +19,6 @@
+ #include <linux/sched.h>
+ #include <linux/leds.h>
+ #include <linux/reboot.h>
+-#include <linux/suspend.h>
+ #include "../leds.h"
+
+ static int panic_heartbeats;
+@@ -155,30 +154,6 @@ static struct led_trigger heartbeat_led_trigger = {
+ .deactivate = heartbeat_trig_deactivate,
+ };
+
+-static int heartbeat_pm_notifier(struct notifier_block *nb,
+- unsigned long pm_event, void *unused)
+-{
+- int rc;
+-
+- switch (pm_event) {
+- case PM_SUSPEND_PREPARE:
+- case PM_HIBERNATION_PREPARE:
+- case PM_RESTORE_PREPARE:
+- led_trigger_unregister(&heartbeat_led_trigger);
+- break;
+- case PM_POST_SUSPEND:
+- case PM_POST_HIBERNATION:
+- case PM_POST_RESTORE:
+- rc = led_trigger_register(&heartbeat_led_trigger);
+- if (rc)
+- pr_err("could not re-register heartbeat trigger\n");
+- break;
+- default:
+- break;
+- }
+- return NOTIFY_DONE;
+-}
+-
+ static int heartbeat_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+ {
+@@ -193,10 +168,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block heartbeat_pm_nb = {
+- .notifier_call = heartbeat_pm_notifier,
+-};
+-
+ static struct notifier_block heartbeat_reboot_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+ };
+@@ -213,14 +184,12 @@ static int __init heartbeat_trig_init(void)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &heartbeat_panic_nb);
+ register_reboot_notifier(&heartbeat_reboot_nb);
+- register_pm_notifier(&heartbeat_pm_nb);
+ }
+ return rc;
+ }
+
+ static void __exit heartbeat_trig_exit(void)
+ {
+- unregister_pm_notifier(&heartbeat_pm_nb);
+ unregister_reboot_notifier(&heartbeat_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &heartbeat_panic_nb);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 551786f58e59..ba652d8a2b93 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ /* Virtual PCI function needs to determine UAR page size from
+ * firmware. Only master PCI function can set the uar page size
+ */
+- if (enable_4k_uar)
++ if (enable_4k_uar || !dev->persist->num_vfs)
+ dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+ else
+ dev->uar_page_shift = PAGE_SHIFT;
+@@ -2269,7 +2269,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
+
+ dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
+
+- if (enable_4k_uar) {
++ if (enable_4k_uar || !dev->persist->num_vfs) {
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
+ init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index aee3fd2b6538..4ca82bd8c4f0 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -871,8 +871,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+
+ err_unmap:
+- --f;
+- while (f >= 0) {
++ while (--f >= 0) {
+ frag = &skb_shinfo(skb)->frags[f];
+ dma_unmap_page(&nn->pdev->dev,
+ tx_ring->txbufs[wr_idx].dma_addr,
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index c234ee43b6ef..24222a5d8df2 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -176,14 +176,12 @@ struct ntb_transport_qp {
+ u64 rx_err_ver;
+ u64 rx_memcpy;
+ u64 rx_async;
+- u64 dma_rx_prep_err;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_ring_full;
+ u64 tx_err_no_buf;
+ u64 tx_memcpy;
+ u64 tx_async;
+- u64 dma_tx_prep_err;
+ };
+
+ struct ntb_transport_mw {
+@@ -256,8 +254,6 @@ enum {
+ #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
+ #define NTB_QP_DEF_NUM_ENTRIES 100
+ #define NTB_LINK_DOWN_TIMEOUT 10
+-#define DMA_RETRIES 20
+-#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
+
+ static void ntb_transport_rxc_db(unsigned long data);
+ static const struct ntb_ctx_ops ntb_transport_ops;
+@@ -518,12 +514,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "free tx - \t%u\n",
+ ntb_transport_tx_free_entry(qp));
+- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+- "DMA tx prep err - \t%llu\n",
+- qp->dma_tx_prep_err);
+- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+- "DMA rx prep err - \t%llu\n",
+- qp->dma_rx_prep_err);
+
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "\n");
+@@ -625,7 +615,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
+ if (!mw->virt_addr)
+ return -ENOMEM;
+
+- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
++ if (mw_num < qp_count % mw_count)
+ num_qps_mw = qp_count / mw_count + 1;
+ else
+ num_qps_mw = qp_count / mw_count;
+@@ -770,8 +760,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+ qp->tx_err_no_buf = 0;
+ qp->tx_memcpy = 0;
+ qp->tx_async = 0;
+- qp->dma_tx_prep_err = 0;
+- qp->dma_rx_prep_err = 0;
+ }
+
+ static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
+@@ -933,10 +921,8 @@ static void ntb_transport_link_work(struct work_struct *work)
+ ntb_free_mw(nt, i);
+
+ /* if there's an actual failure, we should just bail */
+- if (rc < 0) {
+- ntb_link_disable(ndev);
++ if (rc < 0)
+ return;
+- }
+
+ out:
+ if (ntb_link_is_up(ndev, NULL, NULL) == 1)
+@@ -1002,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
+ qp->event_handler = NULL;
+ ntb_qp_link_down_reset(qp);
+
+- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
++ if (mw_num < qp_count % mw_count)
+ num_qps_mw = qp_count / mw_count + 1;
+ else
+ num_qps_mw = qp_count / mw_count;
+@@ -1125,8 +1111,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
+ qp_count = ilog2(qp_bitmap);
+ if (max_num_clients && max_num_clients < qp_count)
+ qp_count = max_num_clients;
+- else if (mw_count < qp_count)
+- qp_count = mw_count;
++ else if (nt->mw_count < qp_count)
++ qp_count = nt->mw_count;
+
+ qp_bitmap &= BIT_ULL(qp_count) - 1;
+
+@@ -1314,7 +1300,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
+ struct dmaengine_unmap_data *unmap;
+ dma_cookie_t cookie;
+ void *buf = entry->buf;
+- int retries = 0;
+
+ len = entry->len;
+ device = chan->device;
+@@ -1343,22 +1328,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
+
+ unmap->from_cnt = 1;
+
+- for (retries = 0; retries < DMA_RETRIES; retries++) {
+- txd = device->device_prep_dma_memcpy(chan,
+- unmap->addr[1],
+- unmap->addr[0], len,
+- DMA_PREP_INTERRUPT);
+- if (txd)
+- break;
+-
+- set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(DMA_OUT_RESOURCE_TO);
+- }
+-
+- if (!txd) {
+- qp->dma_rx_prep_err++;
++ txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
++ unmap->addr[0], len,
++ DMA_PREP_INTERRUPT);
++ if (!txd)
+ goto err_get_unmap;
+- }
+
+ txd->callback_result = ntb_rx_copy_callback;
+ txd->callback_param = entry;
+@@ -1603,7 +1577,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct dmaengine_unmap_data *unmap;
+ dma_addr_t dest;
+ dma_cookie_t cookie;
+- int retries = 0;
+
+ device = chan->device;
+ dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
+@@ -1625,21 +1598,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+
+ unmap->to_cnt = 1;
+
+- for (retries = 0; retries < DMA_RETRIES; retries++) {
+- txd = device->device_prep_dma_memcpy(chan, dest,
+- unmap->addr[0], len,
+- DMA_PREP_INTERRUPT);
+- if (txd)
+- break;
+-
+- set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(DMA_OUT_RESOURCE_TO);
+- }
+-
+- if (!txd) {
+- qp->dma_tx_prep_err++;
++ txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
++ DMA_PREP_INTERRUPT);
++ if (!txd)
+ goto err_get_unmap;
+- }
+
+ txd->callback_result = ntb_tx_copy_callback;
+ txd->callback_param = entry;
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index b432153a6c5a..0f63a36a519e 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ {} /* Terminating entry */
+ };
+
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 789ff1df2d8d..581712534c93 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -183,15 +183,20 @@ build_path_from_dentry(struct dentry *direntry)
+ }
+
+ /*
++ * Don't allow path components longer than the server max.
+ * Don't allow the separator character in a path component.
+ * The VFS will not allow "/", but "\" is allowed by posix.
+ */
+ static int
+-check_name(struct dentry *direntry)
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
++ if (unlikely(direntry->d_name.len >
++ tcon->fsAttrInfo.MaxPathNameComponentLength))
++ return -ENAMETOOLONG;
++
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ for (i = 0; i < direntry->d_name.len; i++) {
+ if (direntry->d_name.name[i] == '\\') {
+@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ return finish_no_open(file, res);
+ }
+
+- rc = check_name(direntry);
+- if (rc)
+- return rc;
+-
+ xid = get_xid();
+
+ cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ }
+
+ tcon = tlink_tcon(tlink);
++
++ rc = check_name(direntry, tcon);
++ if (rc)
++ goto out_free_xid;
++
+ server = tcon->ses->server;
+
+ if (server->ops->new_lease_key)
+@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ }
+ pTcon = tlink_tcon(tlink);
+
+- rc = check_name(direntry);
++ rc = check_name(direntry, pTcon);
+ if (rc)
+ goto lookup_out;
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 7c1c6c39d582..0437e5fdba56 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2930,8 +2930,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+ kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+ le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+ kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
+- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++ kst->f_bfree = kst->f_bavail =
++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ return;
+ }
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 4e7a56a0a9b6..2c4f7a22e128 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
+ argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
+ if (argp->pagelen < PAGE_SIZE) {
+- argp->end = argp->p + (argp->pagelen>>2);
++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
+ argp->pagelen = 0;
+ } else {
+ argp->end = argp->p + (PAGE_SIZE>>2);
+@@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+ argp->pagelen -= pages * PAGE_SIZE;
+ len -= pages * PAGE_SIZE;
+
+- argp->p = (__be32 *)page_address(argp->pagelist[0]);
+- argp->pagelist++;
+- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
++ next_decode_page(argp);
+ }
+ argp->p += XDR_QUADLEN(len);
+
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 31e1d639abed..dc81e5287ebf 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -59,6 +59,22 @@
+ /* Align . to a 8 byte boundary equals to maximum function alignment. */
+ #define ALIGN_FUNCTION() . = ALIGN(8)
+
++/*
++ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
++ * generates .data.identifier sections, which need to be pulled in with
++ * .data. We don't want to pull in .data..other sections, which Linux
++ * has defined. Same for text and bss.
++ */
++#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
++#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
++#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
++#else
++#define TEXT_MAIN .text
++#define DATA_MAIN .data
++#define BSS_MAIN .bss
++#endif
++
+ /*
+ * Align to a 32 byte boundary equal to the
+ * alignment gcc 4.5 uses for a struct
+@@ -198,12 +214,9 @@
+
+ /*
+ * .data section
+- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
+- * .data.identifier which needs to be pulled in with .data, but don't want to
+- * pull in .data..stuff which has its own requirements. Same for bss.
+ */
+ #define DATA_DATA \
+- *(.data .data.[0-9a-zA-Z_]*) \
++ *(DATA_MAIN) \
+ *(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
+ MEM_KEEP(init.data) \
+@@ -436,16 +449,17 @@
+ VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ }
+
+-/* .text section. Map to function alignment to avoid address changes
++/*
++ * .text section. Map to function alignment to avoid address changes
+ * during second ld run in second ld pass when generating System.map
+- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
+- * .text.identifier which needs to be pulled in with .text , but some
+- * architectures define .text.foo which is not intended to be pulled in here.
+- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
++ *
++ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
++ * code elimination is enabled, so these sections should be converted
++ * to use ".." first.
++ */
+ #define TEXT_TEXT \
+ ALIGN_FUNCTION(); \
+- *(.text.hot .text .text.fixup .text.unlikely) \
++ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.ref.text) \
+ MEM_KEEP(init.text) \
+ MEM_KEEP(exit.text) \
+@@ -613,7 +627,7 @@
+ BSS_FIRST_SECTIONS \
+ *(.bss..page_aligned) \
+ *(.dynbss) \
+- *(.bss .bss.[0-9a-zA-Z_]*) \
++ *(BSS_MAIN) \
+ *(COMMON) \
+ }
+
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index a13b031dc6b8..3101141661a1 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -40,6 +40,7 @@ struct bpf_reg_state {
+ */
+ s64 min_value;
+ u64 max_value;
++ bool value_from_signed;
+ };
+
+ enum bpf_stack_slot_type {
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index ba1cad7b97cf..965cc5693a46 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -10,7 +10,6 @@ enum cpuhp_state {
+ CPUHP_PERF_X86_PREPARE,
+ CPUHP_PERF_X86_UNCORE_PREP,
+ CPUHP_PERF_X86_AMD_UNCORE_PREP,
+- CPUHP_PERF_X86_RAPL_PREP,
+ CPUHP_PERF_BFIN,
+ CPUHP_PERF_POWER,
+ CPUHP_PERF_SUPERH,
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 2f63d44368bd..dd88ded27fc8 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -941,9 +941,9 @@ static inline struct file *get_file(struct file *f)
+ /* Page cache limit. The filesystems should put that into their s_maxbytes
+ limits, otherwise bad things can happen in VM. */
+ #if BITS_PER_LONG==32
+-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
++#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
+ #elif BITS_PER_LONG==64
+-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
++#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
+ #endif
+
+ #define FL_POSIX 1
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index 6c70444da3b9..b83507c0640c 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -340,9 +340,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
+ __PTR_RING_PEEK_CALL_v; \
+ })
+
+-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
++static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+ {
+- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
++ return kcalloc(size, sizeof(void *), gfp);
+ }
+
+ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
+@@ -417,7 +417,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
+-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
++static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
++ unsigned int nrings,
+ int size,
+ gfp_t gfp, void (*destroy)(void *))
+ {
+@@ -425,7 +426,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+ void ***queues;
+ int i;
+
+- queues = kmalloc(nrings * sizeof *queues, gfp);
++ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
+ if (!queues)
+ goto noqueues;
+
+diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
+index f4dfade428f0..be8b902b5845 100644
+--- a/include/linux/skb_array.h
++++ b/include/linux/skb_array.h
+@@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
+ }
+
+ static inline int skb_array_resize_multiple(struct skb_array **rings,
+- int nrings, int size, gfp_t gfp)
++ int nrings, unsigned int size,
++ gfp_t gfp)
+ {
+ BUILD_BUG_ON(offsetof(struct skb_array, ring));
+ return ptr_ring_resize_multiple((struct ptr_ring **)rings,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index d3a107850a41..51c6b9786c46 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -339,7 +339,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ !forwarding)
+ return dst_mtu(dst);
+
+- return min(dst->dev->mtu, IP_MAX_MTU);
++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
+
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+@@ -351,7 +351,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ }
+
+- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ }
+
+ u32 ip_idents_reserve(u32 hash, int segs);
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index e6aa0a249672..f18fc1a0321f 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -768,8 +768,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ old = *pold;
+ *pold = new;
+ if (old != NULL) {
+- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++ unsigned int qlen = old->q.qlen;
++ unsigned int backlog = old->qstats.backlog;
++
+ qdisc_reset(old);
++ qdisc_tree_reduce_backlog(old, qlen, backlog);
+ }
+ sch_tree_unlock(sch);
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8ce679d36c58..779c871c5dcd 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -139,7 +139,7 @@ struct bpf_verifier_stack_elem {
+ struct bpf_verifier_stack_elem *next;
+ };
+
+-#define BPF_COMPLEXITY_LIMIT_INSNS 65536
++#define BPF_COMPLEXITY_LIMIT_INSNS 98304
+ #define BPF_COMPLEXITY_LIMIT_STACK 1024
+
+ struct bpf_call_arg_meta {
+@@ -682,12 +682,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+ return -EACCES;
+ }
+
+-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
++static bool __is_pointer_value(bool allow_ptr_leaks,
++ const struct bpf_reg_state *reg)
+ {
+- if (env->allow_ptr_leaks)
++ if (allow_ptr_leaks)
+ return false;
+
+- switch (env->cur_state.regs[regno].type) {
++ switch (reg->type) {
+ case UNKNOWN_VALUE:
+ case CONST_IMM:
+ return false;
+@@ -696,6 +697,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+ }
+ }
+
++static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
++{
++ return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
++}
++
+ static int check_ptr_alignment(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, int off, int size)
+ {
+@@ -1467,6 +1473,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ return 0;
+ }
+
++static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
++ struct bpf_insn *insn)
++{
++ struct bpf_reg_state *regs = env->cur_state.regs;
++ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
++ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
++ u8 opcode = BPF_OP(insn->code);
++ s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
++
++ /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
++ if (src_reg->imm > 0 && dst_reg->imm) {
++ switch (opcode) {
++ case BPF_ADD:
++ /* dreg += sreg
++ * where both have zero upper bits. Adding them
++ * can only result making one more bit non-zero
++ * in the larger value.
++ * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
++ * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
++ */
++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
++ dst_reg->imm--;
++ break;
++ case BPF_AND:
++ /* dreg &= sreg
++ * AND can not extend zero bits only shrink
++ * Ex. 0x00..00ffffff
++ * & 0x0f..ffffffff
++ * ----------------
++ * 0x00..00ffffff
++ */
++ dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
++ break;
++ case BPF_OR:
++ /* dreg |= sreg
++ * OR can only extend zero bits
++ * Ex. 0x00..00ffffff
++ * | 0x0f..ffffffff
++ * ----------------
++ * 0x0f..00ffffff
++ */
++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
++ break;
++ case BPF_SUB:
++ case BPF_MUL:
++ case BPF_RSH:
++ case BPF_LSH:
++ /* These may be flushed out later */
++ default:
++ mark_reg_unknown_value(regs, insn->dst_reg);
++ }
++ } else {
++ mark_reg_unknown_value(regs, insn->dst_reg);
++ }
++
++ dst_reg->type = UNKNOWN_VALUE;
++ return 0;
++}
++
+ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
+ {
+@@ -1475,6 +1540,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+ u8 opcode = BPF_OP(insn->code);
+
++ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
++ return evaluate_reg_imm_alu_unknown(env, insn);
++
+ /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
+ * Don't care about overflow or negative values, just add them
+ */
+@@ -1530,10 +1598,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ }
+
+ /* We don't know anything about what was done to this register, mark it
+- * as unknown.
++ * as unknown. Also, if both derived bounds came from signed/unsigned
++ * mixed compares and one side is unbounded, we cannot really do anything
++ * with them as boundaries cannot be trusted. Thus, arithmetic of two
++ * regs of such kind will get invalidated bounds on the dst side.
+ */
+- if (min_val == BPF_REGISTER_MIN_RANGE &&
+- max_val == BPF_REGISTER_MAX_RANGE) {
++ if ((min_val == BPF_REGISTER_MIN_RANGE &&
++ max_val == BPF_REGISTER_MAX_RANGE) ||
++ (BPF_SRC(insn->code) == BPF_X &&
++ ((min_val != BPF_REGISTER_MIN_RANGE &&
++ max_val == BPF_REGISTER_MAX_RANGE) ||
++ (min_val == BPF_REGISTER_MIN_RANGE &&
++ max_val != BPF_REGISTER_MAX_RANGE) ||
++ (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
++ dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
++ (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
++ dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
++ regs[insn->dst_reg].value_from_signed !=
++ regs[insn->src_reg].value_from_signed)) {
+ reset_reg_range_values(regs, insn->dst_reg);
+ return;
+ }
+@@ -1542,10 +1624,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ * do our normal operations to the register, we need to set the values
+ * to the min/max since they are undefined.
+ */
+- if (min_val == BPF_REGISTER_MIN_RANGE)
+- dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+- if (max_val == BPF_REGISTER_MAX_RANGE)
+- dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++ if (opcode != BPF_SUB) {
++ if (min_val == BPF_REGISTER_MIN_RANGE)
++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
++ if (max_val == BPF_REGISTER_MAX_RANGE)
++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++ }
+
+ switch (opcode) {
+ case BPF_ADD:
+@@ -1555,10 +1639,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ dst_reg->max_value += max_val;
+ break;
+ case BPF_SUB:
++ /* If one of our values was at the end of our ranges, then the
++ * _opposite_ value in the dst_reg goes to the end of our range.
++ */
++ if (min_val == BPF_REGISTER_MIN_RANGE)
++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++ if (max_val == BPF_REGISTER_MAX_RANGE)
++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+- dst_reg->min_value -= min_val;
++ dst_reg->min_value -= max_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+- dst_reg->max_value -= max_val;
++ dst_reg->max_value -= min_val;
+ break;
+ case BPF_MUL:
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+@@ -1808,6 +1899,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ * register as unknown.
+ */
+ if (env->allow_ptr_leaks &&
+ BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
1570 + (dst_reg->type == PTR_TO_MAP_VALUE ||
1571 + dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
1572 + dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
1573 +@@ -1876,38 +1968,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1574 + struct bpf_reg_state *false_reg, u64 val,
1575 + u8 opcode)
1576 + {
1577 ++ bool value_from_signed = true;
1578 ++ bool is_range = true;
1579 ++
1580 + switch (opcode) {
1581 + case BPF_JEQ:
1582 + /* If this is false then we know nothing Jon Snow, but if it is
1583 + * true then we know for sure.
1584 + */
1585 + true_reg->max_value = true_reg->min_value = val;
1586 ++ is_range = false;
1587 + break;
1588 + case BPF_JNE:
1589 + /* If this is true we know nothing Jon Snow, but if it is false
1590 + * we know the value for sure;
1591 + */
1592 + false_reg->max_value = false_reg->min_value = val;
1593 ++ is_range = false;
1594 + break;
1595 + case BPF_JGT:
1596 +- /* Unsigned comparison, the minimum value is 0. */
1597 +- false_reg->min_value = 0;
1598 ++ value_from_signed = false;
1599 ++ /* fallthrough */
1600 + case BPF_JSGT:
1601 ++ if (true_reg->value_from_signed != value_from_signed)
1602 ++ reset_reg_range_values(true_reg, 0);
1603 ++ if (false_reg->value_from_signed != value_from_signed)
1604 ++ reset_reg_range_values(false_reg, 0);
1605 ++ if (opcode == BPF_JGT) {
1606 ++ /* Unsigned comparison, the minimum value is 0. */
1607 ++ false_reg->min_value = 0;
1608 ++ }
1609 + /* If this is false then we know the maximum val is val,
1610 + * otherwise we know the min val is val+1.
1611 + */
1612 + false_reg->max_value = val;
1613 ++ false_reg->value_from_signed = value_from_signed;
1614 + true_reg->min_value = val + 1;
1615 ++ true_reg->value_from_signed = value_from_signed;
1616 + break;
1617 + case BPF_JGE:
1618 +- /* Unsigned comparison, the minimum value is 0. */
1619 +- false_reg->min_value = 0;
1620 ++ value_from_signed = false;
1621 ++ /* fallthrough */
1622 + case BPF_JSGE:
1623 ++ if (true_reg->value_from_signed != value_from_signed)
1624 ++ reset_reg_range_values(true_reg, 0);
1625 ++ if (false_reg->value_from_signed != value_from_signed)
1626 ++ reset_reg_range_values(false_reg, 0);
1627 ++ if (opcode == BPF_JGE) {
1628 ++ /* Unsigned comparison, the minimum value is 0. */
1629 ++ false_reg->min_value = 0;
1630 ++ }
1631 + /* If this is false then we know the maximum value is val - 1,
1632 + * otherwise we know the minimum value is val.
1633 + */
1634 + false_reg->max_value = val - 1;
1635 ++ false_reg->value_from_signed = value_from_signed;
1636 + true_reg->min_value = val;
1637 ++ true_reg->value_from_signed = value_from_signed;
1638 + break;
1639 + default:
1640 + break;
1641 +@@ -1915,6 +2032,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1642 +
1643 + check_reg_overflow(false_reg);
1644 + check_reg_overflow(true_reg);
1645 ++ if (is_range) {
1646 ++ if (__is_pointer_value(false, false_reg))
1647 ++ reset_reg_range_values(false_reg, 0);
1648 ++ if (__is_pointer_value(false, true_reg))
1649 ++ reset_reg_range_values(true_reg, 0);
1650 ++ }
1651 + }
1652 +
1653 + /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
1654 +@@ -1924,39 +2047,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
1655 + struct bpf_reg_state *false_reg, u64 val,
1656 + u8 opcode)
1657 + {
1658 ++ bool value_from_signed = true;
1659 ++ bool is_range = true;
1660 ++
1661 + switch (opcode) {
1662 + case BPF_JEQ:
1663 + /* If this is false then we know nothing Jon Snow, but if it is
1664 + * true then we know for sure.
1665 + */
1666 + true_reg->max_value = true_reg->min_value = val;
1667 ++ is_range = false;
1668 + break;
1669 + case BPF_JNE:
1670 + /* If this is true we know nothing Jon Snow, but if it is false
1671 + * we know the value for sure;
1672 + */
1673 + false_reg->max_value = false_reg->min_value = val;
1674 ++ is_range = false;
1675 + break;
1676 + case BPF_JGT:
1677 +- /* Unsigned comparison, the minimum value is 0. */
1678 +- true_reg->min_value = 0;
1679 ++ value_from_signed = false;
1680 ++ /* fallthrough */
1681 + case BPF_JSGT:
1682 ++ if (true_reg->value_from_signed != value_from_signed)
1683 ++ reset_reg_range_values(true_reg, 0);
1684 ++ if (false_reg->value_from_signed != value_from_signed)
1685 ++ reset_reg_range_values(false_reg, 0);
1686 ++ if (opcode == BPF_JGT) {
1687 ++ /* Unsigned comparison, the minimum value is 0. */
1688 ++ true_reg->min_value = 0;
1689 ++ }
1690 + /*
1691 + * If this is false, then the val is <= the register, if it is
1692 + * true the register <= to the val.
1693 + */
1694 + false_reg->min_value = val;
1695 ++ false_reg->value_from_signed = value_from_signed;
1696 + true_reg->max_value = val - 1;
1697 ++ true_reg->value_from_signed = value_from_signed;
1698 + break;
1699 + case BPF_JGE:
1700 +- /* Unsigned comparison, the minimum value is 0. */
1701 +- true_reg->min_value = 0;
1702 ++ value_from_signed = false;
1703 ++ /* fallthrough */
1704 + case BPF_JSGE:
1705 ++ if (true_reg->value_from_signed != value_from_signed)
1706 ++ reset_reg_range_values(true_reg, 0);
1707 ++ if (false_reg->value_from_signed != value_from_signed)
1708 ++ reset_reg_range_values(false_reg, 0);
1709 ++ if (opcode == BPF_JGE) {
1710 ++ /* Unsigned comparison, the minimum value is 0. */
1711 ++ true_reg->min_value = 0;
1712 ++ }
1713 + /* If this is false then constant < register, if it is true then
1714 + * the register < constant.
1715 + */
1716 + false_reg->min_value = val + 1;
1717 ++ false_reg->value_from_signed = value_from_signed;
1718 + true_reg->max_value = val;
1719 ++ true_reg->value_from_signed = value_from_signed;
1720 + break;
1721 + default:
1722 + break;
1723 +@@ -1964,6 +2112,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
1724 +
1725 + check_reg_overflow(false_reg);
1726 + check_reg_overflow(true_reg);
1727 ++ if (is_range) {
1728 ++ if (__is_pointer_value(false, false_reg))
1729 ++ reset_reg_range_values(false_reg, 0);
1730 ++ if (__is_pointer_value(false, true_reg))
1731 ++ reset_reg_range_values(true_reg, 0);
1732 ++ }
1733 + }
1734 +
1735 + static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
1736 +@@ -2390,6 +2544,7 @@ static int check_cfg(struct bpf_verifier_env *env)
1737 + env->explored_states[t + 1] = STATE_LIST_MARK;
1738 + } else {
1739 + /* conditional jump with two edges */
1740 ++ env->explored_states[t] = STATE_LIST_MARK;
1741 + ret = push_insn(t, t + 1, FALLTHROUGH, env);
1742 + if (ret == 1)
1743 + goto peek_stack;
1744 +@@ -2548,6 +2703,12 @@ static bool states_equal(struct bpf_verifier_env *env,
1745 + rcur->type != NOT_INIT))
1746 + continue;
1747 +
1748 ++ /* Don't care about the reg->id in this case. */
1749 ++ if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
1750 ++ rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
1751 ++ rold->map_ptr == rcur->map_ptr)
1752 ++ continue;
1753 ++
1754 + if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
1755 + compare_ptrs_to_packet(rold, rcur))
1756 + continue;
1757 +@@ -2682,6 +2843,9 @@ static int do_check(struct bpf_verifier_env *env)
1758 + goto process_bpf_exit;
1759 + }
1760 +
1761 ++ if (need_resched())
1762 ++ cond_resched();
1763 ++
1764 + if (log_level && do_print_state) {
1765 + verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
1766 + print_verifier_state(&env->cur_state);
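
The verifier hunk above fixes the BPF_SUB range arithmetic: subtracting src's range from dst swaps the roles of the bounds (the smallest result comes from taking the most away), and an unbounded side of src invalidates the opposite bound of dst. A minimal userspace sketch of that interval rule, with illustrative names rather than the kernel's:

    #include <limits.h>
    #include <stdio.h>

    /* Illustrative model of the verifier's BPF_SUB range tracking:
     * [dst_min, dst_max] - [src_min, src_max] =
     * [dst_min - src_max, dst_max - src_min]; sentinel bounds persist.
     */
    #define REG_MIN LLONG_MIN  /* stands in for BPF_REGISTER_MIN_RANGE */
    #define REG_MAX LLONG_MAX  /* stands in for BPF_REGISTER_MAX_RANGE */

    struct range { long long min, max; };

    static struct range range_sub(struct range dst, struct range src)
    {
        struct range r = dst;

        /* An unbounded side of src unbounds the opposite side of dst. */
        if (src.min == REG_MIN)
            r.max = REG_MAX;
        if (src.max == REG_MAX)
            r.min = REG_MIN;
        if (r.min != REG_MIN)
            r.min = dst.min - src.max; /* smallest: take the most away */
        if (r.max != REG_MAX)
            r.max = dst.max - src.min; /* largest: take the least away */
        return r;
    }

    int main(void)
    {
        struct range d = { 0, 100 }, s = { 10, 20 };
        struct range r = range_sub(d, s);

        printf("[%lld, %lld]\n", r.min, r.max); /* [-20, 90] */
        return 0;
    }

The pre-fix code subtracted min from min and max from max, which yields [-10, 80] for the inputs above and wrongly excludes reachable values.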
1767 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1768 +index f5a693589d66..c774773ac3a4 100644
1769 +--- a/kernel/events/core.c
1770 ++++ b/kernel/events/core.c
1771 +@@ -9786,28 +9786,27 @@ SYSCALL_DEFINE5(perf_event_open,
1772 + goto err_context;
1773 +
1774 + /*
1775 +- * Do not allow to attach to a group in a different
1776 +- * task or CPU context:
1777 ++ * Make sure we're both events for the same CPU;
1778 ++ * grouping events for different CPUs is broken, since
1779 ++ * you can never concurrently schedule them anyhow.
1780 + */
1781 +- if (move_group) {
1782 +- /*
1783 +- * Make sure we're both on the same task, or both
1784 +- * per-cpu events.
1785 +- */
1786 +- if (group_leader->ctx->task != ctx->task)
1787 +- goto err_context;
1788 ++ if (group_leader->cpu != event->cpu)
1789 ++ goto err_context;
1790 +
1791 +- /*
1792 +- * Make sure we're both events for the same CPU;
1793 +- * grouping events for different CPUs is broken; since
1794 +- * you can never concurrently schedule them anyhow.
1795 +- */
1796 +- if (group_leader->cpu != event->cpu)
1797 +- goto err_context;
1798 +- } else {
1799 +- if (group_leader->ctx != ctx)
1800 +- goto err_context;
1801 +- }
1802 ++ /*
1803 ++ * Make sure we're both on the same task, or both
1804 ++ * per-CPU events.
1805 ++ */
1806 ++ if (group_leader->ctx->task != ctx->task)
1807 ++ goto err_context;
1808 ++
1809 ++ /*
1810 ++ * Do not allow to attach to a group in a different task
1811 ++ * or CPU context. If we're moving SW events, we'll fix
1812 ++ * this up later, so allow that.
1813 ++ */
1814 ++ if (!move_group && group_leader->ctx != ctx)
1815 ++ goto err_context;
1816 +
1817 + /*
1818 + * Only a group leader can be exclusive or pinned
1819 +diff --git a/kernel/fork.c b/kernel/fork.c
1820 +index 59faac4de181..50bf262cc427 100644
1821 +--- a/kernel/fork.c
1822 ++++ b/kernel/fork.c
1823 +@@ -766,6 +766,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1824 + mm_init_cpumask(mm);
1825 + mm_init_aio(mm);
1826 + mm_init_owner(mm, p);
1827 ++ RCU_INIT_POINTER(mm->exe_file, NULL);
1828 + mmu_notifier_mm_init(mm);
1829 + clear_tlb_flush_pending(mm);
1830 + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
1831 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1832 +index 944ad64277a6..df445cde8a1e 100644
1833 +--- a/kernel/time/timer.c
1834 ++++ b/kernel/time/timer.c
1835 +@@ -201,6 +201,7 @@ struct timer_base {
1836 + bool migration_enabled;
1837 + bool nohz_active;
1838 + bool is_idle;
1839 ++ bool must_forward_clk;
1840 + DECLARE_BITMAP(pending_map, WHEEL_SIZE);
1841 + struct hlist_head vectors[WHEEL_SIZE];
1842 + } ____cacheline_aligned;
1843 +@@ -891,13 +892,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
1844 +
1845 + static inline void forward_timer_base(struct timer_base *base)
1846 + {
1847 +- unsigned long jnow = READ_ONCE(jiffies);
1848 ++ unsigned long jnow;
1849 +
1850 + /*
1851 +- * We only forward the base when it's idle and we have a delta between
1852 +- * base clock and jiffies.
1853 ++ * We only forward the base when we are idle or have just come out of
1854 ++ * idle (must_forward_clk logic), and have a delta between base clock
1855 ++ * and jiffies. In the common case, run_timers will take care of it.
1856 + */
1857 +- if (!base->is_idle || (long) (jnow - base->clk) < 2)
1858 ++ if (likely(!base->must_forward_clk))
1859 ++ return;
1860 ++
1861 ++ jnow = READ_ONCE(jiffies);
1862 ++ base->must_forward_clk = base->is_idle;
1863 ++ if ((long)(jnow - base->clk) < 2)
1864 + return;
1865 +
1866 + /*
1867 +@@ -973,6 +980,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1868 + * same array bucket then just return:
1869 + */
1870 + if (timer_pending(timer)) {
1871 ++ /*
1872 ++ * The downside of this optimization is that it can result in
1873 ++ * larger granularity than you would get from adding a new
1874 ++ * timer with this expiry.
1875 ++ */
1876 + if (timer->expires == expires)
1877 + return 1;
1878 +
1879 +@@ -983,6 +995,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1880 + * dequeue/enqueue dance.
1881 + */
1882 + base = lock_timer_base(timer, &flags);
1883 ++ forward_timer_base(base);
1884 +
1885 + clk = base->clk;
1886 + idx = calc_wheel_index(expires, clk);
1887 +@@ -999,6 +1012,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1888 + }
1889 + } else {
1890 + base = lock_timer_base(timer, &flags);
1891 ++ forward_timer_base(base);
1892 + }
1893 +
1894 + timer_stats_timer_set_start_info(timer);
1895 +@@ -1028,12 +1042,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1896 + spin_lock(&base->lock);
1897 + WRITE_ONCE(timer->flags,
1898 + (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1899 ++ forward_timer_base(base);
1900 + }
1901 + }
1902 +
1903 +- /* Try to forward a stale timer base clock */
1904 +- forward_timer_base(base);
1905 +-
1906 + timer->expires = expires;
1907 + /*
1908 + * If 'idx' was calculated above and the base time did not advance
1909 +@@ -1150,6 +1162,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
1910 + WRITE_ONCE(timer->flags,
1911 + (timer->flags & ~TIMER_BASEMASK) | cpu);
1912 + }
1913 ++ forward_timer_base(base);
1914 +
1915 + debug_activate(timer, timer->expires);
1916 + internal_add_timer(base, timer);
1917 +@@ -1538,10 +1551,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1918 + if (!is_max_delta)
1919 + expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
1920 + /*
1921 +- * If we expect to sleep more than a tick, mark the base idle:
1922 ++ * If we expect to sleep more than a tick, mark the base idle.
1923 ++ * Also the tick is stopped so any added timer must forward
1924 ++ * the base clk itself to keep granularity small. This idle
1925 ++ * logic is only maintained for the BASE_STD base, deferrable
1926 ++ * timers may still see large granularity skew (by design).
1927 + */
1928 +- if ((expires - basem) > TICK_NSEC)
1929 ++ if ((expires - basem) > TICK_NSEC) {
1930 ++ base->must_forward_clk = true;
1931 + base->is_idle = true;
1932 ++ }
1933 + }
1934 + spin_unlock(&base->lock);
1935 +
1936 +@@ -1651,6 +1670,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1937 + {
1938 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1939 +
1940 ++ /*
1941 ++ * must_forward_clk must be cleared before running timers so that any
1942 ++ * timer functions that call mod_timer will not try to forward the
1943 ++ * base. Idle tracking / clock forwarding logic is only used with
1944 ++ * BASE_STD timers.
1945 ++ *
1946 ++ * The deferrable base does not do idle tracking at all, so we do
1947 ++ * not forward it. This can result in very large variations in
1948 ++ * granularity for deferrable timers, but they can be deferred for
1949 ++ * long periods due to idle.
1950 ++ */
1951 ++ base->must_forward_clk = false;
1952 ++
1953 + __run_timers(base);
1954 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1955 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
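
The timer changes above replace an is_idle check with a dedicated must_forward_clk flag: the base clock is forwarded lazily, and only when the CPU was (or has just been) idle, so a timer armed after a long idle period is queued relative to "now" rather than to a stale base->clk. A minimal userspace model of that logic; names mirror the patch but the wheel itself is elided:

    #include <stdbool.h>
    #include <stdio.h>

    struct base {
        unsigned long clk;        /* last jiffy the wheel ran at */
        bool is_idle;
        bool must_forward_clk;
    };

    static void forward_base(struct base *b, unsigned long jnow)
    {
        if (!b->must_forward_clk)
            return;               /* common case: run_timers handles it */
        b->must_forward_clk = b->is_idle;
        if ((long)(jnow - b->clk) < 2)
            return;               /* nothing meaningful to forward */
        b->clk = jnow;            /* simplified: the kernel also caps this
                                     by the next pending expiry */
    }

    int main(void)
    {
        struct base b = { .clk = 1000, .is_idle = true,
                          .must_forward_clk = true };

        forward_base(&b, 1500);
        printf("clk=%lu\n", b.clk); /* 1500: the stale clock moved up */
        return 0;
    }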
1956 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
1957 +index 5dcb99281259..41805fb3c661 100644
1958 +--- a/kernel/trace/bpf_trace.c
1959 ++++ b/kernel/trace/bpf_trace.c
1960 +@@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
1961 + fmt_cnt++;
1962 + }
1963 +
1964 +- return __trace_printk(1/* fake ip will not be printed */, fmt,
1965 +- mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
1966 +- mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
1967 +- mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
1968 ++/* Horrid workaround for getting va_list handling working with different
1969 ++ * argument type combinations generically for 32 and 64 bit archs.
1970 ++ */
1971 ++#define __BPF_TP_EMIT() __BPF_ARG3_TP()
1972 ++#define __BPF_TP(...) \
1973 ++ __trace_printk(1 /* Fake ip will not be printed. */, \
1974 ++ fmt, ##__VA_ARGS__)
1975 ++
1976 ++#define __BPF_ARG1_TP(...) \
1977 ++ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
1978 ++ ? __BPF_TP(arg1, ##__VA_ARGS__) \
1979 ++ : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
1980 ++ ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
1981 ++ : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
1982 ++
1983 ++#define __BPF_ARG2_TP(...) \
1984 ++ ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
1985 ++ ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
1986 ++ : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
1987 ++ ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
1988 ++ : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
1989 ++
1990 ++#define __BPF_ARG3_TP(...) \
1991 ++ ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
1992 ++ ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
1993 ++ : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
1994 ++ ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
1995 ++ : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
1996 ++
1997 ++ return __BPF_TP_EMIT();
1998 + }
1999 +
2000 + static const struct bpf_func_proto bpf_trace_printk_proto = {
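
The bpf_trace hunk replaces three nested conditional expressions with chained macros so that each argument's runtime type modifier picks the correct cast while still ending in a single __trace_printk() call. A toy two-argument version of the same trick, compiled as GNU C (the ##__VA_ARGS__ comma deletion is a GNU extension, as in the kernel); the __BITS_PER_LONG special-casing is omitted here:

    #include <stdio.h>

    /* mod[i]: 0 = int, 1 = long, 2 = long long. The macros chain so one
     * printf call covers every cast combination of the two arguments.
     */
    #define EMIT() ARG2()
    #define TP(...) printf(fmt, ##__VA_ARGS__)

    #define ARG1(...)                                         \
        (mod[0] == 2 ? TP((long long)a1, ##__VA_ARGS__)       \
         : mod[0] == 1 ? TP((long)a1, ##__VA_ARGS__)          \
                       : TP((unsigned int)a1, ##__VA_ARGS__))

    #define ARG2(...)                                         \
        (mod[1] == 2 ? ARG1((long long)a2, ##__VA_ARGS__)     \
         : mod[1] == 1 ? ARG1((long)a2, ##__VA_ARGS__)        \
                       : ARG1((unsigned int)a2, ##__VA_ARGS__))

    static int demo(const char *fmt, const int mod[2],
                    unsigned long long a1, unsigned long long a2)
    {
        return EMIT();
    }

    int main(void)
    {
        int mod[2] = { 0, 2 };

        demo("%u %lld\n", mod, 7, 1ULL << 40); /* 7 1099511627776 */
        return 0;
    }

Each macro prepends its own argument to the variadic list, so the final expansion passes the casts to printf in the original argument order.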
2001 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2002 +index 4f7ea8446bb5..6e432ed7d0fe 100644
2003 +--- a/kernel/trace/ftrace.c
2004 ++++ b/kernel/trace/ftrace.c
2005 +@@ -876,6 +876,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
2006 +
2007 + function_profile_call(trace->func, 0, NULL, NULL);
2008 +
2009 ++ /* If function graph is shutting down, ret_stack can be NULL */
2010 ++ if (!current->ret_stack)
2011 ++ return 0;
2012 ++
2013 + if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
2014 + current->ret_stack[index].subtime = 0;
2015 +
2016 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2017 +index 53c308068e39..7379f735a9f4 100644
2018 +--- a/kernel/trace/trace.c
2019 ++++ b/kernel/trace/trace.c
2020 +@@ -7767,4 +7767,4 @@ __init static int clear_boot_tracer(void)
2021 + }
2022 +
2023 + fs_initcall(tracer_init_tracefs);
2024 +-late_initcall(clear_boot_tracer);
2025 ++late_initcall_sync(clear_boot_tracer);
2026 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
2027 +index 9daa9b3bc6d9..0193f58c45f0 100644
2028 +--- a/kernel/trace/trace_events_filter.c
2029 ++++ b/kernel/trace/trace_events_filter.c
2030 +@@ -1926,6 +1926,10 @@ static int create_filter(struct trace_event_call *call,
2031 + if (err && set_str)
2032 + append_filter_err(ps, filter);
2033 + }
2034 ++ if (err && !set_str) {
2035 ++ free_event_filter(filter);
2036 ++ filter = NULL;
2037 ++ }
2038 + create_filter_finish(ps);
2039 +
2040 + *filterp = filter;
2041 +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
2042 +index 0a689bbb78ef..305039b122fa 100644
2043 +--- a/kernel/trace/tracing_map.c
2044 ++++ b/kernel/trace/tracing_map.c
2045 +@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
2046 + if (!a)
2047 + return;
2048 +
2049 +- if (!a->pages) {
2050 +- kfree(a);
2051 +- return;
2052 +- }
2053 ++ if (!a->pages)
2054 ++ goto free;
2055 +
2056 + for (i = 0; i < a->n_pages; i++) {
2057 + if (!a->pages[i])
2058 + break;
2059 + free_page((unsigned long)a->pages[i]);
2060 + }
2061 ++
2062 ++ kfree(a->pages);
2063 ++
2064 ++ free:
2065 ++ kfree(a);
2066 + }
2067 +
2068 + struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
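
The tracing_map hunk converts an early return into a unified cleanup label: the old code freed each page but leaked both the page-pointer vector and, on one path, the struct itself. A sketch of the fixed teardown shape with simplified, illustrative names:

    #include <stdlib.h>

    struct array {
        size_t n_pages;
        void **pages;
    };

    static void array_free(struct array *a)
    {
        size_t i;

        if (!a)
            return;
        if (!a->pages)
            goto free;              /* nothing else to release */
        for (i = 0; i < a->n_pages; i++) {
            if (!a->pages[i])
                break;
            free(a->pages[i]);
        }
        free(a->pages);             /* this vector was the leaked block */
    free:
        free(a);                    /* every path now releases the struct */
    }

    int main(void)
    {
        struct array *a = calloc(1, sizeof(*a));

        a->pages = calloc(1, sizeof(void *));
        array_free(a);              /* no leak on either path */
        return 0;
    }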
2069 +diff --git a/mm/madvise.c b/mm/madvise.c
2070 +index 253b1533fba5..63a12162f4c6 100644
2071 +--- a/mm/madvise.c
2072 ++++ b/mm/madvise.c
2073 +@@ -331,8 +331,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
2074 + pte_offset_map_lock(mm, pmd, addr, &ptl);
2075 + goto out;
2076 + }
2077 +- put_page(page);
2078 + unlock_page(page);
2079 ++ put_page(page);
2080 + pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2081 + pte--;
2082 + addr -= PAGE_SIZE;
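
The two-line madvise swap above encodes an ordering rule: put_page() may drop the final reference and free the page, after which unlock_page() would write into freed memory. A toy refcount model of why the unlock must come first; the names are illustrative, not the mm API:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct page {
        pthread_mutex_t lock;
        atomic_int refcount;
    };

    static void put_page(struct page *p)
    {
        if (atomic_fetch_sub(&p->refcount, 1) == 1) {
            pthread_mutex_destroy(&p->lock);
            free(p);                    /* object gone after the last put */
        }
    }

    static void release(struct page *p)
    {
        pthread_mutex_unlock(&p->lock); /* first: stop touching the object */
        put_page(p);                    /* then: maybe free it */
    }

    int main(void)
    {
        struct page *p = malloc(sizeof(*p));

        pthread_mutex_init(&p->lock, NULL);
        atomic_init(&p->refcount, 1);
        pthread_mutex_lock(&p->lock);
        release(p);
        return 0;
    }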
2083 +diff --git a/mm/memblock.c b/mm/memblock.c
2084 +index ccec42c12ba8..42b98af6a415 100644
2085 +--- a/mm/memblock.c
2086 ++++ b/mm/memblock.c
2087 +@@ -311,7 +311,7 @@ void __init memblock_discard(void)
2088 + __memblock_free_late(addr, size);
2089 + }
2090 +
2091 +- if (memblock.memory.regions == memblock_memory_init_regions) {
2092 ++ if (memblock.memory.regions != memblock_memory_init_regions) {
2093 + addr = __pa(memblock.memory.regions);
2094 + size = PAGE_ALIGN(sizeof(struct memblock_region) *
2095 + memblock.memory.max);
2096 +diff --git a/mm/shmem.c b/mm/shmem.c
2097 +index 7ee5444ffb6d..004e0f87e8a8 100644
2098 +--- a/mm/shmem.c
2099 ++++ b/mm/shmem.c
2100 +@@ -3810,7 +3810,7 @@ int __init shmem_init(void)
2101 + }
2102 +
2103 + #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
2104 +- if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
2105 ++ if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
2106 + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
2107 + else
2108 + shmem_huge = 0; /* just in case it was patched */
2109 +@@ -3871,7 +3871,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
2110 + return -EINVAL;
2111 +
2112 + shmem_huge = huge;
2113 +- if (shmem_huge < SHMEM_HUGE_DENY)
2114 ++ if (shmem_huge > SHMEM_HUGE_DENY)
2115 + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
2116 + return count;
2117 + }
2118 +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
2119 +index fbf251fef70f..4d6b94d7ce5f 100644
2120 +--- a/net/bluetooth/bnep/core.c
2121 ++++ b/net/bluetooth/bnep/core.c
2122 +@@ -484,16 +484,16 @@ static int bnep_session(void *arg)
2123 + struct net_device *dev = s->dev;
2124 + struct sock *sk = s->sock->sk;
2125 + struct sk_buff *skb;
2126 +- wait_queue_t wait;
2127 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2128 +
2129 + BT_DBG("");
2130 +
2131 + set_user_nice(current, -15);
2132 +
2133 +- init_waitqueue_entry(&wait, current);
2134 + add_wait_queue(sk_sleep(sk), &wait);
2135 + while (1) {
2136 +- set_current_state(TASK_INTERRUPTIBLE);
2137 ++ /* Ensure session->terminate is updated */
2138 ++ smp_mb__before_atomic();
2139 +
2140 + if (atomic_read(&s->terminate))
2141 + break;
2142 +@@ -515,9 +515,8 @@ static int bnep_session(void *arg)
2143 + break;
2144 + netif_wake_queue(dev);
2145 +
2146 +- schedule();
2147 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2148 + }
2149 +- __set_current_state(TASK_RUNNING);
2150 + remove_wait_queue(sk_sleep(sk), &wait);
2151 +
2152 + /* Cleanup session */
2153 +@@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
2154 + s = __bnep_get_session(req->dst);
2155 + if (s) {
2156 + atomic_inc(&s->terminate);
2157 +- wake_up_process(s->task);
2158 ++ wake_up_interruptible(sk_sleep(s->sock->sk));
2159 + } else
2160 + err = -ENOENT;
2161 +
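
This bnep hunk, like the cmtp and hidp hunks that follow, converts a hand-rolled set_current_state()/schedule() loop into DEFINE_WAIT_FUNC(wait, woken_wake_function) plus wait_woken(), closing a lost-wakeup window: a wakeup arriving between the terminate check and the sleep is remembered instead of dropped. A userspace model of the property being restored, using a mutex/condvar pair in place of the kernel's wait-queue machinery:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool terminate;

    static void *session(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!terminate)          /* flag checked under the same lock the
                                       waker holds: no check/sleep window */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("session: terminating");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, session, NULL);
        sleep(1);
        pthread_mutex_lock(&lock);
        terminate = true;           /* update and wakeup are one step */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }

The smp_mb__before_atomic()/smp_mb__after_atomic() pairs in the patch serve the same role the lock plays here: they order the terminate store against the wakeup.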
2162 +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
2163 +index 9e59b6654126..1152ce34dad4 100644
2164 +--- a/net/bluetooth/cmtp/core.c
2165 ++++ b/net/bluetooth/cmtp/core.c
2166 +@@ -280,16 +280,16 @@ static int cmtp_session(void *arg)
2167 + struct cmtp_session *session = arg;
2168 + struct sock *sk = session->sock->sk;
2169 + struct sk_buff *skb;
2170 +- wait_queue_t wait;
2171 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2172 +
2173 + BT_DBG("session %p", session);
2174 +
2175 + set_user_nice(current, -15);
2176 +
2177 +- init_waitqueue_entry(&wait, current);
2178 + add_wait_queue(sk_sleep(sk), &wait);
2179 + while (1) {
2180 +- set_current_state(TASK_INTERRUPTIBLE);
2181 ++ /* Ensure session->terminate is updated */
2182 ++ smp_mb__before_atomic();
2183 +
2184 + if (atomic_read(&session->terminate))
2185 + break;
2186 +@@ -306,9 +306,8 @@ static int cmtp_session(void *arg)
2187 +
2188 + cmtp_process_transmit(session);
2189 +
2190 +- schedule();
2191 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2192 + }
2193 +- __set_current_state(TASK_RUNNING);
2194 + remove_wait_queue(sk_sleep(sk), &wait);
2195 +
2196 + down_write(&cmtp_session_sem);
2197 +@@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
2198 + err = cmtp_attach_device(session);
2199 + if (err < 0) {
2200 + atomic_inc(&session->terminate);
2201 +- wake_up_process(session->task);
2202 ++ wake_up_interruptible(sk_sleep(session->sock->sk));
2203 + up_write(&cmtp_session_sem);
2204 + return err;
2205 + }
2206 +@@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
2207 +
2208 + /* Stop session thread */
2209 + atomic_inc(&session->terminate);
2210 +- wake_up_process(session->task);
2211 ++
2212 ++ /* Ensure session->terminate is updated */
2213 ++ smp_mb__after_atomic();
2214 ++
2215 ++ wake_up_interruptible(sk_sleep(session->sock->sk));
2216 + } else
2217 + err = -ENOENT;
2218 +
2219 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
2220 +index 0bec4588c3c8..1fc076420d1e 100644
2221 +--- a/net/bluetooth/hidp/core.c
2222 ++++ b/net/bluetooth/hidp/core.c
2223 +@@ -36,6 +36,7 @@
2224 + #define VERSION "1.2"
2225 +
2226 + static DECLARE_RWSEM(hidp_session_sem);
2227 ++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
2228 + static LIST_HEAD(hidp_session_list);
2229 +
2230 + static unsigned char hidp_keycode[256] = {
2231 +@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
2232 + * Wake up session thread and notify it to stop. This is asynchronous and
2233 + * returns immediately. Call this whenever a runtime error occurs and you want
2234 + * the session to stop.
2235 +- * Note: wake_up_process() performs any necessary memory-barriers for us.
2236 ++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
2237 + */
2238 + static void hidp_session_terminate(struct hidp_session *session)
2239 + {
2240 + atomic_inc(&session->terminate);
2241 +- wake_up_process(session->task);
2242 ++ wake_up_interruptible(&hidp_session_wq);
2243 + }
2244 +
2245 + /*
2246 +@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session)
2247 + struct sock *ctrl_sk = session->ctrl_sock->sk;
2248 + struct sock *intr_sk = session->intr_sock->sk;
2249 + struct sk_buff *skb;
2250 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2251 +
2252 ++ add_wait_queue(&hidp_session_wq, &wait);
2253 + for (;;) {
2254 + /*
2255 + * This thread can be woken up two ways:
2256 +@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session)
2257 + * session->terminate flag and wakes this thread up.
2258 + * - Via modifying the socket state of ctrl/intr_sock. This
2259 + * thread is woken up by ->sk_state_changed().
2260 +- *
2261 +- * Note: set_current_state() performs any necessary
2262 +- * memory-barriers for us.
2263 + */
2264 +- set_current_state(TASK_INTERRUPTIBLE);
2265 +
2266 ++ /* Ensure session->terminate is updated */
2267 ++ smp_mb__before_atomic();
2268 + if (atomic_read(&session->terminate))
2269 + break;
2270 +
2271 +@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session)
2272 + hidp_process_transmit(session, &session->ctrl_transmit,
2273 + session->ctrl_sock);
2274 +
2275 +- schedule();
2276 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2277 + }
2278 ++ remove_wait_queue(&hidp_session_wq, &wait);
2279 +
2280 + atomic_inc(&session->terminate);
2281 +- set_current_state(TASK_RUNNING);
2282 ++
2283 ++ /* Ensure session->terminate is updated */
2284 ++ smp_mb__after_atomic();
2285 ++}
2286 ++
2287 ++static int hidp_session_wake_function(wait_queue_t *wait,
2288 ++ unsigned int mode,
2289 ++ int sync, void *key)
2290 ++{
2291 ++ wake_up_interruptible(&hidp_session_wq);
2292 ++ return false;
2293 + }
2294 +
2295 + /*
2296 +@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session)
2297 + static int hidp_session_thread(void *arg)
2298 + {
2299 + struct hidp_session *session = arg;
2300 +- wait_queue_t ctrl_wait, intr_wait;
2301 ++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
2302 ++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
2303 +
2304 + BT_DBG("session %p", session);
2305 +
2306 +@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg)
2307 + set_user_nice(current, -15);
2308 + hidp_set_timer(session);
2309 +
2310 +- init_waitqueue_entry(&ctrl_wait, current);
2311 +- init_waitqueue_entry(&intr_wait, current);
2312 + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
2313 + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
2314 + /* This memory barrier is paired with wq_has_sleeper(). See
2315 +diff --git a/net/dccp/proto.c b/net/dccp/proto.c
2316 +index 9fe25bf63296..b68168fcc06a 100644
2317 +--- a/net/dccp/proto.c
2318 ++++ b/net/dccp/proto.c
2319 +@@ -24,6 +24,7 @@
2320 + #include <net/checksum.h>
2321 +
2322 + #include <net/inet_sock.h>
2323 ++#include <net/inet_common.h>
2324 + #include <net/sock.h>
2325 + #include <net/xfrm.h>
2326 +
2327 +@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
2328 +
2329 + EXPORT_SYMBOL_GPL(dccp_packet_name);
2330 +
2331 ++static void dccp_sk_destruct(struct sock *sk)
2332 ++{
2333 ++ struct dccp_sock *dp = dccp_sk(sk);
2334 ++
2335 ++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
2336 ++ dp->dccps_hc_tx_ccid = NULL;
2337 ++ inet_sock_destruct(sk);
2338 ++}
2339 ++
2340 + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
2341 + {
2342 + struct dccp_sock *dp = dccp_sk(sk);
2343 +@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
2344 + icsk->icsk_syn_retries = sysctl_dccp_request_retries;
2345 + sk->sk_state = DCCP_CLOSED;
2346 + sk->sk_write_space = dccp_write_space;
2347 ++ sk->sk_destruct = dccp_sk_destruct;
2348 + icsk->icsk_sync_mss = dccp_sync_mss;
2349 + dp->dccps_mss_cache = 536;
2350 + dp->dccps_rate_last = jiffies;
2351 +@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
2352 + {
2353 + struct dccp_sock *dp = dccp_sk(sk);
2354 +
2355 +- /*
2356 +- * DCCP doesn't use sk_write_queue, just sk_send_head
2357 +- * for retransmissions
2358 +- */
2359 ++ __skb_queue_purge(&sk->sk_write_queue);
2360 + if (sk->sk_send_head != NULL) {
2361 + kfree_skb(sk->sk_send_head);
2362 + sk->sk_send_head = NULL;
2363 +@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
2364 + dp->dccps_hc_rx_ackvec = NULL;
2365 + }
2366 + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
2367 +- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
2368 +- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
2369 ++ dp->dccps_hc_rx_ccid = NULL;
2370 +
2371 + /* clean up feature negotiation state */
2372 + dccp_feat_list_purge(&dp->dccps_featneg);
2373 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2374 +index 7563831fa432..38c1c979ecb1 100644
2375 +--- a/net/ipv4/fib_semantics.c
2376 ++++ b/net/ipv4/fib_semantics.c
2377 +@@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
2378 + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
2379 + if (!fi)
2380 + goto failure;
2381 +- fib_info_cnt++;
2382 + if (cfg->fc_mx) {
2383 + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
2384 +- if (!fi->fib_metrics)
2385 +- goto failure;
2386 ++ if (unlikely(!fi->fib_metrics)) {
2387 ++ kfree(fi);
2388 ++ return ERR_PTR(err);
2389 ++ }
2390 + atomic_set(&fi->fib_metrics->refcnt, 1);
2391 +- } else
2392 ++ } else {
2393 + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
2394 +-
2395 ++ }
2396 ++ fib_info_cnt++;
2397 + fi->fib_net = net;
2398 + fi->fib_protocol = cfg->fc_protocol;
2399 + fi->fib_scope = cfg->fc_scope;
2400 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2401 +index 6cd49fd17ac0..6a5b7783932e 100644
2402 +--- a/net/ipv4/route.c
2403 ++++ b/net/ipv4/route.c
2404 +@@ -1247,7 +1247,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
2405 + if (mtu)
2406 + return mtu;
2407 +
2408 +- mtu = dst->dev->mtu;
2409 ++ mtu = READ_ONCE(dst->dev->mtu);
2410 +
2411 + if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
2412 + if (rt->rt_uses_gateway && mtu > 576)
2413 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2414 +index 32c540145c17..c03850771a4e 100644
2415 +--- a/net/ipv4/tcp_input.c
2416 ++++ b/net/ipv4/tcp_input.c
2417 +@@ -3036,8 +3036,7 @@ void tcp_rearm_rto(struct sock *sk)
2418 + /* delta may not be positive if the socket is locked
2419 + * when the retrans timer fires and is rescheduled.
2420 + */
2421 +- if (delta > 0)
2422 +- rto = delta;
2423 ++ rto = max(delta, 1);
2424 + }
2425 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
2426 + TCP_RTO_MAX);
2427 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2428 +index 4345ee39f180..ff389591a340 100644
2429 +--- a/net/ipv6/ip6_fib.c
2430 ++++ b/net/ipv6/ip6_fib.c
2431 +@@ -897,6 +897,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2432 + }
2433 + nsiblings = iter->rt6i_nsiblings;
2434 + fib6_purge_rt(iter, fn, info->nl_net);
2435 ++ if (fn->rr_ptr == iter)
2436 ++ fn->rr_ptr = NULL;
2437 + rt6_release(iter);
2438 +
2439 + if (nsiblings) {
2440 +@@ -909,6 +911,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2441 + if (rt6_qualify_for_ecmp(iter)) {
2442 + *ins = iter->dst.rt6_next;
2443 + fib6_purge_rt(iter, fn, info->nl_net);
2444 ++ if (fn->rr_ptr == iter)
2445 ++ fn->rr_ptr = NULL;
2446 + rt6_release(iter);
2447 + nsiblings--;
2448 + } else {
2449 +@@ -997,7 +1001,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2450 + /* Create subtree root node */
2451 + sfn = node_alloc();
2452 + if (!sfn)
2453 +- goto st_failure;
2454 ++ goto failure;
2455 +
2456 + sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
2457 + atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
2458 +@@ -1013,12 +1017,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2459 +
2460 + if (IS_ERR(sn)) {
2461 + /* If it is failed, discard just allocated
2462 +- root, and then (in st_failure) stale node
2463 ++ root, and then (in failure) stale node
2464 + in main tree.
2465 + */
2466 + node_free(sfn);
2467 + err = PTR_ERR(sn);
2468 +- goto st_failure;
2469 ++ goto failure;
2470 + }
2471 +
2472 + /* Now link new subtree to main tree */
2473 +@@ -1032,7 +1036,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2474 +
2475 + if (IS_ERR(sn)) {
2476 + err = PTR_ERR(sn);
2477 +- goto st_failure;
2478 ++ goto failure;
2479 + }
2480 + }
2481 +
2482 +@@ -1074,22 +1078,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2483 + atomic_inc(&pn->leaf->rt6i_ref);
2484 + }
2485 + #endif
2486 +- if (!(rt->dst.flags & DST_NOCACHE))
2487 +- dst_free(&rt->dst);
2488 ++ goto failure;
2489 + }
2490 + return err;
2491 +
2492 +-#ifdef CONFIG_IPV6_SUBTREES
2493 +- /* Subtree creation failed, probably main tree node
2494 +- is orphan. If it is, shoot it.
2495 ++failure:
2496 ++ /* fn->leaf could be NULL if fn is an intermediate node and we
2497 ++ * failed to add the new route to it, in both the subtree-creation
2498 ++ * and fib6_add_rt2node() failure cases.
2499 ++ * In both cases, fib6_repair_tree() should be called to fix
2500 ++ * fn->leaf.
2501 + */
2502 +-st_failure:
2503 + if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
2504 + fib6_repair_tree(info->nl_net, fn);
2505 + if (!(rt->dst.flags & DST_NOCACHE))
2506 + dst_free(&rt->dst);
2507 + return err;
2508 +-#endif
2509 + }
2510 +
2511 + /*
2512 +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
2513 +index 391c3cbd2eed..101ed6c42808 100644
2514 +--- a/net/irda/af_irda.c
2515 ++++ b/net/irda/af_irda.c
2516 +@@ -2223,7 +2223,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
2517 + {
2518 + struct sock *sk = sock->sk;
2519 + struct irda_sock *self = irda_sk(sk);
2520 +- struct irda_device_list list;
2521 ++ struct irda_device_list list = { 0 };
2522 + struct irda_device_info *discoveries;
2523 + struct irda_ias_set * ias_opt; /* IAS get/query params */
2524 + struct ias_object * ias_obj; /* Object in IAS */
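
The one-line irda fix above zero-initializes a struct that is later copied out to user space; without it, unwritten fields and padding carry stale kernel stack bytes, which is an information leak. A sketch of the pattern with illustrative names (compilers in practice zero the whole object for '= { 0 }', padding included, which is what such fixes rely on):

    #include <stdio.h>
    #include <string.h>

    struct device_list {
        unsigned char len;      /* followed by padding on most ABIs */
        unsigned int count;
        unsigned int ids[4];
    };

    static void fill_reply(struct device_list *out)
    {
        struct device_list list = { 0 }; /* every byte starts out zeroed */

        list.len = 1;
        list.ids[0] = 42;
        memcpy(out, &list, sizeof(list)); /* stands in for copy_to_user() */
    }

    int main(void)
    {
        struct device_list out;

        fill_reply(&out);
        printf("count=%u first id=%u\n", out.count, out.ids[0]); /* 0, 42 */
        return 0;
    }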
2525 +diff --git a/net/key/af_key.c b/net/key/af_key.c
2526 +index 2e1050ec2cf0..94bf810ad242 100644
2527 +--- a/net/key/af_key.c
2528 ++++ b/net/key/af_key.c
2529 +@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
2530 + #define BROADCAST_ONE 1
2531 + #define BROADCAST_REGISTERED 2
2532 + #define BROADCAST_PROMISC_ONLY 4
2533 +-static int pfkey_broadcast(struct sk_buff *skb,
2534 ++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
2535 + int broadcast_flags, struct sock *one_sk,
2536 + struct net *net)
2537 + {
2538 +@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
2539 + rcu_read_unlock();
2540 +
2541 + if (one_sk != NULL)
2542 +- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
2543 ++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
2544 +
2545 + kfree_skb(skb2);
2546 + kfree_skb(skb);
2547 +@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
2548 + hdr = (struct sadb_msg *) pfk->dump.skb->data;
2549 + hdr->sadb_msg_seq = 0;
2550 + hdr->sadb_msg_errno = rc;
2551 +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
2552 ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
2553 + &pfk->sk, sock_net(&pfk->sk));
2554 + pfk->dump.skb = NULL;
2555 + }
2556 +@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
2557 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
2558 + sizeof(uint64_t));
2559 +
2560 +- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
2561 ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
2562 +
2563 + return 0;
2564 + }
2565 +@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
2566 +
2567 + xfrm_state_put(x);
2568 +
2569 +- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
2570 ++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
2571 +
2572 + return 0;
2573 + }
2574 +@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
2575 + hdr->sadb_msg_seq = c->seq;
2576 + hdr->sadb_msg_pid = c->portid;
2577 +
2578 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
2579 ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
2580 +
2581 + return 0;
2582 + }
2583 +@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
2584 + out_hdr->sadb_msg_reserved = 0;
2585 + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
2586 + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
2587 +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
2588 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
2589 +
2590 + return 0;
2591 + }
2592 +@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
2593 + return -ENOBUFS;
2594 + }
2595 +
2596 +- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
2597 +-
2598 ++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
2599 ++ sock_net(sk));
2600 + return 0;
2601 + }
2602 +
2603 +@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
2604 + hdr->sadb_msg_errno = (uint8_t) 0;
2605 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2606 +
2607 +- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
2608 ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
2609 ++ sock_net(sk));
2610 + }
2611 +
2612 + static int key_notify_sa_flush(const struct km_event *c)
2613 +@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
2614 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2615 + hdr->sadb_msg_reserved = 0;
2616 +
2617 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
2618 ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2619 +
2620 + return 0;
2621 + }
2622 +@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
2623 + out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
2624 +
2625 + if (pfk->dump.skb)
2626 +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
2627 ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
2628 + &pfk->sk, sock_net(&pfk->sk));
2629 + pfk->dump.skb = out_skb;
2630 +
2631 +@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
2632 + new_hdr->sadb_msg_errno = 0;
2633 + }
2634 +
2635 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
2636 ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
2637 + return 0;
2638 + }
2639 +
2640 +@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
2641 + out_hdr->sadb_msg_errno = 0;
2642 + out_hdr->sadb_msg_seq = c->seq;
2643 + out_hdr->sadb_msg_pid = c->portid;
2644 +- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
2645 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2646 + return 0;
2647 +
2648 + }
2649 +@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
2650 + out_hdr->sadb_msg_errno = 0;
2651 + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
2652 + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
2653 +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
2654 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
2655 + err = 0;
2656 +
2657 + out:
2658 +@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2659 + out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
2660 +
2661 + if (pfk->dump.skb)
2662 +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
2663 ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
2664 + &pfk->sk, sock_net(&pfk->sk));
2665 + pfk->dump.skb = out_skb;
2666 +
2667 +@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2668 + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2669 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2670 + hdr->sadb_msg_reserved = 0;
2671 +- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
2672 ++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2673 + return 0;
2674 +
2675 + }
2676 +@@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
2677 + void *ext_hdrs[SADB_EXT_MAX];
2678 + int err;
2679 +
2680 +- pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
2681 ++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
2682 + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
2683 +
2684 + memset(ext_hdrs, 0, sizeof(ext_hdrs));
2685 +@@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
2686 + out_hdr->sadb_msg_seq = 0;
2687 + out_hdr->sadb_msg_pid = 0;
2688 +
2689 +- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
2690 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
2691 ++ xs_net(x));
2692 + return 0;
2693 + }
2694 +
2695 +@@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
2696 + xfrm_ctx->ctx_len);
2697 + }
2698 +
2699 +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
2700 ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
2701 ++ xs_net(x));
2702 + }
2703 +
2704 + static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
2705 +@@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2706 + n_port->sadb_x_nat_t_port_port = sport;
2707 + n_port->sadb_x_nat_t_port_reserved = 0;
2708 +
2709 +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
2710 ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
2711 ++ xs_net(x));
2712 + }
2713 +
2714 + #ifdef CONFIG_NET_KEY_MIGRATE
2715 +@@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2716 + }
2717 +
2718 + /* broadcast migrate message to sockets */
2719 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
2720 ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
2721 +
2722 + return 0;
2723 +
2724 +diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
2725 +index 5b9c884a452e..dde64c4565d2 100644
2726 +--- a/net/netfilter/nf_nat_core.c
2727 ++++ b/net/netfilter/nf_nat_core.c
2728 +@@ -225,20 +225,21 @@ find_appropriate_src(struct net *net,
2729 + .tuple = tuple,
2730 + .zone = zone
2731 + };
2732 +- struct rhlist_head *hl;
2733 ++ struct rhlist_head *hl, *h;
2734 +
2735 + hl = rhltable_lookup(&nf_nat_bysource_table, &key,
2736 + nf_nat_bysource_params);
2737 +- if (!hl)
2738 +- return 0;
2739 +
2740 +- ct = container_of(hl, typeof(*ct), nat_bysource);
2741 ++ rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
2742 ++ nf_ct_invert_tuplepr(result,
2743 ++ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
2744 ++ result->dst = tuple->dst;
2745 +
2746 +- nf_ct_invert_tuplepr(result,
2747 +- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
2748 +- result->dst = tuple->dst;
2749 ++ if (in_range(l3proto, l4proto, result, range))
2750 ++ return 1;
2751 ++ }
2752 +
2753 +- return in_range(l3proto, l4proto, result, range);
2754 ++ return 0;
2755 + }
2756 +
2757 + /* For [FUTURE] fragmentation handling, we want the least-used
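
The nf_nat hunk above fixes a lookup that gave up after the first entry: an rhltable bucket can hold several conntracks with the same source hash, so the search must walk the whole chain for one whose reply tuple also passes the range check. A userspace model of that change in search strategy, with an ordinary linked list standing in for the rhltable:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct entry {
        int value;
        struct entry *next;
    };

    static bool in_range(const struct entry *e, int lo, int hi)
    {
        return e->value >= lo && e->value <= hi;
    }

    static const struct entry *find_in_bucket(const struct entry *head,
                                              int lo, int hi)
    {
        const struct entry *e;

        for (e = head; e; e = e->next)  /* walk every duplicate */
            if (in_range(e, lo, hi))
                return e;
        return NULL;                    /* nothing in the bucket fits */
    }

    int main(void)
    {
        struct entry c = { 30, NULL }, b = { 20, &c }, a = { 5, &b };
        const struct entry *hit = find_in_bucket(&a, 15, 25);

        printf("%d\n", hit ? hit->value : -1); /* 20: not the head entry */
        return 0;
    }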
2758 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
2759 +index 4e03f64709bc..05d9f42fc309 100644
2760 +--- a/net/openvswitch/actions.c
2761 ++++ b/net/openvswitch/actions.c
2762 +@@ -1240,6 +1240,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
2763 + goto out;
2764 + }
2765 +
2766 ++ OVS_CB(skb)->acts_origlen = acts->orig_len;
2767 + err = do_execute_actions(dp, skb, key,
2768 + acts->actions, acts->actions_len);
2769 +
2770 +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
2771 +index 4d67ea856067..453f806afe6e 100644
2772 +--- a/net/openvswitch/datapath.c
2773 ++++ b/net/openvswitch/datapath.c
2774 +@@ -383,7 +383,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
2775 + }
2776 +
2777 + static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
2778 +- unsigned int hdrlen)
2779 ++ unsigned int hdrlen, int actions_attrlen)
2780 + {
2781 + size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
2782 + + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
2783 +@@ -400,7 +400,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
2784 +
2785 + /* OVS_PACKET_ATTR_ACTIONS */
2786 + if (upcall_info->actions_len)
2787 +- size += nla_total_size(upcall_info->actions_len);
2788 ++ size += nla_total_size(actions_attrlen);
2789 +
2790 + /* OVS_PACKET_ATTR_MRU */
2791 + if (upcall_info->mru)
2792 +@@ -467,7 +467,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
2793 + else
2794 + hlen = skb->len;
2795 +
2796 +- len = upcall_msg_size(upcall_info, hlen - cutlen);
2797 ++ len = upcall_msg_size(upcall_info, hlen - cutlen,
2798 ++ OVS_CB(skb)->acts_origlen);
2799 + user_skb = genlmsg_new(len, GFP_ATOMIC);
2800 + if (!user_skb) {
2801 + err = -ENOMEM;
2802 +diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
2803 +index ab85c1cae255..e19ace428e38 100644
2804 +--- a/net/openvswitch/datapath.h
2805 ++++ b/net/openvswitch/datapath.h
2806 +@@ -100,12 +100,14 @@ struct datapath {
2807 + * @input_vport: The original vport packet came in on. This value is cached
2808 + * when a packet is received by OVS.
2809 + * @mru: The maximum received fragment size; 0 if the packet is not
2810 + * fragmented.
2811 ++ * @acts_origlen: The netlink size of the flow actions applied to this skb.
2812 + * @cutlen: The number of bytes from the packet end to be removed.
2813 + */
2814 + struct ovs_skb_cb {
2815 + struct vport *input_vport;
2816 + u16 mru;
2817 ++ u16 acts_origlen;
2818 + u32 cutlen;
2819 + };
2820 + #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
2821 +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
2822 +index a1aec0a6c789..50030519a89b 100644
2823 +--- a/net/sched/act_ipt.c
2824 ++++ b/net/sched/act_ipt.c
2825 +@@ -41,6 +41,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
2826 + {
2827 + struct xt_tgchk_param par;
2828 + struct xt_target *target;
2829 ++ struct ipt_entry e = {};
2830 + int ret = 0;
2831 +
2832 + target = xt_request_find_target(AF_INET, t->u.user.name,
2833 +@@ -51,6 +52,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
2834 + t->u.kernel.target = target;
2835 + memset(&par, 0, sizeof(par));
2836 + par.table = table;
2837 ++ par.entryinfo = &e;
2838 + par.target = target;
2839 + par.targinfo = t->data;
2840 + par.hook_mask = hook;
2841 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2842 +index ff27a85a71a9..195a3b2d9afc 100644
2843 +--- a/net/sched/sch_api.c
2844 ++++ b/net/sched/sch_api.c
2845 +@@ -277,9 +277,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
2846 + void qdisc_hash_add(struct Qdisc *q)
2847 + {
2848 + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
2849 +- struct Qdisc *root = qdisc_dev(q)->qdisc;
2850 +-
2851 +- WARN_ON_ONCE(root == &noop_qdisc);
2852 + ASSERT_RTNL();
2853 + hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
2854 + }
2855 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2856 +index bc5e99584e41..ea8a56f76b32 100644
2857 +--- a/net/sched/sch_sfq.c
2858 ++++ b/net/sched/sch_sfq.c
2859 +@@ -434,6 +434,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
2860 + qdisc_drop(head, sch, to_free);
2861 +
2862 + slot_queue_add(slot, skb);
2863 ++ qdisc_tree_reduce_backlog(sch, 0, delta);
2864 + return NET_XMIT_CN;
2865 + }
2866 +
2867 +@@ -465,8 +466,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
2868 + /* Return Congestion Notification only if we dropped a packet
2869 + * from this flow.
2870 + */
2871 +- if (qlen != slot->qlen)
2872 ++ if (qlen != slot->qlen) {
2873 ++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
2874 + return NET_XMIT_CN;
2875 ++ }
2876 +
2877 + /* As we dropped a packet, better let upper stack know this */
2878 + qdisc_tree_reduce_backlog(sch, 1, dropped);
2879 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2880 +index 0c090600f377..ca4a63e3eadd 100644
2881 +--- a/net/sctp/ipv6.c
2882 ++++ b/net/sctp/ipv6.c
2883 +@@ -512,7 +512,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
2884 + {
2885 + addr->sa.sa_family = AF_INET6;
2886 + addr->v6.sin6_port = port;
2887 ++ addr->v6.sin6_flowinfo = 0;
2888 + addr->v6.sin6_addr = *saddr;
2889 ++ addr->v6.sin6_scope_id = 0;
2890 + }
2891 +
2892 + /* Compare addresses exactly.
2893 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
2894 +index a4bc98265d88..266a30c8b88b 100644
2895 +--- a/net/sunrpc/svcsock.c
2896 ++++ b/net/sunrpc/svcsock.c
2897 +@@ -408,6 +408,9 @@ static void svc_data_ready(struct sock *sk)
2898 + dprintk("svc: socket %p(inet %p), busy=%d\n",
2899 + svsk, sk,
2900 + test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
2901 ++
2902 ++ /* Refer to svc_setup_socket() for details. */
2903 ++ rmb();
2904 + svsk->sk_odata(sk);
2905 + if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
2906 + svc_xprt_enqueue(&svsk->sk_xprt);
2907 +@@ -424,6 +427,9 @@ static void svc_write_space(struct sock *sk)
2908 + if (svsk) {
2909 + dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
2910 + svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
2911 ++
2912 ++ /* Refer to svc_setup_socket() for details. */
2913 ++ rmb();
2914 + svsk->sk_owspace(sk);
2915 + svc_xprt_enqueue(&svsk->sk_xprt);
2916 + }
2917 +@@ -748,8 +754,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
2918 + dprintk("svc: socket %p TCP (listen) state change %d\n",
2919 + sk, sk->sk_state);
2920 +
2921 +- if (svsk)
2922 ++ if (svsk) {
2923 ++ /* Refer to svc_setup_socket() for details. */
2924 ++ rmb();
2925 + svsk->sk_odata(sk);
2926 ++ }
2927 ++
2928 + /*
2929 + * This callback may be called twice when a new connection
2930 + * is established as a child socket inherits everything
2931 +@@ -782,6 +792,8 @@ static void svc_tcp_state_change(struct sock *sk)
2932 + if (!svsk)
2933 + printk("svc: socket %p: no user data\n", sk);
2934 + else {
2935 ++ /* Refer to svc_setup_socket() for details. */
2936 ++ rmb();
2937 + svsk->sk_ostate(sk);
2938 + if (sk->sk_state != TCP_ESTABLISHED) {
2939 + set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
2940 +@@ -1368,12 +1380,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
2941 + return ERR_PTR(err);
2942 + }
2943 +
2944 +- inet->sk_user_data = svsk;
2945 + svsk->sk_sock = sock;
2946 + svsk->sk_sk = inet;
2947 + svsk->sk_ostate = inet->sk_state_change;
2948 + svsk->sk_odata = inet->sk_data_ready;
2949 + svsk->sk_owspace = inet->sk_write_space;
2950 ++ /*
2951 ++ * This barrier is necessary in order to prevent a race condition
2952 ++ * with svc_data_ready(), svc_listen_data_ready() and others
2953 ++ * when calling callbacks above.
2954 ++ */
2955 ++ wmb();
2956 ++ inet->sk_user_data = svsk;
2957 +
2958 + /* Initialize the socket */
2959 + if (sock->type == SOCK_DGRAM)
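
The svcsock hunks above are a classic publish pattern: initialize every field of the object first, issue a write barrier, and only then store the pointer that callbacks dereference; each reader pairs that with a read barrier before touching the fields. A userspace model using C11 release/acquire semantics, which express the same pairing the patch builds from wmb()/rmb() on 4.9 (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct svc_sock {
        void (*saved_data_ready)(void);
    };

    static _Atomic(struct svc_sock *) sk_user_data;

    static void orig_data_ready(void) { puts("original callback"); }

    static void setup(struct svc_sock *svsk)
    {
        svsk->saved_data_ready = orig_data_ready;    /* init fields... */
        atomic_store_explicit(&sk_user_data, svsk,
                              memory_order_release); /* ...then publish */
    }

    static void data_ready(void)
    {
        struct svc_sock *svsk =
            atomic_load_explicit(&sk_user_data, memory_order_acquire);

        if (svsk)                   /* fields are guaranteed initialized */
            svsk->saved_data_ready();
    }

    int main(void)
    {
        static struct svc_sock svsk;

        setup(&svsk);
        data_ready();
        return 0;
    }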
2960 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
2961 +index 1fd464764765..aedc476fac02 100644
2962 +--- a/net/tipc/netlink_compat.c
2963 ++++ b/net/tipc/netlink_compat.c
2964 +@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
2965 + arg = nlmsg_new(0, GFP_KERNEL);
2966 + if (!arg) {
2967 + kfree_skb(msg->rep);
2968 ++ msg->rep = NULL;
2969 + return -ENOMEM;
2970 + }
2971 +
2972 + err = __tipc_nl_compat_dumpit(cmd, msg, arg);
2973 +- if (err)
2974 ++ if (err) {
2975 + kfree_skb(msg->rep);
2976 +-
2977 ++ msg->rep = NULL;
2978 ++ }
2979 + kfree_skb(arg);
2980 +
2981 + return err;
2982 +diff --git a/sound/core/control.c b/sound/core/control.c
2983 +index fb096cb20a80..995cde48c1be 100644
2984 +--- a/sound/core/control.c
2985 ++++ b/sound/core/control.c
2986 +@@ -1156,7 +1156,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
2987 + mutex_lock(&ue->card->user_ctl_lock);
2988 + change = ue->tlv_data_size != size;
2989 + if (!change)
2990 +- change = memcmp(ue->tlv_data, new_data, size);
2991 ++ change = memcmp(ue->tlv_data, new_data, size) != 0;
2992 + kfree(ue->tlv_data);
2993 + ue->tlv_data = new_data;
2994 + ue->tlv_data_size = size;
2995 +diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
2996 +index f0e4d502d604..066b5df666f4 100644
2997 +--- a/sound/firewire/iso-resources.c
2998 ++++ b/sound/firewire/iso-resources.c
2999 +@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
3000 + */
3001 + void fw_iso_resources_free(struct fw_iso_resources *r)
3002 + {
3003 +- struct fw_card *card = fw_parent_device(r->unit)->card;
3004 ++ struct fw_card *card;
3005 + int bandwidth, channel;
3006 +
3007 ++ /* Not initialized. */
3008 ++ if (r->unit == NULL)
3009 ++ return;
3010 ++ card = fw_parent_device(r->unit)->card;
3011 ++
3012 + mutex_lock(&r->mutex);
3013 +
3014 + if (r->allocated) {
3015 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3016 +index c15c51bea26d..f2e4e99ce651 100644
3017 +--- a/sound/pci/hda/patch_conexant.c
3018 ++++ b/sound/pci/hda/patch_conexant.c
3019 +@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3020 + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
3021 + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
3022 + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
3023 ++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
3024 + SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
3025 + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
3026 + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
3027 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3028 +index 95c2749ac8a3..286efc3a6116 100644
3029 +--- a/sound/usb/quirks.c
3030 ++++ b/sound/usb/quirks.c
3031 +@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
3032 + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3033 + mdelay(20);
3034 +
3035 +- /* Zoom R16/24 needs a tiny delay here, otherwise requests like
3036 +- * get/set frequency return as failed despite actually succeeding.
3037 ++ /* Zoom R16/24, Logitech H650e and Jabra 550a need a tiny delay here,
3038 ++ * otherwise requests like get/set frequency return as failed despite
3039 ++ * actually succeeding.
3040 + */
3041 +- if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
3042 ++ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
3043 ++ chip->usb_id == USB_ID(0x046d, 0x0a46) ||
3044 ++ chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
3045 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3046 + mdelay(1);
3047 + }
3048 +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
3049 +index 6c50d9f8e210..6a6f44dd594b 100644
3050 +--- a/tools/perf/util/probe-event.c
3051 ++++ b/tools/perf/util/probe-event.c
3052 +@@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module)
3053 +
3054 + /* A file path -- this is an offline module */
3055 + if (module && strchr(module, '/'))
3056 +- return machine__findnew_module_map(host_machine, 0, module);
3057 ++ return dso__new_map(module);
3058 +
3059 + if (!module)
3060 + module = "kernel";
3061 +@@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
3062 + if (strncmp(pos->dso->short_name + 1, module,
3063 + pos->dso->short_name_len - 2) == 0 &&
3064 + module[pos->dso->short_name_len - 2] == '\0') {
3065 ++ map__get(pos);
3066 + return pos;
3067 + }
3068 + }
3069 +@@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user)
3070 + return kernel_get_module_map(target);
3071 + }
3072 +
3073 +-static void put_target_map(struct map *map, bool user)
3074 +-{
3075 +- if (map && user) {
3076 +- /* Only the user map needs to be released */
3077 +- map__put(map);
3078 +- }
3079 +-}
3080 +-
3081 +-
3082 + static int convert_exec_to_group(const char *exec, char **result)
3083 + {
3084 + char *ptr1, *ptr2, *exec_copy;
3085 +@@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
3086 + }
3087 +
3088 + out:
3089 +- put_target_map(map, uprobes);
3090 ++ map__put(map);
3091 + return ret;
3092 +
3093 + }
3094 +@@ -2944,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
3095 + }
3096 +
3097 + out:
3098 +- put_target_map(map, pev->uprobes);
3099 ++ map__put(map);
3100 + free(syms);
3101 + return ret;
3102 +
3103 +@@ -3437,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
3104 + return ret;
3105 +
3106 + /* Get a symbol map */
3107 +- if (user)
3108 +- map = dso__new_map(target);
3109 +- else
3110 +- map = kernel_get_module_map(target);
3111 ++ map = get_target_map(target, user);
3112 + if (!map) {
3113 + pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
3114 + return -EINVAL;
3115 +@@ -3472,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
3116 + }
3117 +
3118 + end:
3119 +- if (user) {
3120 +- map__put(map);
3121 +- }
3122 ++ map__put(map);
3123 + exit_probe_symbol_maps();
3124 +
3125 + return ret;
3126 +diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
3127 +index a676d3eefefb..b3c48fc6ea4b 100755
3128 +--- a/tools/testing/selftests/ntb/ntb_test.sh
3129 ++++ b/tools/testing/selftests/ntb/ntb_test.sh
3130 +@@ -305,7 +305,7 @@ function perf_test()
3131 + echo "Running remote perf test $WITH DMA"
3132 + write_file "" $REMOTE_PERF/run
3133 + echo -n " "
3134 +- read_file $LOCAL_PERF/run
3135 ++ read_file $REMOTE_PERF/run
3136 + echo " Passed"
3137 +
3138 + _modprobe -r ntb_perf
3139 +@@ -326,6 +326,10 @@ function ntb_tool_tests()
3140 + link_test $LOCAL_TOOL $REMOTE_TOOL
3141 + link_test $REMOTE_TOOL $LOCAL_TOOL
3142 +
3143 ++ # Ensure the link is up on both sides before continuing
3144 ++ write_file Y $LOCAL_TOOL/link_event
3145 ++ write_file Y $REMOTE_TOOL/link_event
3146 ++
3147 + for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
3148 + PT=$(basename $PEER_TRANS)
3149 + write_file $MW_SIZE $LOCAL_TOOL/$PT