From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sun, 12 Jan 2020 14:52:45
Message-Id: 1578840744.e98abf3583bf21dcbae41dcb808e371be5e71565.mpagano@gentoo
1 commit: e98abf3583bf21dcbae41dcb808e371be5e71565
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Jan 12 14:52:24 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Jan 12 14:52:24 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e98abf35
7
8 Linux patch 4.9.209
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1208_linux-4.9.209.patch | 2684 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2688 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index a26dbf7..c11a663 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -875,6 +875,10 @@ Patch: 1207_linux-4.9.208.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.208
23
24 +Patch: 1208_linux-4.9.209.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.209
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1208_linux-4.9.209.patch b/1208_linux-4.9.209.patch
33 new file mode 100644
34 index 0000000..ccea173
35 --- /dev/null
36 +++ b/1208_linux-4.9.209.patch
37 @@ -0,0 +1,2684 @@
38 +diff --git a/Makefile b/Makefile
39 +index 1d1d9f68e962..ed9a08ab3772 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 208
46 ++SUBLEVEL = 209
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
51 +index 957840cc7b78..b55c094893c6 100644
52 +--- a/arch/arm/boot/dts/am437x-gp-evm.dts
53 ++++ b/arch/arm/boot/dts/am437x-gp-evm.dts
54 +@@ -79,7 +79,7 @@
55 + };
56 +
57 + lcd0: display {
58 +- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
59 ++ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
60 + label = "lcd";
61 +
62 + panel-timing {
63 +diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
64 +index 9d35c3f07cad..21918807c9f6 100644
65 +--- a/arch/arm/boot/dts/am43x-epos-evm.dts
66 ++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
67 +@@ -41,7 +41,7 @@
68 + };
69 +
70 + lcd0: display {
71 +- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
72 ++ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
73 + label = "lcd";
74 +
75 + panel-timing {
76 +diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
77 +index fe488523694c..635b0d549487 100644
78 +--- a/arch/arm/mach-vexpress/spc.c
79 ++++ b/arch/arm/mach-vexpress/spc.c
80 +@@ -555,8 +555,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
81 +
82 + static int __init ve_spc_clk_init(void)
83 + {
84 +- int cpu;
85 ++ int cpu, cluster;
86 + struct clk *clk;
87 ++ bool init_opp_table[MAX_CLUSTERS] = { false };
88 +
89 + if (!info)
90 + return 0; /* Continue only if SPC is initialised */
91 +@@ -582,8 +583,17 @@ static int __init ve_spc_clk_init(void)
92 + continue;
93 + }
94 +
95 ++ cluster = topology_physical_package_id(cpu_dev->id);
96 ++ if (init_opp_table[cluster])
97 ++ continue;
98 ++
99 + if (ve_init_opp_table(cpu_dev))
100 + pr_warn("failed to initialise cpu%d opp table\n", cpu);
101 ++ else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
102 ++ topology_core_cpumask(cpu_dev->id)))
103 ++ pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
104 ++ else
105 ++ init_opp_table[cluster] = true;
106 + }
107 +
108 + platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
109 +diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
110 +index f705d96a76f2..5bc3de78306a 100644
111 +--- a/arch/arm64/include/asm/pgtable-prot.h
112 ++++ b/arch/arm64/include/asm/pgtable-prot.h
113 +@@ -77,13 +77,12 @@
114 + #define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
115 + #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
116 + #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
117 +-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
118 +
119 + #define __P000 PAGE_NONE
120 + #define __P001 PAGE_READONLY
121 + #define __P010 PAGE_COPY
122 + #define __P011 PAGE_COPY
123 +-#define __P100 PAGE_EXECONLY
124 ++#define __P100 PAGE_READONLY_EXEC
125 + #define __P101 PAGE_READONLY_EXEC
126 + #define __P110 PAGE_COPY_EXEC
127 + #define __P111 PAGE_COPY_EXEC
128 +@@ -92,7 +91,7 @@
129 + #define __S001 PAGE_READONLY
130 + #define __S010 PAGE_SHARED
131 + #define __S011 PAGE_SHARED
132 +-#define __S100 PAGE_EXECONLY
133 ++#define __S100 PAGE_READONLY_EXEC
134 + #define __S101 PAGE_READONLY_EXEC
135 + #define __S110 PAGE_SHARED_EXEC
136 + #define __S111 PAGE_SHARED_EXEC
137 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
138 +index edb2c359480d..56ba1389a95a 100644
139 +--- a/arch/arm64/include/asm/pgtable.h
140 ++++ b/arch/arm64/include/asm/pgtable.h
141 +@@ -83,12 +83,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
142 + #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
143 +
144 + #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
145 +-/*
146 +- * Execute-only user mappings do not have the PTE_USER bit set. All valid
147 +- * kernel mappings have the PTE_UXN bit set.
148 +- */
149 + #define pte_valid_not_user(pte) \
150 +- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
151 ++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
152 + #define pte_valid_young(pte) \
153 + ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
154 + #define pte_valid_user(pte) \
155 +@@ -104,8 +100,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
156 +
157 + /*
158 + * p??_access_permitted() is true for valid user mappings (subject to the
159 +- * write permission check) other than user execute-only which do not have the
160 +- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
161 ++ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
162 ++ * set.
163 + */
164 + #define pte_access_permitted(pte, write) \
165 + (pte_valid_user(pte) && (!(write) || pte_write(pte)))
166 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
167 +index 575c11a6f9b6..f3d3f2e97add 100644
168 +--- a/arch/arm64/mm/fault.c
169 ++++ b/arch/arm64/mm/fault.c
170 +@@ -319,7 +319,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
171 + struct task_struct *tsk;
172 + struct mm_struct *mm;
173 + int fault, sig, code;
174 +- unsigned long vm_flags = VM_READ | VM_WRITE;
175 ++ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
176 + unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
177 +
178 + if (notify_page_fault(regs, esr))
179 +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
180 +index e309d8fcb516..da1cb0499d6c 100644
181 +--- a/arch/mips/include/asm/thread_info.h
182 ++++ b/arch/mips/include/asm/thread_info.h
183 +@@ -52,8 +52,26 @@ struct thread_info {
184 + #define init_thread_info (init_thread_union.thread_info)
185 + #define init_stack (init_thread_union.stack)
186 +
187 +-/* How to get the thread information struct from C. */
188 ++/*
189 ++ * A pointer to the struct thread_info for the currently executing thread is
190 ++ * held in register $28/$gp.
191 ++ *
192 ++ * We declare __current_thread_info as a global register variable rather than a
193 ++ * local register variable within current_thread_info() because clang doesn't
194 ++ * support explicit local register variables.
195 ++ *
196 ++ * When building the VDSO we take care not to declare the global register
197 ++ * variable because this causes GCC to not preserve the value of $28/$gp in
198 ++ * functions that change its value (which is common in the PIC VDSO when
199 ++ * accessing the GOT). Since the VDSO shouldn't be accessing
200 ++ * __current_thread_info anyway we declare it extern in order to cause a link
201 ++ * failure if it's referenced.
202 ++ */
203 ++#ifdef __VDSO__
204 ++extern struct thread_info *__current_thread_info;
205 ++#else
206 + register struct thread_info *__current_thread_info __asm__("$28");
207 ++#endif
208 +
209 + static inline struct thread_info *current_thread_info(void)
210 + {
211 +diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
212 +index 7ada30900807..90253bdc2ee5 100644
213 +--- a/arch/parisc/include/asm/cmpxchg.h
214 ++++ b/arch/parisc/include/asm/cmpxchg.h
215 +@@ -43,8 +43,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
216 + ** if (((unsigned long)p & 0xf) == 0)
217 + ** return __ldcw(p);
218 + */
219 +-#define xchg(ptr, x) \
220 +- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
221 ++#define xchg(ptr, x) \
222 ++({ \
223 ++ __typeof__(*(ptr)) __ret; \
224 ++ __typeof__(*(ptr)) _x_ = (x); \
225 ++ __ret = (__typeof__(*(ptr))) \
226 ++ __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
227 ++ __ret; \
228 ++})
229 +
230 + /* bug catcher for when unsupported size is used - won't link */
231 + extern void __cmpxchg_called_with_bad_pointer(void);
232 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
233 +index 1e93dbc88e80..34f70d36b16d 100644
234 +--- a/arch/powerpc/mm/mem.c
235 ++++ b/arch/powerpc/mm/mem.c
236 +@@ -345,6 +345,14 @@ void __init mem_init(void)
237 + BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
238 +
239 + #ifdef CONFIG_SWIOTLB
240 ++ /*
241 ++ * Some platforms (e.g. 85xx) limit DMA-able memory way below
242 ++ * 4G. We force memblock to bottom-up mode to ensure that the
243 ++ * memory allocated in swiotlb_init() is DMA-able.
244 ++ * As it's the last memblock allocation, no need to reset it
245 ++ * back to to-down.
246 ++ */
247 ++ memblock_set_bottom_up(true);
248 + swiotlb_init(0);
249 + #endif
250 +
251 +diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
252 +index 74da18de853a..73ec15cd2708 100644
253 +--- a/arch/powerpc/platforms/pseries/hvconsole.c
254 ++++ b/arch/powerpc/platforms/pseries/hvconsole.c
255 +@@ -62,7 +62,7 @@ EXPORT_SYMBOL(hvc_get_chars);
256 + * @vtermno: The vtermno or unit_address of the adapter from which the data
257 + * originated.
258 + * @buf: The character buffer that contains the character data to send to
259 +- * firmware.
260 ++ * firmware. Must be at least 16 bytes, even if count is less than 16.
261 + * @count: Send this number of characters.
262 + */
263 + int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
264 +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
265 +index 02476d2333df..c62eb09b2ba7 100644
266 +--- a/arch/s390/kernel/perf_cpum_sf.c
267 ++++ b/arch/s390/kernel/perf_cpum_sf.c
268 +@@ -1295,18 +1295,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
269 + */
270 + if (flush_all && done)
271 + break;
272 +-
273 +- /* If an event overflow happened, discard samples by
274 +- * processing any remaining sample-data-blocks.
275 +- */
276 +- if (event_overflow)
277 +- flush_all = 1;
278 + }
279 +
280 + /* Account sample overflows in the event hardware structure */
281 + if (sampl_overflow)
282 + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
283 + sampl_overflow, 1 + num_sdb);
284 ++
285 ++ /* Perf_event_overflow() and perf_event_account_interrupt() limit
286 ++ * the interrupt rate to an upper limit. Roughly 1000 samples per
287 ++ * task tick.
288 ++ * Hitting this limit results in a large number
289 ++ * of throttled REF_REPORT_THROTTLE entries and the samples
290 ++ * are dropped.
291 ++ * Slightly increase the interval to avoid hitting this limit.
292 ++ */
293 ++ if (event_overflow) {
294 ++ SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
295 ++ debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
296 ++ __func__,
297 ++ DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
298 ++ }
299 ++
300 + if (sampl_overflow || event_overflow)
301 + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
302 + "overflow stats: sample=%llu event=%llu\n",
303 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
304 +index d52a94e9f57f..cba8e56cd63d 100644
305 +--- a/arch/s390/kernel/smp.c
306 ++++ b/arch/s390/kernel/smp.c
307 +@@ -691,39 +691,67 @@ static struct sclp_core_info *smp_get_core_info(void)
308 +
309 + static int smp_add_present_cpu(int cpu);
310 +
311 +-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
312 ++static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
313 ++ bool configured, bool early)
314 + {
315 + struct pcpu *pcpu;
316 +- cpumask_t avail;
317 +- int cpu, nr, i, j;
318 ++ int cpu, nr, i;
319 + u16 address;
320 +
321 + nr = 0;
322 +- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
323 +- cpu = cpumask_first(&avail);
324 +- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
325 +- if (sclp.has_core_type && info->core[i].type != boot_core_type)
326 ++ if (sclp.has_core_type && core->type != boot_core_type)
327 ++ return nr;
328 ++ cpu = cpumask_first(avail);
329 ++ address = core->core_id << smp_cpu_mt_shift;
330 ++ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
331 ++ if (pcpu_find_address(cpu_present_mask, address + i))
332 + continue;
333 +- address = info->core[i].core_id << smp_cpu_mt_shift;
334 +- for (j = 0; j <= smp_cpu_mtid; j++) {
335 +- if (pcpu_find_address(cpu_present_mask, address + j))
336 +- continue;
337 +- pcpu = pcpu_devices + cpu;
338 +- pcpu->address = address + j;
339 +- pcpu->state =
340 +- (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
341 +- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
342 +- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
343 +- set_cpu_present(cpu, true);
344 +- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
345 +- set_cpu_present(cpu, false);
346 +- else
347 +- nr++;
348 +- cpu = cpumask_next(cpu, &avail);
349 +- if (cpu >= nr_cpu_ids)
350 ++ pcpu = pcpu_devices + cpu;
351 ++ pcpu->address = address + i;
352 ++ if (configured)
353 ++ pcpu->state = CPU_STATE_CONFIGURED;
354 ++ else
355 ++ pcpu->state = CPU_STATE_STANDBY;
356 ++ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
357 ++ set_cpu_present(cpu, true);
358 ++ if (!early && smp_add_present_cpu(cpu) != 0)
359 ++ set_cpu_present(cpu, false);
360 ++ else
361 ++ nr++;
362 ++ cpumask_clear_cpu(cpu, avail);
363 ++ cpu = cpumask_next(cpu, avail);
364 ++ }
365 ++ return nr;
366 ++}
367 ++
368 ++static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
369 ++{
370 ++ struct sclp_core_entry *core;
371 ++ cpumask_t avail;
372 ++ bool configured;
373 ++ u16 core_id;
374 ++ int nr, i;
375 ++
376 ++ nr = 0;
377 ++ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
378 ++ /*
379 ++ * Add IPL core first (which got logical CPU number 0) to make sure
380 ++ * that all SMT threads get subsequent logical CPU numbers.
381 ++ */
382 ++ if (early) {
383 ++ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
384 ++ for (i = 0; i < info->configured; i++) {
385 ++ core = &info->core[i];
386 ++ if (core->core_id == core_id) {
387 ++ nr += smp_add_core(core, &avail, true, early);
388 + break;
389 ++ }
390 + }
391 + }
392 ++ for (i = 0; i < info->combined; i++) {
393 ++ configured = i < info->configured;
394 ++ nr += smp_add_core(&info->core[i], &avail, configured, early);
395 ++ }
396 + return nr;
397 + }
398 +
399 +@@ -771,7 +799,7 @@ static void __init smp_detect_cpus(void)
400 +
401 + /* Add CPUs present at boot */
402 + get_online_cpus();
403 +- __smp_rescan_cpus(info, 0);
404 ++ __smp_rescan_cpus(info, true);
405 + put_online_cpus();
406 + kfree(info);
407 + }
408 +@@ -1127,7 +1155,7 @@ int __ref smp_rescan_cpus(void)
409 + return -ENOMEM;
410 + get_online_cpus();
411 + mutex_lock(&smp_cpu_state_mutex);
412 +- nr = __smp_rescan_cpus(info, 1);
413 ++ nr = __smp_rescan_cpus(info, false);
414 + mutex_unlock(&smp_cpu_state_mutex);
415 + put_online_cpus();
416 + kfree(info);
417 +diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
418 +index 1a70e6c0f259..94709ab41ed8 100644
419 +--- a/arch/tile/lib/atomic_asm_32.S
420 ++++ b/arch/tile/lib/atomic_asm_32.S
421 +@@ -24,8 +24,7 @@
422 + * has an opportunity to return -EFAULT to the user if needed.
423 + * The 64-bit routines just return a "long long" with the value,
424 + * since they are only used from kernel space and don't expect to fault.
425 +- * Support for 16-bit ops is included in the framework but we don't provide
426 +- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
427 ++ * Support for 16-bit ops is included in the framework but we don't provide any.
428 + *
429 + * Note that the caller is advised to issue a suitable L1 or L2
430 + * prefetch on the address being manipulated to avoid extra stalls.
431 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
432 +index 1e9f610d36a4..c26cca506f64 100644
433 +--- a/arch/x86/events/core.c
434 ++++ b/arch/x86/events/core.c
435 +@@ -374,7 +374,7 @@ int x86_add_exclusive(unsigned int what)
436 + * LBR and BTS are still mutually exclusive.
437 + */
438 + if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
439 +- return 0;
440 ++ goto out;
441 +
442 + if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
443 + mutex_lock(&pmc_reserve_mutex);
444 +@@ -386,6 +386,7 @@ int x86_add_exclusive(unsigned int what)
445 + mutex_unlock(&pmc_reserve_mutex);
446 + }
447 +
448 ++out:
449 + atomic_inc(&active_events);
450 + return 0;
451 +
452 +@@ -396,11 +397,15 @@ fail_unlock:
453 +
454 + void x86_del_exclusive(unsigned int what)
455 + {
456 ++ atomic_dec(&active_events);
457 ++
458 ++ /*
459 ++ * See the comment in x86_add_exclusive().
460 ++ */
461 + if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
462 + return;
463 +
464 + atomic_dec(&x86_pmu.lbr_exclusive[what]);
465 +- atomic_dec(&active_events);
466 + }
467 +
468 + int x86_setup_perfctr(struct perf_event *event)
469 +diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
470 +index 76a35c1213d2..305c6eed9141 100644
471 +--- a/arch/x86/include/asm/atomic.h
472 ++++ b/arch/x86/include/asm/atomic.h
473 +@@ -249,19 +249,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
474 + return c;
475 + }
476 +
477 +-/**
478 +- * atomic_inc_short - increment of a short integer
479 +- * @v: pointer to type int
480 +- *
481 +- * Atomically adds 1 to @v
482 +- * Returns the new value of @u
483 +- */
484 +-static __always_inline short int atomic_inc_short(short int *v)
485 +-{
486 +- asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
487 +- return *v;
488 +-}
489 +-
490 + #ifdef CONFIG_X86_32
491 + # include <asm/atomic64_32.h>
492 + #else
493 +diff --git a/block/blk-map.c b/block/blk-map.c
494 +index a8b4f526d8bb..52edbe6b9380 100644
495 +--- a/block/blk-map.c
496 ++++ b/block/blk-map.c
497 +@@ -142,7 +142,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
498 + return 0;
499 +
500 + unmap_rq:
501 +- __blk_rq_unmap_user(bio);
502 ++ blk_rq_unmap_user(bio);
503 + fail:
504 + rq->bio = NULL;
505 + return ret;
506 +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
507 +index 3c9fdd6983aa..b6e5447d563e 100644
508 +--- a/block/compat_ioctl.c
509 ++++ b/block/compat_ioctl.c
510 +@@ -5,6 +5,7 @@
511 + #include <linux/compat.h>
512 + #include <linux/elevator.h>
513 + #include <linux/hdreg.h>
514 ++#include <linux/pr.h>
515 + #include <linux/slab.h>
516 + #include <linux/syscalls.h>
517 + #include <linux/types.h>
518 +@@ -406,6 +407,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
519 + case BLKTRACETEARDOWN: /* compatible */
520 + ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
521 + return ret;
522 ++ case IOC_PR_REGISTER:
523 ++ case IOC_PR_RESERVE:
524 ++ case IOC_PR_RELEASE:
525 ++ case IOC_PR_PREEMPT:
526 ++ case IOC_PR_PREEMPT_ABORT:
527 ++ case IOC_PR_CLEAR:
528 ++ return blkdev_ioctl(bdev, mode, cmd,
529 ++ (unsigned long)compat_ptr(arg));
530 + default:
531 + if (disk->fops->compat_ioctl)
532 + ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
533 +diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
534 +index 6f8a7341fa08..f50a76ad63e4 100644
535 +--- a/drivers/ata/ahci_brcm.c
536 ++++ b/drivers/ata/ahci_brcm.c
537 +@@ -25,6 +25,7 @@
538 + #include <linux/module.h>
539 + #include <linux/of.h>
540 + #include <linux/platform_device.h>
541 ++#include <linux/reset.h>
542 + #include <linux/string.h>
543 +
544 + #include "ahci.h"
545 +@@ -88,6 +89,7 @@ struct brcm_ahci_priv {
546 + u32 port_mask;
547 + u32 quirks;
548 + enum brcm_ahci_version version;
549 ++ struct reset_control *rcdev;
550 + };
551 +
552 + static const struct ata_port_info ahci_brcm_port_info = {
553 +@@ -226,19 +228,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
554 + brcm_sata_phy_disable(priv, i);
555 + }
556 +
557 +-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
558 ++static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
559 + struct brcm_ahci_priv *priv)
560 + {
561 +- void __iomem *ahci;
562 +- struct resource *res;
563 + u32 impl;
564 +
565 +- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
566 +- ahci = devm_ioremap_resource(&pdev->dev, res);
567 +- if (IS_ERR(ahci))
568 +- return 0;
569 +-
570 +- impl = readl(ahci + HOST_PORTS_IMPL);
571 ++ impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
572 +
573 + if (fls(impl) > SATA_TOP_MAX_PHYS)
574 + dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
575 +@@ -246,9 +241,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
576 + else if (!impl)
577 + dev_info(priv->dev, "no ports found\n");
578 +
579 +- devm_iounmap(&pdev->dev, ahci);
580 +- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
581 +-
582 + return impl;
583 + }
584 +
585 +@@ -275,11 +267,10 @@ static int brcm_ahci_suspend(struct device *dev)
586 + struct ata_host *host = dev_get_drvdata(dev);
587 + struct ahci_host_priv *hpriv = host->private_data;
588 + struct brcm_ahci_priv *priv = hpriv->plat_data;
589 +- int ret;
590 +
591 +- ret = ahci_platform_suspend(dev);
592 + brcm_sata_phys_disable(priv);
593 +- return ret;
594 ++
595 ++ return ahci_platform_suspend(dev);
596 + }
597 +
598 + static int brcm_ahci_resume(struct device *dev)
599 +@@ -287,11 +278,44 @@ static int brcm_ahci_resume(struct device *dev)
600 + struct ata_host *host = dev_get_drvdata(dev);
601 + struct ahci_host_priv *hpriv = host->private_data;
602 + struct brcm_ahci_priv *priv = hpriv->plat_data;
603 ++ int ret;
604 ++
605 ++ /* Make sure clocks are turned on before re-configuration */
606 ++ ret = ahci_platform_enable_clks(hpriv);
607 ++ if (ret)
608 ++ return ret;
609 +
610 + brcm_sata_init(priv);
611 + brcm_sata_phys_enable(priv);
612 + brcm_sata_alpm_init(hpriv);
613 +- return ahci_platform_resume(dev);
614 ++
615 ++ /* Since we had to enable clocks earlier on, we cannot use
616 ++ * ahci_platform_resume() as-is since a second call to
617 ++ * ahci_platform_enable_resources() would bump up the resources
618 ++ * (regulators, clocks, PHYs) count artificially so we copy the part
619 ++ * after ahci_platform_enable_resources().
620 ++ */
621 ++ ret = ahci_platform_enable_phys(hpriv);
622 ++ if (ret)
623 ++ goto out_disable_phys;
624 ++
625 ++ ret = ahci_platform_resume_host(dev);
626 ++ if (ret)
627 ++ goto out_disable_platform_phys;
628 ++
629 ++ /* We resumed so update PM runtime state */
630 ++ pm_runtime_disable(dev);
631 ++ pm_runtime_set_active(dev);
632 ++ pm_runtime_enable(dev);
633 ++
634 ++ return 0;
635 ++
636 ++out_disable_platform_phys:
637 ++ ahci_platform_disable_phys(hpriv);
638 ++out_disable_phys:
639 ++ brcm_sata_phys_disable(priv);
640 ++ ahci_platform_disable_clks(hpriv);
641 ++ return ret;
642 + }
643 + #endif
644 +
645 +@@ -332,43 +356,73 @@ static int brcm_ahci_probe(struct platform_device *pdev)
646 + if (IS_ERR(priv->top_ctrl))
647 + return PTR_ERR(priv->top_ctrl);
648 +
649 ++ /* Reset is optional depending on platform */
650 ++ priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
651 ++ if (!IS_ERR_OR_NULL(priv->rcdev))
652 ++ reset_control_deassert(priv->rcdev);
653 ++
654 + if ((priv->version == BRCM_SATA_BCM7425) ||
655 + (priv->version == BRCM_SATA_NSP)) {
656 + priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
657 + priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
658 + }
659 +
660 ++ hpriv = ahci_platform_get_resources(pdev);
661 ++ if (IS_ERR(hpriv)) {
662 ++ ret = PTR_ERR(hpriv);
663 ++ goto out_reset;
664 ++ }
665 ++
666 ++ ret = ahci_platform_enable_clks(hpriv);
667 ++ if (ret)
668 ++ goto out_reset;
669 ++
670 ++ /* Must be first so as to configure endianness including that
671 ++ * of the standard AHCI register space.
672 ++ */
673 + brcm_sata_init(priv);
674 +
675 +- priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
676 +- if (!priv->port_mask)
677 +- return -ENODEV;
678 ++ /* Initializes priv->port_mask which is used below */
679 ++ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
680 ++ if (!priv->port_mask) {
681 ++ ret = -ENODEV;
682 ++ goto out_disable_clks;
683 ++ }
684 +
685 ++ /* Must be done before ahci_platform_enable_phys() */
686 + brcm_sata_phys_enable(priv);
687 +
688 +- hpriv = ahci_platform_get_resources(pdev);
689 +- if (IS_ERR(hpriv))
690 +- return PTR_ERR(hpriv);
691 + hpriv->plat_data = priv;
692 + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
693 +
694 + brcm_sata_alpm_init(hpriv);
695 +
696 +- ret = ahci_platform_enable_resources(hpriv);
697 +- if (ret)
698 +- return ret;
699 +-
700 + if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
701 + hpriv->flags |= AHCI_HFLAG_NO_NCQ;
702 +
703 ++ ret = ahci_platform_enable_phys(hpriv);
704 ++ if (ret)
705 ++ goto out_disable_phys;
706 ++
707 + ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
708 + &ahci_platform_sht);
709 + if (ret)
710 +- return ret;
711 ++ goto out_disable_platform_phys;
712 +
713 + dev_info(dev, "Broadcom AHCI SATA3 registered\n");
714 +
715 + return 0;
716 ++
717 ++out_disable_platform_phys:
718 ++ ahci_platform_disable_phys(hpriv);
719 ++out_disable_phys:
720 ++ brcm_sata_phys_disable(priv);
721 ++out_disable_clks:
722 ++ ahci_platform_disable_clks(hpriv);
723 ++out_reset:
724 ++ if (!IS_ERR_OR_NULL(priv->rcdev))
725 ++ reset_control_assert(priv->rcdev);
726 ++ return ret;
727 + }
728 +
729 + static int brcm_ahci_remove(struct platform_device *pdev)
730 +@@ -378,12 +432,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
731 + struct brcm_ahci_priv *priv = hpriv->plat_data;
732 + int ret;
733 +
734 ++ brcm_sata_phys_disable(priv);
735 ++
736 + ret = ata_platform_remove_one(pdev);
737 + if (ret)
738 + return ret;
739 +
740 +- brcm_sata_phys_disable(priv);
741 +-
742 + return 0;
743 + }
744 +
745 +diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
746 +index 65371e1befe8..0b80502bc1c5 100644
747 +--- a/drivers/ata/libahci_platform.c
748 ++++ b/drivers/ata/libahci_platform.c
749 +@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
750 + * RETURNS:
751 + * 0 on success otherwise a negative error code
752 + */
753 +-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
754 ++int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
755 + {
756 + int rc, i;
757 +
758 +@@ -71,6 +71,7 @@ disable_phys:
759 + }
760 + return rc;
761 + }
762 ++EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
763 +
764 + /**
765 + * ahci_platform_disable_phys - Disable PHYs
766 +@@ -78,7 +79,7 @@ disable_phys:
767 + *
768 + * This function disables all PHYs found in hpriv->phys.
769 + */
770 +-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
771 ++void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
772 + {
773 + int i;
774 +
775 +@@ -87,6 +88,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
776 + phy_exit(hpriv->phys[i]);
777 + }
778 + }
779 ++EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
780 +
781 + /**
782 + * ahci_platform_enable_clks - Enable platform clocks
783 +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
784 +index d6eaaa25d1cc..a700e525535c 100644
785 +--- a/drivers/block/xen-blkback/blkback.c
786 ++++ b/drivers/block/xen-blkback/blkback.c
787 +@@ -929,6 +929,8 @@ next:
788 + out_of_memory:
789 + pr_alert("%s: out of memory\n", __func__);
790 + put_free_pages(ring, pages_to_gnt, segs_to_map);
791 ++ for (i = last_map; i < num; i++)
792 ++ pages[i]->handle = BLKBACK_INVALID_HANDLE;
793 + return -ENOMEM;
794 + }
795 +
796 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
797 +index ad736d7de838..1d1f86657967 100644
798 +--- a/drivers/block/xen-blkback/xenbus.c
799 ++++ b/drivers/block/xen-blkback/xenbus.c
800 +@@ -178,6 +178,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
801 + blkif->domid = domid;
802 + atomic_set(&blkif->refcnt, 1);
803 + init_completion(&blkif->drain_complete);
804 ++
805 ++ /*
806 ++ * Because freeing back to the cache may be deferred, it is not
807 ++ * safe to unload the module (and hence destroy the cache) until
808 ++ * this has completed. To prevent premature unloading, take an
809 ++ * extra module reference here and release only when the object
810 ++ * has been freed back to the cache.
811 ++ */
812 ++ __module_get(THIS_MODULE);
813 + INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
814 +
815 + return blkif;
816 +@@ -322,6 +331,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
817 +
818 + /* Make sure everything is drained before shutting down */
819 + kmem_cache_free(xen_blkif_cachep, blkif);
820 ++ module_put(THIS_MODULE);
821 + }
822 +
823 + int __init xen_blkif_interface_init(void)
824 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
825 +index 1d1c0d7aec88..4e3b24a0511f 100644
826 +--- a/drivers/bluetooth/btusb.c
827 ++++ b/drivers/bluetooth/btusb.c
828 +@@ -1069,7 +1069,7 @@ static int btusb_open(struct hci_dev *hdev)
829 + if (data->setup_on_usb) {
830 + err = data->setup_on_usb(hdev);
831 + if (err < 0)
832 +- return err;
833 ++ goto setup_fail;
834 + }
835 +
836 + data->intf->needs_remote_wakeup = 1;
837 +@@ -1101,6 +1101,7 @@ done:
838 +
839 + failed:
840 + clear_bit(BTUSB_INTR_RUNNING, &data->flags);
841 ++setup_fail:
842 + usb_autopm_put_interface(data->intf);
843 + return err;
844 + }
845 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
846 +index df62e38de5f5..1ba9d02381e8 100644
847 +--- a/drivers/devfreq/devfreq.c
848 ++++ b/drivers/devfreq/devfreq.c
849 +@@ -482,11 +482,6 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
850 + static void _remove_devfreq(struct devfreq *devfreq)
851 + {
852 + mutex_lock(&devfreq_list_lock);
853 +- if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
854 +- mutex_unlock(&devfreq_list_lock);
855 +- dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
856 +- return;
857 +- }
858 + list_del(&devfreq->node);
859 + mutex_unlock(&devfreq_list_lock);
860 +
861 +@@ -558,6 +553,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
862 + devfreq->dev.parent = dev;
863 + devfreq->dev.class = devfreq_class;
864 + devfreq->dev.release = devfreq_dev_release;
865 ++ INIT_LIST_HEAD(&devfreq->node);
866 + devfreq->profile = profile;
867 + strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
868 + devfreq->previous_freq = profile->initial_freq;
869 +@@ -986,7 +982,7 @@ static ssize_t available_governors_show(struct device *d,
870 + * The devfreq with immutable governor (e.g., passive) shows
871 + * only own governor.
872 + */
873 +- if (df->governor->immutable) {
874 ++ if (df->governor && df->governor->immutable) {
875 + count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
876 + "%s ", df->governor_name);
877 + /*
878 +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
879 +index 15475892af0c..bc19ac0e662e 100644
880 +--- a/drivers/firewire/net.c
881 ++++ b/drivers/firewire/net.c
882 +@@ -249,7 +249,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
883 + h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
884 + h->h_proto = type;
885 + memcpy(h->h_dest, neigh->ha, net->addr_len);
886 +- hh->hh_len = FWNET_HLEN;
887 ++
888 ++ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
889 ++ * neigh_hh_output() and neigh_update_hhs().
890 ++ */
891 ++ smp_store_release(&hh->hh_len, FWNET_HLEN);
892 +
893 + return 0;
894 + }
895 +diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
896 +index 24c461dea7af..fd8053f9556e 100644
897 +--- a/drivers/firmware/efi/libstub/gop.c
898 ++++ b/drivers/firmware/efi/libstub/gop.c
899 +@@ -85,30 +85,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
900 + }
901 + }
902 +
903 +-static efi_status_t
904 +-__gop_query32(efi_system_table_t *sys_table_arg,
905 +- struct efi_graphics_output_protocol_32 *gop32,
906 +- struct efi_graphics_output_mode_info **info,
907 +- unsigned long *size, u64 *fb_base)
908 +-{
909 +- struct efi_graphics_output_protocol_mode_32 *mode;
910 +- efi_graphics_output_protocol_query_mode query_mode;
911 +- efi_status_t status;
912 +- unsigned long m;
913 +-
914 +- m = gop32->mode;
915 +- mode = (struct efi_graphics_output_protocol_mode_32 *)m;
916 +- query_mode = (void *)(unsigned long)gop32->query_mode;
917 +-
918 +- status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
919 +- info);
920 +- if (status != EFI_SUCCESS)
921 +- return status;
922 +-
923 +- *fb_base = mode->frame_buffer_base;
924 +- return status;
925 +-}
926 +-
927 + static efi_status_t
928 + setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
929 + efi_guid_t *proto, unsigned long size, void **gop_handle)
930 +@@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
931 + u64 fb_base;
932 + struct efi_pixel_bitmask pixel_info;
933 + int pixel_format;
934 +- efi_status_t status = EFI_NOT_FOUND;
935 ++ efi_status_t status;
936 + u32 *handles = (u32 *)(unsigned long)gop_handle;
937 + int i;
938 +
939 +@@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
940 +
941 + nr_gops = size / sizeof(u32);
942 + for (i = 0; i < nr_gops; i++) {
943 ++ struct efi_graphics_output_protocol_mode_32 *mode;
944 + struct efi_graphics_output_mode_info *info = NULL;
945 + efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
946 + bool conout_found = false;
947 +@@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
948 + if (status == EFI_SUCCESS)
949 + conout_found = true;
950 +
951 +- status = __gop_query32(sys_table_arg, gop32, &info, &size,
952 +- &current_fb_base);
953 +- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
954 ++ mode = (void *)(unsigned long)gop32->mode;
955 ++ info = (void *)(unsigned long)mode->info;
956 ++ current_fb_base = mode->frame_buffer_base;
957 ++
958 ++ if ((!first_gop || conout_found) &&
959 + info->pixel_format != PIXEL_BLT_ONLY) {
960 + /*
961 + * Systems that use the UEFI Console Splitter may
962 +@@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
963 +
964 + /* Did we find any GOPs? */
965 + if (!first_gop)
966 +- goto out;
967 ++ return EFI_NOT_FOUND;
968 +
969 + /* EFI framebuffer */
970 + si->orig_video_isVGA = VIDEO_TYPE_EFI;
971 +@@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
972 + si->lfb_size = si->lfb_linelength * si->lfb_height;
973 +
974 + si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
975 +-out:
976 +- return status;
977 +-}
978 +-
979 +-static efi_status_t
980 +-__gop_query64(efi_system_table_t *sys_table_arg,
981 +- struct efi_graphics_output_protocol_64 *gop64,
982 +- struct efi_graphics_output_mode_info **info,
983 +- unsigned long *size, u64 *fb_base)
984 +-{
985 +- struct efi_graphics_output_protocol_mode_64 *mode;
986 +- efi_graphics_output_protocol_query_mode query_mode;
987 +- efi_status_t status;
988 +- unsigned long m;
989 +-
990 +- m = gop64->mode;
991 +- mode = (struct efi_graphics_output_protocol_mode_64 *)m;
992 +- query_mode = (void *)(unsigned long)gop64->query_mode;
993 +-
994 +- status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
995 +- info);
996 +- if (status != EFI_SUCCESS)
997 +- return status;
998 +
999 +- *fb_base = mode->frame_buffer_base;
1000 +- return status;
1001 ++ return EFI_SUCCESS;
1002 + }
1003 +
1004 + static efi_status_t
1005 +@@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
1006 + u64 fb_base;
1007 + struct efi_pixel_bitmask pixel_info;
1008 + int pixel_format;
1009 +- efi_status_t status = EFI_NOT_FOUND;
1010 ++ efi_status_t status;
1011 + u64 *handles = (u64 *)(unsigned long)gop_handle;
1012 + int i;
1013 +
1014 +@@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
1015 +
1016 + nr_gops = size / sizeof(u64);
1017 + for (i = 0; i < nr_gops; i++) {
1018 ++ struct efi_graphics_output_protocol_mode_64 *mode;
1019 + struct efi_graphics_output_mode_info *info = NULL;
1020 + efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
1021 + bool conout_found = false;
1022 +@@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
1023 + if (status == EFI_SUCCESS)
1024 + conout_found = true;
1025 +
1026 +- status = __gop_query64(sys_table_arg, gop64, &info, &size,
1027 +- &current_fb_base);
1028 +- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
1029 ++ mode = (void *)(unsigned long)gop64->mode;
1030 ++ info = (void *)(unsigned long)mode->info;
1031 ++ current_fb_base = mode->frame_buffer_base;
1032 ++
1033 ++ if ((!first_gop || conout_found) &&
1034 + info->pixel_format != PIXEL_BLT_ONLY) {
1035 + /*
1036 + * Systems that use the UEFI Console Splitter may
1037 +@@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
1038 +
1039 + /* Did we find any GOPs? */
1040 + if (!first_gop)
1041 +- goto out;
1042 ++ return EFI_NOT_FOUND;
1043 +
1044 + /* EFI framebuffer */
1045 + si->orig_video_isVGA = VIDEO_TYPE_EFI;
1046 +@@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
1047 + si->lfb_size = si->lfb_linelength * si->lfb_height;
1048 +
1049 + si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
1050 +-out:
1051 +- return status;
1052 ++
1053 ++ return EFI_SUCCESS;
1054 + }
1055 +
1056 + /*
1057 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1058 +index 6008a30a17d0..58193c939691 100644
1059 +--- a/drivers/gpio/gpiolib.c
1060 ++++ b/drivers/gpio/gpiolib.c
1061 +@@ -188,6 +188,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
1062 + chip = gpiod_to_chip(desc);
1063 + offset = gpio_chip_hwgpio(desc);
1064 +
1065 ++ /*
1066 ++ * Open drain emulation using input mode may incorrectly report
1067 ++ * input here, fix that up.
1068 ++ */
1069 ++ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
1070 ++ test_bit(FLAG_IS_OUT, &desc->flags))
1071 ++ return 0;
1072 ++
1073 + if (!chip->get_direction)
1074 + return status;
1075 +
1076 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1077 +index 4a959740058e..f68dcf5790ad 100644
1078 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1079 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1080 +@@ -1536,7 +1536,11 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1081 + if (ret != 1)
1082 + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1083 +
1084 +- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1085 ++ if (txmsg->seqno != -1) {
1086 ++ WARN_ON((unsigned int)txmsg->seqno >
1087 ++ ARRAY_SIZE(txmsg->dst->tx_slots));
1088 ++ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1089 ++ }
1090 + }
1091 +
1092 + static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1093 +diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
1094 +index a4d81cf4ffa0..16c72d2ddc2e 100644
1095 +--- a/drivers/gpu/drm/drm_property.c
1096 ++++ b/drivers/gpu/drm/drm_property.c
1097 +@@ -554,7 +554,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
1098 + struct drm_property_blob *blob;
1099 + int ret;
1100 +
1101 +- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
1102 ++ if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
1103 + return ERR_PTR(-EINVAL);
1104 +
1105 + blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
1106 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1107 +index dcfbf326f45c..27653aad8f21 100644
1108 +--- a/drivers/infiniband/core/cma.c
1109 ++++ b/drivers/infiniband/core/cma.c
1110 +@@ -4440,6 +4440,7 @@ err:
1111 + unregister_netdevice_notifier(&cma_nb);
1112 + rdma_addr_unregister_client(&addr_client);
1113 + ib_sa_unregister_client(&sa_client);
1114 ++ unregister_pernet_subsys(&cma_pernet_operations);
1115 + err_wq:
1116 + destroy_workqueue(cma_wq);
1117 + return ret;
1118 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1119 +index 7ccf7225f75a..adc46b809ef2 100644
1120 +--- a/drivers/infiniband/hw/mlx4/main.c
1121 ++++ b/drivers/infiniband/hw/mlx4/main.c
1122 +@@ -3031,16 +3031,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
1123 + ibdev->ib_active = false;
1124 + flush_workqueue(wq);
1125 +
1126 +- mlx4_ib_close_sriov(ibdev);
1127 +- mlx4_ib_mad_cleanup(ibdev);
1128 +- ib_unregister_device(&ibdev->ib_dev);
1129 +- mlx4_ib_diag_cleanup(ibdev);
1130 + if (ibdev->iboe.nb.notifier_call) {
1131 + if (unregister_netdevice_notifier(&ibdev->iboe.nb))
1132 + pr_warn("failure unregistering notifier\n");
1133 + ibdev->iboe.nb.notifier_call = NULL;
1134 + }
1135 +
1136 ++ mlx4_ib_close_sriov(ibdev);
1137 ++ mlx4_ib_mad_cleanup(ibdev);
1138 ++ ib_unregister_device(&ibdev->ib_dev);
1139 ++ mlx4_ib_diag_cleanup(ibdev);
1140 ++
1141 + mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
1142 + ibdev->steer_qpn_count);
1143 + kfree(ibdev->ib_uc_qpns_bitmap);
1144 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1145 +index 9892c41de441..8a50da4f148f 100644
1146 +--- a/drivers/md/raid1.c
1147 ++++ b/drivers/md/raid1.c
1148 +@@ -2633,7 +2633,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
1149 + write_targets++;
1150 + }
1151 + }
1152 +- if (bio->bi_end_io) {
1153 ++ if (rdev && bio->bi_end_io) {
1154 + atomic_inc(&rdev->nr_pending);
1155 + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
1156 + bio->bi_bdev = rdev->bdev;
1157 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1158 +index 2594d6a7393f..78809bb5e69e 100644
1159 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
1160 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
1161 +@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
1162 +
1163 + mutex_unlock(&fc_usb->data_mutex);
1164 +
1165 +- return 0;
1166 ++ return ret;
1167 + }
1168 +
1169 + /* actual bus specific access functions,
1170 +diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
1171 +index 7853261906b1..e5d411007ae4 100644
1172 +--- a/drivers/media/usb/dvb-usb/af9005.c
1173 ++++ b/drivers/media/usb/dvb-usb/af9005.c
1174 +@@ -990,8 +990,9 @@ static int af9005_identify_state(struct usb_device *udev,
1175 + else if (reply == 0x02)
1176 + *cold = 0;
1177 + else
1178 +- return -EIO;
1179 +- deb_info("Identify state cold = %d\n", *cold);
1180 ++ ret = -EIO;
1181 ++ if (!ret)
1182 ++ deb_info("Identify state cold = %d\n", *cold);
1183 +
1184 + err:
1185 + kfree(buf);
1186 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1187 +index 2ec1c43270b7..bb36312c9696 100644
1188 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1189 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
1190 +@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
1191 + for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
1192 + u32 func_config =
1193 + MF_CFG_RD(bp,
1194 +- func_mf_config[BP_PORT(bp) + 2 * i].
1195 ++ func_mf_config[BP_PATH(bp) + 2 * i].
1196 + config);
1197 + func_num +=
1198 + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
1199 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1200 +index ce8a777b1e97..8d17d464c067 100644
1201 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1202 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1203 +@@ -9995,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
1204 + */
1205 + static void bnx2x_parity_recover(struct bnx2x *bp)
1206 + {
1207 +- bool global = false;
1208 + u32 error_recovered, error_unrecovered;
1209 +- bool is_parity;
1210 ++ bool is_parity, global = false;
1211 ++#ifdef CONFIG_BNX2X_SRIOV
1212 ++ int vf_idx;
1213 ++
1214 ++ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
1215 ++ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
1216 +
1217 ++ if (vf)
1218 ++ vf->state = VF_LOST;
1219 ++ }
1220 ++#endif
1221 + DP(NETIF_MSG_HW, "Handling parity\n");
1222 + while (1) {
1223 + switch (bp->recovery_state) {
1224 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1225 +index 888d0b6632e8..7152a03e3607 100644
1226 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1227 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1228 +@@ -139,6 +139,7 @@ struct bnx2x_virtf {
1229 + #define VF_ACQUIRED 1 /* VF acquired, but not initialized */
1230 + #define VF_ENABLED 2 /* VF Enabled */
1231 + #define VF_RESET 3 /* VF FLR'd, pending cleanup */
1232 ++#define VF_LOST 4 /* Recovery while VFs are loaded */
1233 +
1234 + bool flr_clnup_stage; /* true during flr cleanup */
1235 +
1236 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1237 +index c2d327d9dff0..27142fb195b6 100644
1238 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1239 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1240 +@@ -2095,6 +2095,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1241 + {
1242 + int i;
1243 +
1244 ++ if (vf->state == VF_LOST) {
1245 ++ /* Just ack the FW and return if VFs are lost
1246 ++ * in case of parity error. VFs are supposed to be timedout
1247 ++ * on waiting for PF response.
1248 ++ */
1249 ++ DP(BNX2X_MSG_IOV,
1250 ++ "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
1251 ++
1252 ++ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1253 ++ return;
1254 ++ }
1255 ++
1256 + /* check if tlv type is known */
1257 + if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
1258 + /* Lock the per vf op mutex and note the locker's identity.
1259 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
1260 +index 62ccbd47c1db..fc1fa0f9f338 100644
1261 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
1262 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
1263 +@@ -53,7 +53,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
1264 + * rate, which then uses the auto-reparenting feature of the
1265 + * clock driver, and enabling/disabling the clock.
1266 + */
1267 +- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
1268 ++ if (phy_interface_mode_is_rgmii(gmac->interface)) {
1269 + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
1270 + clk_prepare_enable(gmac->tx_clk);
1271 + gmac->clk_enabled = 1;
1272 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1273 +index 5ac48a594951..a2b7c685cbf1 100644
1274 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1275 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1276 +@@ -55,7 +55,7 @@
1277 + #include <linux/of_mdio.h>
1278 + #include "dwmac1000.h"
1279 +
1280 +-#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
1281 ++#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
1282 + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
1283 +
1284 + /* Module parameters */
1285 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
1286 +index b1dcc7448b4f..854947b9db4e 100644
1287 +--- a/drivers/net/macvlan.c
1288 ++++ b/drivers/net/macvlan.c
1289 +@@ -234,7 +234,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
1290 + struct net_device *src,
1291 + enum macvlan_mode mode)
1292 + {
1293 +- const struct ethhdr *eth = eth_hdr(skb);
1294 ++ const struct ethhdr *eth = skb_eth_hdr(skb);
1295 + const struct macvlan_dev *vlan;
1296 + struct sk_buff *nskb;
1297 + unsigned int i;
1298 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1299 +index 96258e6a1920..8045cc401009 100644
1300 +--- a/drivers/net/usb/lan78xx.c
1301 ++++ b/drivers/net/usb/lan78xx.c
1302 +@@ -442,7 +442,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
1303 + }
1304 + } else {
1305 + netdev_warn(dev->net,
1306 +- "Failed to read stat ret = 0x%x", ret);
1307 ++ "Failed to read stat ret = %d", ret);
1308 + }
1309 +
1310 + kfree(stats);
1311 +@@ -2407,11 +2407,6 @@ static int lan78xx_stop(struct net_device *net)
1312 + return 0;
1313 + }
1314 +
1315 +-static int lan78xx_linearize(struct sk_buff *skb)
1316 +-{
1317 +- return skb_linearize(skb);
1318 +-}
1319 +-
1320 + static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
1321 + struct sk_buff *skb, gfp_t flags)
1322 + {
1323 +@@ -2422,8 +2417,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
1324 + return NULL;
1325 + }
1326 +
1327 +- if (lan78xx_linearize(skb) < 0)
1328 ++ if (skb_linearize(skb)) {
1329 ++ dev_kfree_skb_any(skb);
1330 + return NULL;
1331 ++ }
1332 +
1333 + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
1334 +
1335 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1336 +index 340bd98b8dbd..987bb1db8265 100644
1337 +--- a/drivers/net/vxlan.c
1338 ++++ b/drivers/net/vxlan.c
1339 +@@ -2104,7 +2104,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1340 + else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
1341 + df = htons(IP_DF);
1342 +
1343 +- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1344 ++ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
1345 + ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1346 + err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
1347 + vni, md, flags, udp_sum);
1348 +@@ -2163,7 +2163,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1349 + if (!info)
1350 + udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
1351 +
1352 +- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1353 ++ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
1354 + ttl = ttl ? : ip6_dst_hoplimit(ndst);
1355 + skb_scrub_packet(skb, xnet);
1356 + err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
1357 +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
1358 +index f333ef1e3e7b..52b42ecee621 100644
1359 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
1360 ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
1361 +@@ -972,6 +972,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1362 + struct ath_htc_rx_status *rxstatus;
1363 + struct ath_rx_status rx_stats;
1364 + bool decrypt_error = false;
1365 ++ __be16 rs_datalen;
1366 ++ bool is_phyerr;
1367 +
1368 + if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
1369 + ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
1370 +@@ -981,11 +983,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1371 +
1372 + rxstatus = (struct ath_htc_rx_status *)skb->data;
1373 +
1374 +- if (be16_to_cpu(rxstatus->rs_datalen) -
1375 +- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
1376 ++ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
1377 ++ if (unlikely(rs_datalen -
1378 ++ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
1379 + ath_err(common,
1380 + "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
1381 +- rxstatus->rs_datalen, skb->len);
1382 ++ rs_datalen, skb->len);
1383 ++ goto rx_next;
1384 ++ }
1385 ++
1386 ++ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
1387 ++ /*
1388 ++ * Discard zero-length packets and packets smaller than an ACK
1389 ++ * which are not PHY_ERROR (short radar pulses have a length of 3)
1390 ++ */
1391 ++ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
1392 ++ ath_warn(common,
1393 ++ "Short RX data len, dropping (dlen: %d)\n",
1394 ++ rs_datalen);
1395 + goto rx_next;
1396 + }
1397 +
1398 +@@ -1010,7 +1025,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1399 + * Process PHY errors and return so that the packet
1400 + * can be dropped.
1401 + */
1402 +- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
1403 ++ if (unlikely(is_phyerr)) {
1404 + /* TODO: Not using DFS processing now. */
1405 + if (ath_cmn_process_fft(&priv->spec_priv, hdr,
1406 + &rx_stats, rx_status->mactime)) {
1407 +diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
1408 +index 0f97514e3474..c9f20e1394e3 100644
1409 +--- a/drivers/regulator/ab8500.c
1410 ++++ b/drivers/regulator/ab8500.c
1411 +@@ -1099,23 +1099,6 @@ static struct ab8500_regulator_info
1412 + .update_val_idle = 0x82,
1413 + .update_val_normal = 0x02,
1414 + },
1415 +- [AB8505_LDO_USB] = {
1416 +- .desc = {
1417 +- .name = "LDO-USB",
1418 +- .ops = &ab8500_regulator_mode_ops,
1419 +- .type = REGULATOR_VOLTAGE,
1420 +- .id = AB8505_LDO_USB,
1421 +- .owner = THIS_MODULE,
1422 +- .n_voltages = 1,
1423 +- .volt_table = fixed_3300000_voltage,
1424 +- },
1425 +- .update_bank = 0x03,
1426 +- .update_reg = 0x82,
1427 +- .update_mask = 0x03,
1428 +- .update_val = 0x01,
1429 +- .update_val_idle = 0x03,
1430 +- .update_val_normal = 0x01,
1431 +- },
1432 + [AB8505_LDO_AUDIO] = {
1433 + .desc = {
1434 + .name = "LDO-AUDIO",
1435 +diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
1436 +index 9c930eb68cda..ffc34e1ee35d 100644
1437 +--- a/drivers/regulator/rn5t618-regulator.c
1438 ++++ b/drivers/regulator/rn5t618-regulator.c
1439 +@@ -127,6 +127,7 @@ static struct platform_driver rn5t618_regulator_driver = {
1440 +
1441 + module_platform_driver(rn5t618_regulator_driver);
1442 +
1443 ++MODULE_ALIAS("platform:rn5t618-regulator");
1444 + MODULE_AUTHOR("Beniamino Galvani <b.galvani@×××××.com>");
1445 + MODULE_DESCRIPTION("RN5T618 regulator driver");
1446 + MODULE_LICENSE("GPL v2");
1447 +diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
1448 +index 60de66252fa2..b200edc665a5 100644
1449 +--- a/drivers/scsi/libsas/sas_discover.c
1450 ++++ b/drivers/scsi/libsas/sas_discover.c
1451 +@@ -97,12 +97,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
1452 + else
1453 + dev->dev_type = SAS_SATA_DEV;
1454 + dev->tproto = SAS_PROTOCOL_SATA;
1455 +- } else {
1456 ++ } else if (port->oob_mode == SAS_OOB_MODE) {
1457 + struct sas_identify_frame *id =
1458 + (struct sas_identify_frame *) dev->frame_rcvd;
1459 + dev->dev_type = id->dev_type;
1460 + dev->iproto = id->initiator_bits;
1461 + dev->tproto = id->target_bits;
1462 ++ } else {
1463 ++ /* If the oob mode is OOB_NOT_CONNECTED, the port is
1464 ++ * disconnected due to race with PHY down. We cannot
1465 ++ * continue to discover this port
1466 ++ */
1467 ++ sas_put_device(dev);
1468 ++ pr_warn("Port %016llx is disconnected when discovering\n",
1469 ++ SAS_ADDR(port->attached_sas_addr));
1470 ++ return -ENODEV;
1471 + }
1472 +
1473 + sas_init_dev(dev);
1474 +diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
1475 +index 05dcc2abd541..99f06ac7bf4c 100644
1476 +--- a/drivers/scsi/lpfc/lpfc_bsg.c
1477 ++++ b/drivers/scsi/lpfc/lpfc_bsg.c
1478 +@@ -4352,12 +4352,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
1479 + phba->mbox_ext_buf_ctx.seqNum++;
1480 + nemb_tp = phba->mbox_ext_buf_ctx.nembType;
1481 +
1482 +- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1483 +- if (!dd_data) {
1484 +- rc = -ENOMEM;
1485 +- goto job_error;
1486 +- }
1487 +-
1488 + pbuf = (uint8_t *)dmabuf->virt;
1489 + size = job->request_payload.payload_len;
1490 + sg_copy_to_buffer(job->request_payload.sg_list,
1491 +@@ -4394,6 +4388,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
1492 + "2968 SLI_CONFIG ext-buffer wr all %d "
1493 + "ebuffers received\n",
1494 + phba->mbox_ext_buf_ctx.numBuf);
1495 ++
1496 ++ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1497 ++ if (!dd_data) {
1498 ++ rc = -ENOMEM;
1499 ++ goto job_error;
1500 ++ }
1501 ++
1502 + /* mailbox command structure for base driver */
1503 + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1504 + if (!pmboxq) {
1505 +@@ -4441,6 +4442,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
1506 + return SLI_CONFIG_HANDLED;
1507 +
1508 + job_error:
1509 ++ if (pmboxq)
1510 ++ mempool_free(pmboxq, phba->mbox_mem_pool);
1511 + lpfc_bsg_dma_page_free(phba, dmabuf);
1512 + kfree(dd_data);
1513 +
1514 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
1515 +index f0fcff032f8a..17b1525d492b 100644
1516 +--- a/drivers/scsi/qla2xxx/qla_isr.c
1517 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
1518 +@@ -973,8 +973,6 @@ global_port_update:
1519 + ql_dbg(ql_dbg_async, vha, 0x5011,
1520 + "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1521 + mb[1], mb[2], mb[3]);
1522 +-
1523 +- qlt_async_event(mb[0], vha, mb);
1524 + break;
1525 + }
1526 +
1527 +@@ -995,8 +993,6 @@ global_port_update:
1528 + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1529 + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1530 + set_bit(VP_CONFIG_OK, &vha->vp_flags);
1531 +-
1532 +- qlt_async_event(mb[0], vha, mb);
1533 + break;
1534 +
1535 + case MBA_RSCN_UPDATE: /* State Change Registration */
1536 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
1537 +index d220b4f691c7..f714d5f917d1 100644
1538 +--- a/drivers/scsi/qla4xxx/ql4_os.c
1539 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
1540 +@@ -4285,7 +4285,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1541 + return QLA_SUCCESS;
1542 +
1543 + mem_alloc_error_exit:
1544 +- qla4xxx_mem_free(ha);
1545 + return QLA_ERROR;
1546 + }
1547 +
1548 +diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
1549 +index 877937706240..828fbbebc3c4 100644
1550 +--- a/drivers/spi/spi-cavium-thunderx.c
1551 ++++ b/drivers/spi/spi-cavium-thunderx.c
1552 +@@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
1553 +
1554 + error:
1555 + clk_disable_unprepare(p->clk);
1556 ++ pci_release_regions(pdev);
1557 + spi_master_put(master);
1558 + return ret;
1559 + }
1560 +@@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
1561 + return;
1562 +
1563 + clk_disable_unprepare(p->clk);
1564 ++ pci_release_regions(pdev);
1565 + /* Put everything in a known state. */
1566 + writeq(0, p->register_base + OCTEON_SPI_CFG(p));
1567 + }
1568 +diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
1569 +index b05dc5086627..8bab8b00d47d 100644
1570 +--- a/drivers/tty/hvc/hvc_vio.c
1571 ++++ b/drivers/tty/hvc/hvc_vio.c
1572 +@@ -120,6 +120,14 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
1573 + return got;
1574 + }
1575 +
1576 ++/**
1577 ++ * hvterm_raw_put_chars: send characters to firmware for given vterm adapter
1578 ++ * @vtermno: The virtual terminal number.
1579 ++ * @buf: The characters to send. Because of the underlying hypercall in
1580 ++ * hvc_put_chars(), this buffer must be at least 16 bytes long, even if
1581 ++ * you are sending fewer chars.
1582 ++ * @count: number of chars to send.
1583 ++ */
1584 + static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
1585 + {
1586 + struct hvterm_priv *pv = hvterm_privs[vtermno];
1587 +@@ -232,6 +240,7 @@ static const struct hv_ops hvterm_hvsi_ops = {
1588 + static void udbg_hvc_putc(char c)
1589 + {
1590 + int count = -1;
1591 ++ unsigned char bounce_buffer[16];
1592 +
1593 + if (!hvterm_privs[0])
1594 + return;
1595 +@@ -242,7 +251,12 @@ static void udbg_hvc_putc(char c)
1596 + do {
1597 + switch(hvterm_privs[0]->proto) {
1598 + case HV_PROTOCOL_RAW:
1599 +- count = hvterm_raw_put_chars(0, &c, 1);
1600 ++ /*
1601 ++ * hvterm_raw_put_chars requires at least a 16-byte
1602 ++ * buffer, so go via the bounce buffer
1603 ++ */
1604 ++ bounce_buffer[0] = c;
1605 ++ count = hvterm_raw_put_chars(0, bounce_buffer, 1);
1606 + break;
1607 + case HV_PROTOCOL_HVSI:
1608 + count = hvterm_hvsi_put_chars(0, &c, 1);
1609 +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
1610 +index 2ed219c837c9..9e6d44df3fab 100644
1611 +--- a/drivers/tty/serial/msm_serial.c
1612 ++++ b/drivers/tty/serial/msm_serial.c
1613 +@@ -1579,6 +1579,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
1614 + int num_newlines = 0;
1615 + bool replaced = false;
1616 + void __iomem *tf;
1617 ++ int locked = 1;
1618 +
1619 + if (is_uartdm)
1620 + tf = port->membase + UARTDM_TF;
1621 +@@ -1591,7 +1592,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
1622 + num_newlines++;
1623 + count += num_newlines;
1624 +
1625 +- spin_lock(&port->lock);
1626 ++ if (port->sysrq)
1627 ++ locked = 0;
1628 ++ else if (oops_in_progress)
1629 ++ locked = spin_trylock(&port->lock);
1630 ++ else
1631 ++ spin_lock(&port->lock);
1632 ++
1633 + if (is_uartdm)
1634 + msm_reset_dm_count(port, count);
1635 +
1636 +@@ -1627,7 +1634,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
1637 + iowrite32_rep(tf, buf, 1);
1638 + i += num_chars;
1639 + }
1640 +- spin_unlock(&port->lock);
1641 ++
1642 ++ if (locked)
1643 ++ spin_unlock(&port->lock);
1644 + }
1645 +
1646 + static void msm_console_write(struct console *co, const char *s,
1647 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1648 +index e8061b02b7e3..32f5ccd5f2c7 100644
1649 +--- a/drivers/usb/core/config.c
1650 ++++ b/drivers/usb/core/config.c
1651 +@@ -198,9 +198,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
1652 + [USB_ENDPOINT_XFER_INT] = 1024,
1653 + };
1654 +
1655 +-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
1656 +- int asnum, struct usb_host_interface *ifp, int num_ep,
1657 +- unsigned char *buffer, int size)
1658 ++static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
1659 ++ struct usb_endpoint_descriptor *e2)
1660 ++{
1661 ++ if (e1->bEndpointAddress == e2->bEndpointAddress)
1662 ++ return true;
1663 ++
1664 ++ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
1665 ++ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
1666 ++ return true;
1667 ++ }
1668 ++
1669 ++ return false;
1670 ++}
1671 ++
1672 ++/*
1673 ++ * Check for duplicate endpoint addresses in other interfaces and in the
1674 ++ * altsetting currently being parsed.
1675 ++ */
1676 ++static bool config_endpoint_is_duplicate(struct usb_host_config *config,
1677 ++ int inum, int asnum, struct usb_endpoint_descriptor *d)
1678 ++{
1679 ++ struct usb_endpoint_descriptor *epd;
1680 ++ struct usb_interface_cache *intfc;
1681 ++ struct usb_host_interface *alt;
1682 ++ int i, j, k;
1683 ++
1684 ++ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
1685 ++ intfc = config->intf_cache[i];
1686 ++
1687 ++ for (j = 0; j < intfc->num_altsetting; ++j) {
1688 ++ alt = &intfc->altsetting[j];
1689 ++
1690 ++ if (alt->desc.bInterfaceNumber == inum &&
1691 ++ alt->desc.bAlternateSetting != asnum)
1692 ++ continue;
1693 ++
1694 ++ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
1695 ++ epd = &alt->endpoint[k].desc;
1696 ++
1697 ++ if (endpoint_is_duplicate(epd, d))
1698 ++ return true;
1699 ++ }
1700 ++ }
1701 ++ }
1702 ++
1703 ++ return false;
1704 ++}
1705 ++
1706 ++static int usb_parse_endpoint(struct device *ddev, int cfgno,
1707 ++ struct usb_host_config *config, int inum, int asnum,
1708 ++ struct usb_host_interface *ifp, int num_ep,
1709 ++ unsigned char *buffer, int size)
1710 + {
1711 + unsigned char *buffer0 = buffer;
1712 + struct usb_endpoint_descriptor *d;
1713 +@@ -237,13 +286,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
1714 + goto skip_to_next_endpoint_or_interface_descriptor;
1715 +
1716 + /* Check for duplicate endpoint addresses */
1717 +- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
1718 +- if (ifp->endpoint[i].desc.bEndpointAddress ==
1719 +- d->bEndpointAddress) {
1720 +- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
1721 +- cfgno, inum, asnum, d->bEndpointAddress);
1722 +- goto skip_to_next_endpoint_or_interface_descriptor;
1723 +- }
1724 ++ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
1725 ++ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
1726 ++ cfgno, inum, asnum, d->bEndpointAddress);
1727 ++ goto skip_to_next_endpoint_or_interface_descriptor;
1728 + }
1729 +
1730 + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
1731 +@@ -517,8 +563,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
1732 + if (((struct usb_descriptor_header *) buffer)->bDescriptorType
1733 + == USB_DT_INTERFACE)
1734 + break;
1735 +- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
1736 +- num_ep, buffer, size);
1737 ++ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
1738 ++ alt, num_ep, buffer, size);
1739 + if (retval < 0)
1740 + return retval;
1741 + ++n;
1742 +diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
1743 +index 4c488d15b6f6..dc99ed94f03d 100644
1744 +--- a/drivers/usb/gadget/function/f_ecm.c
1745 ++++ b/drivers/usb/gadget/function/f_ecm.c
1746 +@@ -625,8 +625,12 @@ static void ecm_disable(struct usb_function *f)
1747 +
1748 + DBG(cdev, "ecm deactivated\n");
1749 +
1750 +- if (ecm->port.in_ep->enabled)
1751 ++ if (ecm->port.in_ep->enabled) {
1752 + gether_disconnect(&ecm->port);
1753 ++ } else {
1754 ++ ecm->port.in_ep->desc = NULL;
1755 ++ ecm->port.out_ep->desc = NULL;
1756 ++ }
1757 +
1758 + usb_ep_disable(ecm->notify);
1759 + ecm->notify->desc = NULL;
1760 +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
1761 +index ba00cdb809d6..865cb070bf8b 100644
1762 +--- a/drivers/usb/gadget/function/f_rndis.c
1763 ++++ b/drivers/usb/gadget/function/f_rndis.c
1764 +@@ -622,6 +622,7 @@ static void rndis_disable(struct usb_function *f)
1765 + gether_disconnect(&rndis->port);
1766 +
1767 + usb_ep_disable(rndis->notify);
1768 ++ rndis->notify->desc = NULL;
1769 + }
1770 +
1771 + /*-------------------------------------------------------------------------*/
1772 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1773 +index 084332a5855e..2d302ff62cc1 100644
1774 +--- a/drivers/usb/serial/option.c
1775 ++++ b/drivers/usb/serial/option.c
1776 +@@ -1167,6 +1167,8 @@ static const struct usb_device_id option_ids[] = {
1777 + .driver_info = NCTRL(0) | RSVD(3) },
1778 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
1779 + .driver_info = NCTRL(0) },
1780 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
1781 ++ .driver_info = NCTRL(0) | RSVD(3) },
1782 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1783 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1784 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
1785 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
1786 +index 731cf54f75c6..05f9f5983ee1 100644
1787 +--- a/drivers/xen/balloon.c
1788 ++++ b/drivers/xen/balloon.c
1789 +@@ -403,7 +403,8 @@ static struct notifier_block xen_memory_nb = {
1790 + #else
1791 + static enum bp_state reserve_additional_memory(void)
1792 + {
1793 +- balloon_stats.target_pages = balloon_stats.current_pages;
1794 ++ balloon_stats.target_pages = balloon_stats.current_pages +
1795 ++ balloon_stats.target_unpopulated;
1796 + return BP_ECANCELED;
1797 + }
1798 + #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
1799 +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
1800 +index 5b832e83772a..02ac9067a354 100644
1801 +--- a/fs/compat_ioctl.c
1802 ++++ b/fs/compat_ioctl.c
1803 +@@ -1585,9 +1585,10 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
1804 + #endif
1805 +
1806 + case FICLONE:
1807 ++ goto do_ioctl;
1808 + case FICLONERANGE:
1809 + case FIDEDUPERANGE:
1810 +- goto do_ioctl;
1811 ++ goto found_handler;
1812 +
1813 + case FIBMAP:
1814 + case FIGETBSZ:
1815 +diff --git a/fs/locks.c b/fs/locks.c
1816 +index 22c5b4aa4961..8252647c6084 100644
1817 +--- a/fs/locks.c
1818 ++++ b/fs/locks.c
1819 +@@ -2681,7 +2681,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
1820 + }
1821 + if (inode) {
1822 + /* userspace relies on this representation of dev_t */
1823 +- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
1824 ++ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
1825 + MAJOR(inode->i_sb->s_dev),
1826 + MINOR(inode->i_sb->s_dev), inode->i_ino);
1827 + } else {
1828 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1829 +index 032fcae3a94f..db4bd70b62d0 100644
1830 +--- a/fs/nfsd/nfs4state.c
1831 ++++ b/fs/nfsd/nfs4state.c
1832 +@@ -3067,12 +3067,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
1833 + (bool)seq->cachethis)
1834 + return false;
1835 + /*
1836 +- * If there's an error than the reply can have fewer ops than
1837 +- * the call. But if we cached a reply with *more* ops than the
1838 +- * call you're sending us now, then this new call is clearly not
1839 +- * really a replay of the old one:
1840 ++ * If there's an error then the reply can have fewer ops than
1841 ++ * the call.
1842 + */
1843 +- if (slot->sl_opcnt < argp->opcnt)
1844 ++ if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
1845 ++ return false;
1846 ++ /*
1847 ++ * But if we cached a reply with *more* ops than the call you're
1848 ++ * sending us now, then this new call is clearly not really a
1849 ++ * replay of the old one:
1850 ++ */
1851 ++ if (slot->sl_opcnt > argp->opcnt)
1852 + return false;
1853 + /* This is the only check explicitly called by spec: */
1854 + if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
1855 +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
1856 +index 8b09271e5d66..a73959e6ae32 100644
1857 +--- a/fs/pstore/ram.c
1858 ++++ b/fs/pstore/ram.c
1859 +@@ -321,6 +321,17 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
1860 +
1861 + prz = cxt->przs[cxt->dump_write_cnt];
1862 +
1863 ++ /*
1864 ++ * Since this is a new crash dump, we need to reset the buffer in
1865 ++ * case it still has an old dump present. Without this, the new dump
1866 ++ * will get appended, which would seriously confuse anything trying
1867 ++ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
1868 ++ * expects to find a dump header in the beginning of buffer data, so
1869 ++ * we must reset the buffer values, in order to ensure that the
1870 ++ * header will be written to the beginning of the buffer.
1871 ++ */
1872 ++ persistent_ram_zap(prz);
1873 ++
1874 + hlen = ramoops_write_kmsg_hdr(prz, compressed);
1875 + if (size + hlen > prz->buffer_size)
1876 + size = prz->buffer_size - hlen;
1877 +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
1878 +index d34085bf4a40..9ca8809ee3d0 100644
1879 +--- a/fs/xfs/libxfs/xfs_bmap.c
1880 ++++ b/fs/xfs/libxfs/xfs_bmap.c
1881 +@@ -5688,7 +5688,7 @@ __xfs_bunmapi(
1882 + * Make sure we don't touch multiple AGF headers out of order
1883 + * in a single transaction, as that could cause AB-BA deadlocks.
1884 + */
1885 +- if (!wasdel) {
1886 ++ if (!wasdel && !isrt) {
1887 + agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
1888 + if (prev_agno != NULLAGNUMBER && prev_agno > agno)
1889 + break;
1890 +diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
1891 +index 33c9a3aae948..7bfcd09d446b 100644
1892 +--- a/fs/xfs/xfs_log.c
1893 ++++ b/fs/xfs/xfs_log.c
1894 +@@ -1540,6 +1540,8 @@ out_free_iclog:
1895 + if (iclog->ic_bp)
1896 + xfs_buf_free(iclog->ic_bp);
1897 + kmem_free(iclog);
1898 ++ if (prev_iclog == log->l_iclog)
1899 ++ break;
1900 + }
1901 + spinlock_destroy(&log->l_icloglock);
1902 + xfs_buf_free(log->l_xbuf);
1903 +diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
1904 +index a270f25ee7c7..1a527e40d601 100644
1905 +--- a/include/linux/ahci_platform.h
1906 ++++ b/include/linux/ahci_platform.h
1907 +@@ -23,6 +23,8 @@ struct ahci_host_priv;
1908 + struct platform_device;
1909 + struct scsi_host_template;
1910 +
1911 ++int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
1912 ++void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
1913 + int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
1914 + void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
1915 + int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
1916 +diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
1917 +index cc535a478bae..710f269764dc 100644
1918 +--- a/include/linux/dmaengine.h
1919 ++++ b/include/linux/dmaengine.h
1920 +@@ -1358,8 +1358,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
1921 + static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
1922 + {
1923 + struct dma_slave_caps caps;
1924 ++ int ret;
1925 +
1926 +- dma_get_slave_caps(tx->chan, &caps);
1927 ++ ret = dma_get_slave_caps(tx->chan, &caps);
1928 ++ if (ret)
1929 ++ return ret;
1930 +
1931 + if (caps.descriptor_reuse) {
1932 + tx->flags |= DMA_CTRL_REUSE;
1933 +diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
1934 +index 548fd535fd02..d433f5e292c9 100644
1935 +--- a/include/linux/if_ether.h
1936 ++++ b/include/linux/if_ether.h
1937 +@@ -28,6 +28,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
1938 + return (struct ethhdr *)skb_mac_header(skb);
1939 + }
1940 +
1941 ++/* Prefer this version in TX path, instead of
1942 ++ * skb_reset_mac_header() + eth_hdr()
1943 ++ */
1944 ++static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
1945 ++{
1946 ++ return (struct ethhdr *)skb->data;
1947 ++}
1948 ++
1949 + static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
1950 + {
1951 + return (struct ethhdr *)skb_inner_mac_header(skb);
1952 +diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
1953 +index d8ecefaf63ca..260c4aa1d976 100644
1954 +--- a/include/linux/regulator/ab8500.h
1955 ++++ b/include/linux/regulator/ab8500.h
1956 +@@ -38,7 +38,6 @@ enum ab8505_regulator_id {
1957 + AB8505_LDO_AUX6,
1958 + AB8505_LDO_INTCORE,
1959 + AB8505_LDO_ADC,
1960 +- AB8505_LDO_USB,
1961 + AB8505_LDO_AUDIO,
1962 + AB8505_LDO_ANAMIC1,
1963 + AB8505_LDO_ANAMIC2,
1964 +diff --git a/include/net/neighbour.h b/include/net/neighbour.h
1965 +index 1c0d07376125..a68a460fa4f3 100644
1966 +--- a/include/net/neighbour.h
1967 ++++ b/include/net/neighbour.h
1968 +@@ -454,7 +454,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
1969 +
1970 + do {
1971 + seq = read_seqbegin(&hh->hh_lock);
1972 +- hh_len = hh->hh_len;
1973 ++ hh_len = READ_ONCE(hh->hh_len);
1974 + if (likely(hh_len <= HH_DATA_MOD)) {
1975 + hh_alen = HH_DATA_MOD;
1976 +
1977 +diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h
1978 +index 58ffcfb7978e..c2b0886c7c25 100644
1979 +--- a/include/uapi/linux/netfilter/xt_sctp.h
1980 ++++ b/include/uapi/linux/netfilter/xt_sctp.h
1981 +@@ -40,19 +40,19 @@ struct xt_sctp_info {
1982 + #define SCTP_CHUNKMAP_SET(chunkmap, type) \
1983 + do { \
1984 + (chunkmap)[type / bytes(__u32)] |= \
1985 +- 1 << (type % bytes(__u32)); \
1986 ++ 1u << (type % bytes(__u32)); \
1987 + } while (0)
1988 +
1989 + #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
1990 + do { \
1991 + (chunkmap)[type / bytes(__u32)] &= \
1992 +- ~(1 << (type % bytes(__u32))); \
1993 ++ ~(1u << (type % bytes(__u32))); \
1994 + } while (0)
1995 +
1996 + #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
1997 + ({ \
1998 + ((chunkmap)[type / bytes (__u32)] & \
1999 +- (1 << (type % bytes (__u32)))) ? 1: 0; \
2000 ++ (1u << (type % bytes (__u32)))) ? 1: 0; \
2001 + })
2002 +
2003 + #define SCTP_CHUNKMAP_RESET(chunkmap) \
2004 +diff --git a/kernel/cred.c b/kernel/cred.c
2005 +index 0966fab0f48b..d63a2d861ac2 100644
2006 +--- a/kernel/cred.c
2007 ++++ b/kernel/cred.c
2008 +@@ -219,7 +219,7 @@ struct cred *cred_alloc_blank(void)
2009 + new->magic = CRED_MAGIC;
2010 + #endif
2011 +
2012 +- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
2013 ++ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
2014 + goto error;
2015 +
2016 + return new;
2017 +@@ -278,7 +278,7 @@ struct cred *prepare_creds(void)
2018 + new->security = NULL;
2019 + #endif
2020 +
2021 +- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
2022 ++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
2023 + goto error;
2024 + validate_creds(new);
2025 + return new;
2026 +@@ -653,7 +653,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
2027 + #ifdef CONFIG_SECURITY
2028 + new->security = NULL;
2029 + #endif
2030 +- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
2031 ++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
2032 + goto error;
2033 +
2034 + put_cred(old);
2035 +diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
2036 +index 9aa0fccd5d43..03595c29c566 100644
2037 +--- a/kernel/locking/spinlock_debug.c
2038 ++++ b/kernel/locking/spinlock_debug.c
2039 +@@ -51,19 +51,19 @@ EXPORT_SYMBOL(__rwlock_init);
2040 +
2041 + static void spin_dump(raw_spinlock_t *lock, const char *msg)
2042 + {
2043 +- struct task_struct *owner = NULL;
2044 ++ struct task_struct *owner = READ_ONCE(lock->owner);
2045 +
2046 +- if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
2047 +- owner = lock->owner;
2048 ++ if (owner == SPINLOCK_OWNER_INIT)
2049 ++ owner = NULL;
2050 + printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
2051 + msg, raw_smp_processor_id(),
2052 + current->comm, task_pid_nr(current));
2053 + printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
2054 + ".owner_cpu: %d\n",
2055 +- lock, lock->magic,
2056 ++ lock, READ_ONCE(lock->magic),
2057 + owner ? owner->comm : "<none>",
2058 + owner ? task_pid_nr(owner) : -1,
2059 +- lock->owner_cpu);
2060 ++ READ_ONCE(lock->owner_cpu));
2061 + dump_stack();
2062 + }
2063 +
2064 +@@ -80,16 +80,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
2065 + static inline void
2066 + debug_spin_lock_before(raw_spinlock_t *lock)
2067 + {
2068 +- SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
2069 +- SPIN_BUG_ON(lock->owner == current, lock, "recursion");
2070 +- SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
2071 ++ SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
2072 ++ SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
2073 ++ SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
2074 + lock, "cpu recursion");
2075 + }
2076 +
2077 + static inline void debug_spin_lock_after(raw_spinlock_t *lock)
2078 + {
2079 +- lock->owner_cpu = raw_smp_processor_id();
2080 +- lock->owner = current;
2081 ++ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
2082 ++ WRITE_ONCE(lock->owner, current);
2083 + }
2084 +
2085 + static inline void debug_spin_unlock(raw_spinlock_t *lock)
2086 +@@ -99,8 +99,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
2087 + SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
2088 + SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
2089 + lock, "wrong CPU");
2090 +- lock->owner = SPINLOCK_OWNER_INIT;
2091 +- lock->owner_cpu = -1;
2092 ++ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
2093 ++ WRITE_ONCE(lock->owner_cpu, -1);
2094 + }
2095 +
2096 + /*
2097 +@@ -183,8 +183,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)
2098 +
2099 + static inline void debug_write_lock_after(rwlock_t *lock)
2100 + {
2101 +- lock->owner_cpu = raw_smp_processor_id();
2102 +- lock->owner = current;
2103 ++ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
2104 ++ WRITE_ONCE(lock->owner, current);
2105 + }
2106 +
2107 + static inline void debug_write_unlock(rwlock_t *lock)
2108 +@@ -193,8 +193,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
2109 + RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
2110 + RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
2111 + lock, "wrong CPU");
2112 +- lock->owner = SPINLOCK_OWNER_INIT;
2113 +- lock->owner_cpu = -1;
2114 ++ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
2115 ++ WRITE_ONCE(lock->owner_cpu, -1);
2116 + }
2117 +
2118 + void do_raw_write_lock(rwlock_t *lock)
2119 +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
2120 +index 4f0f0604f1c4..5dfac92521fa 100644
2121 +--- a/kernel/power/snapshot.c
2122 ++++ b/kernel/power/snapshot.c
2123 +@@ -732,8 +732,15 @@ zone_found:
2124 + * We have found the zone. Now walk the radix tree to find the leaf node
2125 + * for our PFN.
2126 + */
2127 ++
2128 ++ /*
2129 ++ * If the zone we wish to scan is the current zone and the
2130 ++ * pfn falls into the current node then we do not need to walk
2131 ++ * the tree.
2132 ++ */
2133 + node = bm->cur.node;
2134 +- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
2135 ++ if (zone == bm->cur.zone &&
2136 ++ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
2137 + goto node_found;
2138 +
2139 + node = zone->rtree;
2140 +diff --git a/kernel/taskstats.c b/kernel/taskstats.c
2141 +index cbb387a265db..23df1fbad4b4 100644
2142 +--- a/kernel/taskstats.c
2143 ++++ b/kernel/taskstats.c
2144 +@@ -559,25 +559,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
2145 + static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
2146 + {
2147 + struct signal_struct *sig = tsk->signal;
2148 +- struct taskstats *stats;
2149 ++ struct taskstats *stats_new, *stats;
2150 +
2151 +- if (sig->stats || thread_group_empty(tsk))
2152 +- goto ret;
2153 ++ /* Pairs with smp_store_release() below. */
2154 ++ stats = smp_load_acquire(&sig->stats);
2155 ++ if (stats || thread_group_empty(tsk))
2156 ++ return stats;
2157 +
2158 + /* No problem if kmem_cache_zalloc() fails */
2159 +- stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
2160 ++ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
2161 +
2162 + spin_lock_irq(&tsk->sighand->siglock);
2163 +- if (!sig->stats) {
2164 +- sig->stats = stats;
2165 +- stats = NULL;
2166 ++ stats = sig->stats;
2167 ++ if (!stats) {
2168 ++ /*
2169 ++ * Pairs with smp_store_release() above and orders the
2170 ++ * kmem_cache_zalloc().
2171 ++ */
2172 ++ smp_store_release(&sig->stats, stats_new);
2173 ++ stats = stats_new;
2174 ++ stats_new = NULL;
2175 + }
2176 + spin_unlock_irq(&tsk->sighand->siglock);
2177 +
2178 +- if (stats)
2179 +- kmem_cache_free(taskstats_cache, stats);
2180 +-ret:
2181 +- return sig->stats;
2182 ++ if (stats_new)
2183 ++ kmem_cache_free(taskstats_cache, stats_new);
2184 ++
2185 ++ return stats;
2186 + }
2187 +
2188 + /* Send pid data out on exit */
2189 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2190 +index 77109b9cf733..71a40e5c3a9f 100644
2191 +--- a/kernel/trace/ftrace.c
2192 ++++ b/kernel/trace/ftrace.c
2193 +@@ -609,8 +609,7 @@ static int function_stat_show(struct seq_file *m, void *v)
2194 + }
2195 +
2196 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2197 +- avg = rec->time;
2198 +- do_div(avg, rec->counter);
2199 ++ avg = div64_ul(rec->time, rec->counter);
2200 + if (tracing_thresh && (avg < tracing_thresh))
2201 + goto out;
2202 + #endif
2203 +@@ -636,7 +635,8 @@ static int function_stat_show(struct seq_file *m, void *v)
2204 + * Divide only 1000 for ns^2 -> us^2 conversion.
2205 + * trace_print_graph_duration will divide 1000 again.
2206 + */
2207 +- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
2208 ++ stddev = div64_ul(stddev,
2209 ++ rec->counter * (rec->counter - 1) * 1000);
2210 + }
2211 +
2212 + trace_seq_init(&s);
2213 +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
2214 +index 305039b122fa..35b2ba07f3c6 100644
2215 +--- a/kernel/trace/tracing_map.c
2216 ++++ b/kernel/trace/tracing_map.c
2217 +@@ -90,8 +90,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
2218 + #define DEFINE_TRACING_MAP_CMP_FN(type) \
2219 + static int tracing_map_cmp_##type(void *val_a, void *val_b) \
2220 + { \
2221 +- type a = *(type *)val_a; \
2222 +- type b = *(type *)val_b; \
2223 ++ type a = (type)(*(u64 *)val_a); \
2224 ++ type b = (type)(*(u64 *)val_b); \
2225 + \
2226 + return (a > b) ? 1 : ((a < b) ? -1 : 0); \
2227 + }
2228 +diff --git a/mm/mmap.c b/mm/mmap.c
2229 +index 19368fbba42a..d221266d100f 100644
2230 +--- a/mm/mmap.c
2231 ++++ b/mm/mmap.c
2232 +@@ -87,12 +87,6 @@ static void unmap_region(struct mm_struct *mm,
2233 + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
2234 + * w: (no) no w: (no) no w: (copy) copy w: (no) no
2235 + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
2236 +- *
2237 +- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
2238 +- * MAP_PRIVATE:
2239 +- * r: (no) no
2240 +- * w: (no) no
2241 +- * x: (yes) yes
2242 + */
2243 + pgprot_t protection_map[16] = {
2244 + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
2245 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2246 +index 5a50ad517f0f..e4cca3f5331e 100644
2247 +--- a/mm/zsmalloc.c
2248 ++++ b/mm/zsmalloc.c
2249 +@@ -2138,6 +2138,11 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2250 + zs_pool_dec_isolated(pool);
2251 + }
2252 +
2253 ++ if (page_zone(newpage) != page_zone(page)) {
2254 ++ dec_zone_page_state(page, NR_ZSPAGES);
2255 ++ inc_zone_page_state(newpage, NR_ZSPAGES);
2256 ++ }
2257 ++
2258 + reset_page(page);
2259 + put_page(page);
2260 + page = newpage;
2261 +diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
2262 +index cc1557978066..ecdfeaafba9c 100644
2263 +--- a/net/8021q/vlan.h
2264 ++++ b/net/8021q/vlan.h
2265 +@@ -109,6 +109,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
2266 + void vlan_setup(struct net_device *dev);
2267 + int register_vlan_dev(struct net_device *dev);
2268 + void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
2269 ++void vlan_dev_uninit(struct net_device *dev);
2270 + bool vlan_dev_inherit_address(struct net_device *dev,
2271 + struct net_device *real_dev);
2272 +
2273 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2274 +index d06d15db3232..892929d43898 100644
2275 +--- a/net/8021q/vlan_dev.c
2276 ++++ b/net/8021q/vlan_dev.c
2277 +@@ -610,7 +610,8 @@ static int vlan_dev_init(struct net_device *dev)
2278 + return 0;
2279 + }
2280 +
2281 +-static void vlan_dev_uninit(struct net_device *dev)
2282 ++/* Note: this function might be called multiple times for the same device. */
2283 ++void vlan_dev_uninit(struct net_device *dev)
2284 + {
2285 + struct vlan_priority_tci_mapping *pm;
2286 + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
2287 +diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
2288 +index 1270207f3d7c..214cc068ffc2 100644
2289 +--- a/net/8021q/vlan_netlink.c
2290 ++++ b/net/8021q/vlan_netlink.c
2291 +@@ -92,11 +92,13 @@ static int vlan_changelink(struct net_device *dev,
2292 + struct ifla_vlan_flags *flags;
2293 + struct ifla_vlan_qos_mapping *m;
2294 + struct nlattr *attr;
2295 +- int rem;
2296 ++ int rem, err;
2297 +
2298 + if (data[IFLA_VLAN_FLAGS]) {
2299 + flags = nla_data(data[IFLA_VLAN_FLAGS]);
2300 +- vlan_dev_change_flags(dev, flags->flags, flags->mask);
2301 ++ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
2302 ++ if (err)
2303 ++ return err;
2304 + }
2305 + if (data[IFLA_VLAN_INGRESS_QOS]) {
2306 + nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
2307 +@@ -107,7 +109,9 @@ static int vlan_changelink(struct net_device *dev,
2308 + if (data[IFLA_VLAN_EGRESS_QOS]) {
2309 + nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
2310 + m = nla_data(attr);
2311 +- vlan_dev_set_egress_priority(dev, m->from, m->to);
2312 ++ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
2313 ++ if (err)
2314 ++ return err;
2315 + }
2316 + }
2317 + return 0;
2318 +@@ -153,10 +157,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
2319 + return -EINVAL;
2320 +
2321 + err = vlan_changelink(dev, tb, data);
2322 +- if (err < 0)
2323 +- return err;
2324 +-
2325 +- return register_vlan_dev(dev);
2326 ++ if (!err)
2327 ++ err = register_vlan_dev(dev);
2328 ++ if (err)
2329 ++ vlan_dev_uninit(dev);
2330 ++ return err;
2331 + }
2332 +
2333 + static inline size_t vlan_qos_map_size(unsigned int n)
2334 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
2335 +index bd41b78d131d..1d085eed72d0 100644
2336 +--- a/net/bluetooth/hci_conn.c
2337 ++++ b/net/bluetooth/hci_conn.c
2338 +@@ -1054,8 +1054,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
2339 + if (!conn)
2340 + return ERR_PTR(-ENOMEM);
2341 +
2342 +- if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
2343 ++ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
2344 ++ hci_conn_del(conn);
2345 + return ERR_PTR(-EBUSY);
2346 ++ }
2347 +
2348 + conn->state = BT_CONNECT;
2349 + set_bit(HCI_CONN_SCANNING, &conn->flags);
2350 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
2351 +index 1306962a792a..11012a509070 100644
2352 +--- a/net/bluetooth/l2cap_core.c
2353 ++++ b/net/bluetooth/l2cap_core.c
2354 +@@ -4908,10 +4908,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
2355 + BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
2356 + chan, result, local_amp_id, remote_amp_id);
2357 +
2358 +- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
2359 +- l2cap_chan_unlock(chan);
2360 ++ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
2361 + return;
2362 +- }
2363 +
2364 + if (chan->state != BT_CONNECTED) {
2365 + l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
2366 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2367 +index 44a29be7bfff..cd85cee14bd0 100644
2368 +--- a/net/core/neighbour.c
2369 ++++ b/net/core/neighbour.c
2370 +@@ -1058,7 +1058,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
2371 +
2372 + if (update) {
2373 + hh = &neigh->hh;
2374 +- if (hh->hh_len) {
2375 ++ if (READ_ONCE(hh->hh_len)) {
2376 + write_seqlock_bh(&hh->hh_lock);
2377 + update(hh, neigh->dev, neigh->ha);
2378 + write_sequnlock_bh(&hh->hh_lock);
2379 +@@ -1319,7 +1319,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
2380 + struct net_device *dev = neigh->dev;
2381 + unsigned int seq;
2382 +
2383 +- if (dev->header_ops->cache && !neigh->hh.hh_len)
2384 ++ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
2385 + neigh_hh_init(neigh);
2386 +
2387 + do {
2388 +diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
2389 +index 24d7aff8db1a..204aa0131fbe 100644
2390 +--- a/net/ethernet/eth.c
2391 ++++ b/net/ethernet/eth.c
2392 +@@ -238,7 +238,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16
2393 + eth->h_proto = type;
2394 + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
2395 + memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
2396 +- hh->hh_len = ETH_HLEN;
2397 ++
2398 ++ /* Pairs with READ_ONCE() in neigh_resolve_output(),
2399 ++ * neigh_hh_output() and neigh_update_hhs().
2400 ++ */
2401 ++ smp_store_release(&hh->hh_len, ETH_HLEN);
2402 ++
2403 + return 0;
2404 + }
2405 + EXPORT_SYMBOL(eth_header_cache);
2406 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2407 +index 84ff36a6d4e3..4901d17a8e63 100644
2408 +--- a/net/ipv4/tcp_input.c
2409 ++++ b/net/ipv4/tcp_input.c
2410 +@@ -1741,8 +1741,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
2411 + }
2412 +
2413 + /* Ignore very old stuff early */
2414 +- if (!after(sp[used_sacks].end_seq, prior_snd_una))
2415 ++ if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
2416 ++ if (i == 0)
2417 ++ first_sack_index = -1;
2418 + continue;
2419 ++ }
2420 +
2421 + used_sacks++;
2422 + }
2423 +diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
2424 +index 204a8351efff..c29170e767a8 100644
2425 +--- a/net/llc/llc_station.c
2426 ++++ b/net/llc/llc_station.c
2427 +@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
2428 + return LLC_PDU_IS_CMD(pdu) && /* command PDU */
2429 + LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
2430 + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
2431 +- !pdu->dsap ? 0 : 1; /* NULL DSAP value */
2432 ++ !pdu->dsap; /* NULL DSAP value */
2433 + }
2434 +
2435 + static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
2436 +@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
2437 + return LLC_PDU_IS_CMD(pdu) && /* command PDU */
2438 + LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
2439 + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
2440 +- !pdu->dsap ? 0 : 1; /* NULL DSAP */
2441 ++ !pdu->dsap; /* NULL DSAP */
2442 + }
2443 +
2444 + static int llc_station_ac_send_xid_r(struct sk_buff *skb)
2445 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
2446 +index deea281ab169..5e28702c801f 100644
2447 +--- a/net/netfilter/nf_conntrack_netlink.c
2448 ++++ b/net/netfilter/nf_conntrack_netlink.c
2449 +@@ -3388,6 +3388,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2450 +
2451 + list_for_each_entry(net, net_exit_list, exit_list)
2452 + ctnetlink_net_exit(net);
2453 ++
2454 ++ /* wait for other cpus until they are done with ctnl_notifiers */
2455 ++ synchronize_rcu();
2456 + }
2457 +
2458 + static struct pernet_operations ctnetlink_net_ops = {
2459 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
2460 +index 884027f62783..87c35844d7d9 100644
2461 +--- a/net/rfkill/core.c
2462 ++++ b/net/rfkill/core.c
2463 +@@ -940,10 +940,13 @@ static void rfkill_sync_work(struct work_struct *work)
2464 + int __must_check rfkill_register(struct rfkill *rfkill)
2465 + {
2466 + static unsigned long rfkill_no;
2467 +- struct device *dev = &rfkill->dev;
2468 ++ struct device *dev;
2469 + int error;
2470 +
2471 +- BUG_ON(!rfkill);
2472 ++ if (!rfkill)
2473 ++ return -EINVAL;
2474 ++
2475 ++ dev = &rfkill->dev;
2476 +
2477 + mutex_lock(&rfkill_global_mutex);
2478 +
2479 +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
2480 +index bf13b8470c9a..80950a4384aa 100644
2481 +--- a/net/rxrpc/peer_event.c
2482 ++++ b/net/rxrpc/peer_event.c
2483 +@@ -148,6 +148,9 @@ void rxrpc_error_report(struct sock *sk)
2484 + struct rxrpc_peer *peer;
2485 + struct sk_buff *skb;
2486 +
2487 ++ if (unlikely(!local))
2488 ++ return;
2489 ++
2490 + _enter("%p{%d}", sk, local->debug_id);
2491 +
2492 + skb = sock_dequeue_err_skb(sk);
2493 +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
2494 +index b57b4de73038..7e7eba33bbdb 100644
2495 +--- a/net/sched/sch_fq.c
2496 ++++ b/net/sched/sch_fq.c
2497 +@@ -736,7 +736,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
2498 + if (tb[TCA_FQ_QUANTUM]) {
2499 + u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
2500 +
2501 +- if (quantum > 0)
2502 ++ if (quantum > 0 && quantum <= (1 << 20))
2503 + q->quantum = quantum;
2504 + else
2505 + err = -EINVAL;
2506 +diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
2507 +index 2cca1ead96b5..2332984ba422 100644
2508 +--- a/net/sched/sch_prio.c
2509 ++++ b/net/sched/sch_prio.c
2510 +@@ -232,8 +232,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2511 + struct prio_sched_data *q = qdisc_priv(sch);
2512 + unsigned long band = arg - 1;
2513 +
2514 +- if (new == NULL)
2515 +- new = &noop_qdisc;
2516 ++ if (!new) {
2517 ++ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
2518 ++ TC_H_MAKE(sch->handle, arg));
2519 ++ if (!new)
2520 ++ new = &noop_qdisc;
2521 ++ else
2522 ++ qdisc_hash_add(new);
2523 ++ }
2524 +
2525 + *old = qdisc_replace(sch, new, &q->queues[band]);
2526 + return 0;
2527 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
2528 +index 8b4cf78987e4..1133fa0830f4 100644
2529 +--- a/net/sctp/sm_sideeffect.c
2530 ++++ b/net/sctp/sm_sideeffect.c
2531 +@@ -1321,8 +1321,10 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
2532 + /* Generate an INIT ACK chunk. */
2533 + new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
2534 + 0);
2535 +- if (!new_obj)
2536 +- goto nomem;
2537 ++ if (!new_obj) {
2538 ++ error = -ENOMEM;
2539 ++ break;
2540 ++ }
2541 +
2542 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
2543 + SCTP_CHUNK(new_obj));
2544 +@@ -1344,7 +1346,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
2545 + if (!new_obj) {
2546 + if (cmd->obj.chunk)
2547 + sctp_chunk_free(cmd->obj.chunk);
2548 +- goto nomem;
2549 ++ error = -ENOMEM;
2550 ++ break;
2551 + }
2552 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
2553 + SCTP_CHUNK(new_obj));
2554 +@@ -1391,8 +1394,10 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
2555 +
2556 + /* Generate a SHUTDOWN chunk. */
2557 + new_obj = sctp_make_shutdown(asoc, chunk);
2558 +- if (!new_obj)
2559 +- goto nomem;
2560 ++ if (!new_obj) {
2561 ++ error = -ENOMEM;
2562 ++ break;
2563 ++ }
2564 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
2565 + SCTP_CHUNK(new_obj));
2566 + break;
2567 +@@ -1721,11 +1726,17 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
2568 + break;
2569 + }
2570 +
2571 +- if (error)
2572 ++ if (error) {
2573 ++ cmd = sctp_next_cmd(commands);
2574 ++ while (cmd) {
2575 ++ if (cmd->verb == SCTP_CMD_REPLY)
2576 ++ sctp_chunk_free(cmd->obj.chunk);
2577 ++ cmd = sctp_next_cmd(commands);
2578 ++ }
2579 + break;
2580 ++ }
2581 + }
2582 +
2583 +-out:
2584 + /* If this is in response to a received chunk, wait until
2585 + * we are done with the packet to open the queue so that we don't
2586 + * send multiple packets in response to a single request.
2587 +@@ -1740,8 +1751,5 @@ out:
2588 + sp->data_ready_signalled = 0;
2589 +
2590 + return error;
2591 +-nomem:
2592 +- error = -ENOMEM;
2593 +- goto out;
2594 + }
2595 +
2596 +diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
2597 +index 9a130d31ecf2..6fbb5eb9daf3 100644
2598 +--- a/samples/bpf/trace_event_user.c
2599 ++++ b/samples/bpf/trace_event_user.c
2600 +@@ -33,9 +33,9 @@ static void print_ksym(__u64 addr)
2601 + return;
2602 + sym = ksym_search(addr);
2603 + printf("%s;", sym->name);
2604 +- if (!strcmp(sym->name, "sys_read"))
2605 ++ if (!strstr(sym->name, "sys_read"))
2606 + sys_read_seen = true;
2607 +- else if (!strcmp(sym->name, "sys_write"))
2608 ++ else if (!strstr(sym->name, "sys_write"))
2609 + sys_write_seen = true;
2610 + }
2611 +
2612 +diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
2613 +index ed29bad1f03a..96420b620963 100644
2614 +--- a/scripts/kconfig/expr.c
2615 ++++ b/scripts/kconfig/expr.c
2616 +@@ -201,6 +201,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
2617 + {
2618 + int res, old_count;
2619 +
2620 ++ /*
2621 ++ * A NULL expr is taken to be yes, but there's also a different way to
2622 ++ * represent yes. expr_is_yes() checks for either representation.
2623 ++ */
2624 ++ if (!e1 || !e2)
2625 ++ return expr_is_yes(e1) && expr_is_yes(e2);
2626 ++
2627 + if (e1->type != e2->type)
2628 + return 0;
2629 + switch (e1->type) {
2630 +diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
2631 +index 9d7582c90a95..c67d379cb6d6 100644
2632 +--- a/sound/isa/cs423x/cs4236.c
2633 ++++ b/sound/isa/cs423x/cs4236.c
2634 +@@ -293,7 +293,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
2635 + } else {
2636 + mpu_port[dev] = pnp_port_start(pdev, 0);
2637 + if (mpu_irq[dev] >= 0 &&
2638 +- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) {
2639 ++ pnp_irq_valid(pdev, 0) &&
2640 ++ pnp_irq(pdev, 0) != (resource_size_t)-1) {
2641 + mpu_irq[dev] = pnp_irq(pdev, 0);
2642 + } else {
2643 + mpu_irq[dev] = -1; /* disable interrupt */
2644 +diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
2645 +index e5c52ed9b674..8c06de37b467 100644
2646 +--- a/sound/pci/ice1712/ice1724.c
2647 ++++ b/sound/pci/ice1712/ice1724.c
2648 +@@ -661,6 +661,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
2649 + unsigned long flags;
2650 + unsigned char mclk_change;
2651 + unsigned int i, old_rate;
2652 ++ bool call_set_rate = false;
2653 +
2654 + if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
2655 + return -EINVAL;
2656 +@@ -684,7 +685,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
2657 + * setting clock rate for internal clock mode */
2658 + old_rate = ice->get_rate(ice);
2659 + if (force || (old_rate != rate))
2660 +- ice->set_rate(ice, rate);
2661 ++ call_set_rate = true;
2662 + else if (rate == ice->cur_rate) {
2663 + spin_unlock_irqrestore(&ice->reg_lock, flags);
2664 + return 0;
2665 +@@ -692,12 +693,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
2666 + }
2667 +
2668 + ice->cur_rate = rate;
2669 ++ spin_unlock_irqrestore(&ice->reg_lock, flags);
2670 ++
2671 ++ if (call_set_rate)
2672 ++ ice->set_rate(ice, rate);
2673 +
2674 + /* setting master clock */
2675 + mclk_change = ice->set_mclk(ice, rate);
2676 +
2677 +- spin_unlock_irqrestore(&ice->reg_lock, flags);
2678 +-
2679 + if (mclk_change && ice->gpio.i2s_mclk_changed)
2680 + ice->gpio.i2s_mclk_changed(ice);
2681 + if (ice->gpio.set_pro_rate)
2682 +diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
2683 +index fd2731d171dd..0e8008d38161 100644
2684 +--- a/sound/soc/codecs/wm8962.c
2685 ++++ b/sound/soc/codecs/wm8962.c
2686 +@@ -2791,7 +2791,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
2687 +
2688 + if (target % Fref == 0) {
2689 + fll_div->theta = 0;
2690 +- fll_div->lambda = 0;
2691 ++ fll_div->lambda = 1;
2692 + } else {
2693 + gcd_fll = gcd(target, fratio * Fref);
2694 +
2695 +@@ -2861,7 +2861,7 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
2696 + return -EINVAL;
2697 + }
2698 +
2699 +- if (fll_div.theta || fll_div.lambda)
2700 ++ if (fll_div.theta)
2701 + fll1 |= WM8962_FLL_FRAC;
2702 +
2703 + /* Stop the FLL while we reconfigure */
2704 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
2705 +index 33ff5c843346..6e88460cd13d 100644
2706 +--- a/tools/perf/builtin-report.c
2707 ++++ b/tools/perf/builtin-report.c
2708 +@@ -292,13 +292,6 @@ static int report__setup_sample_type(struct report *rep)
2709 + PERF_SAMPLE_BRANCH_ANY))
2710 + rep->nonany_branch_mode = true;
2711 +
2712 +-#ifndef HAVE_LIBUNWIND_SUPPORT
2713 +- if (dwarf_callchain_users) {
2714 +- ui__warning("Please install libunwind development packages "
2715 +- "during the perf build.\n");
2716 +- }
2717 +-#endif
2718 +-
2719 + return 0;
2720 + }
2721 +