
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.2 commit in: /
Date: Sun, 21 Jul 2019 14:43:48
Message-Id: 1563720210.9b389634cc124ae7238533cc9784f5ff9dcf1029.mpagano@gentoo
commit: 9b389634cc124ae7238533cc9784f5ff9dcf1029
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 21 14:43:30 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 21 14:43:30 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9b389634

Linux patch 5.2.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1001_linux-5.2.2.patch | 1203 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1207 insertions(+)

diff --git a/0000_README b/0000_README
index 3d37d29..d2c1e9b 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch: 1000_linux-5.2.1.patch
From: https://www.kernel.org
Desc: Linux 5.2.1

+Patch: 1001_linux-5.2.2.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.2
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.2.2.patch b/1001_linux-5.2.2.patch
new file mode 100644
index 0000000..d6a081e
--- /dev/null
+++ b/1001_linux-5.2.2.patch
@@ -0,0 +1,1203 @@
+diff --git a/Makefile b/Makefile
+index d8f5dbfd6b76..d6c65b678d21 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
+index 182ce67dfe10..c2663fce7f6c 100644
+--- a/arch/arc/kernel/unwind.c
++++ b/arch/arc/kernel/unwind.c
+@@ -181,11 +181,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
+ return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
+ }
+
+-static void *unw_hdr_alloc(unsigned long sz)
+-{
+- return kmalloc(sz, GFP_KERNEL);
+-}
+-
+ static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+@@ -366,6 +361,10 @@ ret_err:
+ }
+
+ #ifdef CONFIG_MODULES
++static void *unw_hdr_alloc(unsigned long sz)
++{
++ return kmalloc(sz, GFP_KERNEL);
++}
+
+ static struct unwind_table *last_table;
+
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index e78cda94456b..68c476b20b57 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr)
+ return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+ }
+
++static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
++{
++ register unsigned long reg0 asm("0") = size - 1;
++
++ asm volatile(
++ ".insn s,0xb2b00000,0(%1)" /* stfle */
++ : "+d" (reg0)
++ : "a" (stfle_fac_list)
++ : "memory", "cc");
++ return reg0;
++}
++
+ /**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+@@ -75,13 +87,8 @@ static inline void __stfle(u64 *stfle_fac_list, int size)
+ memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+ if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ /* More facility bits available with stfle */
+- register unsigned long reg0 asm("0") = size - 1;
+-
+- asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+- : "+d" (reg0)
+- : "a" (stfle_fac_list)
+- : "memory", "cc");
+- nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
++ nr = __stfle_asm(stfle_fac_list, size);
++ nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
+ }
+ memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+ }
+diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
+index f577c5f6031a..c563f8368b19 100644
+--- a/arch/s390/include/asm/sclp.h
++++ b/arch/s390/include/asm/sclp.h
+@@ -80,7 +80,6 @@ struct sclp_info {
+ unsigned char has_gisaf : 1;
+ unsigned char has_diag318 : 1;
+ unsigned char has_sipl : 1;
+- unsigned char has_sipl_g2 : 1;
+ unsigned char has_dirq : 1;
+ unsigned int ibc;
+ unsigned int mtid;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index d836af3ccc38..2c0a515428d6 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -286,12 +286,7 @@ static struct kobj_attribute sys_ipl_secure_attr =
+ static ssize_t ipl_has_secure_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+ {
+- if (MACHINE_IS_LPAR)
+- return sprintf(page, "%i\n", !!sclp.has_sipl);
+- else if (MACHINE_IS_VM)
+- return sprintf(page, "%i\n", !!sclp.has_sipl_g2);
+- else
+- return sprintf(page, "%i\n", 0);
++ return sprintf(page, "%i\n", !!sclp.has_sipl);
+ }
+
+ static struct kobj_attribute sys_ipl_has_secure_attr =
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 7b23431be5cb..f49e11669271 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -1104,6 +1104,30 @@ ENTRY(irq_entries_start)
+ .endr
+ END(irq_entries_start)
+
++#ifdef CONFIG_X86_LOCAL_APIC
++ .align 8
++ENTRY(spurious_entries_start)
++ vector=FIRST_SYSTEM_VECTOR
++ .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
++ pushl $(~vector+0x80) /* Note: always in signed byte range */
++ vector=vector+1
++ jmp common_spurious
++ .align 8
++ .endr
++END(spurious_entries_start)
++
++common_spurious:
++ ASM_CLAC
++ addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
++ SAVE_ALL switch_stacks=1
++ ENCODE_FRAME_POINTER
++ TRACE_IRQS_OFF
++ movl %esp, %eax
++ call smp_spurious_interrupt
++ jmp ret_from_intr
++ENDPROC(common_spurious)
++#endif
++
+ /*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+ * so IRQ-flags tracing has to follow that:
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 11aa3b2afa4d..8dbca86c249b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -375,6 +375,18 @@ ENTRY(irq_entries_start)
+ .endr
+ END(irq_entries_start)
+
++ .align 8
++ENTRY(spurious_entries_start)
++ vector=FIRST_SYSTEM_VECTOR
++ .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
++ UNWIND_HINT_IRET_REGS
++ pushq $(~vector+0x80) /* Note: always in signed byte range */
++ jmp common_spurious
++ .align 8
++ vector=vector+1
++ .endr
++END(spurious_entries_start)
++
+ .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
+ #ifdef CONFIG_DEBUG_ENTRY
+ pushq %rax
+@@ -571,10 +583,20 @@ _ASM_NOKPROBE(interrupt_entry)
+
+ /* Interrupt entry/exit. */
+
+- /*
+- * The interrupt stubs push (~vector+0x80) onto the stack and
+- * then jump to common_interrupt.
+- */
+/*
++ * The interrupt stubs push (~vector+0x80) onto the stack and
++ * then jump to common_spurious/interrupt.
++ */
++common_spurious:
++ addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
++ call interrupt_entry
++ UNWIND_HINT_REGS indirect=1
++ call smp_spurious_interrupt /* rdi points to pt_regs */
++ jmp ret_from_intr
++END(common_spurious)
++_ASM_NOKPROBE(common_spurious)
++
++/* common_interrupt is a hotpath. Align it */
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+ common_interrupt:
+ addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index 32e666e1231e..cbd97e22d2f3 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -150,8 +150,11 @@ extern char irq_entries_start[];
+ #define trace_irq_entries_start irq_entries_start
+ #endif
+
++extern char spurious_entries_start[];
++
+ #define VECTOR_UNUSED NULL
+-#define VECTOR_RETRIGGERED ((void *)~0UL)
++#define VECTOR_SHUTDOWN ((void *)~0UL)
++#define VECTOR_RETRIGGERED ((void *)~1UL)
+
+ typedef struct irq_desc* vector_irq_t[NR_VECTORS];
+ DECLARE_PER_CPU(vector_irq_t, vector_irq);
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 85be316665b4..16c21ed97cb2 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2041,21 +2041,32 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
+ entering_irq();
+ trace_spurious_apic_entry(vector);
+
++ inc_irq_stat(irq_spurious_count);
++
++ /*
++ * If this is a spurious interrupt then do not acknowledge
++ */
++ if (vector == SPURIOUS_APIC_VECTOR) {
++ /* See SDM vol 3 */
++ pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
++ smp_processor_id());
++ goto out;
++ }
++
+ /*
+- * Check if this really is a spurious interrupt and ACK it
+- * if it is a vectored one. Just in case...
+- * Spurious interrupts should not be ACKed.
++ * If it is a vectored one, verify it's set in the ISR. If set,
++ * acknowledge it.
+ */
+ v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+- if (v & (1 << (vector & 0x1f)))
++ if (v & (1 << (vector & 0x1f))) {
++ pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
++ vector, smp_processor_id());
+ ack_APIC_irq();
+-
+- inc_irq_stat(irq_spurious_count);
+-
+- /* see sw-dev-man vol 3, chapter 7.4.13.5 */
+- pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
+- "should never happen.\n", vector, smp_processor_id());
+-
++ } else {
++ pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
++ vector, smp_processor_id());
++ }
++out:
+ trace_spurious_apic_exit(vector);
+ exiting_irq();
+ }
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 53aa234a6803..c9fec0657eea 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1893,6 +1893,50 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
+ return ret;
+ }
+
++/*
++ * Interrupt shutdown masks the ioapic pin, but the interrupt might already
++ * be in flight, but not yet serviced by the target CPU. That means
++ * __synchronize_hardirq() would return and claim that everything is calmed
++ * down. So free_irq() would proceed and deactivate the interrupt and free
++ * resources.
++ *
++ * Once the target CPU comes around to service it it will find a cleared
++ * vector and complain. While the spurious interrupt is harmless, the full
++ * release of resources might prevent the interrupt from being acknowledged
++ * which keeps the hardware in a weird state.
++ *
++ * Verify that the corresponding Remote-IRR bits are clear.
++ */
++static int ioapic_irq_get_chip_state(struct irq_data *irqd,
++ enum irqchip_irq_state which,
++ bool *state)
++{
++ struct mp_chip_data *mcd = irqd->chip_data;
++ struct IO_APIC_route_entry rentry;
++ struct irq_pin_list *p;
++
++ if (which != IRQCHIP_STATE_ACTIVE)
++ return -EINVAL;
++
++ *state = false;
++ raw_spin_lock(&ioapic_lock);
++ for_each_irq_pin(p, mcd->irq_2_pin) {
++ rentry = __ioapic_read_entry(p->apic, p->pin);
++ /*
++ * The remote IRR is only valid in level trigger mode. It's
++ * meaning is undefined for edge triggered interrupts and
++ * irrelevant because the IO-APIC treats them as fire and
++ * forget.
++ */
++ if (rentry.irr && rentry.trigger) {
++ *state = true;
++ break;
++ }
++ }
++ raw_spin_unlock(&ioapic_lock);
++ return 0;
++}
++
+ static struct irq_chip ioapic_chip __read_mostly = {
+ .name = "IO-APIC",
+ .irq_startup = startup_ioapic_irq,
+@@ -1902,6 +1946,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
+ .irq_eoi = ioapic_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
++ .irq_get_irqchip_state = ioapic_irq_get_chip_state,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+@@ -1914,6 +1959,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
+ .irq_eoi = ioapic_ir_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
++ .irq_get_irqchip_state = ioapic_irq_get_chip_state,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index e7cb78aed644..fdacb864c3dd 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -340,7 +340,7 @@ static void clear_irq_vector(struct irq_data *irqd)
+ trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
+ apicd->prev_cpu);
+
+- per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
++ per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
+ irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
+ apicd->vector = 0;
+
+@@ -349,7 +349,7 @@ static void clear_irq_vector(struct irq_data *irqd)
+ if (!vector)
+ return;
+
+- per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
++ per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
+ irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
+ apicd->prev_vector = 0;
+ apicd->move_in_progress = 0;
+diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
+index d2482bbbe3d0..87ef69a72c52 100644
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -319,7 +319,8 @@ void __init idt_setup_apic_and_irq_gates(void)
+ #ifdef CONFIG_X86_LOCAL_APIC
+ for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
+ set_bit(i, system_vectors);
+- set_intr_gate(i, spurious_interrupt);
++ entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
++ set_intr_gate(i, entry);
+ }
+ #endif
+ }
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 9b68b5b00ac9..cc496eb7a8d2 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -247,7 +247,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+ if (!handle_irq(desc, regs)) {
+ ack_APIC_irq();
+
+- if (desc != VECTOR_RETRIGGERED) {
++ if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
+ pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
+ __func__, smp_processor_id(),
+ vector);
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index a7359535caf5..b444f89a2041 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -655,7 +655,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
+
+ static int __init cacheinfo_sysfs_init(void)
+ {
+- return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
++ return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
++ "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
+ }
+ device_initcall(cacheinfo_sysfs_init);
+diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
+index f962488546b6..103b5d37fa86 100644
+--- a/drivers/base/firmware_loader/fallback.c
++++ b/drivers/base/firmware_loader/fallback.c
+@@ -659,7 +659,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
+ /* Also permit LSMs and IMA to fail firmware sysfs fallback */
+ ret = security_kernel_load_data(LOADING_FIRMWARE);
+ if (ret < 0)
+- return ret;
++ return false;
+
+ return fw_force_sysfs_fallback(opt_flags);
+ }
+diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
+index 4acbc47973e9..e78ff5c65ed6 100644
+--- a/drivers/crypto/nx/nx-842-powernv.c
++++ b/drivers/crypto/nx/nx-842-powernv.c
+@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
+ #define WORKMEM_ALIGN (CRB_ALIGN)
+ #define CSB_WAIT_MAX (5000) /* ms */
+ #define VAS_RETRIES (10)
+-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
+-#define MAX_CREDITS_PER_RXFIFO (1024)
+
+ struct nx842_workmem {
+ /* Below fields must be properly aligned */
+@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+ rxattr.lnotify_lpid = lpid;
+ rxattr.lnotify_pid = pid;
+ rxattr.lnotify_tid = tid;
+- rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
++ /*
++ * Maximum RX window credits can not be more than #CRBs in
++ * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
++ */
++ rxattr.wcreds_max = fifo_size / CRB_SIZE;
+
+ /*
+ * Open a VAS receice window which is used to configure RxFIFO
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 427c78d4d948..8c57c5af0930 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -321,6 +321,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+ }
+ EXPORT_SYMBOL(talitos_submit);
+
++static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
++{
++ struct talitos_edesc *edesc;
++
++ if (!is_sec1)
++ return request->desc->hdr;
++
++ if (!request->desc->next_desc)
++ return request->desc->hdr1;
++
++ edesc = container_of(request->desc, struct talitos_edesc, desc);
++
++ return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
++}
++
+ /*
+ * process what was done, notify callback of error if not
+ */
+@@ -342,12 +357,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
+
+ /* descriptors with their done bits set don't get the error */
+ rmb();
+- if (!is_sec1)
+- hdr = request->desc->hdr;
+- else if (request->desc->next_desc)
+- hdr = (request->desc + 1)->hdr1;
+- else
+- hdr = request->desc->hdr1;
++ hdr = get_request_hdr(request, is_sec1);
+
+ if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
+ status = 0;
+@@ -477,8 +487,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
+ }
+ }
+
+- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
+- return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
++ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
++ struct talitos_edesc *edesc;
++
++ edesc = container_of(priv->chan[ch].fifo[iter].desc,
++ struct talitos_edesc, desc);
++ return ((struct talitos_desc *)
++ (edesc->buf + edesc->dma_len))->hdr;
++ }
+
+ return priv->chan[ch].fifo[iter].desc->hdr;
+ }
+@@ -948,36 +964,6 @@ badkey:
+ goto out;
+ }
+
+-/*
+- * talitos_edesc - s/w-extended descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @icv_ool: whether ICV is out-of-line
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @dma_len: length of dma mapped link_tbl space
+- * @dma_link_tbl: bus physical address of link_tbl/buf
+- * @desc: h/w descriptor
+- * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+- * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
+- *
+- * if decrypting (with authcheck), or either one of src_nents or dst_nents
+- * is greater than 1, an integrity check value is concatenated to the end
+- * of link_tbl data
+- */
+-struct talitos_edesc {
+- int src_nents;
+- int dst_nents;
+- bool icv_ool;
+- dma_addr_t iv_dma;
+- int dma_len;
+- dma_addr_t dma_link_tbl;
+- struct talitos_desc desc;
+- union {
+- struct talitos_ptr link_tbl[0];
+- u8 buf[0];
+- };
+-};
+-
+ static void talitos_sg_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct scatterlist *src,
+@@ -1466,15 +1452,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->dma_len = dma_len;
+- if (dma_len) {
+- void *addr = &edesc->link_tbl[0];
+-
+- if (is_sec1 && !dst)
+- addr += sizeof(struct talitos_desc);
+- edesc->dma_link_tbl = dma_map_single(dev, addr,
++ if (dma_len)
++ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+- }
++
+ return edesc;
+ }
+
+@@ -1759,14 +1741,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ struct talitos_desc *desc = &edesc->desc;
+- struct talitos_desc *desc2 = desc + 1;
++ struct talitos_desc *desc2 = (struct talitos_desc *)
++ (edesc->buf + edesc->dma_len);
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ if (desc->next_desc &&
+ desc->ptr[5].ptr != desc2->ptr[5].ptr)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+
+- talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
++ if (req_ctx->psrc)
++ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+
+ /* When using hashctx-in, must unmap it. */
+ if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
+@@ -1833,7 +1817,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
+
+ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ struct ahash_request *areq, unsigned int length,
+- unsigned int offset,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+@@ -1872,9 +1855,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+- sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
+- edesc->buf + sizeof(struct talitos_desc),
+- length, req_ctx->nbuf);
++ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
+ else if (length)
+ sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+ DMA_TO_DEVICE);
+@@ -1887,7 +1868,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ DMA_TO_DEVICE);
+ } else {
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+- &desc->ptr[3], sg_count, offset, 0);
++ &desc->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ }
+@@ -1911,7 +1892,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+
+ if (is_sec1 && req_ctx->nbuf && length) {
+- struct talitos_desc *desc2 = desc + 1;
++ struct talitos_desc *desc2 = (struct talitos_desc *)
++ (edesc->buf + edesc->dma_len);
+ dma_addr_t next_desc;
+
+ memset(desc2, 0, sizeof(*desc2));
+@@ -1932,7 +1914,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ DMA_TO_DEVICE);
+ copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+- &desc2->ptr[3], sg_count, offset, 0);
++ &desc2->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+@@ -2043,7 +2025,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+- int offset = 0;
+ u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
+
+ if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+@@ -2083,6 +2064,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ sg_chain(req_ctx->bufsl, 2, areq->src);
+ req_ctx->psrc = req_ctx->bufsl;
+ } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
++ int offset;
++
+ if (nbytes_to_hash > blocksize)
+ offset = blocksize - req_ctx->nbuf;
+ else
+@@ -2095,7 +2078,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ sg_copy_to_buffer(areq->src, nents,
+ ctx_buf + req_ctx->nbuf, offset);
+ req_ctx->nbuf += offset;
+- req_ctx->psrc = areq->src;
++ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
++ offset);
+ } else
+ req_ctx->psrc = areq->src;
+
+@@ -2135,8 +2119,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
+- ahash_done);
++ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
+ }
+
+ static int ahash_update(struct ahash_request *areq)
+diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
+index a65a63e0d6c1..979f6a61e545 100644
+--- a/drivers/crypto/talitos.h
++++ b/drivers/crypto/talitos.h
+@@ -65,6 +65,36 @@ struct talitos_desc {
+
+ #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32))
+
++/*
++ * talitos_edesc - s/w-extended descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @icv_ool: whether ICV is out-of-line
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @dma_len: length of dma mapped link_tbl space
++ * @dma_link_tbl: bus physical address of link_tbl/buf
++ * @desc: h/w descriptor
++ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
++ * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
++ *
++ * if decrypting (with authcheck), or either one of src_nents or dst_nents
++ * is greater than 1, an integrity check value is concatenated to the end
++ * of link_tbl data
++ */
++struct talitos_edesc {
++ int src_nents;
++ int dst_nents;
++ bool icv_ool;
++ dma_addr_t iv_dma;
++ int dma_len;
++ dma_addr_t dma_link_tbl;
++ struct talitos_desc desc;
++ union {
++ struct talitos_ptr link_tbl[0];
++ u8 buf[0];
++ };
++};
++
+ /**
+ * talitos_request - descriptor submission request
+ * @desc: descriptor pointer (kernel virtual)
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index b8ec301025b7..1080c0c49815 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
+ "LEN0092", /* X1 Carbon 6 */
++ "LEN0093", /* T480 */
+ "LEN0096", /* X280 */
+ "LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN200f", /* T450s */
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 0e09bede42a2..b081a1ef6859 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -4208,7 +4208,7 @@ void e1000e_up(struct e1000_adapter *adapter)
+ e1000_configure_msix(adapter);
+ e1000_irq_enable(adapter);
+
+- netif_start_queue(adapter->netdev);
++ /* Tx queue started by watchdog timer when link is up */
+
+ e1000e_trigger_lsc(adapter);
+ }
+@@ -4606,6 +4606,7 @@ int e1000e_open(struct net_device *netdev)
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000e_setup_tx_resources(adapter->tx_ring);
+@@ -4666,7 +4667,6 @@ int e1000e_open(struct net_device *netdev)
+ e1000_irq_enable(adapter);
+
+ adapter->tx_hang_recheck = false;
+- netif_start_queue(netdev);
+
+ hw->mac.get_link_status = true;
+ pm_runtime_put(&pdev->dev);
+@@ -5288,6 +5288,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+ if (phy->ops.cfg_on_link_up)
+ phy->ops.cfg_on_link_up(hw);
+
++ netif_wake_queue(netdev);
+ netif_carrier_on(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+@@ -5301,6 +5302,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+@@ -5308,13 +5310,8 @@ static void e1000_watchdog_task(struct work_struct *work)
+ /* 8000ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
+- *
+- * If the link is lost the controller stops DMA, but
+- * if there is queued Tx work it cannot be done. So
+- * reset the controller to flush the Tx packet buffers.
+ */
+- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+- e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
++ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ adapter->flags |= FLAG_RESTART_NOW;
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+@@ -5337,6 +5334,14 @@ link_up:
+ adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
+
++ /* If the link is lost the controller stops DMA, but
++ * if there is queued Tx work it cannot be done. So
++ * reset the controller to flush the Tx packet buffers.
++ */
++ if (!netif_carrier_ok(netdev) &&
++ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
++ adapter->flags |= FLAG_RESTART_NOW;
++
+ /* If reset is necessary, do it outside of interrupt context. */
+ if (adapter->flags & FLAG_RESTART_NOW) {
+ schedule_work(&adapter->reset_task);
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index 6c90aa725f23..e71992a3c55f 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -41,7 +41,6 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
+ sclp.has_hvs = !!(sccb->fac119 & 0x80);
+ sclp.has_kss = !!(sccb->fac98 & 0x01);
+ sclp.has_sipl = !!(sccb->cbl & 0x02);
+- sclp.has_sipl_g2 = !!(sccb->cbl & 0x04);
+ if (sccb->fac85 & 0x02)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+ if (sccb->fac91 & 0x40)
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index 99d7d2566a3a..d4101cecdc8d 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+ return -ENOMEM;
+ }
+ irq_ptr_qs[i] = q;
++ INIT_LIST_HEAD(&q->entry);
+ }
+ return 0;
+ }
+@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ q->mask = 1 << (31 - i);
+ q->nr = i;
+ q->handler = handler;
++ INIT_LIST_HEAD(&q->entry);
+ }
+
+ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
+index 28d59ac2204c..d9763bbecbf9 100644
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+ mutex_lock(&tiq_list_lock);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+ mutex_unlock(&tiq_list_lock);
+- xchg(irq_ptr->dsci, 1 << 7);
+ }
+
+ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+ struct qdio_q *q;
+
+ q = irq_ptr->input_qs[0];
+- /* if establish triggered an error */
+- if (!q || !q->entry.prev || !q->entry.next)
++ if (!q)
+ return;
+
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
++ INIT_LIST_HEAD(&q->entry);
+ }
+
+ static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 5c6062206760..52ec0d9fa1f7 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -176,6 +176,7 @@ enum cpuhp_state {
+ CPUHP_AP_WATCHDOG_ONLINE,
+ CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RCUTREE_ONLINE,
++ CPUHP_AP_BASE_CACHEINFO_ONLINE,
+ CPUHP_AP_ONLINE_DYN,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_X86_HPET_ONLINE,
+diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
+index a7e66ab11d1d..c23f91ae5fe8 100644
+--- a/include/uapi/linux/nilfs2_ondisk.h
++++ b/include/uapi/linux/nilfs2_ondisk.h
+@@ -29,7 +29,7 @@
+
+ #include <linux/types.h>
+ #include <linux/magic.h>
+-
++#include <asm/byteorder.h>
+
+ #define NILFS_INODE_BMAP_SIZE 7
+
+@@ -533,19 +533,19 @@ enum {
+ static inline void \
+ nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
+ { \
+- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
+- (1UL << NILFS_CHECKPOINT_##flag)); \
++ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \
++ (1UL << NILFS_CHECKPOINT_##flag)); \
+ } \
+ static inline void \
+ nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
+ { \
+- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
++ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \
+ ~(1UL << NILFS_CHECKPOINT_##flag)); \
+ } \
+ static inline int \
+ nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
+ { \
+- return !!(le32_to_cpu(cp->cp_flags) & \
++ return !!(__le32_to_cpu(cp->cp_flags) & \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+ }
+
+@@ -595,20 +595,20 @@ enum {
+ static inline void \
+ nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
+ { \
+- su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
++ su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \
+ (1UL << NILFS_SEGMENT_USAGE_##flag));\
+ } \
+ static inline void \
+ nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
+ { \
+ su->su_flags = \
+- cpu_to_le32(le32_to_cpu(su->su_flags) & \
++ __cpu_to_le32(__le32_to_cpu(su->su_flags) & \
+ ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
+ } \
+ static inline int \
+ nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
+ { \
+- return !!(le32_to_cpu(su->su_flags) & \
++ return !!(__le32_to_cpu(su->su_flags) & \
+ (1UL << NILFS_SEGMENT_USAGE_##flag)); \
+ }
+
+@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
+ static inline void
+ nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
+ {
+- su->su_lastmod = cpu_to_le64(0);
+- su->su_nblocks = cpu_to_le32(0);
+- su->su_flags = cpu_to_le32(0);
++ su->su_lastmod = __cpu_to_le64(0);
++ su->su_nblocks = __cpu_to_le32(0);
++ su->su_flags = __cpu_to_le32(0);
+ }
+
+ static inline int
+ nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
+ {
+- return !le32_to_cpu(su->su_flags);
++ return !__le32_to_cpu(su->su_flags);
+ }
+
+ /**
+diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
+index 16cbf6beb276..ae60cae24e9a 100644
+--- a/kernel/irq/autoprobe.c
++++ b/kernel/irq/autoprobe.c
+@@ -90,7 +90,7 @@ unsigned long probe_irq_on(void)
+ /* It triggered already - consider it spurious. */
+ if (!(desc->istate & IRQS_WAITING)) {
+ desc->istate &= ~IRQS_AUTODETECT;
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ } else
+ if (i < 32)
+ mask |= 1 << i;
+@@ -127,7 +127,7 @@ unsigned int probe_irq_mask(unsigned long val)
+ mask |= 1 << i;
+
+ desc->istate &= ~IRQS_AUTODETECT;
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ }
+ raw_spin_unlock_irq(&desc->lock);
+ }
+@@ -169,7 +169,7 @@ int probe_irq_off(unsigned long val)
+ nr_of_irqs++;
+ }
+ desc->istate &= ~IRQS_AUTODETECT;
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ }
+ raw_spin_unlock_irq(&desc->lock);
+ }
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 29d6c7d070b4..3ff4a1260885 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -314,6 +314,12 @@ void irq_shutdown(struct irq_desc *desc)
+ }
+ irq_state_clr_started(desc);
+ }
++}
++
++
++void irq_shutdown_and_deactivate(struct irq_desc *desc)
++{
++ irq_shutdown(desc);
+ /*
+ * This must be called even if the interrupt was never started up,
+ * because the activation can happen before the interrupt is
+diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
+index 5b1072e394b2..6c7ca2e983a5 100644
+--- a/kernel/irq/cpuhotplug.c
++++ b/kernel/irq/cpuhotplug.c
+@@ -116,7 +116,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ */
+ if (irqd_affinity_is_managed(d)) {
+ irqd_set_managed_shutdown(d);
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ return false;
+ }
+ affinity = cpu_online_mask;
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index 70c3053bc1f6..3a948f41ab00 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -82,6 +82,7 @@ extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
+ extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
+
+ extern void irq_shutdown(struct irq_desc *desc);
++extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
+ extern void irq_enable(struct irq_desc *desc);
+ extern void irq_disable(struct irq_desc *desc);
+ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+@@ -96,6 +97,10 @@ static inline void irq_mark_irq(unsigned int irq) { }
+ extern void irq_mark_irq(unsigned int irq);
+ #endif
+
++extern int __irq_get_irqchip_state(struct irq_data *data,
++ enum irqchip_irq_state which,
++ bool *state);
++
+ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+
+ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 78f3ddeb7fe4..e8f7f179bf77 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/random.h>
+ #include <linux/interrupt.h>
++#include <linux/irqdomain.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/sched/rt.h>
+@@ -34,8 +35,9 @@ static int __init setup_forced_irqthreads(char *arg)
+ early_param("threadirqs", setup_forced_irqthreads);
+ #endif
+
+-static void __synchronize_hardirq(struct irq_desc *desc)
++static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
+ {
++ struct irq_data *irqd = irq_desc_get_irq_data(desc);
+ bool inprogress;
+
+ do {
+@@ -51,6 +53,20 @@ static void __synchronize_hardirq(struct irq_desc *desc)
+ /* Ok, that indicated we're done: double-check carefully. */
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ inprogress = irqd_irq_inprogress(&desc->irq_data);
++
++ /*
++ * If requested and supported, check at the chip whether it
++ * is in flight at the hardware level, i.e. already pending
++ * in a CPU and waiting for service and acknowledge.
++ */
++ if (!inprogress && sync_chip) {
++ /*
++ * Ignore the return code. inprogress is only updated
++ * when the chip supports it.
++ */
++ __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
++ &inprogress);
++ }
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ /* Oops, that failed? */
+@@ -73,13 +89,18 @@ static void __synchronize_hardirq(struct irq_desc *desc)
+ * Returns: false if a threaded handler is active.
+ *
+ * This function may be called - with care - from IRQ context.
++ *
++ * It does not check whether there is an interrupt in flight at the
++ * hardware level, but not serviced yet, as this might deadlock when
++ * called with interrupts disabled and the target CPU of the interrupt
++ * is the current CPU.
+ */
+ bool synchronize_hardirq(unsigned int irq)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+- __synchronize_hardirq(desc);
++ __synchronize_hardirq(desc, false);
+ return !atomic_read(&desc->threads_active);
+ }
+
+@@ -95,14 +116,19 @@ EXPORT_SYMBOL(synchronize_hardirq);
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+- * This function may be called - with care - from IRQ context.
++ * Can only be called from preemptible code as it might sleep when
++ * an interrupt thread is associated to @irq.
++ *
++ * It optionally makes sure (when the irq chip supports that method)
++ * that the interrupt is not pending in any CPU and waiting for
++ * service.
+ */
+ void synchronize_irq(unsigned int irq)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+- __synchronize_hardirq(desc);
++ __synchronize_hardirq(desc, true);
+ /*
+ * We made sure that no hardirq handler is
+ * running. Now verify that no threaded handlers are
+@@ -1699,6 +1725,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+ /* If this was the last handler, shut down the IRQ line: */
+ if (!desc->action) {
+ irq_settings_clr_disable_unlazy(desc);
++ /* Only shutdown. Deactivate after synchronize_hardirq() */
+ irq_shutdown(desc);
+ }
+
+@@ -1727,8 +1754,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+
+ unregister_handler_proc(irq, action);
+
+- /* Make sure it's not being used on another CPU: */
+- synchronize_hardirq(irq);
++ /*
++ * Make sure it's not being used on another CPU and if the chip
++ * supports it also make sure that there is no (not yet serviced)
++ * interrupt in flight at the hardware level.
++ */
++ __synchronize_hardirq(desc, true);
+
+ #ifdef CONFIG_DEBUG_SHIRQ
+ /*
+@@ -1768,6 +1799,14 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+ * require it to deallocate resources over the slow bus.
+ */
+ chip_bus_lock(desc);
++ /*
++ * There is no interrupt on the fly anymore. Deactivate it
++ * completely.
++ */
++ raw_spin_lock_irqsave(&desc->lock, flags);
++ irq_domain_deactivate_irq(&desc->irq_data);
++ raw_spin_unlock_irqrestore(&desc->lock, flags);
++
+ irq_release_resources(desc);
+ chip_bus_sync_unlock(desc);
+ irq_remove_timings(desc);
+@@ -1855,7 +1894,7 @@ static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
+ }
+
+ irq_settings_clr_disable_unlazy(desc);
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+
+ irq_release_resources(desc);
+
+@@ -2578,6 +2617,28 @@ out:
+ irq_put_desc_unlock(desc, flags);
+ }
+
++int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
++ bool *state)
++{
++ struct irq_chip *chip;
++ int err = -EINVAL;
++
++ do {
++ chip = irq_data_get_irq_chip(data);
++ if (chip->irq_get_irqchip_state)
++ break;
++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
++ data = data->parent_data;
++#else
++ data = NULL;
++#endif
++ } while (data);
++
++ if (data)
++ err = chip->irq_get_irqchip_state(data, which, state);
++ return err;
++}
++
+ /**
+ * irq_get_irqchip_state - returns the irqchip state of a interrupt.
+ * @irq: Interrupt line that is forwarded to a VM
+@@ -2596,7 +2657,6 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ {
+ struct irq_desc *desc;
+ struct irq_data *data;
+- struct irq_chip *chip;
+ unsigned long flags;
+ int err = -EINVAL;
+
+@@ -2606,19 +2666,7 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+
+ data = irq_desc_get_irq_data(desc);
+
+- do {
+- chip = irq_data_get_irq_chip(data);
+- if (chip->irq_get_irqchip_state)
+- break;
+-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+- data = data->parent_data;
+-#else
+- data = NULL;
+-#endif
+- } while (data);
+-
+- if (data)
+- err = chip->irq_get_irqchip_state(data, which, state);
++ err = __irq_get_irqchip_state(data, which, state);
+
+ irq_put_desc_busunlock(desc, flags);
+ return err;