Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.2 commit in: /
Date: Thu, 19 Sep 2019 10:05:52
Message-Id: 1568887530.94f39221cd730a186a590b0140ba24cc3c3334c5.mpagano@gentoo
1 commit: 94f39221cd730a186a590b0140ba24cc3c3334c5
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Sep 19 10:05:30 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Sep 19 10:05:30 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94f39221
7
8 Linux patch 5.2.16
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1015_linux-5.2.16.patch | 3120 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3124 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e8d3287..c046e8a 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -103,6 +103,10 @@ Patch: 1014_linux-5.2.15.patch
21 From: https://www.kernel.org
22 Desc: Linux 5.2.15
23
24 +Patch: 1015_linux-5.2.16.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 5.2.16
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1015_linux-5.2.16.patch b/1015_linux-5.2.16.patch
33 new file mode 100644
34 index 0000000..7eee1f4
35 --- /dev/null
36 +++ b/1015_linux-5.2.16.patch
37 @@ -0,0 +1,3120 @@
38 +diff --git a/Makefile b/Makefile
39 +index 3c977aa66650..3cec03e93b40 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 2
46 +-SUBLEVEL = 15
47 ++SUBLEVEL = 16
48 + EXTRAVERSION =
49 + NAME = Bobtail Squid
50 +
51 +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
52 +index 76f34346b642..8b03eb44e876 100644
53 +--- a/arch/powerpc/include/asm/uaccess.h
54 ++++ b/arch/powerpc/include/asm/uaccess.h
55 +@@ -312,6 +312,7 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
56 + {
57 + unsigned long ret;
58 +
59 ++ barrier_nospec();
60 + allow_user_access(to, from, n);
61 + ret = __copy_tofrom_user(to, from, n);
62 + prevent_user_access(to, from, n);
63 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
64 +index 9dde4d7d8704..149525b5df1b 100644
65 +--- a/arch/s390/kvm/interrupt.c
66 ++++ b/arch/s390/kvm/interrupt.c
67 +@@ -1978,6 +1978,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
68 + case KVM_S390_MCHK:
69 + irq->u.mchk.mcic = s390int->parm64;
70 + break;
71 ++ case KVM_S390_INT_PFAULT_INIT:
72 ++ irq->u.ext.ext_params = s390int->parm;
73 ++ irq->u.ext.ext_params2 = s390int->parm64;
74 ++ break;
75 ++ case KVM_S390_RESTART:
76 ++ case KVM_S390_INT_CLOCK_COMP:
77 ++ case KVM_S390_INT_CPU_TIMER:
78 ++ break;
79 ++ default:
80 ++ return -EINVAL;
81 + }
82 + return 0;
83 + }
84 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
85 +index 28ebd647784c..4934141689d2 100644
86 +--- a/arch/s390/kvm/kvm-s390.c
87 ++++ b/arch/s390/kvm/kvm-s390.c
88 +@@ -1013,6 +1013,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
89 + /* mark all the pages in active slots as dirty */
90 + for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
91 + ms = slots->memslots + slotnr;
92 ++ if (!ms->dirty_bitmap)
93 ++ return -EINVAL;
94 + /*
95 + * The second half of the bitmap is only used on x86,
96 + * and would be wasted otherwise, so we put it to good
97 +@@ -4325,7 +4327,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
98 + }
99 + case KVM_S390_INTERRUPT: {
100 + struct kvm_s390_interrupt s390int;
101 +- struct kvm_s390_irq s390irq;
102 ++ struct kvm_s390_irq s390irq = {};
103 +
104 + if (copy_from_user(&s390int, argp, sizeof(s390int)))
105 + return -EFAULT;
106 +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
107 +index 9825ca6a6020..5cdca4208647 100644
108 +--- a/arch/sparc/kernel/sys_sparc_64.c
109 ++++ b/arch/sparc/kernel/sys_sparc_64.c
110 +@@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
111 + {
112 + long err;
113 +
114 ++ if (!IS_ENABLED(CONFIG_SYSVIPC))
115 ++ return -ENOSYS;
116 ++
117 + /* No need for backward compatibility. We can start fresh... */
118 + if (call <= SEMTIMEDOP) {
119 + switch (call) {
120 + case SEMOP:
121 +- err = sys_semtimedop(first, ptr,
122 +- (unsigned int)second, NULL);
123 ++ err = ksys_semtimedop(first, ptr,
124 ++ (unsigned int)second, NULL);
125 + goto out;
126 + case SEMTIMEDOP:
127 +- err = sys_semtimedop(first, ptr, (unsigned int)second,
128 ++ err = ksys_semtimedop(first, ptr, (unsigned int)second,
129 + (const struct __kernel_timespec __user *)
130 +- (unsigned long) fifth);
131 ++ (unsigned long) fifth);
132 + goto out;
133 + case SEMGET:
134 +- err = sys_semget(first, (int)second, (int)third);
135 ++ err = ksys_semget(first, (int)second, (int)third);
136 + goto out;
137 + case SEMCTL: {
138 +- err = sys_semctl(first, second,
139 +- (int)third | IPC_64,
140 +- (unsigned long) ptr);
141 ++ err = ksys_old_semctl(first, second,
142 ++ (int)third | IPC_64,
143 ++ (unsigned long) ptr);
144 + goto out;
145 + }
146 + default:
147 +@@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
148 + if (call <= MSGCTL) {
149 + switch (call) {
150 + case MSGSND:
151 +- err = sys_msgsnd(first, ptr, (size_t)second,
152 ++ err = ksys_msgsnd(first, ptr, (size_t)second,
153 + (int)third);
154 + goto out;
155 + case MSGRCV:
156 +- err = sys_msgrcv(first, ptr, (size_t)second, fifth,
157 ++ err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
158 + (int)third);
159 + goto out;
160 + case MSGGET:
161 +- err = sys_msgget((key_t)first, (int)second);
162 ++ err = ksys_msgget((key_t)first, (int)second);
163 + goto out;
164 + case MSGCTL:
165 +- err = sys_msgctl(first, (int)second | IPC_64, ptr);
166 ++ err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
167 + goto out;
168 + default:
169 + err = -ENOSYS;
170 +@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
171 + goto out;
172 + }
173 + case SHMDT:
174 +- err = sys_shmdt(ptr);
175 ++ err = ksys_shmdt(ptr);
176 + goto out;
177 + case SHMGET:
178 +- err = sys_shmget(first, (size_t)second, (int)third);
179 ++ err = ksys_shmget(first, (size_t)second, (int)third);
180 + goto out;
181 + case SHMCTL:
182 +- err = sys_shmctl(first, (int)second | IPC_64, ptr);
183 ++ err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
184 + goto out;
185 + default:
186 + err = -ENOSYS;
187 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
188 +index 56e748a7679f..94df0868804b 100644
189 +--- a/arch/x86/Makefile
190 ++++ b/arch/x86/Makefile
191 +@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
192 +
193 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
194 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
195 ++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
196 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
197 + export REALMODE_CFLAGS
198 +
199 +diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
200 +index ad84239e595e..15255f388a85 100644
201 +--- a/arch/x86/boot/compressed/acpi.c
202 ++++ b/arch/x86/boot/compressed/acpi.c
203 +@@ -44,17 +44,109 @@ static acpi_physical_address get_acpi_rsdp(void)
204 + return addr;
205 + }
206 +
207 +-/* Search EFI system tables for RSDP. */
208 +-static acpi_physical_address efi_get_rsdp_addr(void)
209 ++/*
210 ++ * Search EFI system tables for RSDP. If both ACPI_20_TABLE_GUID and
211 ++ * ACPI_TABLE_GUID are found, take the former, which has more features.
212 ++ */
213 ++static acpi_physical_address
214 ++__efi_get_rsdp_addr(unsigned long config_tables, unsigned int nr_tables,
215 ++ bool efi_64)
216 + {
217 + acpi_physical_address rsdp_addr = 0;
218 +
219 + #ifdef CONFIG_EFI
220 +- unsigned long systab, systab_tables, config_tables;
221 ++ int i;
222 ++
223 ++ /* Get EFI tables from systab. */
224 ++ for (i = 0; i < nr_tables; i++) {
225 ++ acpi_physical_address table;
226 ++ efi_guid_t guid;
227 ++
228 ++ if (efi_64) {
229 ++ efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables + i;
230 ++
231 ++ guid = tbl->guid;
232 ++ table = tbl->table;
233 ++
234 ++ if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) {
235 ++ debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n");
236 ++ return 0;
237 ++ }
238 ++ } else {
239 ++ efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables + i;
240 ++
241 ++ guid = tbl->guid;
242 ++ table = tbl->table;
243 ++ }
244 ++
245 ++ if (!(efi_guidcmp(guid, ACPI_TABLE_GUID)))
246 ++ rsdp_addr = table;
247 ++ else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID)))
248 ++ return table;
249 ++ }
250 ++#endif
251 ++ return rsdp_addr;
252 ++}
253 ++
254 ++/* EFI/kexec support is 64-bit only. */
255 ++#ifdef CONFIG_X86_64
256 ++static struct efi_setup_data *get_kexec_setup_data_addr(void)
257 ++{
258 ++ struct setup_data *data;
259 ++ u64 pa_data;
260 ++
261 ++ pa_data = boot_params->hdr.setup_data;
262 ++ while (pa_data) {
263 ++ data = (struct setup_data *)pa_data;
264 ++ if (data->type == SETUP_EFI)
265 ++ return (struct efi_setup_data *)(pa_data + sizeof(struct setup_data));
266 ++
267 ++ pa_data = data->next;
268 ++ }
269 ++ return NULL;
270 ++}
271 ++
272 ++static acpi_physical_address kexec_get_rsdp_addr(void)
273 ++{
274 ++ efi_system_table_64_t *systab;
275 ++ struct efi_setup_data *esd;
276 ++ struct efi_info *ei;
277 ++ char *sig;
278 ++
279 ++ esd = (struct efi_setup_data *)get_kexec_setup_data_addr();
280 ++ if (!esd)
281 ++ return 0;
282 ++
283 ++ if (!esd->tables) {
284 ++ debug_putstr("Wrong kexec SETUP_EFI data.\n");
285 ++ return 0;
286 ++ }
287 ++
288 ++ ei = &boot_params->efi_info;
289 ++ sig = (char *)&ei->efi_loader_signature;
290 ++ if (strncmp(sig, EFI64_LOADER_SIGNATURE, 4)) {
291 ++ debug_putstr("Wrong kexec EFI loader signature.\n");
292 ++ return 0;
293 ++ }
294 ++
295 ++ /* Get systab from boot params. */
296 ++ systab = (efi_system_table_64_t *) (ei->efi_systab | ((__u64)ei->efi_systab_hi << 32));
297 ++ if (!systab)
298 ++ error("EFI system table not found in kexec boot_params.");
299 ++
300 ++ return __efi_get_rsdp_addr((unsigned long)esd->tables, systab->nr_tables, true);
301 ++}
302 ++#else
303 ++static acpi_physical_address kexec_get_rsdp_addr(void) { return 0; }
304 ++#endif /* CONFIG_X86_64 */
305 ++
306 ++static acpi_physical_address efi_get_rsdp_addr(void)
307 ++{
308 ++#ifdef CONFIG_EFI
309 ++ unsigned long systab, config_tables;
310 + unsigned int nr_tables;
311 + struct efi_info *ei;
312 + bool efi_64;
313 +- int size, i;
314 + char *sig;
315 +
316 + ei = &boot_params->efi_info;
317 +@@ -88,49 +180,20 @@ static acpi_physical_address efi_get_rsdp_addr(void)
318 +
319 + config_tables = stbl->tables;
320 + nr_tables = stbl->nr_tables;
321 +- size = sizeof(efi_config_table_64_t);
322 + } else {
323 + efi_system_table_32_t *stbl = (efi_system_table_32_t *)systab;
324 +
325 + config_tables = stbl->tables;
326 + nr_tables = stbl->nr_tables;
327 +- size = sizeof(efi_config_table_32_t);
328 + }
329 +
330 + if (!config_tables)
331 + error("EFI config tables not found.");
332 +
333 +- /* Get EFI tables from systab. */
334 +- for (i = 0; i < nr_tables; i++) {
335 +- acpi_physical_address table;
336 +- efi_guid_t guid;
337 +-
338 +- config_tables += size;
339 +-
340 +- if (efi_64) {
341 +- efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables;
342 +-
343 +- guid = tbl->guid;
344 +- table = tbl->table;
345 +-
346 +- if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) {
347 +- debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n");
348 +- return 0;
349 +- }
350 +- } else {
351 +- efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables;
352 +-
353 +- guid = tbl->guid;
354 +- table = tbl->table;
355 +- }
356 +-
357 +- if (!(efi_guidcmp(guid, ACPI_TABLE_GUID)))
358 +- rsdp_addr = table;
359 +- else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID)))
360 +- return table;
361 +- }
362 ++ return __efi_get_rsdp_addr(config_tables, nr_tables, efi_64);
363 ++#else
364 ++ return 0;
365 + #endif
366 +- return rsdp_addr;
367 + }
368 +
369 + static u8 compute_checksum(u8 *buffer, u32 length)
370 +@@ -220,6 +283,14 @@ acpi_physical_address get_rsdp_addr(void)
371 + if (!pa)
372 + pa = boot_params->acpi_rsdp_addr;
373 +
374 ++ /*
375 ++ * Try to get EFI data from setup_data. This can happen when we're a
376 ++ * kexec'ed kernel and kexec(1) has passed all the required EFI info to
377 ++ * us.
378 ++ */
379 ++ if (!pa)
380 ++ pa = kexec_get_rsdp_addr();
381 ++
382 + if (!pa)
383 + pa = efi_get_rsdp_addr();
384 +
385 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
386 +index 921c609c2af7..65d49452e6e0 100644
387 +--- a/arch/x86/include/asm/kvm_host.h
388 ++++ b/arch/x86/include/asm/kvm_host.h
389 +@@ -333,6 +333,7 @@ struct kvm_mmu_page {
390 + int root_count; /* Currently serving as active root */
391 + unsigned int unsync_children;
392 + struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
393 ++ unsigned long mmu_valid_gen;
394 + DECLARE_BITMAP(unsync_child_bitmap, 512);
395 +
396 + #ifdef CONFIG_X86_32
397 +@@ -851,6 +852,7 @@ struct kvm_arch {
398 + unsigned long n_requested_mmu_pages;
399 + unsigned long n_max_mmu_pages;
400 + unsigned int indirect_shadow_pages;
401 ++ unsigned long mmu_valid_gen;
402 + struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
403 + /*
404 + * Hash table of struct kvm_mmu_page.
405 +diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
406 +index 64b973f0e985..4c407833faca 100644
407 +--- a/arch/x86/kernel/ima_arch.c
408 ++++ b/arch/x86/kernel/ima_arch.c
409 +@@ -11,10 +11,11 @@ extern struct boot_params boot_params;
410 + static enum efi_secureboot_mode get_sb_mode(void)
411 + {
412 + efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
413 ++	efi_char16_t efi_SetupMode_name[] = L"SetupMode";
414 + efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
415 + efi_status_t status;
416 + unsigned long size;
417 +- u8 secboot;
418 ++ u8 secboot, setupmode;
419 +
420 + size = sizeof(secboot);
421 +
422 +@@ -36,7 +37,14 @@ static enum efi_secureboot_mode get_sb_mode(void)
423 + return efi_secureboot_mode_unknown;
424 + }
425 +
426 +- if (secboot == 0) {
427 ++ size = sizeof(setupmode);
428 ++ status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
429 ++ NULL, &size, &setupmode);
430 ++
431 ++ if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
432 ++ setupmode = 0;
433 ++
434 ++ if (secboot == 0 || setupmode == 1) {
435 + pr_info("ima: secureboot mode disabled\n");
436 + return efi_secureboot_mode_disabled;
437 + }
438 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
439 +index 01f04db1fa61..66055ca29b6b 100644
440 +--- a/arch/x86/kvm/mmu.c
441 ++++ b/arch/x86/kvm/mmu.c
442 +@@ -2066,6 +2066,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
443 + if (!direct)
444 + sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
445 + set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
446 ++
447 ++ /*
448 ++ * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
449 ++ * depends on valid pages being added to the head of the list. See
450 ++ * comments in kvm_zap_obsolete_pages().
451 ++ */
452 + list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
453 + kvm_mod_used_mmu_pages(vcpu->kvm, +1);
454 + return sp;
455 +@@ -2215,7 +2221,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
456 + #define for_each_valid_sp(_kvm, _sp, _gfn) \
457 + hlist_for_each_entry(_sp, \
458 + &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
459 +- if ((_sp)->role.invalid) { \
460 ++ if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
461 + } else
462 +
463 + #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
464 +@@ -2272,6 +2278,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
465 + static void mmu_audit_disable(void) { }
466 + #endif
467 +
468 ++static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
469 ++{
470 ++ return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
471 ++}
472 ++
473 + static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
474 + struct list_head *invalid_list)
475 + {
476 +@@ -2496,6 +2507,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
477 + if (level > PT_PAGE_TABLE_LEVEL && need_sync)
478 + flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
479 + }
480 ++ sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
481 + clear_page(sp->spt);
482 + trace_kvm_mmu_get_page(sp, true);
483 +
484 +@@ -4229,6 +4241,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
485 + return false;
486 +
487 + if (cached_root_available(vcpu, new_cr3, new_role)) {
488 ++ /*
489 ++ * It is possible that the cached previous root page is
490 ++ * obsolete because of a change in the MMU generation
491 ++ * number. However, changing the generation number is
492 ++ * accompanied by KVM_REQ_MMU_RELOAD, which will free
493 ++ * the root set here and allocate a new one.
494 ++ */
495 + kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
496 + if (!skip_tlb_flush) {
497 + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
498 +@@ -5645,11 +5664,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
499 + return alloc_mmu_pages(vcpu);
500 + }
501 +
502 ++
503 ++static void kvm_zap_obsolete_pages(struct kvm *kvm)
504 ++{
505 ++ struct kvm_mmu_page *sp, *node;
506 ++ LIST_HEAD(invalid_list);
507 ++ int ign;
508 ++
509 ++restart:
510 ++ list_for_each_entry_safe_reverse(sp, node,
511 ++ &kvm->arch.active_mmu_pages, link) {
512 ++ /*
513 ++ * No obsolete valid page exists before a newly created page
514 ++ * since active_mmu_pages is a FIFO list.
515 ++ */
516 ++ if (!is_obsolete_sp(kvm, sp))
517 ++ break;
518 ++
519 ++ /*
520 ++ * Do not repeatedly zap a root page to avoid unnecessary
521 ++ * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
522 ++ * progress:
523 ++ * vcpu 0 vcpu 1
524 ++ * call vcpu_enter_guest():
525 ++ * 1): handle KVM_REQ_MMU_RELOAD
526 ++ * and require mmu-lock to
527 ++ * load mmu
528 ++ * repeat:
529 ++ * 1): zap root page and
530 ++ * send KVM_REQ_MMU_RELOAD
531 ++ *
532 ++ * 2): if (cond_resched_lock(mmu-lock))
533 ++ *
534 ++ * 2): hold mmu-lock and load mmu
535 ++ *
536 ++ * 3): see KVM_REQ_MMU_RELOAD bit
537 ++ * on vcpu->requests is set
538 ++ * then return 1 to call
539 ++ * vcpu_enter_guest() again.
540 ++ * goto repeat;
541 ++ *
542 ++ * Since we are reversely walking the list and the invalid
543 ++ * list will be moved to the head, skip the invalid page
544 ++ * can help us to avoid the infinity list walking.
545 ++ */
546 ++ if (sp->role.invalid)
547 ++ continue;
548 ++
549 ++ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
550 ++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
551 ++ cond_resched_lock(&kvm->mmu_lock);
552 ++ goto restart;
553 ++ }
554 ++
555 ++ if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
556 ++ goto restart;
557 ++ }
558 ++
559 ++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
560 ++}
561 ++
562 ++/*
563 ++ * Fast invalidate all shadow pages and use lock-break technique
564 ++ * to zap obsolete pages.
565 ++ *
566 ++ * It's required when memslot is being deleted or VM is being
567 ++ * destroyed, in these cases, we should ensure that KVM MMU does
568 ++ * not use any resource of the being-deleted slot or all slots
569 ++ * after calling the function.
570 ++ */
571 ++static void kvm_mmu_zap_all_fast(struct kvm *kvm)
572 ++{
573 ++ spin_lock(&kvm->mmu_lock);
574 ++ kvm->arch.mmu_valid_gen++;
575 ++
576 ++ kvm_zap_obsolete_pages(kvm);
577 ++ spin_unlock(&kvm->mmu_lock);
578 ++}
579 ++
580 + static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
581 + struct kvm_memory_slot *slot,
582 + struct kvm_page_track_notifier_node *node)
583 + {
584 +- kvm_mmu_zap_all(kvm);
585 ++ kvm_mmu_zap_all_fast(kvm);
586 + }
587 +
588 + void kvm_mmu_init_vm(struct kvm *kvm)
589 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
590 +index 2c7daa3b968d..4ca86e70d3b4 100644
591 +--- a/arch/x86/kvm/svm.c
592 ++++ b/arch/x86/kvm/svm.c
593 +@@ -7116,13 +7116,41 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
594 +
595 + static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
596 + {
597 +- bool is_user, smap;
598 +-
599 +- is_user = svm_get_cpl(vcpu) == 3;
600 +- smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
601 ++ unsigned long cr4 = kvm_read_cr4(vcpu);
602 ++ bool smep = cr4 & X86_CR4_SMEP;
603 ++ bool smap = cr4 & X86_CR4_SMAP;
604 ++ bool is_user = svm_get_cpl(vcpu) == 3;
605 +
606 + /*
607 +- * Detect and workaround Errata 1096 Fam_17h_00_0Fh
608 ++ * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
609 ++ *
610 ++ * Errata:
611 ++ * When CPU raise #NPF on guest data access and vCPU CR4.SMAP=1, it is
612 ++ * possible that CPU microcode implementing DecodeAssist will fail
613 ++ * to read bytes of instruction which caused #NPF. In this case,
614 ++ * GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
615 ++ * return 0 instead of the correct guest instruction bytes.
616 ++ *
617 ++ * This happens because CPU microcode reading instruction bytes
618 ++ * uses a special opcode which attempts to read data using CPL=0
619 ++	 * privileges. The microcode reads CS:RIP and if it hits a SMAP
620 ++ * fault, it gives up and returns no instruction bytes.
621 ++ *
622 ++ * Detection:
623 ++ * We reach here in case CPU supports DecodeAssist, raised #NPF and
624 ++ * returned 0 in GuestIntrBytes field of the VMCB.
625 ++ * First, errata can only be triggered in case vCPU CR4.SMAP=1.
626 ++ * Second, if vCPU CR4.SMEP=1, errata could only be triggered
627 ++ * in case vCPU CPL==3 (Because otherwise guest would have triggered
628 ++ * a SMEP fault instead of #NPF).
629 ++ * Otherwise, vCPU CR4.SMEP=0, errata could be triggered by any vCPU CPL.
630 ++ * As most guests enable SMAP if they have also enabled SMEP, use above
631 ++ * logic in order to attempt minimize false-positive of detecting errata
632 ++ * while still preserving all cases semantic correctness.
633 ++ *
634 ++ * Workaround:
635 ++ * To determine what instruction the guest was executing, the hypervisor
636 ++ * will have to decode the instruction at the instruction pointer.
637 + *
638 + * In non SEV guest, hypervisor will be able to read the guest
639 + * memory to decode the instruction pointer when insn_len is zero
640 +@@ -7133,11 +7161,11 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
641 + * instruction pointer so we will not able to workaround it. Lets
642 + * print the error and request to kill the guest.
643 + */
644 +- if (is_user && smap) {
645 ++ if (smap && (!smep || is_user)) {
646 + if (!sev_guest(vcpu->kvm))
647 + return true;
648 +
649 +- pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
650 ++ pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
651 + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
652 + }
653 +
654 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
655 +index b96723294b2f..74ac35bbf1ef 100644
656 +--- a/arch/x86/kvm/vmx/nested.c
657 ++++ b/arch/x86/kvm/vmx/nested.c
658 +@@ -4411,6 +4411,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
659 + int len;
660 + gva_t gva = 0;
661 + struct vmcs12 *vmcs12;
662 ++ struct x86_exception e;
663 +
664 + if (!nested_vmx_check_permission(vcpu))
665 + return 1;
666 +@@ -4451,7 +4452,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
667 + vmx_instruction_info, true, len, &gva))
668 + return 1;
669 + /* _system ok, nested_vmx_check_permission has verified cpl=0 */
670 +- kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
671 ++ if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
672 ++ kvm_inject_page_fault(vcpu, &e);
673 + }
674 +
675 + return nested_vmx_succeed(vcpu);
676 +@@ -4706,13 +4708,11 @@ static int handle_invept(struct kvm_vcpu *vcpu)
677 +
678 + switch (type) {
679 + case VMX_EPT_EXTENT_GLOBAL:
680 ++ case VMX_EPT_EXTENT_CONTEXT:
681 + /*
682 +- * TODO: track mappings and invalidate
683 +- * single context requests appropriately
684 ++ * TODO: Sync the necessary shadow EPT roots here, rather than
685 ++ * at the next emulated VM-entry.
686 + */
687 +- case VMX_EPT_EXTENT_CONTEXT:
688 +- kvm_mmu_sync_roots(vcpu);
689 +- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
690 + break;
691 + default:
692 + BUG_ON(1);
693 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
694 +index 1f80fd560ede..4000bcff47b0 100644
695 +--- a/arch/x86/kvm/x86.c
696 ++++ b/arch/x86/kvm/x86.c
697 +@@ -5265,6 +5265,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
698 + /* kvm_write_guest_virt_system can pull in tons of pages. */
699 + vcpu->arch.l1tf_flush_l1d = true;
700 +
701 ++ /*
702 ++ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
703 ++ * is returned, but our callers are not ready for that and they blindly
704 ++ * call kvm_inject_page_fault. Ensure that they at least do not leak
705 ++ * uninitialized kernel stack memory into cr2 and error code.
706 ++ */
707 ++ memset(exception, 0, sizeof(*exception));
708 + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
709 + PFERR_WRITE_MASK, exception);
710 + }
711 +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
712 +index 8901a1f89cf5..10fb42da0007 100644
713 +--- a/arch/x86/purgatory/Makefile
714 ++++ b/arch/x86/purgatory/Makefile
715 +@@ -18,37 +18,40 @@ targets += purgatory.ro
716 + KASAN_SANITIZE := n
717 + KCOV_INSTRUMENT := n
718 +
719 ++# These are adjustments to the compiler flags used for objects that
720 ++# make up the standalone purgatory.ro
721 ++
722 ++PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
723 ++PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
724 ++
725 + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
726 + # in turn leaves some undefined symbols like __fentry__ in purgatory and not
727 + # sure how to relocate those.
728 + ifdef CONFIG_FUNCTION_TRACER
729 +-CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
730 +-CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
731 +-CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
732 +-CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
733 ++PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
734 + endif
735 +
736 + ifdef CONFIG_STACKPROTECTOR
737 +-CFLAGS_REMOVE_sha256.o += -fstack-protector
738 +-CFLAGS_REMOVE_purgatory.o += -fstack-protector
739 +-CFLAGS_REMOVE_string.o += -fstack-protector
740 +-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
741 ++PURGATORY_CFLAGS_REMOVE += -fstack-protector
742 + endif
743 +
744 + ifdef CONFIG_STACKPROTECTOR_STRONG
745 +-CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
746 +-CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
747 +-CFLAGS_REMOVE_string.o += -fstack-protector-strong
748 +-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
749 ++PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
750 + endif
751 +
752 + ifdef CONFIG_RETPOLINE
753 +-CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
754 +-CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
755 +-CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
756 +-CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
757 ++PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
758 + endif
759 +
760 ++CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
761 ++CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
762 ++
763 ++CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
764 ++CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
765 ++
766 ++CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
767 ++CFLAGS_string.o += $(PURGATORY_CFLAGS)
768 ++
769 + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
770 + $(call if_changed,ld)
771 +
772 +diff --git a/drivers/base/core.c b/drivers/base/core.c
773 +index eaf3aa0cb803..2dc0123cbba1 100644
774 +--- a/drivers/base/core.c
775 ++++ b/drivers/base/core.c
776 +@@ -1820,12 +1820,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
777 + */
778 + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
779 + {
780 ++ unsigned int ref;
781 ++
782 + /* see if we live in a "glue" directory */
783 + if (!live_in_glue_dir(glue_dir, dev))
784 + return;
785 +
786 + mutex_lock(&gdp_mutex);
787 +- if (!kobject_has_children(glue_dir))
788 ++ /**
789 ++ * There is a race condition between removing glue directory
790 ++ * and adding a new device under the glue directory.
791 ++ *
792 ++ * CPU1: CPU2:
793 ++ *
794 ++ * device_add()
795 ++ * get_device_parent()
796 ++ * class_dir_create_and_add()
797 ++ * kobject_add_internal()
798 ++ * create_dir() // create glue_dir
799 ++ *
800 ++ * device_add()
801 ++ * get_device_parent()
802 ++ * kobject_get() // get glue_dir
803 ++ *
804 ++ * device_del()
805 ++ * cleanup_glue_dir()
806 ++ * kobject_del(glue_dir)
807 ++ *
808 ++ * kobject_add()
809 ++ * kobject_add_internal()
810 ++ * create_dir() // in glue_dir
811 ++ * sysfs_create_dir_ns()
812 ++ * kernfs_create_dir_ns(sd)
813 ++ *
814 ++ * sysfs_remove_dir() // glue_dir->sd=NULL
815 ++ * sysfs_put() // free glue_dir->sd
816 ++ *
817 ++ * // sd is freed
818 ++ * kernfs_new_node(sd)
819 ++ * kernfs_get(glue_dir)
820 ++ * kernfs_add_one()
821 ++ * kernfs_put()
822 ++ *
823 ++ * Before CPU1 remove last child device under glue dir, if CPU2 add
824 ++ * a new device under glue dir, the glue_dir kobject reference count
825 ++ * will be increase to 2 in kobject_get(k). And CPU2 has been called
826 ++ * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
827 ++ * and sysfs_put(). This result in glue_dir->sd is freed.
828 ++ *
829 ++ * Then the CPU2 will see a stale "empty" but still potentially used
830 ++ * glue dir around in kernfs_new_node().
831 ++ *
832 ++ * In order to avoid this happening, we also should make sure that
833 ++ * kernfs_node for glue_dir is released in CPU1 only when refcount
834 ++ * for glue_dir kobj is 1.
835 ++ */
836 ++ ref = kref_read(&glue_dir->kref);
837 ++ if (!kobject_has_children(glue_dir) && !--ref)
838 + kobject_del(glue_dir);
839 + kobject_put(glue_dir);
840 + mutex_unlock(&gdp_mutex);
841 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
842 +index 6d61f5aafc78..7954a7924923 100644
843 +--- a/drivers/bluetooth/btusb.c
844 ++++ b/drivers/bluetooth/btusb.c
845 +@@ -1162,10 +1162,6 @@ static int btusb_open(struct hci_dev *hdev)
846 + }
847 +
848 + data->intf->needs_remote_wakeup = 1;
849 +- /* device specific wakeup source enabled and required for USB
850 +- * remote wakeup while host is suspended
851 +- */
852 +- device_wakeup_enable(&data->udev->dev);
853 +
854 + if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
855 + goto done;
856 +@@ -1229,7 +1225,6 @@ static int btusb_close(struct hci_dev *hdev)
857 + goto failed;
858 +
859 + data->intf->needs_remote_wakeup = 0;
860 +- device_wakeup_disable(&data->udev->dev);
861 + usb_autopm_put_interface(data->intf);
862 +
863 + failed:
864 +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
865 +index 3a4961dc5831..77d1d3894f8d 100644
866 +--- a/drivers/clk/clk.c
867 ++++ b/drivers/clk/clk.c
868 +@@ -3020,15 +3020,49 @@ static int clk_flags_show(struct seq_file *s, void *data)
869 + }
870 + DEFINE_SHOW_ATTRIBUTE(clk_flags);
871 +
872 ++static void possible_parent_show(struct seq_file *s, struct clk_core *core,
873 ++ unsigned int i, char terminator)
874 ++{
875 ++ struct clk_core *parent;
876 ++
877 ++ /*
878 ++ * Go through the following options to fetch a parent's name.
879 ++ *
880 ++ * 1. Fetch the registered parent clock and use its name
881 ++ * 2. Use the global (fallback) name if specified
882 ++ * 3. Use the local fw_name if provided
883 ++ * 4. Fetch parent clock's clock-output-name if DT index was set
884 ++ *
885 ++ * This may still fail in some cases, such as when the parent is
886 ++ * specified directly via a struct clk_hw pointer, but it isn't
887 ++ * registered (yet).
888 ++ */
889 ++ parent = clk_core_get_parent_by_index(core, i);
890 ++ if (parent)
891 ++ seq_printf(s, "%s", parent->name);
892 ++ else if (core->parents[i].name)
893 ++ seq_printf(s, "%s", core->parents[i].name);
894 ++ else if (core->parents[i].fw_name)
895 ++ seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
896 ++ else if (core->parents[i].index >= 0)
897 ++ seq_printf(s, "%s",
898 ++ of_clk_get_parent_name(core->of_node,
899 ++ core->parents[i].index));
900 ++ else
901 ++ seq_puts(s, "(missing)");
902 ++
903 ++ seq_putc(s, terminator);
904 ++}
905 ++
906 + static int possible_parents_show(struct seq_file *s, void *data)
907 + {
908 + struct clk_core *core = s->private;
909 + int i;
910 +
911 + for (i = 0; i < core->num_parents - 1; i++)
912 +- seq_printf(s, "%s ", core->parents[i].name);
913 ++ possible_parent_show(s, core, i, ' ');
914 +
915 +- seq_printf(s, "%s\n", core->parents[i].name);
916 ++ possible_parent_show(s, core, i, '\n');
917 +
918 + return 0;
919 + }
920 +diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
921 +index c61f4d3e52e2..2a841d38f8a7 100644
922 +--- a/drivers/clk/rockchip/clk-mmc-phase.c
923 ++++ b/drivers/clk/rockchip/clk-mmc-phase.c
924 +@@ -52,10 +52,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
925 + u32 delay_num = 0;
926 +
927 + /* See the comment for rockchip_mmc_set_phase below */
928 +- if (!rate) {
929 +- pr_err("%s: invalid clk rate\n", __func__);
930 ++ if (!rate)
931 + return -EINVAL;
932 +- }
933 +
934 + raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
935 +
936 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
937 +index 710e09e28227..f9d7d6aaf3db 100644
938 +--- a/drivers/crypto/talitos.c
939 ++++ b/drivers/crypto/talitos.c
940 +@@ -994,11 +994,13 @@ static void talitos_sg_unmap(struct device *dev,
941 +
942 + static void ipsec_esp_unmap(struct device *dev,
943 + struct talitos_edesc *edesc,
944 +- struct aead_request *areq)
945 ++ struct aead_request *areq, bool encrypt)
946 + {
947 + struct crypto_aead *aead = crypto_aead_reqtfm(areq);
948 + struct talitos_ctx *ctx = crypto_aead_ctx(aead);
949 + unsigned int ivsize = crypto_aead_ivsize(aead);
950 ++ unsigned int authsize = crypto_aead_authsize(aead);
951 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
952 + bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
953 + struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
954 +
955 +@@ -1007,7 +1009,7 @@ static void ipsec_esp_unmap(struct device *dev,
956 + DMA_FROM_DEVICE);
957 + unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
958 +
959 +- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
960 ++ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
961 + areq->assoclen);
962 +
963 + if (edesc->dma_len)
964 +@@ -1018,7 +1020,7 @@ static void ipsec_esp_unmap(struct device *dev,
965 + unsigned int dst_nents = edesc->dst_nents ? : 1;
966 +
967 + sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
968 +- areq->assoclen + areq->cryptlen - ivsize);
969 ++ areq->assoclen + cryptlen - ivsize);
970 + }
971 + }
972 +
973 +@@ -1040,7 +1042,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
974 +
975 + edesc = container_of(desc, struct talitos_edesc, desc);
976 +
977 +- ipsec_esp_unmap(dev, edesc, areq);
978 ++ ipsec_esp_unmap(dev, edesc, areq, true);
979 +
980 + /* copy the generated ICV to dst */
981 + if (edesc->icv_ool) {
982 +@@ -1074,7 +1076,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
983 +
984 + edesc = container_of(desc, struct talitos_edesc, desc);
985 +
986 +- ipsec_esp_unmap(dev, edesc, req);
987 ++ ipsec_esp_unmap(dev, edesc, req, false);
988 +
989 + if (!err) {
990 + char icvdata[SHA512_DIGEST_SIZE];
991 +@@ -1120,7 +1122,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
992 +
993 + edesc = container_of(desc, struct talitos_edesc, desc);
994 +
995 +- ipsec_esp_unmap(dev, edesc, req);
996 ++ ipsec_esp_unmap(dev, edesc, req, false);
997 +
998 + /* check ICV auth status */
999 + if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1000 +@@ -1223,6 +1225,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1001 + * fill in and submit ipsec_esp descriptor
1002 + */
1003 + static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1004 ++ bool encrypt,
1005 + void (*callback)(struct device *dev,
1006 + struct talitos_desc *desc,
1007 + void *context, int error))
1008 +@@ -1232,7 +1235,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1009 + struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1010 + struct device *dev = ctx->dev;
1011 + struct talitos_desc *desc = &edesc->desc;
1012 +- unsigned int cryptlen = areq->cryptlen;
1013 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1014 + unsigned int ivsize = crypto_aead_ivsize(aead);
1015 + int tbl_off = 0;
1016 + int sg_count, ret;
1017 +@@ -1359,7 +1362,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1018 +
1019 + ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1020 + if (ret != -EINPROGRESS) {
1021 +- ipsec_esp_unmap(dev, edesc, areq);
1022 ++ ipsec_esp_unmap(dev, edesc, areq, encrypt);
1023 + kfree(edesc);
1024 + }
1025 + return ret;
1026 +@@ -1473,9 +1476,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1027 + unsigned int authsize = crypto_aead_authsize(authenc);
1028 + struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1029 + unsigned int ivsize = crypto_aead_ivsize(authenc);
1030 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1031 +
1032 + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1033 +- iv, areq->assoclen, areq->cryptlen,
1034 ++ iv, areq->assoclen, cryptlen,
1035 + authsize, ivsize, icv_stashing,
1036 + areq->base.flags, encrypt);
1037 + }
1038 +@@ -1494,7 +1498,7 @@ static int aead_encrypt(struct aead_request *req)
1039 + /* set encrypt */
1040 + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1041 +
1042 +- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1043 ++ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1044 + }
1045 +
1046 + static int aead_decrypt(struct aead_request *req)
1047 +@@ -1506,14 +1510,13 @@ static int aead_decrypt(struct aead_request *req)
1048 + struct talitos_edesc *edesc;
1049 + void *icvdata;
1050 +
1051 +- req->cryptlen -= authsize;
1052 +-
1053 + /* allocate extended descriptor */
1054 + edesc = aead_edesc_alloc(req, req->iv, 1, false);
1055 + if (IS_ERR(edesc))
1056 + return PTR_ERR(edesc);
1057 +
1058 +- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1059 ++ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1060 ++ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1061 + ((!edesc->src_nents && !edesc->dst_nents) ||
1062 + priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1063 +
1064 +@@ -1524,7 +1527,8 @@ static int aead_decrypt(struct aead_request *req)
1065 +
1066 + /* reset integrity check result bits */
1067 +
1068 +- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1069 ++ return ipsec_esp(edesc, req, false,
1070 ++ ipsec_esp_decrypt_hwauth_done);
1071 + }
1072 +
1073 + /* Have to check the ICV with software */
1074 +@@ -1540,7 +1544,7 @@ static int aead_decrypt(struct aead_request *req)
1075 + sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1076 + req->assoclen + req->cryptlen - authsize);
1077 +
1078 +- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1079 ++ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1080 + }
1081 +
1082 + static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1083 +@@ -1591,6 +1595,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1084 + return ablkcipher_setkey(cipher, key, keylen);
1085 + }
1086 +
1087 ++static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1088 ++ const u8 *key, unsigned int keylen)
1089 ++{
1090 ++ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1091 ++ keylen == AES_KEYSIZE_256)
1092 ++ return ablkcipher_setkey(cipher, key, keylen);
1093 ++
1094 ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1095 ++
1096 ++ return -EINVAL;
1097 ++}
1098 ++
1099 + static void common_nonsnoop_unmap(struct device *dev,
1100 + struct talitos_edesc *edesc,
1101 + struct ablkcipher_request *areq)
1102 +@@ -1713,6 +1729,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1103 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1104 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1105 + struct talitos_edesc *edesc;
1106 ++ unsigned int blocksize =
1107 ++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1108 ++
1109 ++ if (!areq->nbytes)
1110 ++ return 0;
1111 ++
1112 ++ if (areq->nbytes % blocksize)
1113 ++ return -EINVAL;
1114 +
1115 + /* allocate extended descriptor */
1116 + edesc = ablkcipher_edesc_alloc(areq, true);
1117 +@@ -1730,6 +1754,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1118 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1119 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1120 + struct talitos_edesc *edesc;
1121 ++ unsigned int blocksize =
1122 ++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1123 ++
1124 ++ if (!areq->nbytes)
1125 ++ return 0;
1126 ++
1127 ++ if (areq->nbytes % blocksize)
1128 ++ return -EINVAL;
1129 +
1130 + /* allocate extended descriptor */
1131 + edesc = ablkcipher_edesc_alloc(areq, false);
1132 +@@ -2752,6 +2784,7 @@ static struct talitos_alg_template driver_algs[] = {
1133 + .min_keysize = AES_MIN_KEY_SIZE,
1134 + .max_keysize = AES_MAX_KEY_SIZE,
1135 + .ivsize = AES_BLOCK_SIZE,
1136 ++ .setkey = ablkcipher_aes_setkey,
1137 + }
1138 + },
1139 + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1140 +@@ -2768,6 +2801,7 @@ static struct talitos_alg_template driver_algs[] = {
1141 + .min_keysize = AES_MIN_KEY_SIZE,
1142 + .max_keysize = AES_MAX_KEY_SIZE,
1143 + .ivsize = AES_BLOCK_SIZE,
1144 ++ .setkey = ablkcipher_aes_setkey,
1145 + }
1146 + },
1147 + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1148 +@@ -2778,13 +2812,13 @@ static struct talitos_alg_template driver_algs[] = {
1149 + .alg.crypto = {
1150 + .cra_name = "ctr(aes)",
1151 + .cra_driver_name = "ctr-aes-talitos",
1152 +- .cra_blocksize = AES_BLOCK_SIZE,
1153 ++ .cra_blocksize = 1,
1154 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1155 + CRYPTO_ALG_ASYNC,
1156 + .cra_ablkcipher = {
1157 + .min_keysize = AES_MIN_KEY_SIZE,
1158 + .max_keysize = AES_MAX_KEY_SIZE,
1159 +- .ivsize = AES_BLOCK_SIZE,
1160 ++ .setkey = ablkcipher_aes_setkey,
1161 + }
1162 + },
1163 + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
1164 +@@ -2818,7 +2852,6 @@ static struct talitos_alg_template driver_algs[] = {
1165 + .cra_ablkcipher = {
1166 + .min_keysize = DES_KEY_SIZE,
1167 + .max_keysize = DES_KEY_SIZE,
1168 +- .ivsize = DES_BLOCK_SIZE,
1169 + .setkey = ablkcipher_des_setkey,
1170 + }
1171 + },
1172 +@@ -2854,7 +2887,6 @@ static struct talitos_alg_template driver_algs[] = {
1173 + .cra_ablkcipher = {
1174 + .min_keysize = DES3_EDE_KEY_SIZE,
1175 + .max_keysize = DES3_EDE_KEY_SIZE,
1176 +- .ivsize = DES3_EDE_BLOCK_SIZE,
1177 + .setkey = ablkcipher_des3_setkey,
1178 + }
1179 + },
1180 +diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
1181 +index ef93406ace1b..36ce11a67235 100644
1182 +--- a/drivers/firmware/ti_sci.c
1183 ++++ b/drivers/firmware/ti_sci.c
1184 +@@ -466,9 +466,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
1185 + struct ti_sci_xfer *xfer;
1186 + int ret;
1187 +
1188 +- /* No need to setup flags since it is expected to respond */
1189 + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
1190 +- 0x0, sizeof(struct ti_sci_msg_hdr),
1191 ++ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1192 ++ sizeof(struct ti_sci_msg_hdr),
1193 + sizeof(*rev_info));
1194 + if (IS_ERR(xfer)) {
1195 + ret = PTR_ERR(xfer);
1196 +@@ -596,9 +596,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
1197 + info = handle_to_ti_sci_info(handle);
1198 + dev = info->dev;
1199 +
1200 +- /* Response is expected, so need of any flags */
1201 + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
1202 +- 0, sizeof(*req), sizeof(*resp));
1203 ++ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1204 ++ sizeof(*req), sizeof(*resp));
1205 + if (IS_ERR(xfer)) {
1206 + ret = PTR_ERR(xfer);
1207 + dev_err(dev, "Message alloc failed(%d)\n", ret);
1208 +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
1209 +index b6a4efce7c92..be8590d386b1 100644
1210 +--- a/drivers/gpio/gpio-mockup.c
1211 ++++ b/drivers/gpio/gpio-mockup.c
1212 +@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
1213 + .read = gpio_mockup_debugfs_read,
1214 + .write = gpio_mockup_debugfs_write,
1215 + .llseek = no_llseek,
1216 ++ .release = single_release,
1217 + };
1218 +
1219 + static void gpio_mockup_debugfs_setup(struct device *dev,
1220 +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
1221 +index c9fc9e232aaf..4d5c285c46f8 100644
1222 +--- a/drivers/gpio/gpiolib-acpi.c
1223 ++++ b/drivers/gpio/gpiolib-acpi.c
1224 +@@ -7,6 +7,7 @@
1225 + * Mika Westerberg <mika.westerberg@×××××××××××.com>
1226 + */
1227 +
1228 ++#include <linux/dmi.h>
1229 + #include <linux/errno.h>
1230 + #include <linux/gpio/consumer.h>
1231 + #include <linux/gpio/driver.h>
1232 +@@ -19,6 +20,11 @@
1233 +
1234 + #include "gpiolib.h"
1235 +
1236 ++static int run_edge_events_on_boot = -1;
1237 ++module_param(run_edge_events_on_boot, int, 0444);
1238 ++MODULE_PARM_DESC(run_edge_events_on_boot,
1239 ++ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
1240 ++
1241 + /**
1242 + * struct acpi_gpio_event - ACPI GPIO event handler data
1243 + *
1244 +@@ -170,10 +176,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
1245 + event->irq_requested = true;
1246 +
1247 + /* Make sure we trigger the initial state of edge-triggered IRQs */
1248 +- value = gpiod_get_raw_value_cansleep(event->desc);
1249 +- if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
1250 +- ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
1251 +- event->handler(event->irq, event);
1252 ++ if (run_edge_events_on_boot &&
1253 ++ (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
1254 ++ value = gpiod_get_raw_value_cansleep(event->desc);
1255 ++ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
1256 ++ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
1257 ++ event->handler(event->irq, event);
1258 ++ }
1259 + }
1260 +
1261 + static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
1262 +@@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
1263 + }
1264 + /* We must use _sync so that this runs after the first deferred_probe run */
1265 + late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
1266 ++
1267 ++static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
1268 ++ {
1269 ++ .matches = {
1270 ++ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
1271 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
1272 ++ }
1273 ++ },
1274 ++ {} /* Terminating entry */
1275 ++};
1276 ++
1277 ++static int acpi_gpio_setup_params(void)
1278 ++{
1279 ++ if (run_edge_events_on_boot < 0) {
1280 ++ if (dmi_check_system(run_edge_events_on_boot_blacklist))
1281 ++ run_edge_events_on_boot = 0;
1282 ++ else
1283 ++ run_edge_events_on_boot = 1;
1284 ++ }
1285 ++
1286 ++ return 0;
1287 ++}
1288 ++
1289 ++/* Directly after dmi_setup() which runs as core_initcall() */
1290 ++postcore_initcall(acpi_gpio_setup_params);
1291 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1292 +index f272b5143997..e806cd9a14ba 100644
1293 +--- a/drivers/gpio/gpiolib.c
1294 ++++ b/drivers/gpio/gpiolib.c
1295 +@@ -535,6 +535,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
1296 + if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
1297 + return -EINVAL;
1298 +
1299 ++ /*
1300 ++ * Do not allow both INPUT & OUTPUT flags to be set as they are
1301 ++ * contradictory.
1302 ++ */
1303 ++ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
1304 ++ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
1305 ++ return -EINVAL;
1306 ++
1307 + /*
1308 + * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
1309 + * the hardware actually supports enabling both at the same time the
1310 +@@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1311 + }
1312 +
1313 + /* This is just wrong: we don't look for events on output lines */
1314 +- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
1315 ++ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
1316 ++ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
1317 ++ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
1318 + ret = -EINVAL;
1319 + goto out_free_label;
1320 + }
1321 +@@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1322 +
1323 + if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
1324 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1325 +- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
1326 +- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1327 +- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
1328 +- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1329 +
1330 + ret = gpiod_direction_input(desc);
1331 + if (ret)
1332 +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
1333 +index d8a0bcd02f34..ffd95bfeaa94 100644
1334 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
1335 ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
1336 +@@ -90,6 +90,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
1337 + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
1338 + };
1339 +
1340 ++static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
1341 ++ .width = 720,
1342 ++ .height = 1280,
1343 ++ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
1344 ++};
1345 ++
1346 + static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
1347 + .width = 800,
1348 + .height = 1280,
1349 +@@ -123,6 +129,12 @@ static const struct dmi_system_id orientation_data[] = {
1350 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
1351 + },
1352 + .driver_data = (void *)&gpd_micropc,
1353 ++ }, { /* GPD MicroPC (later BIOS versions with proper DMI strings) */
1354 ++ .matches = {
1355 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
1356 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
1357 ++ },
1358 ++ .driver_data = (void *)&lcd720x1280_rightside_up,
1359 + }, { /*
1360 + * GPD Pocket, note that the the DMI data is less generic then
1361 + * it seems, devices with a board-vendor of "AMI Corporation"
1362 +diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
1363 +index d89120dcac67..8e6a7b8dffca 100644
1364 +--- a/drivers/gpu/drm/i915/intel_dp_mst.c
1365 ++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
1366 +@@ -125,7 +125,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
1367 + limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
1368 +
1369 + limits.min_bpp = intel_dp_min_bpp(pipe_config);
1370 +- limits.max_bpp = pipe_config->pipe_bpp;
1371 ++ /*
1372 ++ * FIXME: If all the streams can't fit into the link with
1373 ++ * their current pipe_bpp we should reduce pipe_bpp across
1374 ++ * the board until things start to fit. Until then we
1375 ++ * limit to <= 8bpc since that's what was hardcoded for all
1376 ++ * MST streams previously. This hack should be removed once
1377 ++ * we have the proper retry logic in place.
1378 ++ */
1379 ++ limits.max_bpp = min(pipe_config->pipe_bpp, 24);
1380 +
1381 + intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
1382 +
1383 +diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
1384 +index edd57a5e0495..b50a7c3f22bf 100644
1385 +--- a/drivers/gpu/drm/i915/intel_workarounds.c
1386 ++++ b/drivers/gpu/drm/i915/intel_workarounds.c
1387 +@@ -294,11 +294,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
1388 + FLOW_CONTROL_ENABLE |
1389 + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
1390 +
1391 +- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
1392 +- if (!IS_COFFEELAKE(i915))
1393 +- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
1394 +- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
1395 +-
1396 + /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
1397 + /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
1398 + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
1399 +diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
1400 +index 477c0f766663..b609dc030d6c 100644
1401 +--- a/drivers/gpu/drm/lima/lima_gem.c
1402 ++++ b/drivers/gpu/drm/lima/lima_gem.c
1403 +@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
1404 + timeout = drm_timeout_abs_to_jiffies(timeout_ns);
1405 +
1406 + ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
1407 +- if (ret == 0)
1408 ++ if (ret == -ETIME)
1409 + ret = timeout ? -ETIMEDOUT : -EBUSY;
1410 +
1411 + return ret;
1412 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
1413 +index c021d4c8324f..7f5408cb2377 100644
1414 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
1415 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
1416 +@@ -567,12 +567,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
1417 + comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
1418 + if (!comp) {
1419 + ret = -ENOMEM;
1420 ++ of_node_put(node);
1421 + goto err_node;
1422 + }
1423 +
1424 + ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
1425 +- if (ret)
1426 ++ if (ret) {
1427 ++ of_node_put(node);
1428 + goto err_node;
1429 ++ }
1430 +
1431 + private->ddp_comp[comp_id] = comp;
1432 + }
1433 +diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
1434 +index d90427b93a51..2cccbcf5b53c 100644
1435 +--- a/drivers/gpu/drm/meson/meson_plane.c
1436 ++++ b/drivers/gpu/drm/meson/meson_plane.c
1437 +@@ -153,6 +153,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
1438 + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
1439 + OSD_COLOR_MATRIX_32_ARGB;
1440 + break;
1441 ++ case DRM_FORMAT_XBGR8888:
1442 ++ /* For XRGB, replace the pixel's alpha by 0xFF */
1443 ++ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
1444 ++ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
1445 ++ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
1446 ++ OSD_COLOR_MATRIX_32_ABGR;
1447 ++ break;
1448 + case DRM_FORMAT_ARGB8888:
1449 + /* For ARGB, use the pixel's alpha */
1450 + writel_bits_relaxed(OSD_REPLACE_EN, 0,
1451 +@@ -160,6 +167,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
1452 + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
1453 + OSD_COLOR_MATRIX_32_ARGB;
1454 + break;
1455 ++ case DRM_FORMAT_ABGR8888:
1456 ++ /* For ARGB, use the pixel's alpha */
1457 ++ writel_bits_relaxed(OSD_REPLACE_EN, 0,
1458 ++ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
1459 ++ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
1460 ++ OSD_COLOR_MATRIX_32_ABGR;
1461 ++ break;
1462 + case DRM_FORMAT_RGB888:
1463 + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
1464 + OSD_COLOR_MATRIX_24_RGB;
1465 +@@ -346,7 +360,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
1466 +
1467 + static const uint32_t supported_drm_formats[] = {
1468 + DRM_FORMAT_ARGB8888,
1469 ++ DRM_FORMAT_ABGR8888,
1470 + DRM_FORMAT_XRGB8888,
1471 ++ DRM_FORMAT_XBGR8888,
1472 + DRM_FORMAT_RGB888,
1473 + DRM_FORMAT_RGB565,
1474 + };
1475 +diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
1476 +index 588907cc3b6b..6b90a40882f2 100644
1477 +--- a/drivers/iio/adc/stm32-dfsdm-adc.c
1478 ++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
1479 +@@ -39,9 +39,16 @@
1480 + #define DFSDM_MAX_INT_OVERSAMPLING 256
1481 + #define DFSDM_MAX_FL_OVERSAMPLING 1024
1482 +
1483 +-/* Max sample resolutions */
1484 +-#define DFSDM_MAX_RES BIT(31)
1485 +-#define DFSDM_DATA_RES BIT(23)
1486 ++/* Limit filter output resolution to 31 bits. (i.e. sample range is +/-2^30) */
1487 ++#define DFSDM_DATA_MAX BIT(30)
1488 ++/*
1489 ++ * Data are output as two's complement data in a 24 bit field.
1490 ++ * Data from filters are in the range +/-2^(n-1)
1491 ++ * 2^(n-1) maximum positive value cannot be coded in 2's complement n bits
1492 ++ * An extra bit is required to avoid wrap-around of the binary code for 2^(n-1)
1493 ++ * So, the resolution of samples from filter is actually limited to 23 bits
1494 ++ */
1495 ++#define DFSDM_DATA_RES 24
1496 +
1497 + /* Filter configuration */
1498 + #define DFSDM_CR1_CFG_MASK (DFSDM_CR1_RCH_MASK | DFSDM_CR1_RCONT_MASK | \
1499 +@@ -181,14 +188,15 @@ static int stm32_dfsdm_get_jextsel(struct iio_dev *indio_dev,
1500 + return -EINVAL;
1501 + }
1502 +
1503 +-static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
1504 +- unsigned int fast, unsigned int oversamp)
1505 ++static int stm32_dfsdm_compute_osrs(struct stm32_dfsdm_filter *fl,
1506 ++ unsigned int fast, unsigned int oversamp)
1507 + {
1508 + unsigned int i, d, fosr, iosr;
1509 +- u64 res;
1510 +- s64 delta;
1511 ++ u64 res, max;
1512 ++ int bits, shift;
1513 + unsigned int m = 1; /* multiplication factor */
1514 + unsigned int p = fl->ford; /* filter order (ford) */
1515 ++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
1516 +
1517 + pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
1518 + /*
1519 +@@ -207,11 +215,9 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
1520 +
1521 + /*
1522 + * Look for filter and integrator oversampling ratios which allows
1523 +- * to reach 24 bits data output resolution.
1524 +- * Leave as soon as if exact resolution if reached.
1525 +- * Otherwise the higher resolution below 32 bits is kept.
1526 ++ * to maximize data output resolution.
1527 + */
1528 +- fl->res = 0;
1529 ++ flo->res = 0;
1530 + for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
1531 + for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
1532 + if (fast)
1533 +@@ -236,32 +242,68 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
1534 + res = fosr;
1535 + for (i = p - 1; i > 0; i--) {
1536 + res = res * (u64)fosr;
1537 +- if (res > DFSDM_MAX_RES)
1538 ++ if (res > DFSDM_DATA_MAX)
1539 + break;
1540 + }
1541 +- if (res > DFSDM_MAX_RES)
1542 ++ if (res > DFSDM_DATA_MAX)
1543 + continue;
1544 ++
1545 + res = res * (u64)m * (u64)iosr;
1546 +- if (res > DFSDM_MAX_RES)
1547 ++ if (res > DFSDM_DATA_MAX)
1548 + continue;
1549 +
1550 +- delta = res - DFSDM_DATA_RES;
1551 +-
1552 +- if (res >= fl->res) {
1553 +- fl->res = res;
1554 +- fl->fosr = fosr;
1555 +- fl->iosr = iosr;
1556 +- fl->fast = fast;
1557 +- pr_debug("%s: fosr = %d, iosr = %d\n",
1558 +- __func__, fl->fosr, fl->iosr);
1559 ++ if (res >= flo->res) {
1560 ++ flo->res = res;
1561 ++ flo->fosr = fosr;
1562 ++ flo->iosr = iosr;
1563 ++
1564 ++ bits = fls(flo->res);
1565 ++ /* 8 LBSs in data register contain chan info */
1566 ++ max = flo->res << 8;
1567 ++
1568 ++ /* if resolution is not a power of two */
1569 ++ if (flo->res > BIT(bits - 1))
1570 ++ bits++;
1571 ++ else
1572 ++ max--;
1573 ++
1574 ++ shift = DFSDM_DATA_RES - bits;
1575 ++ /*
1576 ++ * Compute right/left shift
1577 ++ * Right shift is performed by hardware
1578 ++ * when transferring samples to data register.
1579 ++ * Left shift is done by software on buffer
1580 ++ */
1581 ++ if (shift > 0) {
1582 ++ /* Resolution is lower than 24 bits */
1583 ++ flo->rshift = 0;
1584 ++ flo->lshift = shift;
1585 ++ } else {
1586 ++ /*
1587 ++ * If resolution is 24 bits or more,
1588 ++ * max positive value may be ambiguous
1589 ++ * (equal to max negative value as sign
1590 ++ * bit is dropped).
1591 ++ * Reduce resolution to 23 bits (rshift)
1592 ++ * to keep the sign on bit 23 and treat
1593 ++ * saturation before rescaling on 24
1594 ++ * bits (lshift).
1595 ++ */
1596 ++ flo->rshift = 1 - shift;
1597 ++ flo->lshift = 1;
1598 ++ max >>= flo->rshift;
1599 ++ }
1600 ++ flo->max = (s32)max;
1601 ++
1602 ++ pr_debug("%s: fast %d, fosr %d, iosr %d, res 0x%llx/%d bits, rshift %d, lshift %d\n",
1603 ++ __func__, fast, flo->fosr, flo->iosr,
1604 ++ flo->res, bits, flo->rshift,
1605 ++ flo->lshift);
1606 + }
1607 +-
1608 +- if (!delta)
1609 +- return 0;
1610 + }
1611 + }
1612 +
1613 +- if (!fl->res)
1614 ++ if (!flo->res)
1615 + return -EINVAL;
1616 +
1617 + return 0;
1618 +@@ -384,6 +426,36 @@ static int stm32_dfsdm_filter_set_trig(struct stm32_dfsdm_adc *adc,
1619 + return 0;
1620 + }
1621 +
1622 ++static int stm32_dfsdm_channels_configure(struct stm32_dfsdm_adc *adc,
1623 ++ unsigned int fl_id,
1624 ++ struct iio_trigger *trig)
1625 ++{
1626 ++ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
1627 ++ struct regmap *regmap = adc->dfsdm->regmap;
1628 ++ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
1629 ++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
1630 ++ const struct iio_chan_spec *chan;
1631 ++ unsigned int bit;
1632 ++ int ret;
1633 ++
1634 ++ if (!flo->res)
1635 ++ return -EINVAL;
1636 ++
1637 ++ for_each_set_bit(bit, &adc->smask,
1638 ++ sizeof(adc->smask) * BITS_PER_BYTE) {
1639 ++ chan = indio_dev->channels + bit;
1640 ++
1641 ++ ret = regmap_update_bits(regmap,
1642 ++ DFSDM_CHCFGR2(chan->channel),
1643 ++ DFSDM_CHCFGR2_DTRBS_MASK,
1644 ++ DFSDM_CHCFGR2_DTRBS(flo->rshift));
1645 ++ if (ret)
1646 ++ return ret;
1647 ++ }
1648 ++
1649 ++ return 0;
1650 ++}
1651 ++
1652 + static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
1653 + unsigned int fl_id,
1654 + struct iio_trigger *trig)
1655 +@@ -391,6 +463,7 @@ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
1656 + struct iio_dev *indio_dev = iio_priv_to_dev(adc);
1657 + struct regmap *regmap = adc->dfsdm->regmap;
1658 + struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
1659 ++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
1660 + u32 cr1;
1661 + const struct iio_chan_spec *chan;
1662 + unsigned int bit, jchg = 0;
1663 +@@ -398,13 +471,13 @@ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
1664 +
1665 + /* Average integrator oversampling */
1666 + ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
1667 +- DFSDM_FCR_IOSR(fl->iosr - 1));
1668 ++ DFSDM_FCR_IOSR(flo->iosr - 1));
1669 + if (ret)
1670 + return ret;
1671 +
1672 + /* Filter order and Oversampling */
1673 + ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
1674 +- DFSDM_FCR_FOSR(fl->fosr - 1));
1675 ++ DFSDM_FCR_FOSR(flo->fosr - 1));
1676 + if (ret)
1677 + return ret;
1678 +
1679 +@@ -573,7 +646,7 @@ static int dfsdm_adc_set_samp_freq(struct iio_dev *indio_dev,
1680 + "Rate not accurate. requested (%u), actual (%u)\n",
1681 + sample_freq, spi_freq / oversamp);
1682 +
1683 +- ret = stm32_dfsdm_set_osrs(fl, 0, oversamp);
1684 ++ ret = stm32_dfsdm_compute_osrs(fl, 0, oversamp);
1685 + if (ret < 0) {
1686 + dev_err(&indio_dev->dev, "No filter parameters that match!\n");
1687 + return ret;
1688 +@@ -623,6 +696,10 @@ static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
1689 + struct regmap *regmap = adc->dfsdm->regmap;
1690 + int ret;
1691 +
1692 ++ ret = stm32_dfsdm_channels_configure(adc, adc->fl_id, trig);
1693 ++ if (ret < 0)
1694 ++ return ret;
1695 ++
1696 + ret = stm32_dfsdm_start_channel(adc);
1697 + if (ret < 0)
1698 + return ret;
1699 +@@ -729,6 +806,8 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
1700 + {
1701 + struct iio_dev *indio_dev = data;
1702 + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
1703 ++ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
1704 ++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
1705 + int available = stm32_dfsdm_adc_dma_residue(adc);
1706 + size_t old_pos;
1707 +
1708 +@@ -751,10 +830,19 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
1709 + old_pos = adc->bufi;
1710 +
1711 + while (available >= indio_dev->scan_bytes) {
1712 +- u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
1713 ++ s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
1714 +
1715 + /* Mask 8 LSB that contains the channel ID */
1716 +- *buffer = (*buffer & 0xFFFFFF00) << 8;
1717 ++ *buffer &= 0xFFFFFF00;
1718 ++ /* Convert 2^(n-1) sample to 2^(n-1)-1 to avoid wrap-around */
1719 ++ if (*buffer > flo->max)
1720 ++ *buffer -= 1;
1721 ++ /*
1722 ++ * Samples from filter are retrieved with 23 bits resolution
1723 ++ * or less. Shift left to align MSB on 24 bits.
1724 ++ */
1725 ++ *buffer <<= flo->lshift;
1726 ++
1727 + available -= indio_dev->scan_bytes;
1728 + adc->bufi += indio_dev->scan_bytes;
1729 + if (adc->bufi >= adc->buf_sz) {
1730 +@@ -1078,7 +1166,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
1731 + ret = iio_device_claim_direct_mode(indio_dev);
1732 + if (ret)
1733 + return ret;
1734 +- ret = stm32_dfsdm_set_osrs(fl, 0, val);
1735 ++ ret = stm32_dfsdm_compute_osrs(fl, 0, val);
1736 + if (!ret)
1737 + adc->oversamp = val;
1738 + iio_device_release_direct_mode(indio_dev);
1739 +@@ -1277,11 +1365,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
1740 + BIT(IIO_CHAN_INFO_SAMP_FREQ);
1741 +
1742 + if (adc->dev_data->type == DFSDM_AUDIO) {
1743 +- ch->scan_type.sign = 's';
1744 + ch->ext_info = dfsdm_adc_audio_ext_info;
1745 + } else {
1746 +- ch->scan_type.sign = 'u';
1747 ++ ch->scan_type.shift = 8;
1748 + }
1749 ++ ch->scan_type.sign = 's';
1750 + ch->scan_type.realbits = 24;
1751 + ch->scan_type.storagebits = 32;
1752 +
1753 +@@ -1327,8 +1415,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
1754 + int ret, chan_idx;
1755 +
1756 + adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
1757 +- ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
1758 +- adc->oversamp);
1759 ++ ret = stm32_dfsdm_compute_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
1760 ++ adc->oversamp);
1761 + if (ret < 0)
1762 + return ret;
1763 +
1764 +diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
1765 +index 8708394b0725..18b06ee6ed7b 100644
1766 +--- a/drivers/iio/adc/stm32-dfsdm.h
1767 ++++ b/drivers/iio/adc/stm32-dfsdm.h
1768 +@@ -243,19 +243,33 @@ enum stm32_dfsdm_sinc_order {
1769 + };
1770 +
1771 + /**
1772 +- * struct stm32_dfsdm_filter - structure relative to stm32 FDSDM filter
1773 ++ * struct stm32_dfsdm_filter_osr - DFSDM filter settings linked to oversampling
1774 + * @iosr: integrator oversampling
1775 + * @fosr: filter oversampling
1776 +- * @ford: filter order
1777 ++ * @rshift: output sample right shift (hardware shift)
1778 ++ * @lshift: output sample left shift (software shift)
1779 + * @res: output sample resolution
1780 ++ * @max: output sample maximum positive value
1781 ++ */
1782 ++struct stm32_dfsdm_filter_osr {
1783 ++ unsigned int iosr;
1784 ++ unsigned int fosr;
1785 ++ unsigned int rshift;
1786 ++ unsigned int lshift;
1787 ++ u64 res;
1788 ++ s32 max;
1789 ++};
1790 ++
1791 ++/**
1792 ++ * struct stm32_dfsdm_filter - structure relative to stm32 FDSDM filter
1793 ++ * @ford: filter order
1794 ++ * @flo: filter oversampling structure
1795 + * @sync_mode: filter synchronized with filter 0
1796 + * @fast: filter fast mode
1797 + */
1798 + struct stm32_dfsdm_filter {
1799 +- unsigned int iosr;
1800 +- unsigned int fosr;
1801 + enum stm32_dfsdm_sinc_order ford;
1802 +- u64 res;
1803 ++ struct stm32_dfsdm_filter_osr flo;
1804 + unsigned int sync_mode;
1805 + unsigned int fast;
1806 + };
1807 +diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
1808 +index 3c3ad42f22bf..c92b405b7646 100644
1809 +--- a/drivers/isdn/capi/capi.c
1810 ++++ b/drivers/isdn/capi/capi.c
1811 +@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
1812 + if (!cdev->ap.applid)
1813 + return -ENODEV;
1814 +
1815 ++ if (count < CAPIMSG_BASELEN)
1816 ++ return -EINVAL;
1817 ++
1818 + skb = alloc_skb(count, GFP_USER);
1819 + if (!skb)
1820 + return -ENOMEM;
1821 +@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
1822 + }
1823 + mlen = CAPIMSG_LEN(skb->data);
1824 + if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
1825 +- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
1826 ++ if (count < CAPI_DATA_B3_REQ_LEN ||
1827 ++ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
1828 + kfree_skb(skb);
1829 + return -EINVAL;
1830 + }
1831 +@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
1832 + CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
1833 +
1834 + if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
1835 ++ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
1836 ++ kfree_skb(skb);
1837 ++ return -EINVAL;
1838 ++ }
1839 + mutex_lock(&cdev->lock);
1840 + capincci_free(cdev, CAPIMSG_NCCI(skb->data));
1841 + mutex_unlock(&cdev->lock);
1842 +diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
1843 +index 7e0d3a49c06d..bb31e13648d6 100644
1844 +--- a/drivers/mmc/host/bcm2835.c
1845 ++++ b/drivers/mmc/host/bcm2835.c
1846 +@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
1847 + struct dma_chan *terminate_chan = NULL;
1848 + struct mmc_request *mrq;
1849 +
1850 +- cancel_delayed_work_sync(&host->timeout_work);
1851 ++ cancel_delayed_work(&host->timeout_work);
1852 +
1853 + mrq = host->mrq;
1854 +
1855 +diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
1856 +index 9dc4548271b4..19944b0049db 100644
1857 +--- a/drivers/mmc/host/sdhci-pci-o2micro.c
1858 ++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
1859 +@@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
1860 + mmc_hostname(host->mmc));
1861 + host->flags &= ~SDHCI_SIGNALING_330;
1862 + host->flags |= SDHCI_SIGNALING_180;
1863 +- host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1864 + host->mmc->caps2 |= MMC_CAP2_NO_SD;
1865 + host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
1866 + pci_write_config_dword(chip->pdev,
1867 +@@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
1868 + const struct sdhci_pci_fixes sdhci_o2 = {
1869 + .probe = sdhci_pci_o2_probe,
1870 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1871 ++ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
1872 + .probe_slot = sdhci_pci_o2_probe_slot,
1873 + #ifdef CONFIG_PM_SLEEP
1874 + .resume = sdhci_pci_o2_resume,
1875 +diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
1876 +index c5ba13fae399..2f0b092d6dcc 100644
1877 +--- a/drivers/mmc/host/tmio_mmc.h
1878 ++++ b/drivers/mmc/host/tmio_mmc.h
1879 +@@ -163,6 +163,7 @@ struct tmio_mmc_host {
1880 + unsigned long last_req_ts;
1881 + struct mutex ios_lock; /* protect set_ios() context */
1882 + bool native_hotplug;
1883 ++ bool runtime_synced;
1884 + bool sdio_irq_enabled;
1885 +
1886 + /* Mandatory callback */
1887 +diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
1888 +index 84cb7d2aacdf..29ec78486e69 100644
1889 +--- a/drivers/mmc/host/tmio_mmc_core.c
1890 ++++ b/drivers/mmc/host/tmio_mmc_core.c
1891 +@@ -1258,20 +1258,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
1892 + /* See if we also get DMA */
1893 + tmio_mmc_request_dma(_host, pdata);
1894 +
1895 +- pm_runtime_set_active(&pdev->dev);
1896 + pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1897 + pm_runtime_use_autosuspend(&pdev->dev);
1898 + pm_runtime_enable(&pdev->dev);
1899 ++ pm_runtime_get_sync(&pdev->dev);
1900 +
1901 + ret = mmc_add_host(mmc);
1902 + if (ret)
1903 + goto remove_host;
1904 +
1905 + dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
1906 ++ pm_runtime_put(&pdev->dev);
1907 +
1908 + return 0;
1909 +
1910 + remove_host:
1911 ++ pm_runtime_put_noidle(&pdev->dev);
1912 + tmio_mmc_host_remove(_host);
1913 + return ret;
1914 + }
1915 +@@ -1282,12 +1284,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1916 + struct platform_device *pdev = host->pdev;
1917 + struct mmc_host *mmc = host->mmc;
1918 +
1919 ++ pm_runtime_get_sync(&pdev->dev);
1920 ++
1921 + if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
1922 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
1923 +
1924 +- if (!host->native_hotplug)
1925 +- pm_runtime_get_sync(&pdev->dev);
1926 +-
1927 + dev_pm_qos_hide_latency_limit(&pdev->dev);
1928 +
1929 + mmc_remove_host(mmc);
1930 +@@ -1296,6 +1297,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1931 + tmio_mmc_release_dma(host);
1932 +
1933 + pm_runtime_dont_use_autosuspend(&pdev->dev);
1934 ++ if (host->native_hotplug)
1935 ++ pm_runtime_put_noidle(&pdev->dev);
1936 + pm_runtime_put_sync(&pdev->dev);
1937 + pm_runtime_disable(&pdev->dev);
1938 + }
1939 +@@ -1340,6 +1343,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
1940 + {
1941 + struct tmio_mmc_host *host = dev_get_drvdata(dev);
1942 +
1943 ++ if (!host->runtime_synced) {
1944 ++ host->runtime_synced = true;
1945 ++ return 0;
1946 ++ }
1947 ++
1948 + tmio_mmc_clk_enable(host);
1949 + tmio_mmc_hw_reset(host->mmc);
1950 +
1951 +diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
1952 +index 23fe19397315..d6a1354f4f62 100644
1953 +--- a/drivers/mtd/nand/raw/mtk_nand.c
1954 ++++ b/drivers/mtd/nand/raw/mtk_nand.c
1955 +@@ -853,19 +853,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
1956 + return mtk_nfc_write_page_raw(chip, NULL, 1, page);
1957 + }
1958 +
1959 +-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
1960 ++static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
1961 ++ u32 sectors)
1962 + {
1963 + struct nand_chip *chip = mtd_to_nand(mtd);
1964 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1965 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1966 + struct mtk_ecc_stats stats;
1967 ++ u32 reg_size = mtk_nand->fdm.reg_size;
1968 + int rc, i;
1969 +
1970 + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
1971 + if (rc) {
1972 + memset(buf, 0xff, sectors * chip->ecc.size);
1973 + for (i = 0; i < sectors; i++)
1974 +- memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
1975 ++ memset(oob_ptr(chip, start + i), 0xff, reg_size);
1976 + return 0;
1977 + }
1978 +
1979 +@@ -885,7 +887,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1980 + u32 spare = mtk_nand->spare_per_sector;
1981 + u32 column, sectors, start, end, reg;
1982 + dma_addr_t addr;
1983 +- int bitflips;
1984 ++ int bitflips = 0;
1985 + size_t len;
1986 + u8 *buf;
1987 + int rc;
1988 +@@ -952,14 +954,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1989 + if (rc < 0) {
1990 + dev_err(nfc->dev, "subpage done timeout\n");
1991 + bitflips = -EIO;
1992 +- } else {
1993 +- bitflips = 0;
1994 +- if (!raw) {
1995 +- rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
1996 +- bitflips = rc < 0 ? -ETIMEDOUT :
1997 +- mtk_nfc_update_ecc_stats(mtd, buf, sectors);
1998 +- mtk_nfc_read_fdm(chip, start, sectors);
1999 +- }
2000 ++ } else if (!raw) {
2001 ++ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
2002 ++ bitflips = rc < 0 ? -ETIMEDOUT :
2003 ++ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
2004 ++ mtk_nfc_read_fdm(chip, start, sectors);
2005 + }
2006 +
2007 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
2008 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2009 +index f7c049559c1a..f9f473ae4abe 100644
2010 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2011 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2012 +@@ -36,6 +36,7 @@
2013 + #include <net/vxlan.h>
2014 + #include <net/mpls.h>
2015 + #include <net/xdp_sock.h>
2016 ++#include <net/xfrm.h>
2017 +
2018 + #include "ixgbe.h"
2019 + #include "ixgbe_common.h"
2020 +@@ -2621,7 +2622,7 @@ adjust_by_size:
2021 + /* 16K ints/sec to 9.2K ints/sec */
2022 + avg_wire_size *= 15;
2023 + avg_wire_size += 11452;
2024 +- } else if (avg_wire_size <= 1980) {
2025 ++ } else if (avg_wire_size < 1968) {
2026 + /* 9.2K ints/sec to 8K ints/sec */
2027 + avg_wire_size *= 5;
2028 + avg_wire_size += 22420;
2029 +@@ -2654,6 +2655,8 @@ adjust_by_size:
2030 + case IXGBE_LINK_SPEED_2_5GB_FULL:
2031 + case IXGBE_LINK_SPEED_1GB_FULL:
2032 + case IXGBE_LINK_SPEED_10_FULL:
2033 ++ if (avg_wire_size > 8064)
2034 ++ avg_wire_size = 8064;
2035 + itr += DIV_ROUND_UP(avg_wire_size,
2036 + IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2037 + IXGBE_ITR_ADAPTIVE_MIN_INC;
2038 +@@ -8691,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
2039 + #endif /* IXGBE_FCOE */
2040 +
2041 + #ifdef CONFIG_IXGBE_IPSEC
2042 +- if (secpath_exists(skb) &&
2043 ++ if (xfrm_offload(skb) &&
2044 + !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
2045 + goto out_drop;
2046 + #endif
2047 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
2048 +index bfe95ce0bd7f..1f5fe115bd99 100644
2049 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
2050 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
2051 +@@ -679,19 +679,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
2052 + bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
2053 + struct ixgbe_ring *tx_ring, int napi_budget)
2054 + {
2055 ++ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
2056 + unsigned int total_packets = 0, total_bytes = 0;
2057 +- u32 i = tx_ring->next_to_clean, xsk_frames = 0;
2058 +- unsigned int budget = q_vector->tx.work_limit;
2059 + struct xdp_umem *umem = tx_ring->xsk_umem;
2060 + union ixgbe_adv_tx_desc *tx_desc;
2061 + struct ixgbe_tx_buffer *tx_bi;
2062 +- bool xmit_done;
2063 ++ u32 xsk_frames = 0;
2064 +
2065 +- tx_bi = &tx_ring->tx_buffer_info[i];
2066 +- tx_desc = IXGBE_TX_DESC(tx_ring, i);
2067 +- i -= tx_ring->count;
2068 ++ tx_bi = &tx_ring->tx_buffer_info[ntc];
2069 ++ tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
2070 +
2071 +- do {
2072 ++ while (ntc != ntu) {
2073 + if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2074 + break;
2075 +
2076 +@@ -708,22 +706,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
2077 +
2078 + tx_bi++;
2079 + tx_desc++;
2080 +- i++;
2081 +- if (unlikely(!i)) {
2082 +- i -= tx_ring->count;
2083 ++ ntc++;
2084 ++ if (unlikely(ntc == tx_ring->count)) {
2085 ++ ntc = 0;
2086 + tx_bi = tx_ring->tx_buffer_info;
2087 + tx_desc = IXGBE_TX_DESC(tx_ring, 0);
2088 + }
2089 +
2090 + /* issue prefetch for next Tx descriptor */
2091 + prefetch(tx_desc);
2092 ++ }
2093 +
2094 +- /* update budget accounting */
2095 +- budget--;
2096 +- } while (likely(budget));
2097 +-
2098 +- i += tx_ring->count;
2099 +- tx_ring->next_to_clean = i;
2100 ++ tx_ring->next_to_clean = ntc;
2101 +
2102 + u64_stats_update_begin(&tx_ring->syncp);
2103 + tx_ring->stats.bytes += total_bytes;
2104 +@@ -735,8 +729,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
2105 + if (xsk_frames)
2106 + xsk_umem_complete_tx(umem, xsk_frames);
2107 +
2108 +- xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
2109 +- return budget > 0 && xmit_done;
2110 ++ return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
2111 + }
2112 +
2113 + int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
2114 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2115 +index d189ed247665..ac6c18821958 100644
2116 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2117 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2118 +@@ -30,6 +30,7 @@
2119 + #include <linux/bpf.h>
2120 + #include <linux/bpf_trace.h>
2121 + #include <linux/atomic.h>
2122 ++#include <net/xfrm.h>
2123 +
2124 + #include "ixgbevf.h"
2125 +
2126 +@@ -4158,7 +4159,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
2127 + first->protocol = vlan_get_protocol(skb);
2128 +
2129 + #ifdef CONFIG_IXGBEVF_IPSEC
2130 +- if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
2131 ++ if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
2132 + goto out_drop;
2133 + #endif
2134 + tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
2135 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2136 +index c45ee6e3fe01..a094d7197015 100644
2137 +--- a/drivers/net/phy/phylink.c
2138 ++++ b/drivers/net/phy/phylink.c
2139 +@@ -356,8 +356,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
2140 + * Local device Link partner
2141 + * Pause AsymDir Pause AsymDir Result
2142 + * 1 X 1 X TX+RX
2143 +- * 0 1 1 1 RX
2144 +- * 1 1 0 1 TX
2145 ++ * 0 1 1 1 TX
2146 ++ * 1 1 0 1 RX
2147 + */
2148 + static void phylink_resolve_flow(struct phylink *pl,
2149 + struct phylink_link_state *state)
2150 +@@ -378,7 +378,7 @@ static void phylink_resolve_flow(struct phylink *pl,
2151 + new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
2152 + else if (pause & MLO_PAUSE_ASYM)
2153 + new_pause = state->pause & MLO_PAUSE_SYM ?
2154 +- MLO_PAUSE_RX : MLO_PAUSE_TX;
2155 ++ MLO_PAUSE_TX : MLO_PAUSE_RX;
2156 + } else {
2157 + new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
2158 + }
2159 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2160 +index 192ac47fd055..3f42cd433605 100644
2161 +--- a/drivers/net/tun.c
2162 ++++ b/drivers/net/tun.c
2163 +@@ -788,7 +788,8 @@ static void tun_detach_all(struct net_device *dev)
2164 + }
2165 +
2166 + static int tun_attach(struct tun_struct *tun, struct file *file,
2167 +- bool skip_filter, bool napi, bool napi_frags)
2168 ++ bool skip_filter, bool napi, bool napi_frags,
2169 ++ bool publish_tun)
2170 + {
2171 + struct tun_file *tfile = file->private_data;
2172 + struct net_device *dev = tun->dev;
2173 +@@ -871,7 +872,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2174 + * initialized tfile; otherwise we risk using half-initialized
2175 + * object.
2176 + */
2177 +- rcu_assign_pointer(tfile->tun, tun);
2178 ++ if (publish_tun)
2179 ++ rcu_assign_pointer(tfile->tun, tun);
2180 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2181 + tun->numqueues++;
2182 + tun_set_real_num_queues(tun);
2183 +@@ -2731,7 +2733,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2184 +
2185 + err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2186 + ifr->ifr_flags & IFF_NAPI,
2187 +- ifr->ifr_flags & IFF_NAPI_FRAGS);
2188 ++ ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2189 + if (err < 0)
2190 + return err;
2191 +
2192 +@@ -2830,13 +2832,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2193 +
2194 + INIT_LIST_HEAD(&tun->disabled);
2195 + err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2196 +- ifr->ifr_flags & IFF_NAPI_FRAGS);
2197 ++ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2198 + if (err < 0)
2199 + goto err_free_flow;
2200 +
2201 + err = register_netdevice(tun->dev);
2202 + if (err < 0)
2203 + goto err_detach;
2204 ++ /* free_netdev() won't check refcnt, to aovid race
2205 ++ * with dev_put() we need publish tun after registration.
2206 ++ */
2207 ++ rcu_assign_pointer(tfile->tun, tun);
2208 + }
2209 +
2210 + netif_carrier_on(tun->dev);
2211 +@@ -2979,7 +2985,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
2212 + if (ret < 0)
2213 + goto unlock;
2214 + ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2215 +- tun->flags & IFF_NAPI_FRAGS);
2216 ++ tun->flags & IFF_NAPI_FRAGS, true);
2217 + } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2218 + tun = rtnl_dereference(tfile->tun);
2219 + if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2220 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
2221 +index 8458e88c18e9..32f53de5b1fe 100644
2222 +--- a/drivers/net/usb/cdc_ether.c
2223 ++++ b/drivers/net/usb/cdc_ether.c
2224 +@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
2225 + goto bad_desc;
2226 + }
2227 + skip:
2228 +- if (rndis && header.usb_cdc_acm_descriptor &&
2229 ++ /* Communcation class functions with bmCapabilities are not
2230 ++ * RNDIS. But some Wireless class RNDIS functions use
2231 ++ * bmCapabilities for their own purpose. The failsafe is
2232 ++ * therefore applied only to Communication class RNDIS
2233 ++ * functions. The rndis test is redundant, but a cheap
2234 ++ * optimization.
2235 ++ */
2236 ++ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
2237 ++ header.usb_cdc_acm_descriptor &&
2238 + header.usb_cdc_acm_descriptor->bmCapabilities) {
2239 + dev_dbg(&intf->dev,
2240 + "ACM capabilities %02x, not really RNDIS?\n",
2241 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
2242 +index 80e6b211f60b..8d7a47d1b205 100644
2243 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
2244 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
2245 +@@ -77,11 +77,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
2246 + goto out;
2247 + }
2248 +
2249 +- mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
2250 +- if (mvif->omac_idx < 0) {
2251 ++ idx = get_omac_idx(vif->type, dev->omac_mask);
2252 ++ if (idx < 0) {
2253 + ret = -ENOSPC;
2254 + goto out;
2255 + }
2256 ++ mvif->omac_idx = idx;
2257 +
2258 + /* TODO: DBDC support. Use band 0 and wmm 0 for now */
2259 + mvif->band_idx = 0;
2260 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
2261 +index ea67c6022fe6..dc1301effa24 100644
2262 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
2263 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
2264 +@@ -1270,7 +1270,6 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
2265 + mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
2266 + 0, NULL);
2267 + memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
2268 +- dev_kfree_skb(skb);
2269 +
2270 + req.omac_idx = mvif->omac_idx;
2271 + req.enable = en;
2272 +@@ -1281,6 +1280,7 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
2273 + req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
2274 + req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off);
2275 +
2276 ++ dev_kfree_skb(skb);
2277 + skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
2278 +
2279 + return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BCN_OFFLOAD,
2280 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
2281 +index 40c0d536e20d..9d4426f6905f 100644
2282 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
2283 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
2284 +@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
2285 + dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
2286 + }
2287 +
2288 ++ if (is_mt7630(dev)) {
2289 ++ dev->mt76.cap.has_5ghz = false;
2290 ++ dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
2291 ++ }
2292 ++
2293 + if (!mt76x02_field_valid(nic_conf1 & 0xff))
2294 + nic_conf1 &= 0xff00;
2295 +
2296 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2297 +index 621cd4ce69e2..5673dd858811 100644
2298 +--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2299 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2300 +@@ -4156,24 +4156,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2301 + switch (rt2x00dev->default_ant.rx_chain_num) {
2302 + case 3:
2303 + /* Turn on tertiary LNAs */
2304 +- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
2305 +- rf->channel > 14);
2306 +- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
2307 +- rf->channel <= 14);
2308 ++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
2309 ++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
2310 + /* fall-through */
2311 + case 2:
2312 + /* Turn on secondary LNAs */
2313 +- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
2314 +- rf->channel > 14);
2315 +- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
2316 +- rf->channel <= 14);
2317 ++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
2318 ++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
2319 + /* fall-through */
2320 + case 1:
2321 + /* Turn on primary LNAs */
2322 +- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
2323 +- rf->channel > 14);
2324 +- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
2325 +- rf->channel <= 14);
2326 ++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
2327 ++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
2328 + break;
2329 + }
2330 +
2331 +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
2332 +index f5048d4b8cb6..760eaffeebd6 100644
2333 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
2334 ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
2335 +@@ -645,7 +645,6 @@ fail_rx:
2336 + kfree(rsi_dev->tx_buffer);
2337 +
2338 + fail_eps:
2339 +- kfree(rsi_dev);
2340 +
2341 + return status;
2342 + }
2343 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
2344 +index 74c3df250d9c..9c8d619d5979 100644
2345 +--- a/drivers/pci/pci-driver.c
2346 ++++ b/drivers/pci/pci-driver.c
2347 +@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
2348 + #ifdef CONFIG_PCI_IOV
2349 + static inline bool pci_device_can_probe(struct pci_dev *pdev)
2350 + {
2351 +- return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
2352 ++ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
2353 ++ pdev->driver_override);
2354 + }
2355 + #else
2356 + static inline bool pci_device_can_probe(struct pci_dev *pdev)
2357 +diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
2358 +index 7a8cbfb5d213..d35a73a24b3c 100644
2359 +--- a/drivers/platform/x86/pcengines-apuv2.c
2360 ++++ b/drivers/platform/x86/pcengines-apuv2.c
2361 +@@ -93,7 +93,7 @@ struct gpiod_lookup_table gpios_led_table = {
2362 +
2363 + static struct gpio_keys_button apu2_keys_buttons[] = {
2364 + {
2365 +- .code = KEY_SETUP,
2366 ++ .code = KEY_RESTART,
2367 + .active_low = 1,
2368 + .desc = "front button",
2369 + .type = EV_KEY,
2370 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
2371 +index be802fd2182d..551ed44dd361 100644
2372 +--- a/drivers/platform/x86/pmc_atom.c
2373 ++++ b/drivers/platform/x86/pmc_atom.c
2374 +@@ -412,6 +412,14 @@ static const struct dmi_system_id critclk_systems[] = {
2375 + DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
2376 + },
2377 + },
2378 ++ {
2379 ++ /* pmc_plt_clk* - are used for ethernet controllers */
2380 ++ .ident = "Beckhoff CB4063",
2381 ++ .matches = {
2382 ++ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
2383 ++ DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
2384 ++ },
2385 ++ },
2386 + {
2387 + /* pmc_plt_clk* - are used for ethernet controllers */
2388 + .ident = "Beckhoff CB6263",
2389 +diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
2390 +index 6fa15b2d6fb3..866b4dd01da9 100644
2391 +--- a/drivers/regulator/twl-regulator.c
2392 ++++ b/drivers/regulator/twl-regulator.c
2393 +@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
2394 + 2500, 2750,
2395 + };
2396 +
2397 ++/* 600mV to 1450mV in 12.5 mV steps */
2398 ++static const struct regulator_linear_range VDD1_ranges[] = {
2399 ++ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
2400 ++};
2401 ++
2402 ++/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
2403 ++static const struct regulator_linear_range VDD2_ranges[] = {
2404 ++ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
2405 ++ REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
2406 ++};
2407 ++
2408 + static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
2409 + {
2410 + struct twlreg_info *info = rdev_get_drvdata(rdev);
2411 +@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
2412 + }
2413 +
2414 + static const struct regulator_ops twl4030smps_ops = {
2415 ++ .list_voltage = regulator_list_voltage_linear_range,
2416 ++
2417 + .set_voltage = twl4030smps_set_voltage,
2418 + .get_voltage = twl4030smps_get_voltage,
2419 + };
2420 +@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
2421 + }, \
2422 + }
2423 +
2424 +-#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
2425 ++#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
2426 ++ n_volt) \
2427 + static const struct twlreg_info TWL4030_INFO_##label = { \
2428 + .base = offset, \
2429 + .id = num, \
2430 +@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
2431 + .owner = THIS_MODULE, \
2432 + .enable_time = turnon_delay, \
2433 + .of_map_mode = twl4030reg_map_mode, \
2434 ++ .n_voltages = n_volt, \
2435 ++ .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
2436 ++ .linear_ranges = label ## _ranges, \
2437 + }, \
2438 + }
2439 +
2440 +@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
2441 + TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
2442 + TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
2443 + TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
2444 +-TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
2445 +-TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
2446 ++TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
2447 ++TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
2448 + /* VUSBCP is managed *only* by the USB subchip */
2449 + TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
2450 + TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
2451 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
2452 +index 6c8297bcfeb7..1bfd7e34f31e 100644
2453 +--- a/fs/btrfs/tree-log.c
2454 ++++ b/fs/btrfs/tree-log.c
2455 +@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
2456 + BTRFS_I(inode),
2457 + LOG_OTHER_INODE_ALL,
2458 + 0, LLONG_MAX, ctx);
2459 +- iput(inode);
2460 ++ btrfs_add_delayed_iput(inode);
2461 + }
2462 + }
2463 + continue;
2464 +@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
2465 + ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
2466 + LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
2467 + if (ret) {
2468 +- iput(inode);
2469 ++ btrfs_add_delayed_iput(inode);
2470 + continue;
2471 + }
2472 +
2473 +@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
2474 + key.offset = 0;
2475 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2476 + if (ret < 0) {
2477 +- iput(inode);
2478 ++ btrfs_add_delayed_iput(inode);
2479 + continue;
2480 + }
2481 +
2482 +@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
2483 + }
2484 + path->slots[0]++;
2485 + }
2486 +- iput(inode);
2487 ++ btrfs_add_delayed_iput(inode);
2488 + }
2489 +
2490 + return ret;
2491 +@@ -5689,7 +5689,7 @@ process_leaf:
2492 + }
2493 +
2494 + if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
2495 +- iput(di_inode);
2496 ++ btrfs_add_delayed_iput(di_inode);
2497 + break;
2498 + }
2499 +
2500 +@@ -5701,7 +5701,7 @@ process_leaf:
2501 + if (!ret &&
2502 + btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
2503 + ret = 1;
2504 +- iput(di_inode);
2505 ++ btrfs_add_delayed_iput(di_inode);
2506 + if (ret)
2507 + goto next_dir_inode;
2508 + if (ctx->log_new_dentries) {
2509 +@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
2510 + if (!ret && ctx && ctx->log_new_dentries)
2511 + ret = log_new_dir_dentries(trans, root,
2512 + BTRFS_I(dir_inode), ctx);
2513 +- iput(dir_inode);
2514 ++ btrfs_add_delayed_iput(dir_inode);
2515 + if (ret)
2516 + goto out;
2517 + }
2518 +@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
2519 + ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
2520 + LOG_INODE_EXISTS,
2521 + 0, LLONG_MAX, ctx);
2522 +- iput(inode);
2523 ++ btrfs_add_delayed_iput(inode);
2524 + if (ret)
2525 + return ret;
2526 +
2527 +diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
2528 +index f5a823cb0e43..e8e7b0e9532e 100644
2529 +--- a/fs/ubifs/tnc.c
2530 ++++ b/fs/ubifs/tnc.c
2531 +@@ -1158,8 +1158,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
2532 + * o exact match, i.e. the found zero-level znode contains key @key, then %1
2533 + * is returned and slot number of the matched branch is stored in @n;
2534 + * o not exact match, which means that zero-level znode does not contain
2535 +- * @key, then %0 is returned and slot number of the closest branch is stored
2536 +- * in @n;
2537 ++ * @key, then %0 is returned and slot number of the closest branch or %-1
2538 ++ * is stored in @n; In this case calling tnc_next() is mandatory.
2539 + * o @key is so small that it is even less than the lowest key of the
2540 + * leftmost zero-level node, then %0 is returned and %0 is stored in @n.
2541 + *
2542 +@@ -1882,13 +1882,19 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
2543 +
2544 + static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
2545 + struct ubifs_dent_node *dent, uint32_t cookie,
2546 +- struct ubifs_znode **zn, int *n)
2547 ++ struct ubifs_znode **zn, int *n, int exact)
2548 + {
2549 + int err;
2550 + struct ubifs_znode *znode = *zn;
2551 + struct ubifs_zbranch *zbr;
2552 + union ubifs_key *dkey;
2553 +
2554 ++ if (!exact) {
2555 ++ err = tnc_next(c, &znode, n);
2556 ++ if (err)
2557 ++ return err;
2558 ++ }
2559 ++
2560 + for (;;) {
2561 + zbr = &znode->zbranch[*n];
2562 + dkey = &zbr->key;
2563 +@@ -1930,7 +1936,7 @@ static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
2564 + if (unlikely(err < 0))
2565 + goto out_unlock;
2566 +
2567 +- err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
2568 ++ err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
2569 +
2570 + out_unlock:
2571 + mutex_unlock(&c->tnc_mutex);
2572 +@@ -2723,7 +2729,7 @@ int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
2573 + if (unlikely(err < 0))
2574 + goto out_free;
2575 +
2576 +- err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
2577 ++ err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
2578 + if (err)
2579 + goto out_free;
2580 + }
2581 +diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
2582 +index 1e5d86ebdaeb..52bc8e487ef7 100644
2583 +--- a/include/linux/phy_fixed.h
2584 ++++ b/include/linux/phy_fixed.h
2585 +@@ -11,6 +11,7 @@ struct fixed_phy_status {
2586 + };
2587 +
2588 + struct device_node;
2589 ++struct gpio_desc;
2590 +
2591 + #if IS_ENABLED(CONFIG_FIXED_PHY)
2592 + extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
2593 +diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
2594 +index 2bcef4c70183..4230b8532adb 100644
2595 +--- a/include/linux/syscalls.h
2596 ++++ b/include/linux/syscalls.h
2597 +@@ -1397,4 +1397,23 @@ static inline unsigned int ksys_personality(unsigned int personality)
2598 + return old;
2599 + }
2600 +
2601 ++/* for __ARCH_WANT_SYS_IPC */
2602 ++long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2603 ++ unsigned int nsops,
2604 ++ const struct __kernel_timespec __user *timeout);
2605 ++long ksys_semget(key_t key, int nsems, int semflg);
2606 ++long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
2607 ++long ksys_msgget(key_t key, int msgflg);
2608 ++long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
2609 ++long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
2610 ++ long msgtyp, int msgflg);
2611 ++long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
2612 ++ int msgflg);
2613 ++long ksys_shmget(key_t key, size_t size, int shmflg);
2614 ++long ksys_shmdt(char __user *shmaddr);
2615 ++long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
2616 ++long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2617 ++ unsigned int nsops,
2618 ++ const struct old_timespec32 __user *timeout);
2619 ++
2620 + #endif
2621 +diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
2622 +index a87904daf103..ae31a7f87ec8 100644
2623 +--- a/include/uapi/asm-generic/unistd.h
2624 ++++ b/include/uapi/asm-generic/unistd.h
2625 +@@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget)
2626 + __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
2627 + #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
2628 + #define __NR_semtimedop 192
2629 +-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
2630 ++__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
2631 + #endif
2632 + #define __NR_semop 193
2633 + __SYSCALL(__NR_semop, sys_semop)
2634 +diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
2635 +index 4941628a4fb9..5ec88e7548a9 100644
2636 +--- a/include/uapi/linux/isdn/capicmd.h
2637 ++++ b/include/uapi/linux/isdn/capicmd.h
2638 +@@ -16,6 +16,7 @@
2639 + #define CAPI_MSG_BASELEN 8
2640 + #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
2641 + #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
2642 ++#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
2643 +
2644 + /*----- CAPI commands -----*/
2645 + #define CAPI_ALERT 0x01
2646 +diff --git a/ipc/util.h b/ipc/util.h
2647 +index 0fcf8e719b76..5766c61aed0e 100644
2648 +--- a/ipc/util.h
2649 ++++ b/ipc/util.h
2650 +@@ -276,29 +276,7 @@ static inline int compat_ipc_parse_version(int *cmd)
2651 + *cmd &= ~IPC_64;
2652 + return version;
2653 + }
2654 +-#endif
2655 +
2656 +-/* for __ARCH_WANT_SYS_IPC */
2657 +-long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2658 +- unsigned int nsops,
2659 +- const struct __kernel_timespec __user *timeout);
2660 +-long ksys_semget(key_t key, int nsems, int semflg);
2661 +-long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
2662 +-long ksys_msgget(key_t key, int msgflg);
2663 +-long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
2664 +-long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
2665 +- long msgtyp, int msgflg);
2666 +-long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
2667 +- int msgflg);
2668 +-long ksys_shmget(key_t key, size_t size, int shmflg);
2669 +-long ksys_shmdt(char __user *shmaddr);
2670 +-long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
2671 +-
2672 +-/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
2673 +-long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2674 +- unsigned int nsops,
2675 +- const struct old_timespec32 __user *timeout);
2676 +-#ifdef CONFIG_COMPAT
2677 + long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
2678 + long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
2679 + long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
2680 +@@ -306,6 +284,7 @@ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
2681 + long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
2682 + compat_ssize_t msgsz, int msgflg);
2683 + long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
2684 +-#endif /* CONFIG_COMPAT */
2685 ++
2686 ++#endif
2687 +
2688 + #endif
2689 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
2690 +index bf9dbffd46b1..d2cba714d3ee 100644
2691 +--- a/kernel/cgroup/cgroup.c
2692 ++++ b/kernel/cgroup/cgroup.c
2693 +@@ -5213,8 +5213,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
2694 + * if the parent has to be frozen, the child has too.
2695 + */
2696 + cgrp->freezer.e_freeze = parent->freezer.e_freeze;
2697 +- if (cgrp->freezer.e_freeze)
2698 ++ if (cgrp->freezer.e_freeze) {
2699 ++ /*
2700 ++ * Set the CGRP_FREEZE flag, so when a process will be
2701 ++ * attached to the child cgroup, it will become frozen.
2702 ++ * At this point the new cgroup is unpopulated, so we can
2703 ++ * consider it frozen immediately.
2704 ++ */
2705 ++ set_bit(CGRP_FREEZE, &cgrp->flags);
2706 + set_bit(CGRP_FROZEN, &cgrp->flags);
2707 ++ }
2708 +
2709 + spin_lock_irq(&css_set_lock);
2710 + for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
2711 +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
2712 +index 95414ad3506a..98c04ca5fa43 100644
2713 +--- a/kernel/irq/resend.c
2714 ++++ b/kernel/irq/resend.c
2715 +@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
2716 + irq = find_first_bit(irqs_resend, nr_irqs);
2717 + clear_bit(irq, irqs_resend);
2718 + desc = irq_to_desc(irq);
2719 ++ if (!desc)
2720 ++ continue;
2721 + local_irq_disable();
2722 + desc->handle_irq(desc);
2723 + local_irq_enable();
2724 +diff --git a/kernel/module.c b/kernel/module.c
2725 +index 8431c3d47c97..dcf2cc656e7c 100644
2726 +--- a/kernel/module.c
2727 ++++ b/kernel/module.c
2728 +@@ -64,14 +64,9 @@
2729 +
2730 + /*
2731 + * Modules' sections will be aligned on page boundaries
2732 +- * to ensure complete separation of code and data, but
2733 +- * only when CONFIG_STRICT_MODULE_RWX=y
2734 ++ * to ensure complete separation of code and data
2735 + */
2736 +-#ifdef CONFIG_STRICT_MODULE_RWX
2737 + # define debug_align(X) ALIGN(X, PAGE_SIZE)
2738 +-#else
2739 +-# define debug_align(X) (X)
2740 +-#endif
2741 +
2742 + /* If this is set, the section belongs in the init part of the module */
2743 + #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
2744 +@@ -1697,6 +1692,8 @@ static int add_usage_links(struct module *mod)
2745 + return ret;
2746 + }
2747 +
2748 ++static void module_remove_modinfo_attrs(struct module *mod, int end);
2749 ++
2750 + static int module_add_modinfo_attrs(struct module *mod)
2751 + {
2752 + struct module_attribute *attr;
2753 +@@ -1711,24 +1708,34 @@ static int module_add_modinfo_attrs(struct module *mod)
2754 + return -ENOMEM;
2755 +
2756 + temp_attr = mod->modinfo_attrs;
2757 +- for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
2758 ++ for (i = 0; (attr = modinfo_attrs[i]); i++) {
2759 + if (!attr->test || attr->test(mod)) {
2760 + memcpy(temp_attr, attr, sizeof(*temp_attr));
2761 + sysfs_attr_init(&temp_attr->attr);
2762 + error = sysfs_create_file(&mod->mkobj.kobj,
2763 + &temp_attr->attr);
2764 ++ if (error)
2765 ++ goto error_out;
2766 + ++temp_attr;
2767 + }
2768 + }
2769 ++
2770 ++ return 0;
2771 ++
2772 ++error_out:
2773 ++ if (i > 0)
2774 ++ module_remove_modinfo_attrs(mod, --i);
2775 + return error;
2776 + }
2777 +
2778 +-static void module_remove_modinfo_attrs(struct module *mod)
2779 ++static void module_remove_modinfo_attrs(struct module *mod, int end)
2780 + {
2781 + struct module_attribute *attr;
2782 + int i;
2783 +
2784 + for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
2785 ++ if (end >= 0 && i > end)
2786 ++ break;
2787 + /* pick a field to test for end of list */
2788 + if (!attr->attr.name)
2789 + break;
2790 +@@ -1816,7 +1823,7 @@ static int mod_sysfs_setup(struct module *mod,
2791 + return 0;
2792 +
2793 + out_unreg_modinfo_attrs:
2794 +- module_remove_modinfo_attrs(mod);
2795 ++ module_remove_modinfo_attrs(mod, -1);
2796 + out_unreg_param:
2797 + module_param_sysfs_remove(mod);
2798 + out_unreg_holders:
2799 +@@ -1852,7 +1859,7 @@ static void mod_sysfs_fini(struct module *mod)
2800 + {
2801 + }
2802 +
2803 +-static void module_remove_modinfo_attrs(struct module *mod)
2804 ++static void module_remove_modinfo_attrs(struct module *mod, int end)
2805 + {
2806 + }
2807 +
2808 +@@ -1868,14 +1875,14 @@ static void init_param_lock(struct module *mod)
2809 + static void mod_sysfs_teardown(struct module *mod)
2810 + {
2811 + del_usage_links(mod);
2812 +- module_remove_modinfo_attrs(mod);
2813 ++ module_remove_modinfo_attrs(mod, -1);
2814 + module_param_sysfs_remove(mod);
2815 + kobject_put(mod->mkobj.drivers_dir);
2816 + kobject_put(mod->holders_dir);
2817 + mod_sysfs_fini(mod);
2818 + }
2819 +
2820 +-#ifdef CONFIG_STRICT_MODULE_RWX
2821 ++#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
2822 + /*
2823 + * LKM RO/NX protection: protect module's text/ro-data
2824 + * from modification and any data from execution.
2825 +@@ -1898,6 +1905,7 @@ static void frob_text(const struct module_layout *layout,
2826 + layout->text_size >> PAGE_SHIFT);
2827 + }
2828 +
2829 ++#ifdef CONFIG_STRICT_MODULE_RWX
2830 + static void frob_rodata(const struct module_layout *layout,
2831 + int (*set_memory)(unsigned long start, int num_pages))
2832 + {
2833 +@@ -1949,13 +1957,9 @@ void module_enable_ro(const struct module *mod, bool after_init)
2834 + set_vm_flush_reset_perms(mod->core_layout.base);
2835 + set_vm_flush_reset_perms(mod->init_layout.base);
2836 + frob_text(&mod->core_layout, set_memory_ro);
2837 +- frob_text(&mod->core_layout, set_memory_x);
2838 +
2839 + frob_rodata(&mod->core_layout, set_memory_ro);
2840 +-
2841 + frob_text(&mod->init_layout, set_memory_ro);
2842 +- frob_text(&mod->init_layout, set_memory_x);
2843 +-
2844 + frob_rodata(&mod->init_layout, set_memory_ro);
2845 +
2846 + if (after_init)
2847 +@@ -2014,9 +2018,19 @@ void set_all_modules_text_ro(void)
2848 + }
2849 + mutex_unlock(&module_mutex);
2850 + }
2851 +-#else
2852 ++#else /* !CONFIG_STRICT_MODULE_RWX */
2853 + static void module_enable_nx(const struct module *mod) { }
2854 +-#endif
2855 ++#endif /* CONFIG_STRICT_MODULE_RWX */
2856 ++static void module_enable_x(const struct module *mod)
2857 ++{
2858 ++ frob_text(&mod->core_layout, set_memory_x);
2859 ++ frob_text(&mod->init_layout, set_memory_x);
2860 ++}
2861 ++#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2862 ++static void module_enable_nx(const struct module *mod) { }
2863 ++static void module_enable_x(const struct module *mod) { }
2864 ++#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2865 ++
2866 +
2867 + #ifdef CONFIG_LIVEPATCH
2868 + /*
2869 +@@ -3614,6 +3628,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
2870 +
2871 + module_enable_ro(mod, false);
2872 + module_enable_nx(mod);
2873 ++ module_enable_x(mod);
2874 +
2875 + /* Mark state as coming so strong_try_module_get() ignores us,
2876 + * but kallsyms etc. can see us. */
2877 +diff --git a/mm/z3fold.c b/mm/z3fold.c
2878 +index 46686d0e3df8..8374b18ebe9a 100644
2879 +--- a/mm/z3fold.c
2880 ++++ b/mm/z3fold.c
2881 +@@ -1408,6 +1408,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
2882 + * should freak out.
2883 + */
2884 + WARN(1, "Z3fold is experiencing kref problems\n");
2885 ++ z3fold_page_unlock(zhdr);
2886 + return false;
2887 + }
2888 + z3fold_page_unlock(zhdr);
2889 +@@ -1439,16 +1440,11 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
2890 + zhdr = page_address(page);
2891 + pool = zhdr_to_pool(zhdr);
2892 +
2893 +- if (!trylock_page(page))
2894 +- return -EAGAIN;
2895 +-
2896 + if (!z3fold_page_trylock(zhdr)) {
2897 +- unlock_page(page);
2898 + return -EAGAIN;
2899 + }
2900 + if (zhdr->mapped_count != 0) {
2901 + z3fold_page_unlock(zhdr);
2902 +- unlock_page(page);
2903 + return -EBUSY;
2904 + }
2905 + if (work_pending(&zhdr->work)) {
2906 +@@ -1494,7 +1490,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
2907 + spin_unlock(&pool->lock);
2908 +
2909 + page_mapcount_reset(page);
2910 +- unlock_page(page);
2911 + put_page(page);
2912 + return 0;
2913 + }
2914 +diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
2915 +index bf6acd34234d..63f9c08625f0 100644
2916 +--- a/net/bridge/br_mdb.c
2917 ++++ b/net/bridge/br_mdb.c
2918 +@@ -437,7 +437,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
2919 + struct nlmsghdr *nlh;
2920 + struct nlattr *nest;
2921 +
2922 +- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
2923 ++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
2924 + if (!nlh)
2925 + return -EMSGSIZE;
2926 +
2927 +diff --git a/net/core/dev.c b/net/core/dev.c
2928 +index 29fcff2c3d51..2ff556906b5d 100644
2929 +--- a/net/core/dev.c
2930 ++++ b/net/core/dev.c
2931 +@@ -8768,6 +8768,8 @@ int register_netdevice(struct net_device *dev)
2932 + ret = notifier_to_errno(ret);
2933 + if (ret) {
2934 + rollback_registered(dev);
2935 ++ rcu_barrier();
2936 ++
2937 + dev->reg_state = NETREG_UNREGISTERED;
2938 + }
2939 + /*
2940 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2941 +index c8cd99c3603f..74efd63f15e2 100644
2942 +--- a/net/core/skbuff.c
2943 ++++ b/net/core/skbuff.c
2944 +@@ -3531,6 +3531,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2945 + int pos;
2946 + int dummy;
2947 +
2948 ++ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
2949 ++ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
2950 ++ /* gso_size is untrusted, and we have a frag_list with a linear
2951 ++ * non head_frag head.
2952 ++ *
2953 ++ * (we assume checking the first list_skb member suffices;
2954 ++ * i.e if either of the list_skb members have non head_frag
2955 ++ * head, then the first one has too).
2956 ++ *
2957 ++ * If head_skb's headlen does not fit requested gso_size, it
2958 ++ * means that the frag_list members do NOT terminate on exact
2959 ++ * gso_size boundaries. Hence we cannot perform skb_frag_t page
2960 ++ * sharing. Therefore we must fallback to copying the frag_list
2961 ++ * skbs; we do so by disabling SG.
2962 ++ */
2963 ++ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
2964 ++ features &= ~NETIF_F_SG;
2965 ++ }
2966 ++
2967 + __skb_push(head_skb, doffset);
2968 + proto = skb_network_protocol(head_skb, &dummy);
2969 + if (unlikely(!proto))
2970 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
2971 +index 8a4a45e7c29d..3b14de0e36d2 100644
2972 +--- a/net/core/sock_map.c
2973 ++++ b/net/core/sock_map.c
2974 +@@ -661,6 +661,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
2975 + struct sock *sk, u64 flags)
2976 + {
2977 + struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2978 ++ struct inet_connection_sock *icsk = inet_csk(sk);
2979 + u32 key_size = map->key_size, hash;
2980 + struct bpf_htab_elem *elem, *elem_new;
2981 + struct bpf_htab_bucket *bucket;
2982 +@@ -671,6 +672,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
2983 + WARN_ON_ONCE(!rcu_read_lock_held());
2984 + if (unlikely(flags > BPF_EXIST))
2985 + return -EINVAL;
2986 ++ if (unlikely(icsk->icsk_ulp_data))
2987 ++ return -EINVAL;
2988 +
2989 + link = sk_psock_init_link();
2990 + if (!link)
2991 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2992 +index d95ee40df6c2..21ed010d7551 100644
2993 +--- a/net/ipv4/tcp_input.c
2994 ++++ b/net/ipv4/tcp_input.c
2995 +@@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
2996 +
2997 + static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
2998 + {
2999 +- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
3000 ++ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
3001 + }
3002 +
3003 + static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
3004 +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
3005 +index 87d2d8c1db7c..98ac32b49d8c 100644
3006 +--- a/net/ipv6/ping.c
3007 ++++ b/net/ipv6/ping.c
3008 +@@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
3009 + return 0;
3010 + }
3011 +
3012 +-static void __net_init ping_v6_proc_exit_net(struct net *net)
3013 ++static void __net_exit ping_v6_proc_exit_net(struct net *net)
3014 + {
3015 + remove_proc_entry("icmp6", net->proc_net);
3016 + }
3017 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3018 +index 5f5a0a42ce60..6a6e403c71ac 100644
3019 +--- a/net/ipv6/route.c
3020 ++++ b/net/ipv6/route.c
3021 +@@ -3841,13 +3841,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
3022 + struct fib6_config cfg = {
3023 + .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
3024 + .fc_ifindex = idev->dev->ifindex,
3025 +- .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
3026 ++ .fc_flags = RTF_UP | RTF_NONEXTHOP,
3027 + .fc_dst = *addr,
3028 + .fc_dst_len = 128,
3029 + .fc_protocol = RTPROT_KERNEL,
3030 + .fc_nlinfo.nl_net = net,
3031 + .fc_ignore_dev_down = true,
3032 + };
3033 ++ struct fib6_info *f6i;
3034 +
3035 + if (anycast) {
3036 + cfg.fc_type = RTN_ANYCAST;
3037 +@@ -3857,7 +3858,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
3038 + cfg.fc_flags |= RTF_LOCAL;
3039 + }
3040 +
3041 +- return ip6_route_info_create(&cfg, gfp_flags, NULL);
3042 ++ f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
3043 ++ if (!IS_ERR(f6i))
3044 ++ f6i->dst_nocount = true;
3045 ++ return f6i;
3046 + }
3047 +
3048 + /* remove deleted ip from prefsrc entries */
3049 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
3050 +index 137db1cbde85..ac28f6a5d70e 100644
3051 +--- a/net/sched/sch_generic.c
3052 ++++ b/net/sched/sch_generic.c
3053 +@@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
3054 + * - updates to tree and tree walking are only done under the rtnl mutex.
3055 + */
3056 +
3057 ++#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
3058 ++
3059 + static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
3060 + {
3061 + const struct netdev_queue *txq = q->dev_queue;
3062 +@@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
3063 + q->q.qlen--;
3064 + }
3065 + } else {
3066 +- skb = NULL;
3067 ++ skb = SKB_XOFF_MAGIC;
3068 + }
3069 + }
3070 +
3071 +@@ -253,8 +255,11 @@ validate:
3072 + return skb;
3073 +
3074 + skb = qdisc_dequeue_skb_bad_txq(q);
3075 +- if (unlikely(skb))
3076 ++ if (unlikely(skb)) {
3077 ++ if (skb == SKB_XOFF_MAGIC)
3078 ++ return NULL;
3079 + goto bulk;
3080 ++ }
3081 + skb = q->dequeue(q);
3082 + if (skb) {
3083 + bulk:
3084 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
3085 +index cee6971c1c82..23cd1c873a2c 100644
3086 +--- a/net/sched/sch_hhf.c
3087 ++++ b/net/sched/sch_hhf.c
3088 +@@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
3089 + new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
3090 +
3091 + non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
3092 +- if (non_hh_quantum > INT_MAX)
3093 ++ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
3094 + return -EINVAL;
3095 +
3096 + sch_tree_lock(sch);
3097 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3098 +index 23af232c0a25..e2b4a440416b 100644
3099 +--- a/net/sctp/protocol.c
3100 ++++ b/net/sctp/protocol.c
3101 +@@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
3102 + return status;
3103 + }
3104 +
3105 +-static void __net_init sctp_ctrlsock_exit(struct net *net)
3106 ++static void __net_exit sctp_ctrlsock_exit(struct net *net)
3107 + {
3108 + /* Free the control endpoint. */
3109 + inet_ctl_sock_destroy(net->sctp.ctl_sock);
3110 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
3111 +index 1cf5bb5b73c4..e52b2128e43b 100644
3112 +--- a/net/sctp/sm_sideeffect.c
3113 ++++ b/net/sctp/sm_sideeffect.c
3114 +@@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
3115 + if (net->sctp.pf_enable &&
3116 + (transport->state == SCTP_ACTIVE) &&
3117 + (transport->error_count < transport->pathmaxrxt) &&
3118 +- (transport->error_count > asoc->pf_retrans)) {
3119 ++ (transport->error_count > transport->pf_retrans)) {
3120 +
3121 + sctp_assoc_control_transport(asoc, transport,
3122 + SCTP_TRANSPORT_PF,
3123 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3124 +index f33aa9ee9e27..d0324796f0b3 100644
3125 +--- a/net/sctp/socket.c
3126 ++++ b/net/sctp/socket.c
3127 +@@ -7176,7 +7176,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
3128 + val.spt_pathmaxrxt = trans->pathmaxrxt;
3129 + val.spt_pathpfthld = trans->pf_retrans;
3130 +
3131 +- return 0;
3132 ++ goto out;
3133 + }
3134 +
3135 + asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3136 +@@ -7194,6 +7194,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
3137 + val.spt_pathmaxrxt = sp->pathmaxrxt;
3138 + }
3139 +
3140 ++out:
3141 + if (put_user(len, optlen) || copy_to_user(optval, &val, len))
3142 + return -EFAULT;
3143 +
3144 +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
3145 +index 61219f0b9677..836e629e8f4a 100644
3146 +--- a/net/tipc/name_distr.c
3147 ++++ b/net/tipc/name_distr.c
3148 +@@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
3149 + publ->key);
3150 + }
3151 +
3152 +- kfree_rcu(p, rcu);
3153 ++ if (p)
3154 ++ kfree_rcu(p, rcu);
3155 + }
3156 +
3157 + /**