From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 29 Dec 2018 18:55:27
Message-Id: 1546109702.723771096426fbaf0e4063f516fa18d26177087d.mpagano@gentoo
commit: 723771096426fbaf0e4063f516fa18d26177087d
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 29 18:55:02 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 29 18:55:02 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=72377109

proj/linux-patches: Linux patch 4.19.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1012_linux-4.19.13.patch | 2153 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2157 insertions(+)

diff --git a/0000_README b/0000_README
index 979d903..e349c5a 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-4.19.12.patch
 From: http://www.kernel.org
 Desc: Linux 4.19.12

+Patch: 1012_linux-4.19.13.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.13
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-4.19.13.patch b/1012_linux-4.19.13.patch
new file mode 100644
index 0000000..4e9190e
--- /dev/null
+++ b/1012_linux-4.19.13.patch
@@ -0,0 +1,2153 @@
+diff --git a/Makefile b/Makefile
+index 9770f29a690a..892ff14cbc9d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 92fd2c8a9af0..12659ce5c1f3 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -10,7 +10,7 @@
+ #ifndef _ASM_PGTABLE_2LEVEL_H
+ #define _ASM_PGTABLE_2LEVEL_H
+
+-#define __PAGETABLE_PMD_FOLDED
++#define __PAGETABLE_PMD_FOLDED 1
+
+ /*
+ * Hardware-wise, we have a two level page table structure, where the first
+diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
+index 6181e4134483..fe3ddd73a0cc 100644
+--- a/arch/m68k/include/asm/pgtable_mm.h
++++ b/arch/m68k/include/asm/pgtable_mm.h
+@@ -55,12 +55,12 @@
+ */
+ #ifdef CONFIG_SUN3
+ #define PTRS_PER_PTE 16
+-#define __PAGETABLE_PMD_FOLDED
++#define __PAGETABLE_PMD_FOLDED 1
+ #define PTRS_PER_PMD 1
+ #define PTRS_PER_PGD 2048
+ #elif defined(CONFIG_COLDFIRE)
+ #define PTRS_PER_PTE 512
+-#define __PAGETABLE_PMD_FOLDED
++#define __PAGETABLE_PMD_FOLDED 1
+ #define PTRS_PER_PMD 1
+ #define PTRS_PER_PGD 1024
+ #else
+diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
+index 7b650ab14fa0..2ca598534cc7 100644
+--- a/arch/microblaze/include/asm/pgtable.h
++++ b/arch/microblaze/include/asm/pgtable.h
+@@ -63,7 +63,7 @@ extern int mem_init_done;
+
+ #include <asm-generic/4level-fixup.h>
+
+-#define __PAGETABLE_PMD_FOLDED
++#define __PAGETABLE_PMD_FOLDED 1
+
+ #ifdef __KERNEL__
+ #ifndef __ASSEMBLY__
+diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
+index d3e19a55cf53..9f52db930c00 100644
+--- a/arch/nds32/include/asm/pgtable.h
++++ b/arch/nds32/include/asm/pgtable.h
+@@ -4,7 +4,7 @@
+ #ifndef _ASMNDS32_PGTABLE_H
+ #define _ASMNDS32_PGTABLE_H
+
+-#define __PAGETABLE_PMD_FOLDED
++#define __PAGETABLE_PMD_FOLDED 1
+ #include <asm-generic/4level-fixup.h>
+ #include <asm-generic/sizes.h>
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index fa6b7c78f18a..ff0860b2b21a 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -117,7 +117,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ #if CONFIG_PGTABLE_LEVELS == 3
+ #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
+ #else
+-#define __PAGETABLE_PMD_FOLDED
++#define __PAGETABLE_PMD_FOLDED 1
+ #define BITS_PER_PMD 0
+ #endif
+ #define PTRS_PER_PMD (1UL << BITS_PER_PMD)
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 141d415a8c80..c3d7ccd25381 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+ VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+- $(call ld-option, --build-id) -Bsymbolic
++ $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
++ -Bsymbolic
+ GCOV_PROFILE := n
+
+ #
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index b3486c8b570a..1f9de7635bcb 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -389,6 +389,7 @@
+ #define MSR_F15H_NB_PERF_CTR 0xc0010241
+ #define MSR_F15H_PTSC 0xc0010280
+ #define MSR_F15H_IC_CFG 0xc0011021
++#define MSR_F15H_EX_CFG 0xc001102c
+
+ /* Fam 10h MSRs */
+ #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
+diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+index 0f53049719cd..627e5c809b33 100644
+--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
++++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+@@ -23,6 +23,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/cpu.h>
+ #include <linux/kernfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+@@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ return -EINVAL;
+ buf[nbytes - 1] = '\0';
+
++ cpus_read_lock();
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
++ cpus_read_unlock();
+ return -ENOENT;
+ }
+ rdt_last_cmd_clear();
+@@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+
+ out:
+ rdtgroup_kn_unlock(of->kn);
++ cpus_read_unlock();
+ return ret ?: nbytes;
+ }
+
+diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
+index 40eee6cc4124..254683b503a9 100644
+--- a/arch/x86/kernel/cpu/mtrr/if.c
++++ b/arch/x86/kernel/cpu/mtrr/if.c
+@@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+ struct mtrr_gentry gentry;
+ void __user *arg = (void __user *) __arg;
+
++ memset(&gentry, 0, sizeof(gentry));
++
+ switch (cmd) {
+ case MTRRIOC_ADD_ENTRY:
+ case MTRRIOC_SET_ENTRY:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index c97a9d60d305..33ffb6d17e73 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -11471,6 +11471,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+ kunmap(vmx->nested.pi_desc_page);
+ kvm_release_page_dirty(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc_page = NULL;
++ vmx->nested.pi_desc = NULL;
++ vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
+ }
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
+ if (is_error_page(page))
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 68b53f05a420..956eecd227f8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2343,6 +2343,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_PATCH_LOADER:
+ case MSR_AMD64_BU_CFG2:
+ case MSR_AMD64_DC_CFG:
++ case MSR_F15H_EX_CFG:
+ break;
+
+ case MSR_IA32_UCODE_REV:
+@@ -2638,6 +2639,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_BU_CFG2:
+ case MSR_IA32_PERF_CTL:
+ case MSR_AMD64_DC_CFG:
++ case MSR_F15H_EX_CFG:
+ msr_info->data = 0;
+ break;
+ case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+@@ -7304,7 +7306,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
+
+ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
+ {
+- if (!kvm_apic_hw_enabled(vcpu->arch.apic))
++ if (!kvm_apic_present(vcpu))
+ return;
+
+ bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index 3d0c83ef6aab..a3c9ea29d7cc 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
+ * for a "decoy" virtual address (bit 63 clear) passed to
+ * set_memory_X(). __pa() on a "decoy" address results in a
+ * physical address with bit 63 set.
++ *
++ * Decoy addresses are not present for 32-bit builds, see
++ * set_mce_nospec().
+ */
+- return address & __PHYSICAL_MASK;
++ if (IS_ENABLED(CONFIG_X86_64))
++ return address & __PHYSICAL_MASK;
++ return address;
+ }
+
+ /*
+@@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+
+ start = sanitize_phys(start);
+ end = sanitize_phys(end);
+- BUG_ON(start >= end); /* end is exclusive */
++ if (start >= end) {
++ WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
++ start, end - 1, cattr_name(req_type));
++ return -EINVAL;
++ }
+
+ if (!pat_enabled()) {
+ /* This is identical to page table setting without PAT */
+diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
+index 05813fbf3daf..647dfbbc4e1c 100644
+--- a/drivers/gpio/gpio-max7301.c
++++ b/drivers/gpio/gpio-max7301.c
+@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
+ struct spi_device *spi = to_spi_device(dev);
+ u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
+
+- return spi_write(spi, (const u8 *)&word, sizeof(word));
++ return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
+ }
+
+ /* A read from the MAX7301 means two transfers; here, one message each */
+@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
+ struct spi_device *spi = to_spi_device(dev);
+
+ word = 0x8000 | (reg << 8);
+- ret = spi_write(spi, (const u8 *)&word, sizeof(word));
+- if (ret)
+- return ret;
+- /*
+- * This relies on the fact, that a transfer with NULL tx_buf shifts out
+- * zero bytes (=NOOP for MAX7301)
+- */
+- ret = spi_read(spi, (u8 *)&word, sizeof(word));
++ ret = spi_write_then_read(spi, &word, sizeof(word), &word,
++ sizeof(word));
+ if (ret)
+ return ret;
+ return word & 0xff;
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 8b9d7e42c600..c5e009f61021 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -23,11 +23,28 @@
+
+ #include "gpiolib.h"
+
++/**
++ * struct acpi_gpio_event - ACPI GPIO event handler data
++ *
++ * @node: list-entry of the events list of the struct acpi_gpio_chip
++ * @handle: handle of ACPI method to execute when the IRQ triggers
++ * @handler: irq_handler to pass to request_irq when requesting the IRQ
++ * @pin: GPIO pin number on the gpio_chip
++ * @irq: Linux IRQ number for the event, for request_ / free_irq
++ * @irqflags: flags to pass to request_irq when requesting the IRQ
++ * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
++ * @is_requested: True if request_irq has been done
++ * @desc: gpio_desc for the GPIO pin for this event
++ */
+ struct acpi_gpio_event {
+ struct list_head node;
+ acpi_handle handle;
++ irq_handler_t handler;
+ unsigned int pin;
+ unsigned int irq;
++ unsigned long irqflags;
++ bool irq_is_wake;
++ bool irq_requested;
+ struct gpio_desc *desc;
+ };
+
+@@ -53,10 +70,10 @@ struct acpi_gpio_chip {
+
+ /*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
++ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains gpiochips
+- * for which the acpi_gpiochip_request_interrupts() has been deferred.
++ * for which the acpi_gpiochip_request_irqs() call has been deferred.
+ */
+ static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+ static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+@@ -137,8 +154,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ }
+ EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
+
+-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+- void *context)
++static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
++ struct acpi_gpio_event *event)
++{
++ int ret, value;
++
++ ret = request_threaded_irq(event->irq, NULL, event->handler,
++ event->irqflags, "ACPI:Event", event);
++ if (ret) {
++ dev_err(acpi_gpio->chip->parent,
++ "Failed to setup interrupt handler for %d\n",
++ event->irq);
++ return;
++ }
++
++ if (event->irq_is_wake)
++ enable_irq_wake(event->irq);
++
++ event->irq_requested = true;
++
++ /* Make sure we trigger the initial state of edge-triggered IRQs */
++ value = gpiod_get_raw_value_cansleep(event->desc);
++ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
++ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
++ event->handler(event->irq, event);
++}
++
++static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
++{
++ struct acpi_gpio_event *event;
++
++ list_for_each_entry(event, &acpi_gpio->events, node)
++ acpi_gpiochip_request_irq(acpi_gpio, event);
++}
++
++static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
++ void *context)
+ {
+ struct acpi_gpio_chip *acpi_gpio = context;
+ struct gpio_chip *chip = acpi_gpio->chip;
+@@ -147,8 +198,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+ struct acpi_gpio_event *event;
+ irq_handler_t handler = NULL;
+ struct gpio_desc *desc;
+- unsigned long irqflags;
+- int ret, pin, irq, value;
++ int ret, pin, irq;
+
+ if (!acpi_gpio_get_irq_resource(ares, &agpio))
+ return AE_OK;
+@@ -179,8 +229,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+
+ gpiod_direction_input(desc);
+
+- value = gpiod_get_value_cansleep(desc);
+-
+ ret = gpiochip_lock_as_irq(chip, pin);
+ if (ret) {
+ dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
+@@ -193,64 +241,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+ goto fail_unlock_irq;
+ }
+
+- irqflags = IRQF_ONESHOT;
++ event = kzalloc(sizeof(*event), GFP_KERNEL);
++ if (!event)
++ goto fail_unlock_irq;
++
++ event->irqflags = IRQF_ONESHOT;
+ if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+ if (agpio->polarity == ACPI_ACTIVE_HIGH)
+- irqflags |= IRQF_TRIGGER_HIGH;
++ event->irqflags |= IRQF_TRIGGER_HIGH;
+ else
+- irqflags |= IRQF_TRIGGER_LOW;
++ event->irqflags |= IRQF_TRIGGER_LOW;
+ } else {
+ switch (agpio->polarity) {
+ case ACPI_ACTIVE_HIGH:
+- irqflags |= IRQF_TRIGGER_RISING;
++ event->irqflags |= IRQF_TRIGGER_RISING;
+ break;
+ case ACPI_ACTIVE_LOW:
+- irqflags |= IRQF_TRIGGER_FALLING;
++ event->irqflags |= IRQF_TRIGGER_FALLING;
+ break;
+ default:
+- irqflags |= IRQF_TRIGGER_RISING |
+- IRQF_TRIGGER_FALLING;
++ event->irqflags |= IRQF_TRIGGER_RISING |
++ IRQF_TRIGGER_FALLING;
+ break;
+ }
+ }
+
+- event = kzalloc(sizeof(*event), GFP_KERNEL);
+- if (!event)
+- goto fail_unlock_irq;
+-
+ event->handle = evt_handle;
++ event->handler = handler;
+ event->irq = irq;
++ event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ event->pin = pin;
+ event->desc = desc;
+
+- ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
+- "ACPI:Event", event);
+- if (ret) {
+- dev_err(chip->parent,
+- "Failed to setup interrupt handler for %d\n",
+- event->irq);
+- goto fail_free_event;
+- }
+-
+- if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+- enable_irq_wake(irq);
+-
+ list_add_tail(&event->node, &acpi_gpio->events);
+
+- /*
+- * Make sure we trigger the initial state of the IRQ when using RISING
+- * or FALLING. Note we run the handlers on late_init, the AML code
+- * may refer to OperationRegions from other (builtin) drivers which
+- * may be probed after us.
+- */
+- if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+- ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+- handler(event->irq, event);
+-
+ return AE_OK;
+
+-fail_free_event:
+- kfree(event);
+ fail_unlock_irq:
+ gpiochip_unlock_as_irq(chip, pin);
+ fail_free_desc:
+@@ -287,6 +313,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+ if (ACPI_FAILURE(status))
+ return;
+
++ acpi_walk_resources(handle, "_AEI",
++ acpi_gpiochip_alloc_event, acpi_gpio);
++
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ defer = !acpi_gpio_deferred_req_irqs_done;
+ if (defer)
+@@ -297,8 +326,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+ if (defer)
+ return;
+
+- acpi_walk_resources(handle, "_AEI",
+- acpi_gpiochip_request_interrupt, acpi_gpio);
++ acpi_gpiochip_request_irqs(acpi_gpio);
+ }
+ EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
+
+@@ -335,10 +363,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+ list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+ struct gpio_desc *desc;
+
+- if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+- disable_irq_wake(event->irq);
++ if (event->irq_requested) {
++ if (event->irq_is_wake)
++ disable_irq_wake(event->irq);
++
++ free_irq(event->irq, event);
++ }
+
+- free_irq(event->irq, event);
+ desc = event->desc;
+ if (WARN_ON(IS_ERR(desc)))
+ continue;
+@@ -1204,23 +1235,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
+ return con_id == NULL;
+ }
+
+-/* Run deferred acpi_gpiochip_request_interrupts() */
+-static int acpi_gpio_handle_deferred_request_interrupts(void)
++/* Run deferred acpi_gpiochip_request_irqs() */
++static int acpi_gpio_handle_deferred_request_irqs(void)
+ {
+ struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ list_for_each_entry_safe(acpi_gpio, tmp,
+ &acpi_gpio_deferred_req_irqs_list,
+- deferred_req_irqs_list_entry) {
+- acpi_handle handle;
+-
+- handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+- acpi_walk_resources(handle, "_AEI",
+- acpi_gpiochip_request_interrupt, acpi_gpio);
+-
+- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+- }
++ deferred_req_irqs_list_entry)
++ acpi_gpiochip_request_irqs(acpi_gpio);
+
+ acpi_gpio_deferred_req_irqs_done = true;
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+@@ -1228,4 +1252,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
+ return 0;
+ }
+ /* We must use _sync so that this runs after the first deferred_probe run */
+-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
++late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index ea10e9a26aad..ba129b64b61f 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -37,6 +37,7 @@
+
+ #include <linux/pci.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+
+ /**
+ * DOC: getunique and setversion story
+@@ -794,13 +795,17 @@ long drm_ioctl(struct file *filp,
+
+ if (is_driver_ioctl) {
+ /* driver ioctl */
+- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
++ unsigned int index = nr - DRM_COMMAND_BASE;
++
++ if (index >= dev->driver->num_ioctls)
+ goto err_i1;
+- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
++ index = array_index_nospec(index, dev->driver->num_ioctls);
++ ioctl = &dev->driver->ioctls[index];
+ } else {
+ /* core ioctl */
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ goto err_i1;
++ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
+ ioctl = &drm_ioctls[nr];
+ }
+
+@@ -882,6 +887,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ return false;
++ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
+
+ *flags = drm_ioctls[nr].flags;
+ return true;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index c71cc857b649..2c6d5c7a4445 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ }
+@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_read_index);
+ }
+@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_write_index);
+ }
+@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ }
+@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ }
+@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ }
+@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_read_index);
+ }
+@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_write_index);
+ }
+@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ }
+@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
+
+ if (!hv_dev->channel)
+ return -ENODEV;
++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++ return -EINVAL;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 2d95e8d93cc7..9fe075c137dc 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1767,6 +1767,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ?
+ module_param_named(elantech_smbus, elantech_smbus, int, 0644);
+ MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
+
++static const char * const i2c_blacklist_pnp_ids[] = {
++ /*
++ * These are known to not be working properly as bits are missing
++ * in elan_i2c.
++ */
++ "LEN2131", /* ThinkPad P52 w/ NFC */
++ "LEN2132", /* ThinkPad P52 */
++ "LEN2133", /* ThinkPad P72 w/ NFC */
++ "LEN2134", /* ThinkPad P72 */
++ NULL
++};
+
+ static int elantech_create_smbus(struct psmouse *psmouse,
+ struct elantech_device_info *info,
+ bool leave_breadcrumbs)
+@@ -1802,10 +1814,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+
+ if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
+ /*
+- * New ICs are enabled by default.
++ * New ICs are enabled by default, unless mentioned in
++ * i2c_blacklist_pnp_ids.
+ * Old ICs are up to the user to decide.
+ */
+- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
++ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
++ psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
+ return -ENXIO;
+ }
+
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 30b15e91d8be..8e7a2a59cd32 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2020,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *new_mode;
+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
++ struct v4l2_mbus_framefmt *fmt;
+ int ret;
+
+ if (format->pad != 0)
+@@ -2037,22 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
+ if (ret)
+ goto out;
+
+- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+- struct v4l2_mbus_framefmt *fmt =
+- v4l2_subdev_get_try_format(sd, cfg, 0);
++ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
++ fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
++ else
++ fmt = &sensor->fmt;
+
+- *fmt = *mbus_fmt;
+- goto out;
+- }
++ *fmt = *mbus_fmt;
+
+ if (new_mode != sensor->current_mode) {
+ sensor->current_mode = new_mode;
+ sensor->pending_mode_change = true;
+ }
+- if (mbus_fmt->code != sensor->fmt.code) {
+- sensor->fmt = *mbus_fmt;
++ if (mbus_fmt->code != sensor->fmt.code)
+ sensor->pending_fmt_change = true;
+- }
++
+ out:
+ mutex_unlock(&sensor->lock);
+ return ret;
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index bc1bd2c25613..55997cf84b39 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -30,6 +30,7 @@
+ #include "pwrseq.h"
+
+ #define DEFAULT_CMD6_TIMEOUT_MS 500
++#define MIN_CACHE_EN_TIMEOUT_MS 1600
+
+ static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ card->cid.year += 16;
+
+ /* check whether the eMMC card supports BKOPS */
+- if (!mmc_card_broken_hpi(card) &&
+- ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
++ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ card->ext_csd.bkops = 1;
+ card->ext_csd.man_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ if (err) {
+ pr_warn("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
++ card->ext_csd.hpi_en = 0;
+ err = 0;
+- } else
++ } else {
+ card->ext_csd.hpi_en = 1;
++ }
+ }
+
+ /*
+- * If cache size is higher than 0, this indicates
+- * the existence of cache and it can be turned on.
++ * If cache size is higher than 0, this indicates the existence of cache
++ * and it can be turned on. Note that some eMMCs from Micron has been
++ * reported to need ~800 ms timeout, while enabling the cache after
++ * sudden power failure tests. Let's extend the timeout to a minimum of
++ * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
+ */
+- if (!mmc_card_broken_hpi(card) &&
+- card->ext_csd.cache_size > 0) {
++ if (card->ext_csd.cache_size > 0) {
++ unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
++
++ timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+- EXT_CSD_CACHE_CTRL, 1,
+- card->ext_csd.generic_cmd6_time);
++ EXT_CSD_CACHE_CTRL, 1, timeout_ms);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index 68760d4a5d3d..b23c57e07f36 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -2066,7 +2066,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
+ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+- mmc->max_seg_size = mmc->max_req_size;
+
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
+@@ -2096,6 +2095,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ goto err_irq;
+ }
+
++ /*
++ * Limit the maximum segment size to the lower of the request size
++ * and the DMA engine device segment size limits. In reality, with
++ * 32-bit transfers, the DMA engine can do longer segments than this
++ * but there is no way to represent that in the DMA model - if we
++ * increase this figure here, we get warnings from the DMA API debug.
++ */
++ mmc->max_seg_size = min3(mmc->max_req_size,
++ dma_get_max_seg_size(host->rx_chan->device->dev),
++ dma_get_max_seg_size(host->tx_chan->device->dev));
++
+ /* Request IRQ for MMC operations */
+ ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
+ mmc_hostname(mmc), host);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 184c24baca15..d6916f787fce 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
+ return -EIO;
+ }
+
++ /* check if we have a valid interface */
++ if (if_num > 16) {
++ kfree(config_data);
++ return -EINVAL;
++ }
++
+ switch (config_data[if_num]) {
+ case 0x0:
+ result = 0;
+@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
+
+ /* Get the interface/port specification from either driver_info or from
+ * the device itself */
+- if (id->driver_info)
++ if (id->driver_info) {
++ /* if_num is controlled by the device, driver_info is a 0 terminated
++ * array. Make sure, the access is in bounds! */
++ for (i = 0; i <= if_num; ++i)
++ if (((u32 *)(id->driver_info))[i] == 0)
++ goto exit;
+ port_spec = ((u32 *)(id->driver_info))[if_num];
+- else
++ } else {
+ port_spec = hso_get_config_data(interface);
++ if (port_spec < 0)
++ goto exit;
++ }
+
+ /* Check if we need to switch to alt interfaces prior to port
+ * configuration */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 4d49a1a3f504..16c6c7f921a8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -868,6 +868,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ int ret, i, j;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
++ /*
++ * This command is not supported on earlier firmware versions.
++ * Unfortunately, we don't have a TLV API flag to rely on, so
++ * rely on the major version which is in the first byte of
++ * ucode_ver.
++ */
++ if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
++ return 0;
++
+ ret = iwl_mvm_sar_get_wgds_table(mvm);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index b150da4c6721..5d65500a8aa7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -518,6 +518,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+
+ /* 9000 Series */
++ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
+index e2addd8b878b..5d75c971004b 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n.c
+@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
+ "Send delba to tid=%d, %pM\n",
+ tid, rx_reor_tbl_ptr->ta);
+ mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+- flags);
+- return;
++ goto exit;
+ }
+ }
++exit:
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ }
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+index 8e63d14c1e1c..5380fba652cc 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
+ * There could be holes in the buffer, which are skipped by the function.
+ * Since the buffer is linear, the function uses rotation to simulate
+ * circular buffer.
+- *
+- * The caller must hold rx_reorder_tbl_lock spinlock.
+ */
+ static void
+ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+ {
+ int pkt_to_send, i;
+ void *rx_tmp_ptr;
++ unsigned long flags;
+
+ pkt_to_send = (start_win > tbl->start_win) ?
+ min((start_win - tbl->start_win), tbl->win_size) :
+ tbl->win_size;
+
+ for (i = 0; i < pkt_to_send; ++i) {
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ rx_tmp_ptr = NULL;
+ if (tbl->rx_reorder_ptr[i]) {
+ rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+ tbl->rx_reorder_ptr[i] = NULL;
+ }
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ if (rx_tmp_ptr)
+ mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
+ }
+
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ /*
+ * We don't have a circular buffer, hence use rotation to simulate
+ * circular buffer
+@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+ }
+
+ tbl->start_win = start_win;
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ }
+
+ /*
+@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+ * The start window is adjusted automatically when a hole is located.
+ * Since the buffer is linear, the function uses rotation to simulate
+ * circular buffer.
+- *
+- * The caller must hold rx_reorder_tbl_lock spinlock.
+ */
+ static void
+ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
+@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
+ {
+ int i, j, xchg;
+ void *rx_tmp_ptr;
++ unsigned long flags;
+
+ for (i = 0; i < tbl->win_size; ++i) {
+- if (!tbl->rx_reorder_ptr[i])
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
++ if (!tbl->rx_reorder_ptr[i]) {
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
++ flags);
+ break;
++ }
+ rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+ tbl->rx_reorder_ptr[i] = NULL;
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
+ }
+
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ /*
+ * We don't have a circular buffer, hence use rotation to simulate
+ * circular buffer
+@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
+ }
+ }
+ tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ }
+
+ /*
+@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
+ *
+ * The function stops the associated timer and dispatches all the
+ * pending packets in the Rx reorder table before deletion.
+- *
+- * The caller must hold rx_reorder_tbl_lock spinlock.
+ */
+ static void
+ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
+@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
+
+ del_timer_sync(&tbl->timer_context.timer);
+ tbl->timer_context.timer_is_set = false;
++
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ list_del(&tbl->list);
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
++
+ kfree(tbl->rx_reorder_ptr);
+ kfree(tbl);
+
+@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
+ /*
+ * This function returns the pointer to an entry in Rx reordering
+ * table which matches the given TA/TID pair.
+- *
+- * The caller must hold rx_reorder_tbl_lock spinlock.
+ */
+ struct mwifiex_rx_reorder_tbl *
+ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
+ {
+ struct mwifiex_rx_reorder_tbl *tbl;
++ unsigned long flags;
+
+- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
++ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
++ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
++ flags);
+ return tbl;
++ }
++ }
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ return NULL;
+ }
+@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+ return;
+
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+- if (!memcmp(tbl->ta, ta, ETH_ALEN))
++ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
++ if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
++ flags);
+ mwifiex_del_rx_reorder_entry(priv, tbl);
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
++ }
++ }
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ return;
+@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+ /*
+ * This function finds the last sequence number used in the packets
+ * buffered in Rx reordering table.
+- *
+- * The caller must hold rx_reorder_tbl_lock spinlock.
+ */
+ static int
+ mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
+ {
+ struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
++ struct mwifiex_private *priv = ctx->priv;
++ unsigned long flags;
+ int i;
+
+- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
++ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
++ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
++ flags);
+ return i;
++ }
++ }
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ return -1;
+ }
+@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
+ struct reorder_tmr_cnxt *ctx =
+ from_timer(ctx, t, timer);
+ int start_win, seq_num;
+- unsigned long flags;
+
+ ctx->timer_is_set = false;
+- spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
+ seq_num = mwifiex_11n_find_last_seq_num(ctx);
+
+- if (seq_num < 0) {
+- spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
++ if (seq_num < 0)
+ return;
+- }
+
+ mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
+ start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
+ mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
+ start_win);
+- spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+ }
+
+ /*
+@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
+ * If we get a TID, ta pair which is already present dispatch all the
+ * the packets and move the window size until the ssn
+ */
+- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
+ if (tbl) {
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ return;
+ }
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ /* if !tbl then create one */
+ new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
+ if (!new_node)
+@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
+ int prev_start_win, start_win, end_win, win_size;
+ u16 pkt_index;
+ bool init_window_shift = false;
+- unsigned long flags;
+ int ret = 0;
+
+- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
+ if (!tbl) {
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ if (pkt_type != PKT_TYPE_BAR)
+ mwifiex_11n_dispatch_pkt(priv, payload);
+ return ret;
+ }
+
+ if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ mwifiex_11n_dispatch_pkt(priv, payload);
+ return ret;
+ }
+@@ -651,8 +666,6 @@ done:
+ if (!tbl->timer_context.timer_is_set ||
+ prev_start_win != tbl->start_win)
+ mwifiex_11n_rxreorder_timer_restart(tbl);
+-
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ return ret;
+ }
+
+@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
+ peer_mac, tid, initiator);
+
+ if (cleanup_rx_reorder_tbl) {
+- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ peer_mac);
+ if (!tbl) {
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+- flags);
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: TID, TA not found in table\n");
+ return;
+ }
+ mwifiex_del_rx_reorder_entry(priv, tbl);
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ } else {
+ ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
+ if (!ptx_tbl) {
+@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
+ int tid, win_size;
+ struct mwifiex_rx_reorder_tbl *tbl;
+ uint16_t block_ack_param_set;
+- unsigned long flags;
+
+ block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
+
+@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
+ mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
+ add_ba_rsp->peer_mac_addr, tid);
+
+- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ add_ba_rsp->peer_mac_addr);
+ if (tbl)
+ mwifiex_del_rx_reorder_entry(priv, tbl);
+
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ return 0;
+ }
+
+ win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
+ >> BLOCKACKPARAM_WINSIZE_POS;
+
+- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ add_ba_rsp->peer_mac_addr);
+ if (tbl) {
+@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
+ else
+ tbl->amsdu = false;
+ }
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
+
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ list_for_each_entry_safe(del_tbl_ptr, tmp_node,
+- &priv->rx_reorder_tbl_ptr, list)
++ &priv->rx_reorder_tbl_ptr, list) {
++ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
++ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
++ }
+ INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ int tlv_buf_left = len;
+ int ret;
+ u8 *tmp;
+- unsigned long flags;
+
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
+ event_buf, len);
+@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
+ tlv_bitmap_len);
+
+- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ rx_reor_tbl_ptr =
+ mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
+ tlv_rxba->mac);
+ if (!rx_reor_tbl_ptr) {
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+- flags);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Can not find rx_reorder_tbl!");
+ return;
+ }
+- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ for (i = 0; i < tlv_bitmap_len; i++) {
+ for (j = 0 ; j < 8; j++) {
1340 +diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
1341 +index a83c5afc256a..5ce85d5727e4 100644
1342 +--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
1343 ++++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
1344 +@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
1345 + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
1346 + }
1347 +
1348 +- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1349 + if (!priv->ap_11n_enabled ||
1350 + (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
1351 + (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
1352 + ret = mwifiex_handle_uap_rx_forward(priv, skb);
1353 +- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1354 + return ret;
1355 + }
1356 +- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1357 +
1358 + /* Reorder and send to kernel */
1359 + pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
1360 +diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
1361 +index f4122c8fdd97..ef9b502ce576 100644
1362 +--- a/drivers/net/wireless/realtek/rtlwifi/base.c
1363 ++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
1364 +@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
1365 +
1366 + if (rtl_c2h_fast_cmd(hw, skb)) {
1367 + rtl_c2h_content_parsing(hw, skb);
1368 ++ kfree_skb(skb);
1369 + return;
1370 + }
1371 +
1372 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1373 +index 4a57ffecc7e6..5c9acb634ff7 100644
1374 +--- a/drivers/scsi/sd.c
1375 ++++ b/drivers/scsi/sd.c
1376 +@@ -132,6 +132,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
1377 +
1378 + static struct kmem_cache *sd_cdb_cache;
1379 + static mempool_t *sd_cdb_pool;
1380 ++static mempool_t *sd_page_pool;
1381 +
1382 + static const char *sd_cache_types[] = {
1383 + "write through", "none", "write back",
1384 +@@ -758,9 +759,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
1385 + unsigned int data_len = 24;
1386 + char *buf;
1387 +
1388 +- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1389 ++ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
1390 + if (!rq->special_vec.bv_page)
1391 + return BLKPREP_DEFER;
1392 ++ clear_highpage(rq->special_vec.bv_page);
1393 + rq->special_vec.bv_offset = 0;
1394 + rq->special_vec.bv_len = data_len;
1395 + rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1396 +@@ -791,9 +793,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
1397 + u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
1398 + u32 data_len = sdp->sector_size;
1399 +
1400 +- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1401 ++ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
1402 + if (!rq->special_vec.bv_page)
1403 + return BLKPREP_DEFER;
1404 ++ clear_highpage(rq->special_vec.bv_page);
1405 + rq->special_vec.bv_offset = 0;
1406 + rq->special_vec.bv_len = data_len;
1407 + rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1408 +@@ -821,9 +824,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
1409 + u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
1410 + u32 data_len = sdp->sector_size;
1411 +
1412 +- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1413 ++ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
1414 + if (!rq->special_vec.bv_page)
1415 + return BLKPREP_DEFER;
1416 ++ clear_highpage(rq->special_vec.bv_page);
1417 + rq->special_vec.bv_offset = 0;
1418 + rq->special_vec.bv_len = data_len;
1419 + rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1420 +@@ -1287,7 +1291,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1421 + u8 *cmnd;
1422 +
1423 + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1424 +- __free_page(rq->special_vec.bv_page);
1425 ++ mempool_free(rq->special_vec.bv_page, sd_page_pool);
1426 +
1427 + if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1428 + cmnd = SCpnt->cmnd;
1429 +@@ -3635,6 +3639,13 @@ static int __init init_sd(void)
1430 + goto err_out_cache;
1431 + }
1432 +
1433 ++ sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
1434 ++ if (!sd_page_pool) {
1435 ++ printk(KERN_ERR "sd: can't init discard page pool\n");
1436 ++ err = -ENOMEM;
1437 ++ goto err_out_ppool;
1438 ++ }
1439 ++
1440 + err = scsi_register_driver(&sd_template.gendrv);
1441 + if (err)
1442 + goto err_out_driver;
1443 +@@ -3642,6 +3653,9 @@ static int __init init_sd(void)
1444 + return 0;
1445 +
1446 + err_out_driver:
1447 ++ mempool_destroy(sd_page_pool);
1448 ++
1449 ++err_out_ppool:
1450 + mempool_destroy(sd_cdb_pool);
1451 +
1452 + err_out_cache:
1453 +@@ -3668,6 +3682,7 @@ static void __exit exit_sd(void)
1454 +
1455 + scsi_unregister_driver(&sd_template.gendrv);
1456 + mempool_destroy(sd_cdb_pool);
1457 ++ mempool_destroy(sd_page_pool);
1458 + kmem_cache_destroy(sd_cdb_cache);
1459 +
1460 + class_unregister(&sd_disk_class);
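
The sd.c hunks replace ad-hoc alloc_page(GFP_ATOMIC | __GFP_ZERO) calls with a small mempool, so the discard/WRITE SAME setup path can fall back to a preallocated reserve under memory pressure; and since mempool pages are recycled rather than freshly allocated, an explicit clear_highpage() takes over from __GFP_ZERO. A minimal sketch of the pattern (SD_MEMPOOL_SIZE is the driver's reserve size; the rest is the stock mempool API):

    #include <linux/mempool.h>
    #include <linux/highmem.h>

    static mempool_t *page_pool;

    /* init, once: preallocate a reserve of order-0 pages
     *   page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
     */

    static struct page *get_payload_page(void)
    {
        struct page *page = mempool_alloc(page_pool, GFP_ATOMIC);

        if (!page)
            return NULL;        /* only if the reserve is exhausted too */
        clear_highpage(page);   /* recycled pages are not zeroed */
        return page;
    }

    static void put_payload_page(struct page *page)
    {
        mempool_free(page, page_pool);  /* refills the reserve first */
    }
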
1461 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1462 +index 94aca1b5ac8a..01b5818a4be5 100644
1463 +--- a/drivers/usb/host/xhci-hub.c
1464 ++++ b/drivers/usb/host/xhci-hub.c
1465 +@@ -1507,7 +1507,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1466 + portsc_buf[port_index] = 0;
1467 +
1468 + /* Bail out if a USB3 port has a new device in link training */
1469 +- if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1470 ++ if ((hcd->speed >= HCD_USB3) &&
1471 ++ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1472 + bus_state->bus_suspended = 0;
1473 + spin_unlock_irqrestore(&xhci->lock, flags);
1474 + xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
1475 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1476 +index c3ed7d1c9f65..e88060ea1e33 100644
1477 +--- a/drivers/usb/host/xhci.h
1478 ++++ b/drivers/usb/host/xhci.h
1479 +@@ -1860,6 +1860,8 @@ struct xhci_hcd {
1480 + unsigned sw_lpm_support:1;
1481 + /* support xHCI 1.0 spec USB2 hardware LPM */
1482 + unsigned hw_lpm_support:1;
1483 ++ /* Broken suspend flag for the SNPS suspend/resume issue */
1484 ++ unsigned broken_suspend:1;
1485 + /* cached usb2 extended protocol capabilities */
1486 + u32 *ext_caps;
1487 + unsigned int num_ext_caps;
1488 +@@ -1877,8 +1879,6 @@ struct xhci_hcd {
1489 + void *dbc;
1490 + /* platform-specific data -- must come last */
1491 + unsigned long priv[0] __aligned(sizeof(s64));
1492 +- /* Broken Suspend flag for SNPS Suspend resume issue */
1493 +- u8 broken_suspend;
1494 + };
1495 +
1496 + /* Platform specific overrides to generic XHCI hc_driver ops */
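
The xhci.h hunk above is a struct-layout fix: priv[0] is a zero-length trailing array that platform glue uses as the start of its private data, so it must remain the last member; a u8 placed after it overlaps that private area. Folding the flag into the existing bitfield block keeps priv[] last at no extra cost. A userspace sketch of the hazard, assuming GNU C (zero-length arrays are a GCC extension):

    #include <stdio.h>
    #include <stddef.h>

    struct bad {
        int hdr;
        long priv[0];          /* meant to be the trailing private area */
        unsigned char flag;    /* BUG: lands at the same offset as priv[0] */
    };

    struct good {
        int hdr;
        unsigned flag:1;       /* folded next to the other fields instead */
        long priv[0];          /* stays last, as its users expect */
    };

    int main(void)
    {
        /* In 'bad', priv[] and flag share an offset: writing flag
         * scribbles on the first byte of the private data. */
        printf("bad:  priv@%zu flag@%zu\n",
               offsetof(struct bad, priv), offsetof(struct bad, flag));
        printf("good: priv@%zu\n", offsetof(struct good, priv));
        return 0;
    }
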
1497 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1498 +index e72ad9f81c73..17787dc349f8 100644
1499 +--- a/drivers/usb/serial/option.c
1500 ++++ b/drivers/usb/serial/option.c
1501 +@@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
1502 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1503 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1504 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1505 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
1506 ++ .driver_info = NCTRL(0) | RSVD(1) },
1507 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
1508 ++ .driver_info = NCTRL(0) },
1509 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1510 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1511 + .driver_info = RSVD(1) },
1512 +@@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
1513 + .driver_info = RSVD(4) },
1514 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
1515 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
1516 ++ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
1517 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
1518 + .driver_info = RSVD(4) },
1519 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
1520 +@@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
1521 + .driver_info = RSVD(2) },
1522 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1523 + .driver_info = RSVD(2) },
1524 ++ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1525 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1526 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1527 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
1528 +@@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
1529 + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1530 + { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1531 + .driver_info = RSVD(5) | RSVD(6) },
1532 ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
1533 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1534 + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1535 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
1536 +@@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
1537 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1538 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1539 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1540 +- { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1541 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1542 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
1543 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
1544 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
1545 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
1546 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
1547 ++ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
1548 ++ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
1549 + { } /* Terminating entry */
1550 + };
1551 + MODULE_DEVICE_TABLE(usb, option_ids);
1552 +diff --git a/fs/iomap.c b/fs/iomap.c
1553 +index 37da7a61a6c5..ec15cf2ec696 100644
1554 +--- a/fs/iomap.c
1555 ++++ b/fs/iomap.c
1556 +@@ -117,12 +117,6 @@ iomap_page_create(struct inode *inode, struct page *page)
1557 + atomic_set(&iop->read_count, 0);
1558 + atomic_set(&iop->write_count, 0);
1559 + bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
1560 +-
1561 +- /*
1562 +- * migrate_page_move_mapping() assumes that pages with private data have
1563 +- * their count elevated by 1.
1564 +- */
1565 +- get_page(page);
1566 + set_page_private(page, (unsigned long)iop);
1567 + SetPagePrivate(page);
1568 + return iop;
1569 +@@ -139,7 +133,6 @@ iomap_page_release(struct page *page)
1570 + WARN_ON_ONCE(atomic_read(&iop->write_count));
1571 + ClearPagePrivate(page);
1572 + set_page_private(page, 0);
1573 +- put_page(page);
1574 + kfree(iop);
1575 + }
1576 +
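
The iomap hunks revert the extra page reference taken around the per-page private data, returning to the bare pairing of set_page_private()/SetPagePrivate() with its mirror image on release. A generic sketch of that attach/detach idiom (attach_private()/detach_private() are illustrative names):

    #include <linux/pagemap.h>
    #include <linux/mm.h>

    /* attach: stash a private structure on the page */
    static void attach_private(struct page *page, void *data)
    {
        set_page_private(page, (unsigned long)data);
        SetPagePrivate(page);   /* tells the VM ->releasepage et al. apply */
    }

    /* detach: must mirror the attach side exactly */
    static void *detach_private(struct page *page)
    {
        void *data = (void *)page_private(page);

        ClearPagePrivate(page);
        set_page_private(page, 0);
        return data;            /* caller frees it, as iomap_page_release() does */
    }
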
1577 +diff --git a/fs/namei.c b/fs/namei.c
1578 +index 0cab6494978c..914178cdbe94 100644
1579 +--- a/fs/namei.c
1580 ++++ b/fs/namei.c
1581 +@@ -3701,8 +3701,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1582 + if (error)
1583 + return error;
1584 +
1585 +- if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
1586 +- !ns_capable(dentry->d_sb->s_user_ns, CAP_MKNOD))
1587 ++ if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
1588 + return -EPERM;
1589 +
1590 + if (!dir->i_op->mknod)
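
The vfs_mknod() hunk above restores the stricter capability check for device nodes: ns_capable(sb->s_user_ns, CAP_MKNOD) is satisfied by the root of whatever user namespace owns the superblock, while capable(CAP_MKNOD) requires the capability in the initial namespace. A side-by-side sketch (the function names are illustrative):

    #include <linux/capability.h>
    #include <linux/fs.h>

    static bool mknod_allowed_old(struct super_block *sb)
    {
        /* Passes for the root of the userns owning @sb, even an
         * unprivileged one: too lenient for char/block devices. */
        return ns_capable(sb->s_user_ns, CAP_MKNOD);
    }

    static bool mknod_allowed_new(void)
    {
        /* Passes only with CAP_MKNOD in the initial namespace. */
        return capable(CAP_MKNOD);
    }
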
1591 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
1592 +index 89921a0d2ebb..4d598a399bbf 100644
1593 +--- a/fs/proc/proc_sysctl.c
1594 ++++ b/fs/proc/proc_sysctl.c
1595 +@@ -464,7 +464,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1596 +
1597 + inode = new_inode(sb);
1598 + if (!inode)
1599 +- goto out;
1600 ++ return ERR_PTR(-ENOMEM);
1601 +
1602 + inode->i_ino = get_next_ino();
1603 +
1604 +@@ -474,8 +474,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1605 + if (unlikely(head->unregistering)) {
1606 + spin_unlock(&sysctl_lock);
1607 + iput(inode);
1608 +- inode = NULL;
1609 +- goto out;
1610 ++ return ERR_PTR(-ENOENT);
1611 + }
1612 + ei->sysctl = head;
1613 + ei->sysctl_entry = table;
1614 +@@ -500,7 +499,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1615 + if (root->set_ownership)
1616 + root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
1617 +
1618 +-out:
1619 + return inode;
1620 + }
1621 +
1622 +@@ -549,10 +547,11 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
1623 + goto out;
1624 + }
1625 +
1626 +- err = ERR_PTR(-ENOMEM);
1627 + inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
1628 +- if (!inode)
1629 ++ if (IS_ERR(inode)) {
1630 ++ err = ERR_CAST(inode);
1631 + goto out;
1632 ++ }
1633 +
1634 + d_set_d_op(dentry, &proc_sys_dentry_operations);
1635 + err = d_splice_alias(inode, dentry);
1636 +@@ -685,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file,
1637 + if (d_in_lookup(child)) {
1638 + struct dentry *res;
1639 + inode = proc_sys_make_inode(dir->d_sb, head, table);
1640 +- if (!inode) {
1641 ++ if (IS_ERR(inode)) {
1642 + d_lookup_done(child);
1643 + dput(child);
1644 + return false;
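
The proc_sysctl.c hunks convert proc_sys_make_inode() from NULL-on-failure to the kernel's ERR_PTR convention so callers can tell -ENOMEM from -ENOENT; proc_sys_lookup() then propagates the error with ERR_CAST(), since it returns a pointer of a different type. A minimal sketch of the idiom using the real <linux/err.h> helpers (struct foo is a stand-in):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int x; };              /* stand-in payload type */

    static struct foo *make_foo(void)
    {
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
            return ERR_PTR(-ENOMEM);    /* errno encoded in the pointer */
        return f;
    }

    static int use_foo(void)
    {
        struct foo *f = make_foo();

        if (IS_ERR(f))                  /* catches any ERR_PTR() value */
            return PTR_ERR(f);          /* decode back to -ENOMEM etc. */
        kfree(f);
        return 0;
    }
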
1645 +diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
1646 +index 4844538eb926..c6f9b2225387 100644
1647 +--- a/fs/ubifs/replay.c
1648 ++++ b/fs/ubifs/replay.c
1649 +@@ -209,6 +209,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
1650 + return ubifs_tnc_remove_range(c, &min_key, &max_key);
1651 + }
1652 +
1653 ++/**
1654 ++ * inode_still_linked - check whether inode in question will be re-linked.
1655 ++ * @c: UBIFS file-system description object
1656 ++ * @rino: replay entry to test
1657 ++ *
1658 ++ * O_TMPFILE files can be re-linked; this means the link count goes from 0 to 1.
1659 ++ * This case needs special care; otherwise all references to the inode will
1660 ++ * be removed as soon as the first replay entry for an inode with link count 0
1661 ++ * is found.
1662 ++ */
1663 ++static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
1664 ++{
1665 ++ struct replay_entry *r;
1666 ++
1667 ++ ubifs_assert(c, rino->deletion);
1668 ++ ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
1669 ++
1670 ++ /*
1671 ++ * Find the most recent entry for the inode behind @rino and check
1672 ++ * whether it is a deletion.
1673 ++ */
1674 ++ list_for_each_entry_reverse(r, &c->replay_list, list) {
1675 ++ ubifs_assert(c, r->sqnum >= rino->sqnum);
1676 ++ if (key_inum(c, &r->key) == key_inum(c, &rino->key))
1677 ++ return r->deletion == 0;
1678 ++
1679 ++ }
1680 ++
1681 ++ ubifs_assert(c, 0);
1682 ++ return false;
1683 ++}
1684 ++
1685 + /**
1686 + * apply_replay_entry - apply a replay entry to the TNC.
1687 + * @c: UBIFS file-system description object
1688 +@@ -236,6 +268,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
1689 + {
1690 + ino_t inum = key_inum(c, &r->key);
1691 +
1692 ++ if (inode_still_linked(c, r)) {
1693 ++ err = 0;
1694 ++ break;
1695 ++ }
1696 ++
1697 + err = ubifs_tnc_remove_ino(c, inum);
1698 + break;
1699 + }
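
inode_still_linked() leans on the replay list being ordered by sequence number: walking it with list_for_each_entry_reverse() visits the newest entries first, so the first match for the inode number is authoritative. A minimal sketch of that newest-first scan, with made-up types:

    #include <linux/list.h>
    #include <linux/types.h>

    struct entry {                      /* stand-in for replay_entry */
        struct list_head list;
        unsigned long inum;
        bool deletion;
    };

    /* Newest entries sit at the tail, so scan backwards and stop at
     * the first (i.e. most recent) entry for @inum. */
    static bool most_recent_is_deletion(struct list_head *head,
                                        unsigned long inum)
    {
        struct entry *e;

        list_for_each_entry_reverse(e, head, list) {
            if (e->inum == inum)
                return e->deletion;
        }
        return false;                   /* no entry found */
    }
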
1700 +diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
1701 +index 89f3b03b1445..e3667c9a33a5 100644
1702 +--- a/include/asm-generic/4level-fixup.h
1703 ++++ b/include/asm-generic/4level-fixup.h
1704 +@@ -3,7 +3,7 @@
1705 + #define _4LEVEL_FIXUP_H
1706 +
1707 + #define __ARCH_HAS_4LEVEL_HACK
1708 +-#define __PAGETABLE_PUD_FOLDED
1709 ++#define __PAGETABLE_PUD_FOLDED 1
1710 +
1711 + #define PUD_SHIFT PGDIR_SHIFT
1712 + #define PUD_SIZE PGDIR_SIZE
1713 +diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
1714 +index 9c2e0708eb82..73474bb52344 100644
1715 +--- a/include/asm-generic/5level-fixup.h
1716 ++++ b/include/asm-generic/5level-fixup.h
1717 +@@ -3,7 +3,7 @@
1718 + #define _5LEVEL_FIXUP_H
1719 +
1720 + #define __ARCH_HAS_5LEVEL_HACK
1721 +-#define __PAGETABLE_P4D_FOLDED
1722 ++#define __PAGETABLE_P4D_FOLDED 1
1723 +
1724 + #define P4D_SHIFT PGDIR_SHIFT
1725 + #define P4D_SIZE PGDIR_SIZE
1726 +diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
1727 +index 0c34215263b8..1d6dd38c0e5e 100644
1728 +--- a/include/asm-generic/pgtable-nop4d-hack.h
1729 ++++ b/include/asm-generic/pgtable-nop4d-hack.h
1730 +@@ -5,7 +5,7 @@
1731 + #ifndef __ASSEMBLY__
1732 + #include <asm-generic/5level-fixup.h>
1733 +
1734 +-#define __PAGETABLE_PUD_FOLDED
1735 ++#define __PAGETABLE_PUD_FOLDED 1
1736 +
1737 + /*
1738 + * Having the pud type consist of a pgd gets the size right, and allows
1739 +diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
1740 +index 1a29b2a0282b..04cb913797bc 100644
1741 +--- a/include/asm-generic/pgtable-nop4d.h
1742 ++++ b/include/asm-generic/pgtable-nop4d.h
1743 +@@ -4,7 +4,7 @@
1744 +
1745 + #ifndef __ASSEMBLY__
1746 +
1747 +-#define __PAGETABLE_P4D_FOLDED
1748 ++#define __PAGETABLE_P4D_FOLDED 1
1749 +
1750 + typedef struct { pgd_t pgd; } p4d_t;
1751 +
1752 +diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
1753 +index f35f6e8149e4..b85b8271a73d 100644
1754 +--- a/include/asm-generic/pgtable-nopmd.h
1755 ++++ b/include/asm-generic/pgtable-nopmd.h
1756 +@@ -8,7 +8,7 @@
1757 +
1758 + struct mm_struct;
1759 +
1760 +-#define __PAGETABLE_PMD_FOLDED
1761 ++#define __PAGETABLE_PMD_FOLDED 1
1762 +
1763 + /*
1764 + * Having the pmd type consist of a pud gets the size right, and allows
1765 +diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
1766 +index e950b9c50f34..9bef475db6fe 100644
1767 +--- a/include/asm-generic/pgtable-nopud.h
1768 ++++ b/include/asm-generic/pgtable-nopud.h
1769 +@@ -9,7 +9,7 @@
1770 + #else
1771 + #include <asm-generic/pgtable-nop4d.h>
1772 +
1773 +-#define __PAGETABLE_PUD_FOLDED
1774 ++#define __PAGETABLE_PUD_FOLDED 1
1775 +
1776 + /*
1777 + * Having the pud type consist of a p4d gets the size right, and allows
1778 +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1779 +index 88ebc6102c7c..15fd0277ffa6 100644
1780 +--- a/include/asm-generic/pgtable.h
1781 ++++ b/include/asm-generic/pgtable.h
1782 +@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
1783 + #endif
1784 + #endif
1785 +
1786 ++/*
1787 ++ * On some architectures it depends on the mm whether the p4d/pud or pmd
1788 ++ * layer of the page table hierarchy is folded or not.
1789 ++ */
1790 ++#ifndef mm_p4d_folded
1791 ++#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
1792 ++#endif
1793 ++
1794 ++#ifndef mm_pud_folded
1795 ++#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
1796 ++#endif
1797 ++
1798 ++#ifndef mm_pmd_folded
1799 ++#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
1800 ++#endif
1801 ++
1802 + #endif /* _ASM_GENERIC_PGTABLE_H */
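
These mm_*_folded() defaults explain the long run of one-line changes earlier in this patch: __is_defined() only yields 1 when the macro is defined *to 1*, so every bare '#define __PAGETABLE_*_FOLDED' had to become '#define __PAGETABLE_*_FOLDED 1'. The detection trick comes from include/linux/kconfig.h; a userspace demo reproducing it (GNU C assumed, as in the kernel):

    #include <stdio.h>

    /* The kernel's __is_defined() machinery from include/linux/kconfig.h.
     * If x expands to 1, the placeholder paste produces "0," and the
     * second argument is 1; otherwise the junk token is swallowed into
     * the first argument and the result is 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val)  ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x)     ___is_defined(x)

    #define FOLDED_NEW 1        /* new style: expands to 1 */
    #define FOLDED_OLD          /* old style: expands to nothing */

    int main(void)
    {
        printf("FOLDED_NEW:  %d\n", __is_defined(FOLDED_NEW));  /* 1 */
        printf("FOLDED_OLD:  %d\n", __is_defined(FOLDED_OLD));  /* 0: the bug */
        printf("FOLDED_NONE: %d\n", __is_defined(FOLDED_NONE)); /* 0: undefined */
        return 0;
    }
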
1803 +diff --git a/include/linux/math64.h b/include/linux/math64.h
1804 +index 837f2f2d1d34..bb2c84afb80c 100644
1805 +--- a/include/linux/math64.h
1806 ++++ b/include/linux/math64.h
1807 +@@ -281,4 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
1808 + }
1809 + #endif /* mul_u64_u32_div */
1810 +
1811 ++#define DIV64_U64_ROUND_UP(ll, d) \
1812 ++ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
1813 ++
1814 + #endif /* _LINUX_MATH64_H */
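
DIV64_U64_ROUND_UP() computes ceil(ll/d) via the usual (ll + d - 1)/d identity; the statement-expression temporary ensures the divisor is evaluated only once, so a side-effecting argument stays safe. For example, 10 over 4 yields (10 + 3)/4 = 3 where plain division truncates to 2, which is exactly what the vmscan.c hunk further below relies on. A tiny usage sketch:

    #include <linux/math64.h>

    static u64 demo(u64 total, u64 chunk)
    {
        /* ceil(total / chunk): e.g. total = 10, chunk = 4 -> 3. */
        return DIV64_U64_ROUND_UP(total, chunk);
    }
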
1815 +diff --git a/include/linux/mm.h b/include/linux/mm.h
1816 +index 0416a7204be3..e899460f1bc5 100644
1817 +--- a/include/linux/mm.h
1818 ++++ b/include/linux/mm.h
1819 +@@ -1724,11 +1724,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1820 +
1821 + static inline void mm_inc_nr_puds(struct mm_struct *mm)
1822 + {
1823 ++ if (mm_pud_folded(mm))
1824 ++ return;
1825 + atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1826 + }
1827 +
1828 + static inline void mm_dec_nr_puds(struct mm_struct *mm)
1829 + {
1830 ++ if (mm_pud_folded(mm))
1831 ++ return;
1832 + atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1833 + }
1834 + #endif
1835 +@@ -1748,11 +1752,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1836 +
1837 + static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1838 + {
1839 ++ if (mm_pmd_folded(mm))
1840 ++ return;
1841 + atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1842 + }
1843 +
1844 + static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1845 + {
1846 ++ if (mm_pmd_folded(mm))
1847 ++ return;
1848 + atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1849 + }
1850 + #endif
1851 +diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
1852 +index b9626aa7e90c..3e2a80cc7b56 100644
1853 +--- a/include/linux/t10-pi.h
1854 ++++ b/include/linux/t10-pi.h
1855 +@@ -39,12 +39,13 @@ struct t10_pi_tuple {
1856 +
1857 + static inline u32 t10_pi_ref_tag(struct request *rq)
1858 + {
1859 ++ unsigned int shift = ilog2(queue_logical_block_size(rq->q));
1860 ++
1861 + #ifdef CONFIG_BLK_DEV_INTEGRITY
1862 +- return blk_rq_pos(rq) >>
1863 +- (rq->q->integrity.interval_exp - 9) & 0xffffffff;
1864 +-#else
1865 +- return -1U;
1866 ++ if (rq->q->integrity.interval_exp)
1867 ++ shift = rq->q->integrity.interval_exp;
1868 + #endif
1869 ++ return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
1870 + }
1871 +
1872 + extern const struct blk_integrity_profile t10_pi_type1_crc;
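
The t10_pi_ref_tag() rewrite above defaults the shift to the logical block size and only overrides it when an integrity profile has set interval_exp; previously a kernel without CONFIG_BLK_DEV_INTEGRITY got the useless -1U. Since blk_rq_pos() counts 512-byte sectors, 4096-byte logical blocks give shift 12, and a request at sector 80 maps to ref tag 80 >> (12 - 9) = 10, i.e. its LBA. A sketch with those fixed numbers:

    #include <linux/log2.h>
    #include <linux/types.h>

    /* Assumed example values: 4096-byte logical blocks, request
     * starting at 512-byte sector 80, SECTOR_SHIFT == 9. */
    static u32 demo_ref_tag(void)
    {
        unsigned int shift = ilog2(4096);          /* 12 */
        u64 pos = 80;                              /* in 512B sectors */

        return (pos >> (shift - 9)) & 0xffffffff;  /* 80 >> 3 == 10 */
    }
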
1873 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
1874 +index 0eb390c205af..da588def3c61 100644
1875 +--- a/include/net/xfrm.h
1876 ++++ b/include/net/xfrm.h
1877 +@@ -1552,6 +1552,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1878 + int (*func)(struct xfrm_state *, int, void*), void *);
1879 + void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1880 + struct xfrm_state *xfrm_state_alloc(struct net *net);
1881 ++void xfrm_state_free(struct xfrm_state *x);
1882 + struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1883 + const xfrm_address_t *saddr,
1884 + const struct flowi *fl,
1885 +diff --git a/kernel/futex.c b/kernel/futex.c
1886 +index 11fc3bb456d6..f89abca89513 100644
1887 +--- a/kernel/futex.c
1888 ++++ b/kernel/futex.c
1889 +@@ -1148,11 +1148,65 @@ out_error:
1890 + return ret;
1891 + }
1892 +
1893 ++static int handle_exit_race(u32 __user *uaddr, u32 uval,
1894 ++ struct task_struct *tsk)
1895 ++{
1896 ++ u32 uval2;
1897 ++
1898 ++ /*
1899 ++ * If PF_EXITPIDONE is not yet set, then try again.
1900 ++ */
1901 ++ if (tsk && !(tsk->flags & PF_EXITPIDONE))
1902 ++ return -EAGAIN;
1903 ++
1904 ++ /*
1905 ++ * Reread the user space value to handle the following situation:
1906 ++ *
1907 ++ * CPU0 CPU1
1908 ++ *
1909 ++ * sys_exit() sys_futex()
1910 ++ * do_exit() futex_lock_pi()
1911 ++ * futex_lock_pi_atomic()
1912 ++ * exit_signals(tsk) No waiters:
1913 ++ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1914 ++ * mm_release(tsk) Set waiter bit
1915 ++ * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1916 ++ * Set owner died attach_to_pi_owner() {
1917 ++ * *uaddr = 0xC0000000; tsk = get_task(PID);
1918 ++ * } if (!tsk->flags & PF_EXITING) {
1919 ++ * ... attach();
1920 ++ * tsk->flags |= PF_EXITPIDONE; } else {
1921 ++ * if (!(tsk->flags & PF_EXITPIDONE))
1922 ++ * return -EAGAIN;
1923 ++ * return -ESRCH; <--- FAIL
1924 ++ * }
1925 ++ *
1926 ++ * Returning ESRCH unconditionally is wrong here because the
1927 ++ * user space value has been changed by the exiting task.
1928 ++ *
1929 ++ * The same logic applies to the case where the exiting task is
1930 ++ * already gone.
1931 ++ */
1932 ++ if (get_futex_value_locked(&uval2, uaddr))
1933 ++ return -EFAULT;
1934 ++
1935 ++ /* If the user space value has changed, try again. */
1936 ++ if (uval2 != uval)
1937 ++ return -EAGAIN;
1938 ++
1939 ++ /*
1940 ++ * The exiting task did not have a robust list, the robust list was
1941 ++ * corrupted or the user space value in *uaddr is simply bogus.
1942 ++ * Give up and tell user space.
1943 ++ */
1944 ++ return -ESRCH;
1945 ++}
1946 ++
1947 + /*
1948 + * Lookup the task for the TID provided from user space and attach to
1949 + * it after doing proper sanity checks.
1950 + */
1951 +-static int attach_to_pi_owner(u32 uval, union futex_key *key,
1952 ++static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1953 + struct futex_pi_state **ps)
1954 + {
1955 + pid_t pid = uval & FUTEX_TID_MASK;
1956 +@@ -1162,12 +1216,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
1957 + /*
1958 + * We are the first waiter - try to look up the real owner and attach
1959 + * the new pi_state to it, but bail out when TID = 0 [1]
1960 ++ *
1961 ++ * The !pid check is paranoid. None of the call sites should end up
1962 ++ * with pid == 0, but better safe than sorry. Let the caller retry.
1963 + */
1964 + if (!pid)
1965 +- return -ESRCH;
1966 ++ return -EAGAIN;
1967 + p = find_get_task_by_vpid(pid);
1968 + if (!p)
1969 +- return -ESRCH;
1970 ++ return handle_exit_race(uaddr, uval, NULL);
1971 +
1972 + if (unlikely(p->flags & PF_KTHREAD)) {
1973 + put_task_struct(p);
1974 +@@ -1187,7 +1244,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
1975 + * set, we know that the task has finished the
1976 + * cleanup:
1977 + */
1978 +- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1979 ++ int ret = handle_exit_race(uaddr, uval, p);
1980 +
1981 + raw_spin_unlock_irq(&p->pi_lock);
1982 + put_task_struct(p);
1983 +@@ -1244,7 +1301,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1984 + * We are the first waiter - try to look up the owner based on
1985 + * @uval and attach to it.
1986 + */
1987 +- return attach_to_pi_owner(uval, key, ps);
1988 ++ return attach_to_pi_owner(uaddr, uval, key, ps);
1989 + }
1990 +
1991 + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1992 +@@ -1352,7 +1409,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1993 + * attach to the owner. If that fails, no harm done, we only
1994 + * set the FUTEX_WAITERS bit in the user space variable.
1995 + */
1996 +- return attach_to_pi_owner(uval, key, ps);
1997 ++ return attach_to_pi_owner(uaddr, newval, key, ps);
1998 + }
1999 +
2000 + /**
2001 +diff --git a/kernel/panic.c b/kernel/panic.c
2002 +index 8b2e002d52eb..6a6df23acd1a 100644
2003 +--- a/kernel/panic.c
2004 ++++ b/kernel/panic.c
2005 +@@ -14,6 +14,7 @@
2006 + #include <linux/kmsg_dump.h>
2007 + #include <linux/kallsyms.h>
2008 + #include <linux/notifier.h>
2009 ++#include <linux/vt_kern.h>
2010 + #include <linux/module.h>
2011 + #include <linux/random.h>
2012 + #include <linux/ftrace.h>
2013 +@@ -233,7 +234,10 @@ void panic(const char *fmt, ...)
2014 + if (_crash_kexec_post_notifiers)
2015 + __crash_kexec(NULL);
2016 +
2017 +- bust_spinlocks(0);
2018 ++#ifdef CONFIG_VT
2019 ++ unblank_screen();
2020 ++#endif
2021 ++ console_unblank();
2022 +
2023 + /*
2024 + * We may have ended up stopping the CPU holding the lock (in
2025 +diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
2026 +index 4b9127e95430..5a01c4fdbfef 100644
2027 +--- a/kernel/time/posix-timers.c
2028 ++++ b/kernel/time/posix-timers.c
2029 +@@ -289,9 +289,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
2030 + {
2031 + struct hrtimer *timer = &timr->it.real.timer;
2032 +
2033 +- if (!timr->it_interval)
2034 +- return;
2035 +-
2036 + timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
2037 + timr->it_interval);
2038 + hrtimer_restart(timer);
2039 +@@ -317,7 +314,7 @@ void posixtimer_rearm(struct siginfo *info)
2040 + if (!timr)
2041 + return;
2042 +
2043 +- if (timr->it_requeue_pending == info->si_sys_private) {
2044 ++ if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
2045 + timr->kclock->timer_rearm(timr);
2046 +
2047 + timr->it_active = 1;
2048 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2049 +index 15310f14c25e..d2cd70cfaa90 100644
2050 +--- a/mm/huge_memory.c
2051 ++++ b/mm/huge_memory.c
2052 +@@ -2127,23 +2127,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2053 + */
2054 + old_pmd = pmdp_invalidate(vma, haddr, pmd);
2055 +
2056 +-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2057 + pmd_migration = is_pmd_migration_entry(old_pmd);
2058 +- if (pmd_migration) {
2059 ++ if (unlikely(pmd_migration)) {
2060 + swp_entry_t entry;
2061 +
2062 + entry = pmd_to_swp_entry(old_pmd);
2063 + page = pfn_to_page(swp_offset(entry));
2064 +- } else
2065 +-#endif
2066 ++ write = is_write_migration_entry(entry);
2067 ++ young = false;
2068 ++ soft_dirty = pmd_swp_soft_dirty(old_pmd);
2069 ++ } else {
2070 + page = pmd_page(old_pmd);
2071 ++ if (pmd_dirty(old_pmd))
2072 ++ SetPageDirty(page);
2073 ++ write = pmd_write(old_pmd);
2074 ++ young = pmd_young(old_pmd);
2075 ++ soft_dirty = pmd_soft_dirty(old_pmd);
2076 ++ }
2077 + VM_BUG_ON_PAGE(!page_count(page), page);
2078 + page_ref_add(page, HPAGE_PMD_NR - 1);
2079 +- if (pmd_dirty(old_pmd))
2080 +- SetPageDirty(page);
2081 +- write = pmd_write(old_pmd);
2082 +- young = pmd_young(old_pmd);
2083 +- soft_dirty = pmd_soft_dirty(old_pmd);
2084 +
2085 + /*
2086 + * Withdraw the table only after we mark the pmd entry invalid.
2087 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2088 +index 6a62b2421cdf..93e73ccb4dec 100644
2089 +--- a/mm/page_alloc.c
2090 ++++ b/mm/page_alloc.c
2091 +@@ -5538,6 +5538,18 @@ not_early:
2092 + cond_resched();
2093 + }
2094 + }
2095 ++#ifdef CONFIG_SPARSEMEM
2096 ++ /*
2097 ++ * If the zone does not span the rest of the section then
2098 ++ * we should at least initialize those pages. Otherwise we
2099 ++ * could blow up on a poisoned page in some paths which depend
2100 ++ * on full sections being initialized (e.g. memory hotplug).
2101 ++ */
2102 ++ while (end_pfn % PAGES_PER_SECTION) {
2103 ++ __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
2104 ++ end_pfn++;
2105 ++ }
2106 ++#endif
2107 + }
2108 +
2109 + static void __meminit zone_init_free_lists(struct zone *zone)
2110 +@@ -7704,11 +7716,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
2111 + * handle each tail page individually in migration.
2112 + */
2113 + if (PageHuge(page)) {
2114 ++ struct page *head = compound_head(page);
2115 ++ unsigned int skip_pages;
2116 +
2117 +- if (!hugepage_migration_supported(page_hstate(page)))
2118 ++ if (!hugepage_migration_supported(page_hstate(head)))
2119 + goto unmovable;
2120 +
2121 +- iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
2122 ++ skip_pages = (1 << compound_order(head)) - (page - head);
2123 ++ iter += skip_pages - 1;
2124 + continue;
2125 + }
2126 +
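
The has_unmovable_pages() hunk fixes the skip arithmetic for hugepages: the old round_up() assumed the scan position was aligned to the compound page, which fails when the range starts mid-hugepage (and page_hstate() was handed a tail page rather than the head). The new form counts only the tail pages actually remaining. A userspace sketch with made-up numbers, where an order-9 (512-page) hugepage entered 100 pages past its head skips exactly the remaining 412:

    #include <stdio.h>

    int main(void)
    {
        unsigned long head = 0x1000;          /* hugepage head pfn */
        unsigned long cur  = head + 100;      /* current scan position */
        unsigned int order = 9;               /* 2MB hugepage: 512 pages */

        unsigned int skip = (1u << order) - (cur - head);
        printf("skip %u pages -> next pfn 0x%lx\n", skip, cur + skip);
        /* prints: skip 412 pages -> next pfn 0x1200, the first page
         * past the hugepage */
        return 0;
    }
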
2127 +diff --git a/mm/vmscan.c b/mm/vmscan.c
2128 +index c5ef7240cbcb..961401c46334 100644
2129 +--- a/mm/vmscan.c
2130 ++++ b/mm/vmscan.c
2131 +@@ -2456,9 +2456,11 @@ out:
2132 + /*
2133 + * Scan types proportional to swappiness and
2134 + * their relative recent reclaim efficiency.
2135 ++ * Make sure we don't miss the last page
2136 ++ * because of a round-off error.
2137 + */
2138 +- scan = div64_u64(scan * fraction[file],
2139 +- denominator);
2140 ++ scan = DIV64_U64_ROUND_UP(scan * fraction[file],
2141 ++ denominator);
2142 + break;
2143 + case SCAN_FILE:
2144 + case SCAN_ANON:
2145 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2146 +index b669262682c9..7a34990a68b1 100644
2147 +--- a/net/xfrm/xfrm_state.c
2148 ++++ b/net/xfrm/xfrm_state.c
2149 +@@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
2150 + module_put(mode->owner);
2151 + }
2152 +
2153 ++void xfrm_state_free(struct xfrm_state *x)
2154 ++{
2155 ++ kmem_cache_free(xfrm_state_cache, x);
2156 ++}
2157 ++EXPORT_SYMBOL(xfrm_state_free);
2158 ++
2159 + static void xfrm_state_gc_destroy(struct xfrm_state *x)
2160 + {
2161 + tasklet_hrtimer_cancel(&x->mtimer);
2162 +@@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
2163 + }
2164 + xfrm_dev_state_free(x);
2165 + security_xfrm_state_free(x);
2166 +- kmem_cache_free(xfrm_state_cache, x);
2167 ++ xfrm_state_free(x);
2168 + }
2169 +
2170 + static void xfrm_state_gc_task(struct work_struct *work)
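
The xfrm hunks here and below fix an allocator mismatch: xfrm_state objects come from the xfrm_state_cache kmem_cache, but xfrm_add_acquire() freed them with plain kfree(). Memory obtained from kmem_cache_alloc() must be returned with kmem_cache_free() on the same cache, which the newly exported xfrm_state_free() guarantees. A minimal sketch of the pairing rule (struct obj and obj_cache are stand-ins):

    #include <linux/slab.h>

    struct obj { int v; };              /* stand-in object type */
    static struct kmem_cache *obj_cache;

    /* init, once:
     *   obj_cache = kmem_cache_create("obj", sizeof(struct obj),
     *                                 0, 0, NULL);
     */

    static void demo(void)
    {
        struct obj *o = kmem_cache_alloc(obj_cache, GFP_KERNEL);

        if (!o)
            return;
        /* Must pair with the allocating cache; kfree(o) here is a
         * bug that slab debugging and KASAN will flag. */
        kmem_cache_free(obj_cache, o);
    }
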
2171 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
2172 +index df7ca2dabc48..566919838d5e 100644
2173 +--- a/net/xfrm/xfrm_user.c
2174 ++++ b/net/xfrm/xfrm_user.c
2175 +@@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2176 +
2177 + }
2178 +
2179 +- kfree(x);
2180 ++ xfrm_state_free(x);
2181 + kfree(xp);
2182 +
2183 + return 0;
2184 +
2185 + free_state:
2186 +- kfree(x);
2187 ++ xfrm_state_free(x);
2188 + nomem:
2189 + return err;
2190 + }