Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Wed, 21 Mar 2018 14:42:46
Message-Id: 1521643351.eaeb5c4a6f5cf7ae7aa3f783c83f06ce942641a9.mpagano@gentoo
commit: eaeb5c4a6f5cf7ae7aa3f783c83f06ce942641a9
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 21 14:42:31 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 21 14:42:31 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eaeb5c4a

Linux patch 4.15.12

0000_README | 4 +
1011_linux-4.15.12.patch | 1976 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1980 insertions(+)

diff --git a/0000_README b/0000_README
index 6b57403..2ad5c9b 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-4.15.11.patch
From: http://www.kernel.org
Desc: Linux 4.15.11

+Patch: 1011_linux-4.15.12.patch
+From: http://www.kernel.org
+Desc: Linux 4.15.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.15.12.patch b/1011_linux-4.15.12.patch
new file mode 100644
index 0000000..b55e91e
--- /dev/null
+++ b/1011_linux-4.15.12.patch
@@ -0,0 +1,1976 @@
+diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
+index e64d903bcbe8..46da5f184460 100644
+--- a/Documentation/devicetree/bindings/usb/dwc2.txt
++++ b/Documentation/devicetree/bindings/usb/dwc2.txt
+@@ -19,7 +19,7 @@ Required properties:
+ configured in FS mode;
+ - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
+ configured in HS mode;
+- - "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs
++ - "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
+ configured in HS mode;
+ - reg : Should contain 1 register range (address and length)
+ - interrupts : Should contain 1 interrupt
+diff --git a/Makefile b/Makefile
+index 74c0f5e8dd55..2e6ba1553dff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 15
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 79089778725b..e3b45546d589 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
+ rp3440, etc. So, avoid it if the mm isn't too big. */
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ mm_total_size(mm) >= parisc_cache_flush_threshold) {
+- flush_tlb_all();
++ if (mm->context)
++ flush_tlb_all();
+ flush_cache_all();
+ return;
+ }
+@@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
++ if (unlikely(mm->context))
++ flush_tlb_page(vma, addr);
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
+@@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
++ pgd_t *pgd;
++ unsigned long addr;
++
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_cache_flush_threshold) {
+- flush_tlb_range(vma, start, end);
++ if (vma->vm_mm->context)
++ flush_tlb_range(vma, start, end);
+ flush_cache_all();
+ return;
+ }
+
+- flush_user_dcache_range_asm(start, end);
+- if (vma->vm_flags & VM_EXEC)
+- flush_user_icache_range_asm(start, end);
+- flush_tlb_range(vma, start, end);
++ if (vma->vm_mm->context == mfsp(3)) {
++ flush_user_dcache_range_asm(start, end);
++ if (vma->vm_flags & VM_EXEC)
++ flush_user_icache_range_asm(start, end);
++ flush_tlb_range(vma, start, end);
++ return;
++ }
++
++ pgd = vma->vm_mm->pgd;
++ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
++ unsigned long pfn;
++ pte_t *ptep = get_ptep(pgd, addr);
++ if (!ptep)
++ continue;
++ pfn = pte_pfn(*ptep);
++ if (pfn_valid(pfn)) {
++ if (unlikely(vma->vm_mm->context))
++ flush_tlb_page(vma, addr);
++ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
++ }
++ }
+ }
+
+ void
+ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+ {
+- BUG_ON(!vma->vm_mm->context);
+-
+ if (pfn_valid(pfn)) {
+- flush_tlb_page(vma, vmaddr);
++ if (likely(vma->vm_mm->context))
++ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+ }
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 66c14347c502..23a65439c37c 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -314,6 +314,7 @@
+ #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
+ #define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
+ #define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
++#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
+ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
+ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
+ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
+@@ -326,6 +327,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index d0dabeae0505..f928ad9b143f 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -183,7 +183,10 @@
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
++# define CALL_NOSPEC \
++ ALTERNATIVE( \
++ ANNOTATE_RETPOLINE_SAFE \
++ "call *%[thunk_target]\n", \
+ " jmp 904f;\n" \
+ " .align 16\n" \
+ "901: call 903f;\n" \
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 4aa9fd379390..c3af167d0a70 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
+ /*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from;
+- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
++ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 5edb27f1a2c4..9d0b5af7db91 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
+ return;
+
+ check_vip:
+- if (VEFLAGS & X86_EFLAGS_VIP) {
++ if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
++ (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
+ save_v86_state(regs, VM86_STI);
+ return;
+ }
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index fe2cb4cfa75b..37277859a2a1 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2758,8 +2758,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+ else
+ pte_access &= ~ACC_WRITE_MASK;
+
++ if (!kvm_is_mmio_pfn(pfn))
++ spte |= shadow_me_mask;
++
+ spte |= (u64)pfn << PAGE_SHIFT;
+- spte |= shadow_me_mask;
+
+ if (pte_access & ACC_WRITE_MASK) {
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index c88573d90f3e..25a30b5d6582 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
+ if (!pmd_k)
+ return -1;
+
+- if (pmd_huge(*pmd_k))
++ if (pmd_large(*pmd_k))
+ return 0;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
+ if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
+ BUG();
+
+- if (pud_huge(*pud))
++ if (pud_large(*pud))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
+@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
+ if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
+ BUG();
+
+- if (pmd_huge(*pmd))
++ if (pmd_large(*pmd))
+ return 0;
+
+ pte_ref = pte_offset_kernel(pmd_ref, address);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index 21e7ae159dff..9f72993a6175 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
+ /* don't do anything if sink is not display port, i.e.,
+ * passive dp->(dvi|hdmi) adaptor
+ */
+- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+- int saved_dpms = connector->dpms;
+- /* Only turn off the display if it's physically disconnected */
+- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+- /* Don't try to start link training before we
+- * have the dpcd */
+- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+- return;
+-
+- /* set it to OFF so that drm_helper_connector_dpms()
+- * won't return immediately since the current state
+- * is ON at this point.
+- */
+- connector->dpms = DRM_MODE_DPMS_OFF;
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+- }
+- connector->dpms = saved_dpms;
++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
++ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
++ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
++ /* Don't start link training before we have the DPCD */
++ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
++ return;
++
++ /* Turn the connector off and back on immediately, which
++ * will trigger link training
++ */
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 1eac7c3c687b..e0eef2c41190 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+ struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
+
+ if (robj) {
+- if (robj->gem_base.import_attach)
+- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+ amdgpu_mn_unregister(robj);
+ amdgpu_bo_unref(&robj);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index ea25164e7f4b..828252dc1d91 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -44,6 +44,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+
+ amdgpu_bo_kunmap(bo);
+
++ if (bo->gem_base.import_attach)
++ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
+ drm_gem_object_release(&bo->gem_base);
+ amdgpu_bo_unref(&bo->parent);
+ if (!list_empty(&bo->shadow_list)) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+index 380f340204e8..f56f60f695e1 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
+ struct nvif_device *device = &drm->client.device;
+ struct drm_connector *connector;
+
++ INIT_LIST_HEAD(&drm->bl_connectors);
++
+ if (apple_gmux_present()) {
+ NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
+ return 0;
+ }
+
+- INIT_LIST_HEAD(&drm->bl_connectors);
+-
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+index e35d3e17cd7c..c6e3d0dd1070 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
+
+ tail = this->addr + this->size;
+ if (vmm->func->page_block && next && next->page != p)
+- tail = ALIGN_DOWN(addr, vmm->func->page_block);
++ tail = ALIGN_DOWN(tail, vmm->func->page_block);
+
+ if (addr <= tail && tail - addr >= size) {
+ rb_erase(&this->tree, &vmm->free);
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index cf3deb283da5..065c058f7b5f 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
+ struct radeon_bo *robj = gem_to_radeon_bo(gobj);
+
+ if (robj) {
+- if (robj->gem_base.import_attach)
+- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+ radeon_mn_unregister(robj);
+ radeon_bo_unref(&robj);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 093594976126..baadb706c276 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_clear_surface_reg(bo);
+ WARN_ON_ONCE(!list_empty(&bo->va));
++ if (bo->gem_base.import_attach)
++ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
+ drm_gem_object_release(&bo->gem_base);
+ kfree(bo);
+ }
+diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
+index 42713511b53b..524e6134642e 100644
+--- a/drivers/infiniband/sw/rdmavt/mr.c
++++ b/drivers/infiniband/sw/rdmavt/mr.c
+@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
+ unsigned long timeout;
+ struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
+
+- if (percpu_ref_is_zero(&mr->refcount))
+- return 0;
+- /* avoid dma mr */
+- if (mr->lkey)
++ if (mr->lkey) {
++ /* avoid dma mr */
+ rvt_dereg_clean_qps(mr);
++ /* @mr was indexed on rcu protected @lkey_table */
++ synchronize_rcu();
++ }
++
+ timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
+ if (!timeout) {
+ rvt_pr_err(rdi,
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 06f025fd5726..12c325066deb 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = {
+ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ */
+ #define IRQS_PER_CHUNK_SHIFT 5
+-#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
++#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
+ #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
+
+ static unsigned long *lpi_bitmap;
+@@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ /*
+- * At least one bit of EventID is being used, hence a minimum
+- * of two entries. No, the architecture doesn't let you
+- * express an ITT with a single entry.
++ * We allocate at least one chunk worth of LPIs per device,
++ * and thus that many ITEs. The device may require less though.
+ */
+- nr_ites = max(2UL, roundup_pow_of_two(nvecs));
++ nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+ sz = nr_ites * its->ite_size;
+ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
+ itt = kzalloc(sz, GFP_KERNEL);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 3551fbd6fe41..935593032123 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2052,6 +2052,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
+ NULL,
+ };
+
++static int nvme_active_ctrls(struct nvme_subsystem *subsys)
++{
++ int count = 0;
++ struct nvme_ctrl *ctrl;
++
++ mutex_lock(&subsys->lock);
++ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
++ if (ctrl->state != NVME_CTRL_DELETING &&
++ ctrl->state != NVME_CTRL_DEAD)
++ count++;
++ }
++ mutex_unlock(&subsys->lock);
++
++ return count;
++}
++
+ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ {
+ struct nvme_subsystem *subsys, *found;
+@@ -2090,7 +2106,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ * Verify that the subsystem actually supports multiple
+ * controllers, else bail out.
+ */
+- if (!(id->cmic & (1 << 1))) {
++ if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
+ dev_err(ctrl->device,
+ "ignoring ctrl due to duplicate subnqn (%s).\n",
+ found->subnqn);
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
+index 1e7ce0b6f299..1b7febc43da9 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
++++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
+@@ -50,6 +50,8 @@
+ #define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
+ #define USB_CTRL_EBRIDGE 0x0c
+ #define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
++#define USB_CTRL_OBRIDGE 0x10
++#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000
+ #define USB_CTRL_MDIO 0x14
+ #define USB_CTRL_MDIO2 0x18
+ #define USB_CTRL_UTMI_CTL_1 0x2c
+@@ -71,6 +73,7 @@
+ #define USB_CTRL_USB30_CTL1_USB3_IPP_MASK 0x20000000 /* option */
+ #define USB_CTRL_USB30_PCTL 0x70
+ #define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK 0x00000002
++#define USB_CTRL_USB30_PCTL_PHY3_IDDQ_OVERRIDE_MASK 0x00008000
+ #define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK 0x00020000
+ #define USB_CTRL_USB_DEVICE_CTL1 0x90
+ #define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 /* option */
+@@ -116,7 +119,6 @@ enum {
+ USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR,
+ USB_CTRL_SETUP_OC3_DISABLE_SELECTOR,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR,
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_SELECTOR,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB_PM_USB_PWRDN_SELECTOR,
+@@ -203,7 +205,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+@@ -225,7 +226,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+@@ -247,7 +247,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+@@ -269,7 +268,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+@@ -291,7 +289,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+@@ -313,7 +310,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+- 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+@@ -335,7 +331,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+- 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+@@ -357,7 +352,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+- 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+@@ -379,7 +373,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+@@ -401,7 +394,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+@@ -926,6 +918,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ break;
+ default:
++ USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ USB_CTRL_SET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ break;
+ }
+@@ -952,13 +945,17 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+ * Don't enable this so the memory controller doesn't read
+ * into memory holes. NOTE: This bit is low true on 7366C0.
+ */
+- USB_CTRL_SET_FAMILY(params, EBRIDGE, ESTOP_SCB_REQ);
++ USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ);
+
+ /* Setup the endian bits */
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
++
++ if (params->selected_family == BRCM_FAMILY_7271A0)
++ /* Enable LS keep alive fix for certain keyboards */
++ USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE);
+ }
+
+ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+@@ -1003,6 +1000,7 @@ void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+ void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+ {
+ brcmusb_xhci_soft_reset(params, 1);
++ USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+ }
+
+ void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
+index 195b98139e5f..d1dab36fa5b7 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb.c
++++ b/drivers/phy/broadcom/phy-brcm-usb.c
+@@ -338,9 +338,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
+ ARRAY_SIZE(brcm_dr_mode_to_name),
+ mode, &priv->ini.mode);
+ }
+- if (of_property_read_bool(dn, "brcm,has_xhci"))
++ if (of_property_read_bool(dn, "brcm,has-xhci"))
+ priv->has_xhci = true;
+- if (of_property_read_bool(dn, "brcm,has_eohci"))
++ if (of_property_read_bool(dn, "brcm,has-eohci"))
+ priv->has_eohci = true;
+
+ err = brcm_usb_phy_dvr_init(dev, priv, dn);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 6082389f25c3..7b44a2c68a45 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -102,11 +102,16 @@ qla2x00_async_iocb_timeout(void *data)
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct event_arg ea;
+
+- ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+- "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
++ if (fcport) {
++ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
++ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
++ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+
+- fcport->flags &= ~FCF_ASYNC_SENT;
++ fcport->flags &= ~FCF_ASYNC_SENT;
++ } else {
++ pr_info("Async-%s timeout - hdl=%x.\n",
++ sp->name, sp->handle);
++ }
+
+ switch (sp->type) {
+ case SRB_LOGIN_CMD:
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index e538e6308885..522d585a1a08 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -582,8 +582,9 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+ ret = qla25xx_init_req_que(vha, req);
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
++
++ qla25xx_free_req_que(vha, req);
+ }
+- qla25xx_free_req_que(vha, req);
+
+ return ret;
+ }
+@@ -598,8 +599,9 @@ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+ ret = qla25xx_init_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
++
++ qla25xx_free_rsp_que(vha, rsp);
+ }
+- qla25xx_free_rsp_que(vha, rsp);
+
+ return ret;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 1f69e89b950f..1204c1d59bc4 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -449,7 +449,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ ha->req_q_map[0] = req;
+ set_bit(0, ha->rsp_qid_map);
+ set_bit(0, ha->req_qid_map);
+- return 1;
++ return 0;
+
+ fail_qpair_map:
+ kfree(ha->base_qpair);
+@@ -466,6 +466,9 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+
+ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
+ {
++ if (!ha->req_q_map)
++ return;
++
+ if (IS_QLAFX00(ha)) {
+ if (req && req->ring_fx00)
+ dma_free_coherent(&ha->pdev->dev,
+@@ -476,14 +479,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
+ (req->length + 1) * sizeof(request_t),
+ req->ring, req->dma);
+
+- if (req)
++ if (req) {
+ kfree(req->outstanding_cmds);
+-
+- kfree(req);
++ kfree(req);
++ }
+ }
+
+ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+ {
++ if (!ha->rsp_q_map)
++ return;
++
+ if (IS_QLAFX00(ha)) {
+ if (rsp && rsp->ring)
+ dma_free_coherent(&ha->pdev->dev,
+@@ -494,7 +500,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+ (rsp->length + 1) * sizeof(response_t),
+ rsp->ring, rsp->dma);
+ }
+- kfree(rsp);
++ if (rsp)
++ kfree(rsp);
+ }
+
+ static void qla2x00_free_queues(struct qla_hw_data *ha)
+@@ -1717,6 +1724,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+ struct qla_tgt_cmd *cmd;
+ uint8_t trace = 0;
+
++ if (!ha->req_q_map)
++ return;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+@@ -3071,14 +3080,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+- goto probe_hw_failed;
++ goto probe_failed;
+
+ /* Alloc arrays of request and response ring ptrs */
+- if (!qla2x00_alloc_queues(ha, req, rsp)) {
++ if (qla2x00_alloc_queues(ha, req, rsp)) {
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+ "Failed to allocate memory for queue pointers..."
+ "aborting.\n");
+- goto probe_init_failed;
++ goto probe_failed;
+ }
+
+ if (ha->mqenable && shost_use_blk_mq(host)) {
+@@ -3363,15 +3372,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ return 0;
+
+-probe_init_failed:
+- qla2x00_free_req_que(ha, req);
+- ha->req_q_map[0] = NULL;
+- clear_bit(0, ha->req_qid_map);
+- qla2x00_free_rsp_que(ha, rsp);
+- ha->rsp_q_map[0] = NULL;
+- clear_bit(0, ha->rsp_qid_map);
+- ha->max_req_queues = ha->max_rsp_queues = 0;
+-
+ probe_failed:
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+@@ -4451,11 +4451,17 @@ qla2x00_mem_free(struct qla_hw_data *ha)
+ if (ha->init_cb)
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+ ha->init_cb, ha->init_cb_dma);
+- vfree(ha->optrom_buffer);
+- kfree(ha->nvram);
+- kfree(ha->npiv_info);
+- kfree(ha->swl);
+- kfree(ha->loop_id_map);
++
++ if (ha->optrom_buffer)
++ vfree(ha->optrom_buffer);
++ if (ha->nvram)
++ kfree(ha->nvram);
++ if (ha->npiv_info)
++ kfree(ha->npiv_info);
++ if (ha->swl)
++ kfree(ha->swl);
++ if (ha->loop_id_map)
++ kfree(ha->loop_id_map);
+
+ ha->srb_mempool = NULL;
+ ha->ctx_mempool = NULL;
+@@ -4471,6 +4477,15 @@ qla2x00_mem_free(struct qla_hw_data *ha)
+ ha->ex_init_cb_dma = 0;
+ ha->async_pd = NULL;
+ ha->async_pd_dma = 0;
++ ha->loop_id_map = NULL;
++ ha->npiv_info = NULL;
++ ha->optrom_buffer = NULL;
++ ha->swl = NULL;
++ ha->nvram = NULL;
++ ha->mctp_dump = NULL;
++ ha->dcbx_tlv = NULL;
++ ha->xgmac_data = NULL;
++ ha->sfp_data = NULL;
+
+ ha->s_dma_pool = NULL;
+ ha->dl_dma_pool = NULL;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index cb35bb1ae305..46bb4d057293 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -982,6 +982,7 @@ static void qlt_free_session_done(struct work_struct *work)
+
+ logo.id = sess->d_id;
+ logo.cmd_count = 0;
++ sess->send_els_logo = 0;
+ qlt_send_first_logo(vha, &logo);
+ }
+
+diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
+index 03fd20f0b496..c4a47496d2fb 100644
+--- a/drivers/usb/dwc2/params.c
++++ b/drivers/usb/dwc2/params.c
+@@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
+ p->activate_stm_fs_transceiver = true;
+ }
+
+-static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg)
++static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
+ {
+ struct dwc2_core_params *p = &hsotg->params;
+
+@@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "st,stm32f4x9-fsotg",
+ .data = dwc2_set_stm32f4x9_fsotg_params },
+ { .compatible = "st,stm32f4x9-hsotg" },
+- { .compatible = "st,stm32f7xx-hsotg",
+- .data = dwc2_set_stm32f7xx_hsotg_params },
++ { .compatible = "st,stm32f7-hsotg",
++ .data = dwc2_set_stm32f7_hsotg_params },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 51de21ef3cdc..b417d9aeaeeb 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
++
++ dwc->current_dr_role = mode;
+ }
+
+ static void __dwc3_set_mode(struct work_struct *work)
+@@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work)
+
+ dwc3_set_prtcap(dwc, dwc->desired_dr_role);
+
+- dwc->current_dr_role = dwc->desired_dr_role;
+-
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (dwc->desired_dr_role) {
+@@ -218,7 +218,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ * XHCI driver will reset the host block. If dwc3 was configured for
+ * host-only mode, then we can return early.
+ */
+- if (dwc->dr_mode == USB_DR_MODE_HOST)
++ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ return 0;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+@@ -915,7 +915,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
+
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+- dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+ if (dwc->usb2_phy)
+@@ -931,7 +930,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
+ }
+ break;
+ case USB_DR_MODE_HOST:
+- dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+
+ if (dwc->usb2_phy)
+@@ -1279,7 +1277,7 @@ static int dwc3_remove(struct platform_device *pdev)
+ }
+
+ #ifdef CONFIG_PM
+-static int dwc3_suspend_common(struct dwc3 *dwc)
++static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+ unsigned long flags;
+
+@@ -1291,6 +1289,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
+ dwc3_core_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
++ /* do nothing during host runtime_suspend */
++ if (!PMSG_IS_AUTO(msg))
++ dwc3_core_exit(dwc);
++ break;
+ default:
+ /* do nothing */
+ break;
+@@ -1299,7 +1301,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
+ return 0;
+ }
+
+-static int dwc3_resume_common(struct dwc3 *dwc)
++static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+ unsigned long flags;
+ int ret;
+@@ -1315,6 +1317,13 @@ static int dwc3_resume_common(struct dwc3 *dwc)
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
++ /* nothing to do on host runtime_resume */
++ if (!PMSG_IS_AUTO(msg)) {
++ ret = dwc3_core_init(dwc);
++ if (ret)
++ return ret;
++ }
++ break;
+ default:
+ /* do nothing */
+ break;
+@@ -1326,12 +1335,11 @@ static int dwc3_resume_common(struct dwc3 *dwc)
+ static int dwc3_runtime_checks(struct dwc3 *dwc)
+ {
+ switch (dwc->current_dr_role) {
+- case USB_DR_MODE_PERIPHERAL:
+- case USB_DR_MODE_OTG:
++ case DWC3_GCTL_PRTCAP_DEVICE:
+ if (dwc->connected)
+ return -EBUSY;
+ break;
+- case USB_DR_MODE_HOST:
++ case DWC3_GCTL_PRTCAP_HOST:
+ default:
+ /* do nothing */
+ break;
+@@ -1348,7 +1356,7 @@ static int dwc3_runtime_suspend(struct device *dev)
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
+
+- ret = dwc3_suspend_common(dwc);
++ ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
+ if (ret)
+ return ret;
+
+@@ -1364,7 +1372,7 @@ static int dwc3_runtime_resume(struct device *dev)
+
+ device_init_wakeup(dev, false);
+
+- ret = dwc3_resume_common(dwc);
++ ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
+ if (ret)
+ return ret;
+
+@@ -1411,7 +1419,7 @@ static int dwc3_suspend(struct device *dev)
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+- ret = dwc3_suspend_common(dwc);
++ ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
+ if (ret)
+ return ret;
+
+@@ -1427,7 +1435,7 @@ static int dwc3_resume(struct device *dev)
+
+ pinctrl_pm_select_default_state(dev);
+
+- ret = dwc3_resume_common(dwc);
++ ret = dwc3_resume_common(dwc, PMSG_RESUME);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 4a4a4c98508c..6d4e7a66cedd 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -158,13 +158,15 @@
+ #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
+ #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
+
+-#define DWC3_TXFIFOQ 1
+-#define DWC3_RXFIFOQ 3
+-#define DWC3_TXREQQ 5
+-#define DWC3_RXREQQ 7
+-#define DWC3_RXINFOQ 9
+-#define DWC3_DESCFETCHQ 13
+-#define DWC3_EVENTQ 15
++#define DWC3_TXFIFOQ 0
++#define DWC3_RXFIFOQ 1
++#define DWC3_TXREQQ 2
++#define DWC3_RXREQQ 3
++#define DWC3_RXINFOQ 4
++#define DWC3_PSTATQ 5
++#define DWC3_DESCFETCHQ 6
++#define DWC3_EVENTQ 7
++#define DWC3_AUXEVENTQ 8
+
+ /* Global RX Threshold Configuration Register */
+ #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
+diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
+index 7ae0eefc7cc7..e54c3622eb28 100644
+--- a/drivers/usb/dwc3/dwc3-of-simple.c
++++ b/drivers/usb/dwc3/dwc3-of-simple.c
+@@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
+ clk_disable_unprepare(simple->clks[i]);
+ clk_put(simple->clks[i]);
+ }
++ simple->num_clocks = 0;
+
+ reset_control_assert(simple->resets);
+ reset_control_put(simple->resets);
+diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
+index 1e940f054cb8..6dbc489513cd 100644
+--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
++++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
+@@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ if (ret) {
+ dev_err(&pci->dev,
+ "couldn't add resources to bdc device\n");
++ platform_device_put(bdc);
+ return ret;
+ }
+
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 6e87af248367..409cde4e6a51 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
+ __renesas_usb3_ep_free_request(usb3->ep0_req);
+ if (usb3->phy)
+ phy_put(usb3->phy);
+- pm_runtime_disable(usb3_to_dev(usb3));
++ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+ }
+diff --git a/fs/aio.c b/fs/aio.c
+index a062d75109cb..6bcd3fb5265a 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -68,9 +68,9 @@ struct aio_ring {
+ #define AIO_RING_PAGES 8
+
+ struct kioctx_table {
+- struct rcu_head rcu;
+- unsigned nr;
+- struct kioctx *table[];
++ struct rcu_head rcu;
++ unsigned nr;
++ struct kioctx __rcu *table[];
+ };
+
+ struct kioctx_cpu {
+@@ -115,7 +115,8 @@ struct kioctx {
+ struct page **ring_pages;
+ long nr_pages;
+
+- struct work_struct free_work;
++ struct rcu_head free_rcu;
++ struct work_struct free_work; /* see free_ioctx() */
+
+ /*
+ * signals when all in-flight requests are done
+@@ -329,7 +330,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
+ for (i = 0; i < table->nr; i++) {
+ struct kioctx *ctx;
+
+- ctx = table->table[i];
++ ctx = rcu_dereference(table->table[i]);
+ if (ctx && ctx->aio_ring_file == file) {
+ if (!atomic_read(&ctx->dead)) {
+ ctx->user_id = ctx->mmap_base = vma->vm_start;
+@@ -588,6 +589,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
+ return cancel(&kiocb->common);
+ }
+
++/*
++ * free_ioctx() should be RCU delayed to synchronize against the RCU
++ * protected lookup_ioctx() and also needs process context to call
++ * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
++ * ->free_work.
++ */
+ static void free_ioctx(struct work_struct *work)
+ {
+ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+@@ -601,6 +608,14 @@ static void free_ioctx(struct work_struct *work)
+ kmem_cache_free(kioctx_cachep, ctx);
+ }
+
++static void free_ioctx_rcufn(struct rcu_head *head)
++{
++ struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
++
++ INIT_WORK(&ctx->free_work, free_ioctx);
++ schedule_work(&ctx->free_work);
++}
++
+ static void free_ioctx_reqs(struct percpu_ref *ref)
+ {
+ struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+@@ -609,8 +624,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
+ complete(&ctx->rq_wait->comp);
+
+- INIT_WORK(&ctx->free_work, free_ioctx);
+- schedule_work(&ctx->free_work);
++ /* Synchronize against RCU protected table->table[] dereferences */
++ call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
+ }
+
+ /*
+@@ -651,9 +666,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+ while (1) {
+ if (table)
+ for (i = 0; i < table->nr; i++)
+- if (!table->table[i]) {
++ if (!rcu_access_pointer(table->table[i])) {
+ ctx->id = i;
+- table->table[i] = ctx;
++ rcu_assign_pointer(table->table[i], ctx);
+ spin_unlock(&mm->ioctx_lock);
+
+ /* While kioctx setup is in progress,
+@@ -834,11 +849,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+ }
+
+ table = rcu_dereference_raw(mm->ioctx_table);
+- WARN_ON(ctx != table->table[ctx->id]);
+- table->table[ctx->id] = NULL;
++ WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
++ RCU_INIT_POINTER(table->table[ctx->id], NULL);
+ spin_unlock(&mm->ioctx_lock);
+
+- /* percpu_ref_kill() will do the necessary call_rcu() */
++ /* free_ioctx_reqs() will do the necessary RCU synchronization */
+ wake_up_all(&ctx->wait);
+
+ /*
+@@ -880,7 +895,8 @@ void exit_aio(struct mm_struct *mm)
+
+ skipped = 0;
+ for (i = 0; i < table->nr; ++i) {
+- struct kioctx *ctx = table->table[i];
++ struct kioctx *ctx =
++ rcu_dereference_protected(table->table[i], true);
+
+ if (!ctx) {
+ skipped++;
+@@ -1069,7 +1085,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
+ if (!table || id >= table->nr)
+ goto out;
+
+- ctx = table->table[id];
++ ctx = rcu_dereference(table->table[id]);
+ if (ctx && ctx->user_id == ctx_id) {
+ percpu_ref_get(&ctx->users);
+ ret = ctx;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 7d0dc100a09a..8a9df8003345 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1263,7 +1263,16 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
+ while (node) {
+ ref = rb_entry(node, struct prelim_ref, rbnode);
+ node = rb_next(&ref->rbnode);
+- WARN_ON(ref->count < 0);
++ /*
++ * ref->count < 0 can happen here if there are delayed
++ * refs with a node->action of BTRFS_DROP_DELAYED_REF.
++ * prelim_ref_insert() relies on this when merging
++ * identical refs to keep the overall count correct.
++ * prelim_ref_insert() will merge only those refs
++ * which compare identically. Any refs having
++ * e.g. different offsets would not be merged,
++ * and would retain their original ref->count < 0.
++ */
+ if (roots && ref->count && ref->root_id && ref->parent == 0) {
+ if (sc && sc->root_objectid &&
+ ref->root_id != sc->root_objectid) {
+@@ -1509,6 +1518,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
+ if (!node)
+ break;
+ bytenr = node->val;
++ shared.share_count = 0;
+ cond_resched();
+ }
+
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 8903c4fbf7e6..8a3e42412506 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -1351,6 +1351,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
+ stripe_start = stripe->physical;
+ if (physical >= stripe_start &&
+ physical < stripe_start + rbio->stripe_len &&
++ stripe->dev->bdev &&
+ bio->bi_disk == stripe->dev->bdev->bd_disk &&
+ bio->bi_partno == stripe->dev->bdev->bd_partno) {
+ return i;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index a25684287501..6631f48c6a11 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -574,6 +574,7 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
+ btrfs_sysfs_remove_fsid(fs_devs);
+ list_del(&fs_devs->list);
+ free_fs_devices(fs_devs);
++ break;
+ } else {
+ fs_devs->num_devices--;
+ list_del(&dev->dev_list);
+@@ -4737,10 +4738,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ ndevs = min(ndevs, devs_max);
+
+ /*
+- * the primary goal is to maximize the number of stripes, so use as many
+- * devices as possible, even if the stripes are not maximum sized.
++ * The primary goal is to maximize the number of stripes, so use as
++ * many devices as possible, even if the stripes are not maximum sized.
++ *
++ * The DUP profile stores more than one stripe per device, the
++ * max_avail is the total size so we have to adjust.
+ */
+- stripe_size = devices_info[ndevs-1].max_avail;
++ stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
+ num_stripes = ndevs * dev_stripes;
+
+ /*
+@@ -4775,8 +4779,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ stripe_size = devices_info[ndevs-1].max_avail;
+ }
+
+- stripe_size = div_u64(stripe_size, dev_stripes);
+-
+ /* align to BTRFS_STRIPE_LEN */
+ stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
+
+@@ -7091,10 +7093,24 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+- if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
++ stats_cnt = atomic_read(&device->dev_stats_ccnt);
++ if (!device->dev_stats_valid || stats_cnt == 0)
+ continue;
+
+- stats_cnt = atomic_read(&device->dev_stats_ccnt);
++
++ /*
++ * There is a LOAD-LOAD control dependency between the value of
++ * dev_stats_ccnt and updating the on-disk values which requires
++ * reading the in-memory counters. Such control dependencies
++ * require explicit read memory barriers.
++ *
++ * This memory barrier pairs with smp_mb__before_atomic in
++ * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
++ * barrier implied by atomic_xchg in
++ * btrfs_dev_stats_read_and_reset
++ */
++ smp_rmb();
++
+ ret = update_dev_stat_item(trans, fs_info, device);
+ if (!ret)
+ atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index ff15208344a7..52ee7b094f3f 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -498,6 +498,12 @@ static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+ int index)
+ {
+ atomic_inc(dev->dev_stat_values + index);
++ /*
++ * This memory barrier orders stores updating statistics before stores
++ * updating dev_stats_ccnt.
++ *
++ * It pairs with smp_rmb() in btrfs_run_dev_stats().
++ */
+ smp_mb__before_atomic();
+ atomic_inc(&dev->dev_stats_ccnt);
+ }
+@@ -523,6 +529,12 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+ int index, unsigned long val)
+ {
+ atomic_set(dev->dev_stat_values + index, val);
++ /*
++ * This memory barrier orders stores updating statistics before stores
++ * updating dev_stats_ccnt.
++ *
++ * It pairs with smp_rmb() in btrfs_run_dev_stats().
++ */
+ smp_mb__before_atomic();
+ atomic_inc(&dev->dev_stats_ccnt);
+ }
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 5c7df1df81ff..eb2c297a87d0 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -644,11 +644,16 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
+ spin_unlock(&parent->d_lock);
+ goto again;
+ }
+- rcu_read_unlock();
+- if (parent != dentry)
++ if (parent != dentry) {
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+- else
++ if (unlikely(dentry->d_lockref.count < 0)) {
++ spin_unlock(&parent->d_lock);
++ parent = NULL;
++ }
++ } else {
+ parent = NULL;
++ }
++ rcu_read_unlock();
+ return parent;
+ }
+
+diff --git a/fs/namei.c b/fs/namei.c
+index 4e3fc58dae72..ee19c4ef24b2 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -578,9 +578,10 @@ static int __nd_alloc_stack(struct nameidata *nd)
+ static bool path_connected(const struct path *path)
+ {
+ struct vfsmount *mnt = path->mnt;
++ struct super_block *sb = mnt->mnt_sb;
+
+- /* Only bind mounts can have disconnected paths */
+- if (mnt->mnt_root == mnt->mnt_sb->s_root)
++ /* Bind mounts and multi-root filesystems can have disconnected paths */
++ if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
+ return true;
+
+ return is_subdir(path->dentry, mnt->mnt_root);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 29bacdc56f6a..5e470e233c83 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2631,6 +2631,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
+ /* initial superblock/root creation */
+ mount_info->fill_super(s, mount_info);
+ nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
++ if (!(server->flags & NFS_MOUNT_UNSHARED))
++ s->s_iflags |= SB_I_MULTIROOT;
+ }
+
+ mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index 3861d61fb265..3ce946063ffe 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -295,6 +295,7 @@ xfs_reinit_inode(
+ uint32_t generation = inode->i_generation;
+ uint64_t version = inode->i_version;
+ umode_t mode = inode->i_mode;
++ dev_t dev = inode->i_rdev;
+
+ error = inode_init_always(mp->m_super, inode);
+
+@@ -302,6 +303,7 @@ xfs_reinit_inode(
+ inode->i_generation = generation;
+ inode->i_version = version;
+ inode->i_mode = mode;
++ inode->i_rdev = dev;
+ return error;
+ }
+
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 8c896540a72c..ff58c2933fdf 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -349,6 +349,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
+ bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
+ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
++void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
+
+ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 79421287ff5e..d8af431d9c91 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1312,6 +1312,7 @@ extern int send_sigurg(struct fown_struct *fown);
+ #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+ #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
+ #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
++#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
+
+ /* sb->s_iflags to limit user namespace mounts */
+ #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index c00c4c33e432..b26eccc78fb1 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -503,6 +503,7 @@
+
+ #define ICH_HCR_EN (1 << 0)
+ #define ICH_HCR_UIE (1 << 1)
++#define ICH_HCR_NPIE (1 << 3)
+ #define ICH_HCR_TC (1 << 10)
+ #define ICH_HCR_TALL0 (1 << 11)
+ #define ICH_HCR_TALL1 (1 << 12)
+diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
+index d3453ee072fc..68d8b1f73682 100644
+--- a/include/linux/irqchip/arm-gic.h
++++ b/include/linux/irqchip/arm-gic.h
+@@ -84,6 +84,7 @@
+
+ #define GICH_HCR_EN (1 << 0)
+ #define GICH_HCR_UIE (1 << 1)
++#define GICH_HCR_NPIE (1 << 3)
+
+ #define GICH_LR_VIRTUALID (0x3ff << 0)
+ #define GICH_LR_PHYSID_CPUID_SHIFT (10)
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
1440 +index c2db7e905f7d..012881461058 100644
1441 +--- a/sound/core/oss/pcm_oss.c
1442 ++++ b/sound/core/oss/pcm_oss.c
1443 +@@ -1762,10 +1762,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
1444 + return -ENOMEM;
1445 + _snd_pcm_hw_params_any(params);
1446 + err = snd_pcm_hw_refine(substream, params);
1447 +- format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
1448 +- kfree(params);
1449 + if (err < 0)
1450 +- return err;
1451 ++ goto error;
1452 ++ format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
1453 + for (fmt = 0; fmt < 32; ++fmt) {
1454 + if (snd_mask_test(format_mask, fmt)) {
1455 + int f = snd_pcm_oss_format_to(fmt);
1456 +@@ -1773,7 +1772,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
1457 + formats |= f;
1458 + }
1459 + }
1460 +- return formats;
1461 ++
1462 ++ error:
1463 ++ kfree(params);
1464 ++ return err < 0 ? err : formats;
1465 + }
1466 +
1467 + static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
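The snd_pcm_oss_get_formats() hunk fixes a use-after-free: format_mask pointed into params, which the old code freed before the loop that reads the mask. The fix is the usual single-exit cleanup pattern — the buffer outlives every reader and is freed exactly once. A generic userspace sketch of the same shape (all names illustrative):

	#include <stdlib.h>

	static int demo_get_values(void)
	{
		int err = 0, sum = 0;
		int *params = calloc(32, sizeof(*params));

		if (!params)
			return -1;

		/* ... populate/refine params; a failure jumps to cleanup ... */
		if (err < 0)
			goto error;

		/* Readers run while the buffer is still alive. */
		for (int i = 0; i < 32; i++)
			sum += params[i];

	error:
		free(params);		/* exactly one free, on every path */
		return err < 0 ? err : sum;
	}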
1468 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
1469 +index 35ff97bfd492..6204b886309a 100644
1470 +--- a/sound/core/seq/seq_clientmgr.c
1471 ++++ b/sound/core/seq/seq_clientmgr.c
1472 +@@ -255,12 +255,12 @@ static int seq_free_client1(struct snd_seq_client *client)
1473 +
1474 + if (!client)
1475 + return 0;
1476 +- snd_seq_delete_all_ports(client);
1477 +- snd_seq_queue_client_leave(client->number);
1478 + spin_lock_irqsave(&clients_lock, flags);
1479 + clienttablock[client->number] = 1;
1480 + clienttab[client->number] = NULL;
1481 + spin_unlock_irqrestore(&clients_lock, flags);
1482 ++ snd_seq_delete_all_ports(client);
1483 ++ snd_seq_queue_client_leave(client->number);
1484 + snd_use_lock_sync(&client->use_lock);
1485 + snd_seq_queue_client_termination(client->number);
1486 + if (client->pool)
1487 +diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
1488 +index bc1c8488fc2a..2bc6759e4adc 100644
1489 +--- a/sound/core/seq/seq_prioq.c
1490 ++++ b/sound/core/seq/seq_prioq.c
1491 +@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo)
1492 + if (f->cells > 0) {
1493 + /* drain prioQ */
1494 + while (f->cells > 0)
1495 +- snd_seq_cell_free(snd_seq_prioq_cell_out(f));
1496 ++ snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL));
1497 + }
1498 +
1499 + kfree(f);
1500 +@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f,
1501 + return 0;
1502 + }
1503 +
1504 ++/* return 1 if the current time >= event timestamp */
1505 ++static int event_is_ready(struct snd_seq_event *ev, void *current_time)
1506 ++{
1507 ++ if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK)
1508 ++ return snd_seq_compare_tick_time(current_time, &ev->time.tick);
1509 ++ else
1510 ++ return snd_seq_compare_real_time(current_time, &ev->time.time);
1511 ++}
1512 ++
1513 + /* dequeue cell from prioq */
1514 +-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
1515 ++struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
1516 ++ void *current_time)
1517 + {
1518 + struct snd_seq_event_cell *cell;
1519 + unsigned long flags;
1520 +@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
1521 + spin_lock_irqsave(&f->lock, flags);
1522 +
1523 + cell = f->head;
1524 ++ if (cell && current_time && !event_is_ready(&cell->event, current_time))
1525 ++ cell = NULL;
1526 + if (cell) {
1527 + f->head = cell->next;
1528 +
1529 +@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f)
1530 + return f->cells;
1531 + }
1532 +
1533 +-
1534 +-/* peek at cell at the head of the prioq */
1535 +-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f)
1536 +-{
1537 +- if (f == NULL) {
1538 +- pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n");
1539 +- return NULL;
1540 +- }
1541 +- return f->head;
1542 +-}
1543 +-
1544 +-
1545 + static inline int prioq_match(struct snd_seq_event_cell *cell,
1546 + int client, int timestamp)
1547 + {
1548 +diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h
1549 +index d38bb78d9345..2c315ca10fc4 100644
1550 +--- a/sound/core/seq/seq_prioq.h
1551 ++++ b/sound/core/seq/seq_prioq.h
1552 +@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo);
1553 + int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell);
1554 +
1555 + /* dequeue cell from prioq */
1556 +-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f);
1557 ++struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
1558 ++ void *current_time);
1559 +
1560 + /* return number of events available in prioq */
1561 + int snd_seq_prioq_avail(struct snd_seq_prioq *f);
1562 +
1563 +-/* peek at cell at the head of the prioq */
1564 +-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f);
1565 +-
1566 + /* client left queue */
1567 + void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp);
1568 +
1569 +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
1570 +index 79e0c5604ef8..1a6dc4ff44a6 100644
1571 +--- a/sound/core/seq/seq_queue.c
1572 ++++ b/sound/core/seq/seq_queue.c
1573 +@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
1574 +
1575 + __again:
1576 + /* Process tick queue... */
1577 +- while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
1578 +- if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
1579 +- &cell->event.time.tick)) {
1580 +- cell = snd_seq_prioq_cell_out(q->tickq);
1581 +- if (cell)
1582 +- snd_seq_dispatch_event(cell, atomic, hop);
1583 +- } else {
1584 +- /* event remains in the queue */
1585 ++ for (;;) {
1586 ++ cell = snd_seq_prioq_cell_out(q->tickq,
1587 ++ &q->timer->tick.cur_tick);
1588 ++ if (!cell)
1589 + break;
1590 +- }
1591 ++ snd_seq_dispatch_event(cell, atomic, hop);
1592 + }
1593 +
1594 +-
1595 + /* Process time queue... */
1596 +- while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
1597 +- if (snd_seq_compare_real_time(&q->timer->cur_time,
1598 +- &cell->event.time.time)) {
1599 +- cell = snd_seq_prioq_cell_out(q->timeq);
1600 +- if (cell)
1601 +- snd_seq_dispatch_event(cell, atomic, hop);
1602 +- } else {
1603 +- /* event remains in the queue */
1604 ++ for (;;) {
1605 ++ cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
1606 ++ if (!cell)
1607 + break;
1608 +- }
1609 ++ snd_seq_dispatch_event(cell, atomic, hop);
1610 + }
1611 +
1612 + /* free lock */
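The sound/core/seq hunks above close a race: the old consumer peeked at the queue head, compared timestamps, and only then dequeued, so the cell could be removed (and freed) by another path in between — which is also why seq_free_client1() now unpublishes the client slot before tearing down its ports and queues. Moving the readiness test into snd_seq_prioq_cell_out(), under f->lock, makes check-and-dequeue a single atomic step. A userspace analogue of the pattern (pthreads; types are illustrative, not kernel code):

	#include <pthread.h>
	#include <stddef.h>

	struct cell {
		long timestamp;
		struct cell *next;
	};

	struct queue {
		pthread_mutex_t lock;
		struct cell *head;
	};

	/* Dequeue the head only if it is due; the check and the unlink
	 * happen under the same lock, so no other thread can remove or
	 * free the cell between them. */
	struct cell *cell_out(struct queue *q, long now)
	{
		struct cell *c;

		pthread_mutex_lock(&q->lock);
		c = q->head;
		if (c && c->timestamp > now)
			c = NULL;	/* not ready: leave it queued */
		if (c)
			q->head = c->next;
		pthread_mutex_unlock(&q->lock);
		return c;
	}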
1613 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1614 +index 96143df19b21..d5017adf9feb 100644
1615 +--- a/sound/pci/hda/hda_intel.c
1616 ++++ b/sound/pci/hda/hda_intel.c
1617 +@@ -181,11 +181,15 @@ static const struct kernel_param_ops param_ops_xint = {
1618 + };
1619 + #define param_check_xint param_check_int
1620 +
1621 +-static int power_save = -1;
1622 ++static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
1623 + module_param(power_save, xint, 0644);
1624 + MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
1625 + "(in second, 0 = disable).");
1626 +
1627 ++static bool pm_blacklist = true;
1628 ++module_param(pm_blacklist, bool, 0644);
1629 ++MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist");
1630 ++
1631 + /* reset the HD-audio controller in power save mode.
1632 + * this may give more power-saving, but will take longer time to
1633 + * wake up.
1634 +@@ -2300,10 +2304,9 @@ static int azx_probe_continue(struct azx *chip)
1635 +
1636 + val = power_save;
1637 + #ifdef CONFIG_PM
1638 +- if (val == -1) {
1639 ++ if (pm_blacklist) {
1640 + const struct snd_pci_quirk *q;
1641 +
1642 +- val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
1643 + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
1644 + if (q && val) {
1645 + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
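With this hda_intel change, power_save defaults to CONFIG_SND_HDA_POWER_SAVE_DEFAULT unconditionally, and the quirk table is consulted whenever the new pm_blacklist knob is set (its default). Users who know their codec survives runtime suspend can override both from modprobe configuration, e.g. (file path and values are only an example):

	# /etc/modprobe.d/snd-hda-intel.conf
	# 1-second autosuspend timeout; skip the power_save blacklist.
	options snd-hda-intel power_save=1 pm_blacklist=0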
1646 +diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
1647 +index 361466a2eaef..ade443a88421 100644
1648 +--- a/tools/testing/selftests/x86/entry_from_vm86.c
1649 ++++ b/tools/testing/selftests/x86/entry_from_vm86.c
1650 +@@ -95,6 +95,10 @@ asm (
1651 + "int3\n\t"
1652 + "vmcode_int80:\n\t"
1653 + "int $0x80\n\t"
1654 ++ "vmcode_popf_hlt:\n\t"
1655 ++ "push %ax\n\t"
1656 ++ "popf\n\t"
1657 ++ "hlt\n\t"
1658 + "vmcode_umip:\n\t"
1659 + /* addressing via displacements */
1660 + "smsw (2052)\n\t"
1661 +@@ -124,8 +128,8 @@ asm (
1662 +
1663 + extern unsigned char vmcode[], end_vmcode[];
1664 + extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
1665 +- vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[],
1666 +- vmcode_umip_str[], vmcode_umip_sldt[];
1667 ++ vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[],
1668 ++ vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[];
1669 +
1670 + /* Returns false if the test was skipped. */
1671 + static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
1672 +@@ -175,7 +179,7 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
1673 + (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
1674 + printf("[OK]\tReturned correctly\n");
1675 + } else {
1676 +- printf("[FAIL]\tIncorrect return reason\n");
1677 ++ printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip);
1678 + nerrs++;
1679 + }
1680 +
1681 +@@ -264,6 +268,9 @@ int main(void)
1682 + v86.regs.ds = load_addr / 16;
1683 + v86.regs.es = load_addr / 16;
1684 +
1685 ++ /* Use the end of the page as our stack. */
1686 ++ v86.regs.esp = 4096;
1687 ++
1688 + assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
1689 +
1690 + /* #BR -- should deliver SIG??? */
1691 +@@ -295,6 +302,23 @@ int main(void)
1692 + v86.regs.eflags &= ~X86_EFLAGS_IF;
1693 + do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");
1694 +
1695 ++ /* POPF with VIP set but IF clear: should not trap */
1696 ++ v86.regs.eflags = X86_EFLAGS_VIP;
1697 ++ v86.regs.eax = 0;
1698 ++ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear");
1699 ++
1700 ++ /* POPF with VIP set and IF set: should trap */
1701 ++ v86.regs.eflags = X86_EFLAGS_VIP;
1702 ++ v86.regs.eax = X86_EFLAGS_IF;
1703 ++ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set");
1704 ++
1705 ++ /* POPF with VIP clear and IF set: should not trap */
1706 ++ v86.regs.eflags = 0;
1707 ++ v86.regs.eax = X86_EFLAGS_IF;
1708 ++ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set");
1709 ++
1710 ++ v86.regs.eflags = 0;
1711 ++
1712 + /* INT3 -- should cause #BP */
1713 + do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
1714 +
1715 +@@ -318,7 +342,7 @@ int main(void)
1716 + clearhandler(SIGSEGV);
1717 +
1718 + /* Make sure nothing explodes if we fork. */
1719 +- if (fork() > 0)
1720 ++ if (fork() == 0)
1721 + return 0;
1722 +
1723 + return (nerrs == 0 ? 0 : 1);
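The last hunk is why this selftest could never report failure before: fork() returns the child's PID (> 0) in the parent and 0 in the child, so "if (fork() > 0) return 0;" made the parent — the process whose exit status the harness checks — return success unconditionally. The fix exits early in the child instead, letting the parent return nerrs. A minimal demonstration of the convention:

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			/* Child: fork() returned 0. */
			printf("child: pid=%d\n", getpid());
			return 0;
		}
		/* Parent: fork() returned the child's PID (> 0). */
		printf("parent: child pid=%d\n", pid);
		wait(NULL);
		return 0;
	}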
1724 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
1725 +index cc29a8148328..811631a1296c 100644
1726 +--- a/virt/kvm/arm/arch_timer.c
1727 ++++ b/virt/kvm/arm/arch_timer.c
1728 +@@ -589,6 +589,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
1729 +
1730 + int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
1731 + {
1732 ++ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
1733 + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
1734 + struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
1735 +
1736 +@@ -602,6 +603,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
1737 + ptimer->cnt_ctl = 0;
1738 + kvm_timer_update_state(vcpu);
1739 +
1740 ++ if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
1741 ++ kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
1742 ++
1743 + return 0;
1744 + }
1745 +
1746 +@@ -773,7 +777,7 @@ int kvm_timer_hyp_init(bool has_gic)
1747 + }
1748 + }
1749 +
1750 +- kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
1751 ++ kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
1752 +
1753 + cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
1754 + "kvm/arm/timer:starting", kvm_timer_starting_cpu,
1755 +diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
1756 +index f5c3d6d7019e..b89ce5432214 100644
1757 +--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
1758 ++++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
1759 +@@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
1760 + * are now visible to the system register interface.
1761 + */
1762 + if (!cpu_if->vgic_sre) {
1763 +- dsb(st);
1764 ++ dsb(sy);
1765 ++ isb();
1766 + cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
1767 + }
1768 +
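On the vgic-v3-sr.c barrier change: as I read it, dsb(st) only orders prior stores, but the following ICH_VMCR_EL2 read must observe the guest's accesses through the memory-mapped GICV interface, which calls for a full-system dsb plus an isb to synchronize the instruction stream before the system-register access. A bare AArch64 sketch of the pair (GCC/clang inline asm, illustrative only):

	static inline void gic_sync_for_sysreg_read(void)
	{
		/* Order *all* prior memory accesses, not just stores ... */
		asm volatile("dsb sy" ::: "memory");
		/* ... and resynchronize the pipeline so the next
		 * system-register read sees the architected effect. */
		asm volatile("isb" ::: "memory");
	}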
1769 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
1770 +index 9dea96380339..b69798a7880e 100644
1771 +--- a/virt/kvm/arm/mmu.c
1772 ++++ b/virt/kvm/arm/mmu.c
1773 +@@ -1760,9 +1760,9 @@ int kvm_mmu_init(void)
1774 + */
1775 + BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1776 +
1777 +- kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
1778 +- kvm_info("HYP VA range: %lx:%lx\n",
1779 +- kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
1780 ++ kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1781 ++ kvm_debug("HYP VA range: %lx:%lx\n",
1782 ++ kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
1783 +
1784 + if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1785 + hyp_idmap_start < kern_hyp_va(~0UL) &&
1786 +diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
1787 +index 80897102da26..028d2ba05b7b 100644
1788 +--- a/virt/kvm/arm/vgic/vgic-v2.c
1789 ++++ b/virt/kvm/arm/vgic/vgic-v2.c
1790 +@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
1791 + vgic_v2_write_lr(i, 0);
1792 + }
1793 +
1794 ++void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
1795 ++{
1796 ++ struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
1797 ++
1798 ++ cpuif->vgic_hcr |= GICH_HCR_NPIE;
1799 ++}
1800 ++
1801 + void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
1802 + {
1803 + struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
1804 +@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
1805 + int lr;
1806 + unsigned long flags;
1807 +
1808 +- cpuif->vgic_hcr &= ~GICH_HCR_UIE;
1809 ++ cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
1810 +
1811 + for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
1812 + u32 val = cpuif->vgic_lr[lr];
1813 +@@ -381,7 +388,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
1814 + kvm_vgic_global_state.type = VGIC_V2;
1815 + kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
1816 +
1817 +- kvm_info("vgic-v2@%llx\n", info->vctrl.start);
1818 ++ kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
1819 +
1820 + return 0;
1821 + out:
1822 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
1823 +index f47e8481fa45..f667c7e86b8f 100644
1824 +--- a/virt/kvm/arm/vgic/vgic-v3.c
1825 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
1826 +@@ -26,6 +26,13 @@ static bool group1_trap;
1827 + static bool common_trap;
1828 + static bool gicv4_enable;
1829 +
1830 ++void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
1831 ++{
1832 ++ struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
1833 ++
1834 ++ cpuif->vgic_hcr |= ICH_HCR_NPIE;
1835 ++}
1836 ++
1837 + void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
1838 + {
1839 + struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
1840 +@@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
1841 + int lr;
1842 + unsigned long flags;
1843 +
1844 +- cpuif->vgic_hcr &= ~ICH_HCR_UIE;
1845 ++ cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
1846 +
1847 + for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
1848 + u64 val = cpuif->vgic_lr[lr];
1849 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
1850 +index ecb8e25f5fe5..04816ecdf9ce 100644
1851 +--- a/virt/kvm/arm/vgic/vgic.c
1852 ++++ b/virt/kvm/arm/vgic/vgic.c
1853 +@@ -460,6 +460,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
1854 + return ret;
1855 + }
1856 +
1857 ++/**
1858 ++ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
1859 ++ * @vcpu: The VCPU pointer
1860 ++ * @vintid: The INTID of the interrupt
1861 ++ *
1862 ++ * Reset the active and pending states of a mapped interrupt. Kernel
1863 ++ * subsystems injecting mapped interrupts should reset their interrupt lines
1864 ++ * when we are doing a reset of the VM.
1865 ++ */
1866 ++void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
1867 ++{
1868 ++ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
1869 ++ unsigned long flags;
1870 ++
1871 ++ if (!irq->hw)
1872 ++ goto out;
1873 ++
1874 ++ spin_lock_irqsave(&irq->irq_lock, flags);
1875 ++ irq->active = false;
1876 ++ irq->pending_latch = false;
1877 ++ irq->line_level = false;
1878 ++ spin_unlock_irqrestore(&irq->irq_lock, flags);
1879 ++out:
1880 ++ vgic_put_irq(vcpu->kvm, irq);
1881 ++}
1882 ++
1883 + int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
1884 + {
1885 + struct vgic_irq *irq;
1886 +@@ -649,22 +675,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
1887 + vgic_v3_set_underflow(vcpu);
1888 + }
1889 +
1890 ++static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
1891 ++{
1892 ++ if (kvm_vgic_global_state.type == VGIC_V2)
1893 ++ vgic_v2_set_npie(vcpu);
1894 ++ else
1895 ++ vgic_v3_set_npie(vcpu);
1896 ++}
1897 ++
1898 + /* Requires the ap_list_lock to be held. */
1899 +-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
1900 ++static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
1901 ++ bool *multi_sgi)
1902 + {
1903 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1904 + struct vgic_irq *irq;
1905 + int count = 0;
1906 +
1907 ++ *multi_sgi = false;
1908 ++
1909 + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
1910 +
1911 + list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
1912 + spin_lock(&irq->irq_lock);
1913 + /* GICv2 SGIs can count for more than one... */
1914 +- if (vgic_irq_is_sgi(irq->intid) && irq->source)
1915 +- count += hweight8(irq->source);
1916 +- else
1917 ++ if (vgic_irq_is_sgi(irq->intid) && irq->source) {
1918 ++ int w = hweight8(irq->source);
1919 ++
1920 ++ count += w;
1921 ++ *multi_sgi |= (w > 1);
1922 ++ } else {
1923 + count++;
1924 ++ }
1925 + spin_unlock(&irq->irq_lock);
1926 + }
1927 + return count;
1928 +@@ -675,28 +716,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
1929 + {
1930 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1931 + struct vgic_irq *irq;
1932 +- int count = 0;
1933 ++ int count;
1934 ++ bool npie = false;
1935 ++ bool multi_sgi;
1936 ++ u8 prio = 0xff;
1937 +
1938 + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
1939 +
1940 +- if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
1941 ++ count = compute_ap_list_depth(vcpu, &multi_sgi);
1942 ++ if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
1943 + vgic_sort_ap_list(vcpu);
1944 +
1945 ++ count = 0;
1946 ++
1947 + list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
1948 + spin_lock(&irq->irq_lock);
1949 +
1950 +- if (unlikely(vgic_target_oracle(irq) != vcpu))
1951 +- goto next;
1952 +-
1953 + /*
1954 +- * If we get an SGI with multiple sources, try to get
1955 +- * them in all at once.
1956 ++ * If we have multi-SGIs in the pipeline, we need to
1957 ++ * guarantee that they are all seen before any IRQ of
1958 ++ * lower priority. In that case, we need to filter out
1959 ++ * these interrupts by exiting early. This is easy as
1960 ++ * the AP list has been sorted already.
1961 + */
1962 +- do {
1963 ++ if (multi_sgi && irq->priority > prio) {
1964 ++ spin_unlock(&irq->irq_lock);
1965 ++ break;
1966 ++ }
1967 ++
1968 ++ if (likely(vgic_target_oracle(irq) == vcpu)) {
1969 + vgic_populate_lr(vcpu, irq, count++);
1970 +- } while (irq->source && count < kvm_vgic_global_state.nr_lr);
1971 +
1972 +-next:
1973 ++ if (irq->source) {
1974 ++ npie = true;
1975 ++ prio = irq->priority;
1976 ++ }
1977 ++ }
1978 ++
1979 + spin_unlock(&irq->irq_lock);
1980 +
1981 + if (count == kvm_vgic_global_state.nr_lr) {
1982 +@@ -707,6 +763,9 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
1983 + }
1984 + }
1985 +
1986 ++ if (npie)
1987 ++ vgic_set_npie(vcpu);
1988 ++
1989 + vcpu->arch.vgic_cpu.used_lrs = count;
1990 +
1991 + /* Nuke remaining LRs */
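Reading the vgic_flush_lr_state() rework: the old code stuffed every source of a multi-source GICv2 SGI into successive list registers in one go; now each such SGI takes a single LR per flush, and the NPIE ("no pending interrupt") maintenance interrupt, enabled via the new set_npie callbacks, brings the vcpu back so the remaining sources can be injected after the first is consumed — with lower-priority interrupts filtered out until then via the early break over the sorted AP list. The source field is a bitmask of requesting CPUs, so an SGI's delivery budget is its population count; illustratively:

	#include <stdio.h>

	/* The kernel's hweight8() is an 8-bit population count. */
	static int hweight8(unsigned char w)
	{
		return __builtin_popcount(w);
	}

	int main(void)
	{
		unsigned char source = 0x29;	/* SGI pending from CPUs 0, 3 and 5 */

		printf("deliveries needed for this SGI: %d\n", hweight8(source));
		return 0;
	}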
1992 +diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
1993 +index efbcf8f96f9c..d434ebd67599 100644
1994 +--- a/virt/kvm/arm/vgic/vgic.h
1995 ++++ b/virt/kvm/arm/vgic/vgic.h
1996 +@@ -151,6 +151,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
1997 + void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
1998 + void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
1999 + void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
2000 ++void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
2001 + int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
2002 + int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
2003 + int offset, u32 *val);
2004 +@@ -180,6 +181,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
2005 + void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
2006 + void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
2007 + void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
2008 ++void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
2009 + void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
2010 + void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
2011 + void vgic_v3_enable(struct kvm_vcpu *vcpu);