Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 02 Dec 2016 16:21:49
Message-Id: 1480695685.b2dc6d58c360a6763403f7fd947011a6e225ddf5.mpagano@gentoo
1 commit: b2dc6d58c360a6763403f7fd947011a6e225ddf5
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Dec 2 16:21:25 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Dec 2 16:21:25 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b2dc6d58c360a6763403f7fd947011a6e225ddf5
7
8 Linux patch 4.4.36
9
10  0000_README             |   4 +
11  1035_linux-4.4.36.patch | 914 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 918 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 28c74ce..58c7374 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -183,6 +183,10 @@ Patch: 1034_linux-4.4.35.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.35
21
22 +Patch: 1035_linux-4.4.36.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.36
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1035_linux-4.4.36.patch b/1035_linux-4.4.36.patch
31 new file mode 100644
32 index 0000000..0db6e38
33 --- /dev/null
34 +++ b/1035_linux-4.4.36.patch
35 @@ -0,0 +1,914 @@
36 +diff --git a/Makefile b/Makefile
37 +index f88830af1533..705eb9e38fce 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 35
44 ++SUBLEVEL = 36
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
49 +index cda6dbbe9842..fd5979f28ada 100644
50 +--- a/arch/parisc/kernel/cache.c
51 ++++ b/arch/parisc/kernel/cache.c
52 +@@ -351,6 +351,7 @@ void __init parisc_setup_cache_timing(void)
53 + {
54 + unsigned long rangetime, alltime;
55 + unsigned long size, start;
56 ++ unsigned long threshold;
57 +
58 + alltime = mfctl(16);
59 + flush_data_cache();
60 +@@ -364,17 +365,12 @@ void __init parisc_setup_cache_timing(void)
61 + printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
62 + alltime, size, rangetime);
63 +
64 +- /* Racy, but if we see an intermediate value, it's ok too... */
65 +- parisc_cache_flush_threshold = size * alltime / rangetime;
66 +-
67 +- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
68 +- if (!parisc_cache_flush_threshold)
69 +- parisc_cache_flush_threshold = FLUSH_THRESHOLD;
70 +-
71 +- if (parisc_cache_flush_threshold > cache_info.dc_size)
72 +- parisc_cache_flush_threshold = cache_info.dc_size;
73 +-
74 +- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
75 ++ threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
76 ++ if (threshold > cache_info.dc_size)
77 ++ threshold = cache_info.dc_size;
78 ++ if (threshold)
79 ++ parisc_cache_flush_threshold = threshold;
80 ++ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
81 + parisc_cache_flush_threshold/1024);
82 +
83 + /* calculate TLB flush threshold */
84 +@@ -383,7 +379,7 @@ void __init parisc_setup_cache_timing(void)
85 + flush_tlb_all();
86 + alltime = mfctl(16) - alltime;
87 +
88 +- size = PAGE_SIZE;
89 ++ size = 0;
90 + start = (unsigned long) _text;
91 + rangetime = mfctl(16);
92 + while (start < (unsigned long) _end) {
93 +@@ -396,13 +392,10 @@ void __init parisc_setup_cache_timing(void)
94 + printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
95 + alltime, size, rangetime);
96 +
97 +- parisc_tlb_flush_threshold = size * alltime / rangetime;
98 +- parisc_tlb_flush_threshold *= num_online_cpus();
99 +- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
100 +- if (!parisc_tlb_flush_threshold)
101 +- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
102 +-
103 +- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
104 ++ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
105 ++ if (threshold)
106 ++ parisc_tlb_flush_threshold = threshold;
107 ++ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
108 + parisc_tlb_flush_threshold/1024);
109 + }
110 +
111 +diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
112 +index b743a80eaba0..675521919229 100644
113 +--- a/arch/parisc/kernel/pacache.S
114 ++++ b/arch/parisc/kernel/pacache.S
115 +@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
116 +
117 + fitmanymiddle: /* Loop if LOOP >= 2 */
118 + addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
119 +- pitlbe 0(%sr1, %r28)
120 ++ pitlbe %r0(%sr1, %r28)
121 + pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
122 + addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
123 + copy %arg3, %r31 /* Re-init inner loop count */
124 +@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
125 +
126 + fdtmanymiddle: /* Loop if LOOP >= 2 */
127 + addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
128 +- pdtlbe 0(%sr1, %r28)
129 ++ pdtlbe %r0(%sr1, %r28)
130 + pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
131 + addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
132 + copy %arg3, %r31 /* Re-init inner loop count */
133 +@@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm)
134 + /* Purge any old translations */
135 +
136 + #ifdef CONFIG_PA20
137 +- pdtlb,l 0(%r28)
138 +- pdtlb,l 0(%r29)
139 ++ pdtlb,l %r0(%r28)
140 ++ pdtlb,l %r0(%r29)
141 + #else
142 + tlb_lock %r20,%r21,%r22
143 +- pdtlb 0(%r28)
144 +- pdtlb 0(%r29)
145 ++ pdtlb %r0(%r28)
146 ++ pdtlb %r0(%r29)
147 + tlb_unlock %r20,%r21,%r22
148 + #endif
149 +
150 +@@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm)
151 + /* Purge any old translation */
152 +
153 + #ifdef CONFIG_PA20
154 +- pdtlb,l 0(%r28)
155 ++ pdtlb,l %r0(%r28)
156 + #else
157 + tlb_lock %r20,%r21,%r22
158 +- pdtlb 0(%r28)
159 ++ pdtlb %r0(%r28)
160 + tlb_unlock %r20,%r21,%r22
161 + #endif
162 +
163 +@@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm)
164 + /* Purge any old translation */
165 +
166 + #ifdef CONFIG_PA20
167 +- pdtlb,l 0(%r28)
168 ++ pdtlb,l %r0(%r28)
169 + #else
170 + tlb_lock %r20,%r21,%r22
171 +- pdtlb 0(%r28)
172 ++ pdtlb %r0(%r28)
173 + tlb_unlock %r20,%r21,%r22
174 + #endif
175 +
176 +@@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm)
177 + sync
178 +
179 + #ifdef CONFIG_PA20
180 +- pdtlb,l 0(%r25)
181 ++ pdtlb,l %r0(%r25)
182 + #else
183 + tlb_lock %r20,%r21,%r22
184 +- pdtlb 0(%r25)
185 ++ pdtlb %r0(%r25)
186 + tlb_unlock %r20,%r21,%r22
187 + #endif
188 +
189 +@@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm)
190 + depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
191 + #endif
192 +
193 +- /* Purge any old translation */
194 ++ /* Purge any old translation. Note that the FIC instruction
195 ++ * may use either the instruction or data TLB. Given that we
196 ++ * have a flat address space, it's not clear which TLB will be
197 ++ * used. So, we purge both entries. */
198 +
199 + #ifdef CONFIG_PA20
200 ++ pdtlb,l %r0(%r28)
201 + pitlb,l %r0(%sr4,%r28)
202 + #else
203 + tlb_lock %r20,%r21,%r22
204 +- pitlb (%sr4,%r28)
205 ++ pdtlb %r0(%r28)
206 ++ pitlb %r0(%sr4,%r28)
207 + tlb_unlock %r20,%r21,%r22
208 + #endif
209 +
210 +@@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm)
211 + sync
212 +
213 + #ifdef CONFIG_PA20
214 ++ pdtlb,l %r0(%r28)
215 + pitlb,l %r0(%sr4,%r25)
216 + #else
217 + tlb_lock %r20,%r21,%r22
218 +- pitlb (%sr4,%r25)
219 ++ pdtlb %r0(%r28)
220 ++ pitlb %r0(%sr4,%r25)
221 + tlb_unlock %r20,%r21,%r22
222 + #endif
223 +
224 +diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
225 +index b9402c9b3454..af0d7fae7aa7 100644
226 +--- a/arch/parisc/kernel/pci-dma.c
227 ++++ b/arch/parisc/kernel/pci-dma.c
228 +@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
229 +
230 + if (!pte_none(*pte))
231 + printk(KERN_ERR "map_pte_uncached: page already exists\n");
232 +- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
233 + purge_tlb_start(flags);
234 ++ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
235 + pdtlb_kernel(orig_vaddr);
236 + purge_tlb_end(flags);
237 + vaddr += PAGE_SIZE;
238 +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
239 +index 81d6f6391944..2e66a887788e 100644
240 +--- a/arch/parisc/kernel/setup.c
241 ++++ b/arch/parisc/kernel/setup.c
242 +@@ -334,6 +334,10 @@ static int __init parisc_init(void)
243 + /* tell PDC we're Linux. Nevermind failure. */
244 + pdc_stable_write(0x40, &osid, sizeof(osid));
245 +
246 ++ /* start with known state */
247 ++ flush_cache_all_local();
248 ++ flush_tlb_all_local(NULL);
249 ++
250 + processor_init();
251 + #ifdef CONFIG_SMP
252 + pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
253 +diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
254 +index 178989e6d3e3..ea960d660917 100644
255 +--- a/arch/tile/kernel/time.c
256 ++++ b/arch/tile/kernel/time.c
257 +@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
258 + */
259 + unsigned long long sched_clock(void)
260 + {
261 +- return clocksource_cyc2ns(get_cycles(),
262 +- sched_clock_mult, SCHED_CLOCK_SHIFT);
263 ++ return mult_frac(get_cycles(),
264 ++ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
265 + }
266 +
267 + int setup_profiling_timer(unsigned int multiplier)
268 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
269 +index 5fa652c16a50..f49e98062ea5 100644
270 +--- a/arch/x86/kvm/emulate.c
271 ++++ b/arch/x86/kvm/emulate.c
272 +@@ -2093,16 +2093,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
273 + static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
274 + {
275 + int rc;
276 +- unsigned short sel, old_sel;
277 +- struct desc_struct old_desc, new_desc;
278 +- const struct x86_emulate_ops *ops = ctxt->ops;
279 ++ unsigned short sel;
280 ++ struct desc_struct new_desc;
281 + u8 cpl = ctxt->ops->cpl(ctxt);
282 +
283 +- /* Assignment of RIP may only fail in 64-bit mode */
284 +- if (ctxt->mode == X86EMUL_MODE_PROT64)
285 +- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
286 +- VCPU_SREG_CS);
287 +-
288 + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
289 +
290 + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
291 +@@ -2112,12 +2106,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
292 + return rc;
293 +
294 + rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
295 +- if (rc != X86EMUL_CONTINUE) {
296 +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
297 +- /* assigning eip failed; restore the old cs */
298 +- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
299 +- return rc;
300 +- }
301 ++ /* Error handling is not implemented. */
302 ++ if (rc != X86EMUL_CONTINUE)
303 ++ return X86EMUL_UNHANDLEABLE;
304 ++
305 + return rc;
306 + }
307 +
308 +@@ -2177,14 +2169,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
309 + {
310 + int rc;
311 + unsigned long eip, cs;
312 +- u16 old_cs;
313 + int cpl = ctxt->ops->cpl(ctxt);
314 +- struct desc_struct old_desc, new_desc;
315 +- const struct x86_emulate_ops *ops = ctxt->ops;
316 +-
317 +- if (ctxt->mode == X86EMUL_MODE_PROT64)
318 +- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
319 +- VCPU_SREG_CS);
320 ++ struct desc_struct new_desc;
321 +
322 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
323 + if (rc != X86EMUL_CONTINUE)
324 +@@ -2201,10 +2187,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
325 + if (rc != X86EMUL_CONTINUE)
326 + return rc;
327 + rc = assign_eip_far(ctxt, eip, &new_desc);
328 +- if (rc != X86EMUL_CONTINUE) {
329 +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
330 +- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
331 +- }
332 ++ /* Error handling is not implemented. */
333 ++ if (rc != X86EMUL_CONTINUE)
334 ++ return X86EMUL_UNHANDLEABLE;
335 ++
336 + return rc;
337 + }
338 +
339 +diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
340 +index 84b96d319909..d09544e826f6 100644
341 +--- a/arch/x86/kvm/irq_comm.c
342 ++++ b/arch/x86/kvm/irq_comm.c
343 +@@ -38,6 +38,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
344 + bool line_status)
345 + {
346 + struct kvm_pic *pic = pic_irqchip(kvm);
347 ++
348 ++ /*
349 ++ * XXX: rejecting pic routes when pic isn't in use would be better,
350 ++ * but the default routing table is installed while kvm->arch.vpic is
351 ++ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
352 ++ */
353 ++ if (!pic)
354 ++ return -1;
355 ++
356 + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
357 + }
358 +
359 +@@ -46,6 +55,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
360 + bool line_status)
361 + {
362 + struct kvm_ioapic *ioapic = kvm->arch.vioapic;
363 ++
364 ++ if (!ioapic)
365 ++ return -1;
366 ++
367 + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
368 + line_status);
369 + }
370 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
371 +index 79bab6fd76bb..6755d4768f59 100644
372 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
373 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
374 +@@ -275,6 +275,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
375 + atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
376 + atombios_blank_crtc(crtc, ATOM_DISABLE);
377 + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
378 ++ /* Make sure vblank interrupt is still enabled if needed */
379 ++ radeon_irq_set(rdev);
380 + radeon_crtc_load_lut(crtc);
381 + break;
382 + case DRM_MODE_DPMS_STANDBY:
383 +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
384 +index 678b4386540d..89f22bdde298 100644
385 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
386 ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
387 +@@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
388 + WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
389 + }
390 + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
391 ++ /* Make sure vblank interrupt is still enabled if needed */
392 ++ radeon_irq_set(rdev);
393 + radeon_crtc_load_lut(crtc);
394 + break;
395 + case DRM_MODE_DPMS_STANDBY:
396 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
397 +index 565bb2c140ed..e913a930ac80 100644
398 +--- a/drivers/iommu/dmar.c
399 ++++ b/drivers/iommu/dmar.c
400 +@@ -326,7 +326,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
401 + struct pci_dev *pdev = to_pci_dev(data);
402 + struct dmar_pci_notify_info *info;
403 +
404 +- /* Only care about add/remove events for physical functions */
405 ++ /* Only care about add/remove events for physical functions.
406 ++ * For VFs we actually do the lookup based on the corresponding
407 ++ * PF in device_to_iommu() anyway. */
408 + if (pdev->is_virtfn)
409 + return NOTIFY_DONE;
410 + if (action != BUS_NOTIFY_ADD_DEVICE &&
411 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
412 +index 5baa830ce49f..59e9abd3345e 100644
413 +--- a/drivers/iommu/intel-iommu.c
414 ++++ b/drivers/iommu/intel-iommu.c
415 +@@ -885,7 +885,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
416 + return NULL;
417 +
418 + if (dev_is_pci(dev)) {
419 ++ struct pci_dev *pf_pdev;
420 ++
421 + pdev = to_pci_dev(dev);
422 ++ /* VFs aren't listed in scope tables; we need to look up
423 ++ * the PF instead to find the IOMMU. */
424 ++ pf_pdev = pci_physfn(pdev);
425 ++ dev = &pf_pdev->dev;
426 + segment = pci_domain_nr(pdev->bus);
427 + } else if (has_acpi_companion(dev))
428 + dev = &ACPI_COMPANION(dev)->dev;
429 +@@ -898,6 +904,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
430 + for_each_active_dev_scope(drhd->devices,
431 + drhd->devices_cnt, i, tmp) {
432 + if (tmp == dev) {
433 ++ /* For a VF use its original BDF# not that of the PF
434 ++ * which we used for the IOMMU lookup. Strictly speaking
435 ++ * we could do this for all PCI devices; we only need to
436 ++ * get the BDF# from the scope table for ACPI matches. */
437 ++ if (pdev->is_virtfn)
438 ++ goto got_pdev;
439 ++
440 + *bus = drhd->devices[i].bus;
441 + *devfn = drhd->devices[i].devfn;
442 + goto out;
443 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
444 +index d9939fa9b588..f929879ecae6 100644
445 +--- a/drivers/iommu/intel-svm.c
446 ++++ b/drivers/iommu/intel-svm.c
447 +@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
448 + struct page *pages;
449 + int order;
450 +
451 +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
452 +- if (order < 0)
453 +- order = 0;
454 +-
455 ++ /* Start at 2 because it's defined as 2^(1+PSS) */
456 ++ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
457 ++
458 ++ /* Eventually I'm promised we will get a multi-level PASID table
459 ++ * and it won't have to be physically contiguous. Until then,
460 ++ * limit the size because 8MiB contiguous allocations can be hard
461 ++ * to come by. The limit of 0x20000, which is 1MiB for each of
462 ++ * the PASID and PASID-state tables, is somewhat arbitrary. */
463 ++ if (iommu->pasid_max > 0x20000)
464 ++ iommu->pasid_max = 0x20000;
465 ++
466 ++ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
467 + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
468 + if (!pages) {
469 + pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
470 +@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
471 + pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
472 +
473 + if (ecap_dis(iommu->ecap)) {
474 ++ /* Just making it explicit... */
475 ++ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
476 + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
477 + if (pages)
478 + iommu->pasid_state_table = page_address(pages);
479 +@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
480 +
481 + int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
482 + {
483 +- int order;
484 +-
485 +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
486 +- if (order < 0)
487 +- order = 0;
488 ++ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
489 +
490 + if (iommu->pasid_table) {
491 + free_pages((unsigned long)iommu->pasid_table, order);
492 +@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
493 + }
494 + svm->iommu = iommu;
495 +
496 +- if (pasid_max > 2 << ecap_pss(iommu->ecap))
497 +- pasid_max = 2 << ecap_pss(iommu->ecap);
498 ++ if (pasid_max > iommu->pasid_max)
499 ++ pasid_max = iommu->pasid_max;
500 +
501 + /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
502 + ret = idr_alloc(&iommu->pasid_idr, svm,
503 +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
504 +index a77643954523..e59838231703 100644
505 +--- a/drivers/misc/mei/bus.c
506 ++++ b/drivers/misc/mei/bus.c
507 +@@ -144,7 +144,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
508 + mutex_lock(&bus->device_lock);
509 +
510 + if (!mei_cl_is_connected(cl)) {
511 +- rets = -EBUSY;
512 ++ rets = -ENODEV;
513 + goto out;
514 + }
515 + }
516 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
517 +index 4e8069866c85..a2661381ddfc 100644
518 +--- a/drivers/misc/mei/hw-me-regs.h
519 ++++ b/drivers/misc/mei/hw-me-regs.h
520 +@@ -66,9 +66,6 @@
521 + #ifndef _MEI_HW_MEI_REGS_H_
522 + #define _MEI_HW_MEI_REGS_H_
523 +
524 +-#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
525 +-#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
526 +-
527 + /*
528 + * MEI device IDs
529 + */
530 +@@ -124,6 +121,10 @@
531 + #define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
532 + #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
533 + #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
534 ++
535 ++#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
536 ++#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
537 ++
538 + /*
539 + * MEI HW Section
540 + */
541 +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
542 +index 25b1997a62cb..36333750c512 100644
543 +--- a/drivers/misc/mei/hw-me.c
544 ++++ b/drivers/misc/mei/hw-me.c
545 +@@ -1258,8 +1258,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
546 + static bool mei_me_fw_type_sps(struct pci_dev *pdev)
547 + {
548 + u32 reg;
549 +- /* Read ME FW Status check for SPS Firmware */
550 +- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
551 ++ unsigned int devfn;
552 ++
553 ++ /*
554 ++ * Read ME FW Status register to check for SPS Firmware
555 ++ * The SPS FW is only signaled in pci function 0
556 ++ */
557 ++ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
558 ++ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
559 + /* if bits [19:16] = 15, running SPS Firmware */
560 + return (reg & 0xf0000) == 0xf0000;
561 + }
562 +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
563 +index 80f9afcb1382..4ef189a7a2fb 100644
564 +--- a/drivers/misc/mei/main.c
565 ++++ b/drivers/misc/mei/main.c
566 +@@ -207,7 +207,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
567 +
568 + mutex_lock(&dev->device_lock);
569 + if (!mei_cl_is_connected(cl)) {
570 +- rets = -EBUSY;
571 ++ rets = -ENODEV;
572 + goto out;
573 + }
574 + }
575 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
576 +index 0af3d7d30419..01e20384ac44 100644
577 +--- a/drivers/misc/mei/pci-me.c
578 ++++ b/drivers/misc/mei/pci-me.c
579 +@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
580 +
581 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
582 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
583 +- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
584 +- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
585 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
586 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
587 +
588 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
589 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
590 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
591 +index 2d867c5bfd9f..8cead04f26d6 100644
592 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
593 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
594 +@@ -3706,6 +3706,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
595 + }
596 + }
597 +
598 ++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
599 ++{
600 ++ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
601 ++}
602 ++
603 + /**
604 + * _scsih_flush_running_cmds - completing outstanding commands.
605 + * @ioc: per adapter object
606 +@@ -3727,6 +3732,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
607 + if (!scmd)
608 + continue;
609 + count++;
610 ++ if (ata_12_16_cmd(scmd))
611 ++ scsi_internal_device_unblock(scmd->device,
612 ++ SDEV_RUNNING);
613 + mpt3sas_base_free_smid(ioc, smid);
614 + scsi_dma_unmap(scmd);
615 + if (ioc->pci_error_recovery)
616 +@@ -3831,8 +3839,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
617 + SAM_STAT_CHECK_CONDITION;
618 + }
619 +
620 +-
621 +-
622 + /**
623 + * scsih_qcmd - main scsi request entry point
624 + * @scmd: pointer to scsi command object
625 +@@ -3859,6 +3865,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
626 + if (ioc->logging_level & MPT_DEBUG_SCSI)
627 + scsi_print_command(scmd);
628 +
629 ++ /*
630 ++ * Lock the device for any subsequent command until command is
631 ++ * done.
632 ++ */
633 ++ if (ata_12_16_cmd(scmd))
634 ++ scsi_internal_device_block(scmd->device);
635 ++
636 + sas_device_priv_data = scmd->device->hostdata;
637 + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
638 + scmd->result = DID_NO_CONNECT << 16;
639 +@@ -4431,6 +4444,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
640 + if (scmd == NULL)
641 + return 1;
642 +
643 ++ if (ata_12_16_cmd(scmd))
644 ++ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
645 ++
646 + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
647 +
648 + if (mpi_reply == NULL) {
649 +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
650 +index 965d0e240dcb..ba4a2a1eb3ff 100644
651 +--- a/drivers/usb/chipidea/core.c
652 ++++ b/drivers/usb/chipidea/core.c
653 +@@ -926,6 +926,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
654 + if (!ci)
655 + return -ENOMEM;
656 +
657 ++ spin_lock_init(&ci->lock);
658 + ci->dev = dev;
659 + ci->platdata = dev_get_platdata(dev);
660 + ci->imx28_write_fix = !!(ci->platdata->flags &
661 +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
662 +index 68fc5fce4cc5..d8a045fc1fdb 100644
663 +--- a/drivers/usb/chipidea/udc.c
664 ++++ b/drivers/usb/chipidea/udc.c
665 +@@ -1884,8 +1884,6 @@ static int udc_start(struct ci_hdrc *ci)
666 + struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
667 + int retval = 0;
668 +
669 +- spin_lock_init(&ci->lock);
670 +-
671 + ci->gadget.ops = &usb_gadget_ops;
672 + ci->gadget.speed = USB_SPEED_UNKNOWN;
673 + ci->gadget.max_speed = USB_SPEED_HIGH;
674 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
675 +index 976195e748a3..fe7452f0f38a 100644
676 +--- a/drivers/usb/serial/cp210x.c
677 ++++ b/drivers/usb/serial/cp210x.c
678 +@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
679 + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
680 + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
681 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
682 ++ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
683 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
684 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
685 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
686 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
687 +index 494167fe6a2c..d3d6ec455151 100644
688 +--- a/drivers/usb/serial/ftdi_sio.c
689 ++++ b/drivers/usb/serial/ftdi_sio.c
690 +@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
691 + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
692 + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
693 + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
694 ++ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
695 ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
696 + { } /* Terminating entry */
697 + };
698 +
699 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
700 +index 21011c0a4c64..48ee04c94a75 100644
701 +--- a/drivers/usb/serial/ftdi_sio_ids.h
702 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
703 +@@ -596,6 +596,12 @@
704 + #define STK541_PID 0x2109 /* Zigbee Controller */
705 +
706 + /*
707 ++ * Texas Instruments
708 ++ */
709 ++#define TI_VID 0x0451
710 ++#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
711 ++
712 ++/*
713 + * Blackfin gnICE JTAG
714 + * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
715 + */
716 +diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
717 +index 5e67f63b2e46..02f86dd1a340 100644
718 +--- a/drivers/usb/storage/transport.c
719 ++++ b/drivers/usb/storage/transport.c
720 +@@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
721 +
722 + /* COMMAND STAGE */
723 + /* let's send the command via the control pipe */
724 ++ /*
725 ++ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
726 ++ * Stack may be vmallocated. So no DMA for us. Make a copy.
727 ++ */
728 ++ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
729 + result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
730 + US_CBI_ADSC,
731 + USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
732 +- us->ifnum, srb->cmnd, srb->cmd_len);
733 ++ us->ifnum, us->iobuf, srb->cmd_len);
734 +
735 + /* check the return code for the command */
736 + usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
737 +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
738 +index 52a28311e2a4..48efe62e1302 100644
739 +--- a/fs/nfs/callback.c
740 ++++ b/fs/nfs/callback.c
741 +@@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
742 + }
743 +
744 + ret = -EPROTONOSUPPORT;
745 +- if (minorversion == 0)
746 ++ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
747 + ret = nfs4_callback_up_net(serv, net);
748 + else if (xprt->ops->bc_up)
749 + ret = xprt->ops->bc_up(serv, net);
750 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
751 +index 2d9b650047a5..d49e26c6cdc7 100644
752 +--- a/include/linux/intel-iommu.h
753 ++++ b/include/linux/intel-iommu.h
754 +@@ -429,6 +429,7 @@ struct intel_iommu {
755 + struct page_req_dsc *prq;
756 + unsigned char prq_name[16]; /* Name for PRQ interrupt */
757 + struct idr pasid_idr;
758 ++ u32 pasid_max;
759 + #endif
760 + struct q_inval *qi; /* Queued invalidation info */
761 + u32 *iommu_state; /* Store iommu states between suspend and resume.*/
762 +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
763 +index 5464c8744ea9..e24388a863a7 100644
764 +--- a/lib/mpi/mpi-pow.c
765 ++++ b/lib/mpi/mpi-pow.c
766 +@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
767 + if (!esize) {
768 + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
769 + * depending on if MOD equals 1. */
770 +- rp[0] = 1;
771 + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
772 ++ if (res->nlimbs) {
773 ++ if (mpi_resize(res, 1) < 0)
774 ++ goto enomem;
775 ++ rp = res->d;
776 ++ rp[0] = 1;
777 ++ }
778 + res->sign = 0;
779 + goto leave;
780 + }
781 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
782 +index 9aba9e93c0a2..ee9082792530 100644
783 +--- a/net/core/flow_dissector.c
784 ++++ b/net/core/flow_dissector.c
785 +@@ -949,4 +949,4 @@ static int __init init_default_flow_dissectors(void)
786 + return 0;
787 + }
788 +
789 +-late_initcall_sync(init_default_flow_dissectors);
790 ++core_initcall(init_default_flow_dissectors);
791 +diff --git a/net/wireless/core.h b/net/wireless/core.h
792 +index a618b4b86fa4..47a967fed8ff 100644
793 +--- a/net/wireless/core.h
794 ++++ b/net/wireless/core.h
795 +@@ -72,6 +72,7 @@ struct cfg80211_registered_device {
796 + struct list_head bss_list;
797 + struct rb_root bss_tree;
798 + u32 bss_generation;
799 ++ u32 bss_entries;
800 + struct cfg80211_scan_request *scan_req; /* protected by RTNL */
801 + struct sk_buff *scan_msg;
802 + struct cfg80211_sched_scan_request __rcu *sched_scan_req;
803 +diff --git a/net/wireless/scan.c b/net/wireless/scan.c
804 +index 14d5369eb778..8dde12a11725 100644
805 +--- a/net/wireless/scan.c
806 ++++ b/net/wireless/scan.c
807 +@@ -56,6 +56,19 @@
808 + * also linked into the probe response struct.
809 + */
810 +
811 ++/*
812 ++ * Limit the number of BSS entries stored in mac80211. Each one is
813 ++ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
814 ++ * If somebody wants to really attack this though, they'd likely
815 ++ * use small beacons, and only one type of frame, limiting each of
816 ++ * the entries to a much smaller size (in order to generate more
817 ++ * entries in total, so overhead is bigger.)
818 ++ */
819 ++static int bss_entries_limit = 1000;
820 ++module_param(bss_entries_limit, int, 0644);
821 ++MODULE_PARM_DESC(bss_entries_limit,
822 ++ "limit to number of scan BSS entries (per wiphy, default 1000)");
823 ++
824 + #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
825 +
826 + static void bss_free(struct cfg80211_internal_bss *bss)
827 +@@ -136,6 +149,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
828 +
829 + list_del_init(&bss->list);
830 + rb_erase(&bss->rbn, &rdev->bss_tree);
831 ++ rdev->bss_entries--;
832 ++ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
833 ++ "rdev bss entries[%d]/list[empty:%d] corruption\n",
834 ++ rdev->bss_entries, list_empty(&rdev->bss_list));
835 + bss_ref_put(rdev, bss);
836 + return true;
837 + }
838 +@@ -162,6 +179,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
839 + rdev->bss_generation++;
840 + }
841 +
842 ++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
843 ++{
844 ++ struct cfg80211_internal_bss *bss, *oldest = NULL;
845 ++ bool ret;
846 ++
847 ++ lockdep_assert_held(&rdev->bss_lock);
848 ++
849 ++ list_for_each_entry(bss, &rdev->bss_list, list) {
850 ++ if (atomic_read(&bss->hold))
851 ++ continue;
852 ++
853 ++ if (!list_empty(&bss->hidden_list) &&
854 ++ !bss->pub.hidden_beacon_bss)
855 ++ continue;
856 ++
857 ++ if (oldest && time_before(oldest->ts, bss->ts))
858 ++ continue;
859 ++ oldest = bss;
860 ++ }
861 ++
862 ++ if (WARN_ON(!oldest))
863 ++ return false;
864 ++
865 ++ /*
866 ++ * The callers make sure to increase rdev->bss_generation if anything
867 ++ * gets removed (and a new entry added), so there's no need to also do
868 ++ * it here.
869 ++ */
870 ++
871 ++ ret = __cfg80211_unlink_bss(rdev, oldest);
872 ++ WARN_ON(!ret);
873 ++ return ret;
874 ++}
875 ++
876 + void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
877 + bool send_message)
878 + {
879 +@@ -687,6 +738,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
880 + const u8 *ie;
881 + int i, ssidlen;
882 + u8 fold = 0;
883 ++ u32 n_entries = 0;
884 +
885 + ies = rcu_access_pointer(new->pub.beacon_ies);
886 + if (WARN_ON(!ies))
887 +@@ -710,6 +762,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
888 + /* This is the bad part ... */
889 +
890 + list_for_each_entry(bss, &rdev->bss_list, list) {
891 ++ /*
892 ++ * we're iterating all the entries anyway, so take the
893 ++ * opportunity to validate the list length accounting
894 ++ */
895 ++ n_entries++;
896 ++
897 + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
898 + continue;
899 + if (bss->pub.channel != new->pub.channel)
900 +@@ -738,6 +796,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
901 + new->pub.beacon_ies);
902 + }
903 +
904 ++ WARN_ONCE(n_entries != rdev->bss_entries,
905 ++ "rdev bss entries[%d]/list[len:%d] corruption\n",
906 ++ rdev->bss_entries, n_entries);
907 ++
908 + return true;
909 + }
910 +
911 +@@ -890,7 +952,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
912 + }
913 + }
914 +
915 ++ if (rdev->bss_entries >= bss_entries_limit &&
916 ++ !cfg80211_bss_expire_oldest(rdev)) {
917 ++ kfree(new);
918 ++ goto drop;
919 ++ }
920 ++
921 + list_add_tail(&new->list, &rdev->bss_list);
922 ++ rdev->bss_entries++;
923 + rb_insert_bss(rdev, new);
924 + found = new;
925 + }
926 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
927 +index dc0027b28b04..53426a6ee6dc 100644
928 +--- a/security/apparmor/domain.c
929 ++++ b/security/apparmor/domain.c
930 +@@ -623,8 +623,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
931 + /* released below */
932 + cred = get_current_cred();
933 + cxt = cred_cxt(cred);
934 +- profile = aa_cred_profile(cred);
935 +- previous_profile = cxt->previous;
936 ++ profile = aa_get_newest_profile(aa_cred_profile(cred));
937 ++ previous_profile = aa_get_newest_profile(cxt->previous);
938 +
939 + if (unconfined(profile)) {
940 + info = "unconfined";
941 +@@ -720,6 +720,8 @@ audit:
942 + out:
943 + aa_put_profile(hat);
944 + kfree(name);
945 ++ aa_put_profile(profile);
946 ++ aa_put_profile(previous_profile);
947 + put_cred(cred);
948 +
949 + return error;