Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.2.69/, 3.14.46/, 3.14.47/, 4.0.7/
Date: Sun, 05 Jul 2015 16:33:35
Message-Id: 1436113999.20f00980e88516e9663fcebc708948c1824b56b2.blueness@gentoo
1 commit: 20f00980e88516e9663fcebc708948c1824b56b2
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Sun Jul 5 16:33:00 2015 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Sun Jul 5 16:33:19 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=20f00980
7
8 Grsec/PaX: 3.1-{3.2.69,3.14.47,4.0.7}-201507050833
9
10 3.14.46/1045_linux-3.14.46.patch | 829 ------------
11 {3.14.46 => 3.14.47}/0000_README | 6 +-
12 3.14.47/1046_linux-3.14.47.patch | 1395 ++++++++++++++++++++
13 .../4420_grsecurity-3.1-3.14.47-201507050832.patch | 154 ++-
14 .../4425_grsec_remove_EI_PAX.patch | 0
15 .../4427_force_XATTR_PAX_tmpfs.patch | 0
16 .../4430_grsec-remove-localversion-grsec.patch | 0
17 .../4435_grsec-mute-warnings.patch | 0
18 .../4440_grsec-remove-protected-paths.patch | 0
19 .../4450_grsec-kconfig-default-gids.patch | 0
20 .../4465_selinux-avc_audit-log-curr_ip.patch | 0
21 .../4470_disable-compat_vdso.patch | 0
22 .../4475_emutramp_default_on.patch | 0
23 3.2.69/0000_README | 2 +-
24 ... 4420_grsecurity-3.1-3.2.69-201507050830.patch} | 37 +-
25 4.0.7/0000_README | 6 +-
26 4.0.7/1006_linux-4.0.7.patch | 707 ----------
27 ...> 4420_grsecurity-3.1-4.0.7-201507050833.patch} | 103 +-
28 18 files changed, 1630 insertions(+), 1609 deletions(-)
29
30 diff --git a/3.14.46/1045_linux-3.14.46.patch b/3.14.46/1045_linux-3.14.46.patch
31 deleted file mode 100644
32 index 12790dc..0000000
33 --- a/3.14.46/1045_linux-3.14.46.patch
34 +++ /dev/null
35 @@ -1,829 +0,0 @@
36 -diff --git a/Makefile b/Makefile
37 -index c92186c..def39fd 100644
38 ---- a/Makefile
39 -+++ b/Makefile
40 -@@ -1,6 +1,6 @@
41 - VERSION = 3
42 - PATCHLEVEL = 14
43 --SUBLEVEL = 45
44 -+SUBLEVEL = 46
45 - EXTRAVERSION =
46 - NAME = Remembering Coco
47 -
48 -diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
49 -index 09af149..530f56e 100644
50 ---- a/arch/arm/include/asm/kvm_host.h
51 -+++ b/arch/arm/include/asm/kvm_host.h
52 -@@ -42,7 +42,7 @@
53 -
54 - struct kvm_vcpu;
55 - u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
56 --int kvm_target_cpu(void);
57 -+int __attribute_const__ kvm_target_cpu(void);
58 - int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
59 - void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
60 -
61 -diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
62 -index 7b362bc..0cbdb8e 100644
63 ---- a/arch/arm/include/asm/kvm_mmu.h
64 -+++ b/arch/arm/include/asm/kvm_mmu.h
65 -@@ -127,6 +127,18 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
66 - (__boundary - 1 < (end) - 1)? __boundary: (end); \
67 - })
68 -
69 -+static inline bool kvm_page_empty(void *ptr)
70 -+{
71 -+ struct page *ptr_page = virt_to_page(ptr);
72 -+ return page_count(ptr_page) == 1;
73 -+}
74 -+
75 -+
76 -+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
77 -+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
78 -+#define kvm_pud_table_empty(pudp) (0)
79 -+
80 -+
81 - struct kvm;
82 -
83 - #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
84 -diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
85 -index 797b1a6..7e666cf 100644
86 ---- a/arch/arm/kernel/hyp-stub.S
87 -+++ b/arch/arm/kernel/hyp-stub.S
88 -@@ -134,9 +134,7 @@ ENTRY(__hyp_stub_install_secondary)
89 - mcr p15, 4, r7, c1, c1, 3 @ HSTR
90 -
91 - THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
92 --#ifdef CONFIG_CPU_BIG_ENDIAN
93 -- orr r7, #(1 << 9) @ HSCTLR.EE
94 --#endif
95 -+ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
96 - mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
97 -
98 - mrc p15, 4, r7, c1, c1, 1 @ HDCR
99 -diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
100 -index bd18bb8..df6e75e 100644
101 ---- a/arch/arm/kvm/arm.c
102 -+++ b/arch/arm/kvm/arm.c
103 -@@ -82,7 +82,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
104 - /**
105 - * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
106 - */
107 --struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
108 -+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
109 - {
110 - return &kvm_arm_running_vcpu;
111 - }
112 -@@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
113 - return VM_FAULT_SIGBUS;
114 - }
115 -
116 --void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
117 -- struct kvm_memory_slot *dont)
118 --{
119 --}
120 --
121 --int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
122 -- unsigned long npages)
123 --{
124 -- return 0;
125 --}
126 -
127 - /**
128 - * kvm_arch_destroy_vm - destroy the VM data structure
129 -@@ -224,33 +214,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
130 - return -EINVAL;
131 - }
132 -
133 --void kvm_arch_memslots_updated(struct kvm *kvm)
134 --{
135 --}
136 --
137 --int kvm_arch_prepare_memory_region(struct kvm *kvm,
138 -- struct kvm_memory_slot *memslot,
139 -- struct kvm_userspace_memory_region *mem,
140 -- enum kvm_mr_change change)
141 --{
142 -- return 0;
143 --}
144 --
145 --void kvm_arch_commit_memory_region(struct kvm *kvm,
146 -- struct kvm_userspace_memory_region *mem,
147 -- const struct kvm_memory_slot *old,
148 -- enum kvm_mr_change change)
149 --{
150 --}
151 --
152 --void kvm_arch_flush_shadow_all(struct kvm *kvm)
153 --{
154 --}
155 --
156 --void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
157 -- struct kvm_memory_slot *slot)
158 --{
159 --}
160 -
161 - struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
162 - {
163 -diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
164 -index c58a351..7c73290 100644
165 ---- a/arch/arm/kvm/coproc.c
166 -+++ b/arch/arm/kvm/coproc.c
167 -@@ -742,7 +742,7 @@ static bool is_valid_cache(u32 val)
168 - u32 level, ctype;
169 -
170 - if (val >= CSSELR_MAX)
171 -- return -ENOENT;
172 -+ return false;
173 -
174 - /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
175 - level = (val >> 1);
176 -diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
177 -index c93ef38..70ed2c1 100644
178 ---- a/arch/arm/kvm/mmu.c
179 -+++ b/arch/arm/kvm/mmu.c
180 -@@ -90,103 +90,115 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
181 - return p;
182 - }
183 -
184 --static bool page_empty(void *ptr)
185 -+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
186 - {
187 -- struct page *ptr_page = virt_to_page(ptr);
188 -- return page_count(ptr_page) == 1;
189 -+ pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
190 -+ pgd_clear(pgd);
191 -+ kvm_tlb_flush_vmid_ipa(kvm, addr);
192 -+ pud_free(NULL, pud_table);
193 -+ put_page(virt_to_page(pgd));
194 - }
195 -
196 - static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
197 - {
198 -- if (pud_huge(*pud)) {
199 -- pud_clear(pud);
200 -- kvm_tlb_flush_vmid_ipa(kvm, addr);
201 -- } else {
202 -- pmd_t *pmd_table = pmd_offset(pud, 0);
203 -- pud_clear(pud);
204 -- kvm_tlb_flush_vmid_ipa(kvm, addr);
205 -- pmd_free(NULL, pmd_table);
206 -- }
207 -+ pmd_t *pmd_table = pmd_offset(pud, 0);
208 -+ VM_BUG_ON(pud_huge(*pud));
209 -+ pud_clear(pud);
210 -+ kvm_tlb_flush_vmid_ipa(kvm, addr);
211 -+ pmd_free(NULL, pmd_table);
212 - put_page(virt_to_page(pud));
213 - }
214 -
215 - static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
216 - {
217 -- if (kvm_pmd_huge(*pmd)) {
218 -- pmd_clear(pmd);
219 -- kvm_tlb_flush_vmid_ipa(kvm, addr);
220 -- } else {
221 -- pte_t *pte_table = pte_offset_kernel(pmd, 0);
222 -- pmd_clear(pmd);
223 -- kvm_tlb_flush_vmid_ipa(kvm, addr);
224 -- pte_free_kernel(NULL, pte_table);
225 -- }
226 -+ pte_t *pte_table = pte_offset_kernel(pmd, 0);
227 -+ VM_BUG_ON(kvm_pmd_huge(*pmd));
228 -+ pmd_clear(pmd);
229 -+ kvm_tlb_flush_vmid_ipa(kvm, addr);
230 -+ pte_free_kernel(NULL, pte_table);
231 - put_page(virt_to_page(pmd));
232 - }
233 -
234 --static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
235 -+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
236 -+ phys_addr_t addr, phys_addr_t end)
237 - {
238 -- if (pte_present(*pte)) {
239 -- kvm_set_pte(pte, __pte(0));
240 -- put_page(virt_to_page(pte));
241 -- kvm_tlb_flush_vmid_ipa(kvm, addr);
242 -+ phys_addr_t start_addr = addr;
243 -+ pte_t *pte, *start_pte;
244 -+
245 -+ start_pte = pte = pte_offset_kernel(pmd, addr);
246 -+ do {
247 -+ if (!pte_none(*pte)) {
248 -+ kvm_set_pte(pte, __pte(0));
249 -+ put_page(virt_to_page(pte));
250 -+ kvm_tlb_flush_vmid_ipa(kvm, addr);
251 -+ }
252 -+ } while (pte++, addr += PAGE_SIZE, addr != end);
253 -+
254 -+ if (kvm_pte_table_empty(start_pte))
255 -+ clear_pmd_entry(kvm, pmd, start_addr);
256 - }
257 --}
258 -
259 --static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
260 -- unsigned long long start, u64 size)
261 -+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
262 -+ phys_addr_t addr, phys_addr_t end)
263 - {
264 -- pgd_t *pgd;
265 -- pud_t *pud;
266 -- pmd_t *pmd;
267 -- pte_t *pte;
268 -- unsigned long long addr = start, end = start + size;
269 -- u64 next;
270 --
271 -- while (addr < end) {
272 -- pgd = pgdp + pgd_index(addr);
273 -- pud = pud_offset(pgd, addr);
274 -- if (pud_none(*pud)) {
275 -- addr = kvm_pud_addr_end(addr, end);
276 -- continue;
277 -- }
278 -+ phys_addr_t next, start_addr = addr;
279 -+ pmd_t *pmd, *start_pmd;
280 -
281 -- if (pud_huge(*pud)) {
282 -- /*
283 -- * If we are dealing with a huge pud, just clear it and
284 -- * move on.
285 -- */
286 -- clear_pud_entry(kvm, pud, addr);
287 -- addr = kvm_pud_addr_end(addr, end);
288 -- continue;
289 -+ start_pmd = pmd = pmd_offset(pud, addr);
290 -+ do {
291 -+ next = kvm_pmd_addr_end(addr, end);
292 -+ if (!pmd_none(*pmd)) {
293 -+ if (kvm_pmd_huge(*pmd)) {
294 -+ pmd_clear(pmd);
295 -+ kvm_tlb_flush_vmid_ipa(kvm, addr);
296 -+ put_page(virt_to_page(pmd));
297 -+ } else {
298 -+ unmap_ptes(kvm, pmd, addr, next);
299 -+ }
300 - }
301 -+ } while (pmd++, addr = next, addr != end);
302 -
303 -- pmd = pmd_offset(pud, addr);
304 -- if (pmd_none(*pmd)) {
305 -- addr = kvm_pmd_addr_end(addr, end);
306 -- continue;
307 -- }
308 -+ if (kvm_pmd_table_empty(start_pmd))
309 -+ clear_pud_entry(kvm, pud, start_addr);
310 -+}
311 -
312 -- if (!kvm_pmd_huge(*pmd)) {
313 -- pte = pte_offset_kernel(pmd, addr);
314 -- clear_pte_entry(kvm, pte, addr);
315 -- next = addr + PAGE_SIZE;
316 -- }
317 -+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
318 -+ phys_addr_t addr, phys_addr_t end)
319 -+{
320 -+ phys_addr_t next, start_addr = addr;
321 -+ pud_t *pud, *start_pud;
322 -
323 -- /*
324 -- * If the pmd entry is to be cleared, walk back up the ladder
325 -- */
326 -- if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
327 -- clear_pmd_entry(kvm, pmd, addr);
328 -- next = kvm_pmd_addr_end(addr, end);
329 -- if (page_empty(pmd) && !page_empty(pud)) {
330 -- clear_pud_entry(kvm, pud, addr);
331 -- next = kvm_pud_addr_end(addr, end);
332 -+ start_pud = pud = pud_offset(pgd, addr);
333 -+ do {
334 -+ next = kvm_pud_addr_end(addr, end);
335 -+ if (!pud_none(*pud)) {
336 -+ if (pud_huge(*pud)) {
337 -+ pud_clear(pud);
338 -+ kvm_tlb_flush_vmid_ipa(kvm, addr);
339 -+ put_page(virt_to_page(pud));
340 -+ } else {
341 -+ unmap_pmds(kvm, pud, addr, next);
342 - }
343 - }
344 -+ } while (pud++, addr = next, addr != end);
345 -
346 -- addr = next;
347 -- }
348 -+ if (kvm_pud_table_empty(start_pud))
349 -+ clear_pgd_entry(kvm, pgd, start_addr);
350 -+}
351 -+
352 -+
353 -+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
354 -+ phys_addr_t start, u64 size)
355 -+{
356 -+ pgd_t *pgd;
357 -+ phys_addr_t addr = start, end = start + size;
358 -+ phys_addr_t next;
359 -+
360 -+ pgd = pgdp + pgd_index(addr);
361 -+ do {
362 -+ next = kvm_pgd_addr_end(addr, end);
363 -+ unmap_puds(kvm, pgd, addr, next);
364 -+ } while (pgd++, addr = next, addr != end);
365 - }
366 -
367 - static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
368 -@@ -747,6 +759,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
369 - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
370 - struct vm_area_struct *vma;
371 - pfn_t pfn;
372 -+ pgprot_t mem_type = PAGE_S2;
373 -
374 - write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
375 - if (fault_status == FSC_PERM && !write_fault) {
376 -@@ -797,6 +810,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
377 - if (is_error_pfn(pfn))
378 - return -EFAULT;
379 -
380 -+ if (kvm_is_mmio_pfn(pfn))
381 -+ mem_type = PAGE_S2_DEVICE;
382 -+
383 - spin_lock(&kvm->mmu_lock);
384 - if (mmu_notifier_retry(kvm, mmu_seq))
385 - goto out_unlock;
386 -@@ -804,7 +820,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
387 - hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
388 -
389 - if (hugetlb) {
390 -- pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
391 -+ pmd_t new_pmd = pfn_pmd(pfn, mem_type);
392 - new_pmd = pmd_mkhuge(new_pmd);
393 - if (writable) {
394 - kvm_set_s2pmd_writable(&new_pmd);
395 -@@ -813,13 +829,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
396 - coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
397 - ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
398 - } else {
399 -- pte_t new_pte = pfn_pte(pfn, PAGE_S2);
400 -+ pte_t new_pte = pfn_pte(pfn, mem_type);
401 - if (writable) {
402 - kvm_set_s2pte_writable(&new_pte);
403 - kvm_set_pfn_dirty(pfn);
404 - }
405 - coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
406 -- ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
407 -+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
408 -+ mem_type == PAGE_S2_DEVICE);
409 - }
410 -
411 -
412 -@@ -1099,3 +1116,49 @@ out:
413 - free_hyp_pgds();
414 - return err;
415 - }
416 -+
417 -+void kvm_arch_commit_memory_region(struct kvm *kvm,
418 -+ struct kvm_userspace_memory_region *mem,
419 -+ const struct kvm_memory_slot *old,
420 -+ enum kvm_mr_change change)
421 -+{
422 -+ gpa_t gpa = old->base_gfn << PAGE_SHIFT;
423 -+ phys_addr_t size = old->npages << PAGE_SHIFT;
424 -+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
425 -+ spin_lock(&kvm->mmu_lock);
426 -+ unmap_stage2_range(kvm, gpa, size);
427 -+ spin_unlock(&kvm->mmu_lock);
428 -+ }
429 -+}
430 -+
431 -+int kvm_arch_prepare_memory_region(struct kvm *kvm,
432 -+ struct kvm_memory_slot *memslot,
433 -+ struct kvm_userspace_memory_region *mem,
434 -+ enum kvm_mr_change change)
435 -+{
436 -+ return 0;
437 -+}
438 -+
439 -+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
440 -+ struct kvm_memory_slot *dont)
441 -+{
442 -+}
443 -+
444 -+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
445 -+ unsigned long npages)
446 -+{
447 -+ return 0;
448 -+}
449 -+
450 -+void kvm_arch_memslots_updated(struct kvm *kvm)
451 -+{
452 -+}
453 -+
454 -+void kvm_arch_flush_shadow_all(struct kvm *kvm)
455 -+{
456 -+}
457 -+
458 -+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
459 -+ struct kvm_memory_slot *slot)
460 -+{
461 -+}
462 -diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
463 -index 0a1d697..3fb0946 100644
464 ---- a/arch/arm64/include/asm/kvm_host.h
465 -+++ b/arch/arm64/include/asm/kvm_host.h
466 -@@ -42,7 +42,7 @@
467 - #define KVM_VCPU_MAX_FEATURES 2
468 -
469 - struct kvm_vcpu;
470 --int kvm_target_cpu(void);
471 -+int __attribute_const__ kvm_target_cpu(void);
472 - int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
473 - int kvm_arch_dev_ioctl_check_extension(long ext);
474 -
475 -@@ -177,7 +177,7 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
476 - }
477 -
478 - struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
479 --struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
480 -+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
481 -
482 - u64 kvm_call_hyp(void *hypfn, ...);
483 -
484 -diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
485 -index 7d29847..8e138c7 100644
486 ---- a/arch/arm64/include/asm/kvm_mmu.h
487 -+++ b/arch/arm64/include/asm/kvm_mmu.h
488 -@@ -125,6 +125,21 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
489 - #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
490 - #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
491 -
492 -+static inline bool kvm_page_empty(void *ptr)
493 -+{
494 -+ struct page *ptr_page = virt_to_page(ptr);
495 -+ return page_count(ptr_page) == 1;
496 -+}
497 -+
498 -+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
499 -+#ifndef CONFIG_ARM64_64K_PAGES
500 -+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
501 -+#else
502 -+#define kvm_pmd_table_empty(pmdp) (0)
503 -+#endif
504 -+#define kvm_pud_table_empty(pudp) (0)
505 -+
506 -+
507 - struct kvm;
508 -
509 - #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
510 -diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
511 -index b0d1512..5dfc8331 100644
512 ---- a/arch/arm64/kvm/hyp.S
513 -+++ b/arch/arm64/kvm/hyp.S
514 -@@ -830,7 +830,7 @@ el1_trap:
515 - mrs x2, far_el2
516 -
517 - 2: mrs x0, tpidr_el2
518 -- str x1, [x0, #VCPU_ESR_EL2]
519 -+ str w1, [x0, #VCPU_ESR_EL2]
520 - str x2, [x0, #VCPU_FAR_EL2]
521 - str x3, [x0, #VCPU_HPFAR_EL2]
522 -
523 -diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
524 -index 0324458..7691b25 100644
525 ---- a/arch/arm64/kvm/sys_regs.c
526 -+++ b/arch/arm64/kvm/sys_regs.c
527 -@@ -836,7 +836,7 @@ static bool is_valid_cache(u32 val)
528 - u32 level, ctype;
529 -
530 - if (val >= CSSELR_MAX)
531 -- return -ENOENT;
532 -+ return false;
533 -
534 - /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
535 - level = (val >> 1);
536 -@@ -962,7 +962,7 @@ static unsigned int num_demux_regs(void)
537 -
538 - static int write_demux_regids(u64 __user *uindices)
539 - {
540 -- u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
541 -+ u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
542 - unsigned int i;
543 -
544 - val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
545 -diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
546 -index 26b03e1..8ff2b3c 100644
547 ---- a/drivers/bluetooth/ath3k.c
548 -+++ b/drivers/bluetooth/ath3k.c
549 -@@ -79,6 +79,7 @@ static const struct usb_device_id ath3k_table[] = {
550 - { USB_DEVICE(0x0489, 0xe057) },
551 - { USB_DEVICE(0x0489, 0xe056) },
552 - { USB_DEVICE(0x0489, 0xe05f) },
553 -+ { USB_DEVICE(0x0489, 0xe076) },
554 - { USB_DEVICE(0x0489, 0xe078) },
555 - { USB_DEVICE(0x04c5, 0x1330) },
556 - { USB_DEVICE(0x04CA, 0x3004) },
557 -@@ -109,6 +110,7 @@ static const struct usb_device_id ath3k_table[] = {
558 - { USB_DEVICE(0x13d3, 0x3402) },
559 - { USB_DEVICE(0x13d3, 0x3408) },
560 - { USB_DEVICE(0x13d3, 0x3432) },
561 -+ { USB_DEVICE(0x13d3, 0x3474) },
562 -
563 - /* Atheros AR5BBU12 with sflash firmware */
564 - { USB_DEVICE(0x0489, 0xE02C) },
565 -@@ -133,6 +135,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
566 - { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
567 - { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
568 - { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
569 -+ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
570 - { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
571 - { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
572 - { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
573 -@@ -163,6 +166,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
574 - { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
575 - { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
576 - { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
577 -+ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
578 -
579 - /* Atheros AR5BBU22 with sflash firmware */
580 - { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
581 -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
582 -index 9eb1669..c0e7a9aa9 100644
583 ---- a/drivers/bluetooth/btusb.c
584 -+++ b/drivers/bluetooth/btusb.c
585 -@@ -157,6 +157,7 @@ static const struct usb_device_id blacklist_table[] = {
586 - { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
587 - { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
588 - { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
589 -+ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
590 - { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
591 - { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
592 - { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
593 -@@ -187,6 +188,7 @@ static const struct usb_device_id blacklist_table[] = {
594 - { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
595 - { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
596 - { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
597 -+ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
598 -
599 - /* Atheros AR5BBU12 with sflash firmware */
600 - { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
601 -diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
602 -index 28486b1..ae6dae8 100644
603 ---- a/drivers/crypto/caam/caamrng.c
604 -+++ b/drivers/crypto/caam/caamrng.c
605 -@@ -56,7 +56,7 @@
606 -
607 - /* Buffer, its dma address and lock */
608 - struct buf_data {
609 -- u8 buf[RN_BUF_SIZE];
610 -+ u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
611 - dma_addr_t addr;
612 - struct completion filled;
613 - u32 hw_desc[DESC_JOB_O_LEN];
614 -diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
615 -index 9683747..f2511a0 100644
616 ---- a/drivers/gpu/drm/mgag200/mgag200_mode.c
617 -+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
618 -@@ -1529,6 +1529,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
619 - return MODE_BANDWIDTH;
620 - }
621 -
622 -+ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
623 -+ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
624 -+ return MODE_H_ILLEGAL;
625 -+ }
626 -+
627 - if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
628 - mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
629 - mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
630 -diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
631 -index 8f580fd..ce21132 100644
632 ---- a/drivers/scsi/lpfc/lpfc_sli.c
633 -+++ b/drivers/scsi/lpfc/lpfc_sli.c
634 -@@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
635 - return NULL;
636 -
637 - q->hba_index = idx;
638 -+
639 -+ /*
640 -+ * insert barrier for instruction interlock : data from the hardware
641 -+ * must have the valid bit checked before it can be copied and acted
642 -+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
643 -+ * instructions allowing action on content before valid bit checked,
644 -+ * add barrier here as well. May not be needed as "content" is a
645 -+ * single 32-bit entity here (vs multi word structure for cq's).
646 -+ */
647 -+ mb();
648 - return eqe;
649 - }
650 -
651 -@@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
652 -
653 - cqe = q->qe[q->hba_index].cqe;
654 - q->hba_index = idx;
655 -+
656 -+ /*
657 -+ * insert barrier for instruction interlock : data from the hardware
658 -+ * must have the valid bit checked before it can be copied and acted
659 -+ * upon. Speculative instructions were allowing a bcopy at the start
660 -+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
661 -+ * after our return, to copy data before the valid bit check above
662 -+ * was done. As such, some of the copied data was stale. The barrier
663 -+ * ensures the check is before any data is copied.
664 -+ */
665 -+ mb();
666 - return cqe;
667 - }
668 -
669 -diff --git a/fs/pipe.c b/fs/pipe.c
670 -index 78fd0d0..46f1ab2 100644
671 ---- a/fs/pipe.c
672 -+++ b/fs/pipe.c
673 -@@ -117,25 +117,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
674 - }
675 -
676 - static int
677 --pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
678 -- int atomic)
679 -+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
680 -+ size_t *remaining, int atomic)
681 - {
682 - unsigned long copy;
683 -
684 -- while (len > 0) {
685 -+ while (*remaining > 0) {
686 - while (!iov->iov_len)
687 - iov++;
688 -- copy = min_t(unsigned long, len, iov->iov_len);
689 -+ copy = min_t(unsigned long, *remaining, iov->iov_len);
690 -
691 - if (atomic) {
692 -- if (__copy_from_user_inatomic(to, iov->iov_base, copy))
693 -+ if (__copy_from_user_inatomic(addr + *offset,
694 -+ iov->iov_base, copy))
695 - return -EFAULT;
696 - } else {
697 -- if (copy_from_user(to, iov->iov_base, copy))
698 -+ if (copy_from_user(addr + *offset,
699 -+ iov->iov_base, copy))
700 - return -EFAULT;
701 - }
702 -- to += copy;
703 -- len -= copy;
704 -+ *offset += copy;
705 -+ *remaining -= copy;
706 - iov->iov_base += copy;
707 - iov->iov_len -= copy;
708 - }
709 -@@ -143,25 +145,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
710 - }
711 -
712 - static int
713 --pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
714 -- int atomic)
715 -+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
716 -+ size_t *remaining, int atomic)
717 - {
718 - unsigned long copy;
719 -
720 -- while (len > 0) {
721 -+ while (*remaining > 0) {
722 - while (!iov->iov_len)
723 - iov++;
724 -- copy = min_t(unsigned long, len, iov->iov_len);
725 -+ copy = min_t(unsigned long, *remaining, iov->iov_len);
726 -
727 - if (atomic) {
728 -- if (__copy_to_user_inatomic(iov->iov_base, from, copy))
729 -+ if (__copy_to_user_inatomic(iov->iov_base,
730 -+ addr + *offset, copy))
731 - return -EFAULT;
732 - } else {
733 -- if (copy_to_user(iov->iov_base, from, copy))
734 -+ if (copy_to_user(iov->iov_base,
735 -+ addr + *offset, copy))
736 - return -EFAULT;
737 - }
738 -- from += copy;
739 -- len -= copy;
740 -+ *offset += copy;
741 -+ *remaining -= copy;
742 - iov->iov_base += copy;
743 - iov->iov_len -= copy;
744 - }
745 -@@ -395,7 +399,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
746 - struct pipe_buffer *buf = pipe->bufs + curbuf;
747 - const struct pipe_buf_operations *ops = buf->ops;
748 - void *addr;
749 -- size_t chars = buf->len;
750 -+ size_t chars = buf->len, remaining;
751 - int error, atomic;
752 -
753 - if (chars > total_len)
754 -@@ -409,9 +413,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
755 - }
756 -
757 - atomic = !iov_fault_in_pages_write(iov, chars);
758 -+ remaining = chars;
759 - redo:
760 - addr = ops->map(pipe, buf, atomic);
761 -- error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
762 -+ error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
763 -+ &remaining, atomic);
764 - ops->unmap(pipe, buf, addr);
765 - if (unlikely(error)) {
766 - /*
767 -@@ -426,7 +432,6 @@ redo:
768 - break;
769 - }
770 - ret += chars;
771 -- buf->offset += chars;
772 - buf->len -= chars;
773 -
774 - /* Was it a packet buffer? Clean up and exit */
775 -@@ -531,6 +536,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
776 - if (ops->can_merge && offset + chars <= PAGE_SIZE) {
777 - int error, atomic = 1;
778 - void *addr;
779 -+ size_t remaining = chars;
780 -
781 - error = ops->confirm(pipe, buf);
782 - if (error)
783 -@@ -539,8 +545,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
784 - iov_fault_in_pages_read(iov, chars);
785 - redo1:
786 - addr = ops->map(pipe, buf, atomic);
787 -- error = pipe_iov_copy_from_user(offset + addr, iov,
788 -- chars, atomic);
789 -+ error = pipe_iov_copy_from_user(addr, &offset, iov,
790 -+ &remaining, atomic);
791 - ops->unmap(pipe, buf, addr);
792 - ret = error;
793 - do_wakeup = 1;
794 -@@ -575,6 +581,8 @@ redo1:
795 - struct page *page = pipe->tmp_page;
796 - char *src;
797 - int error, atomic = 1;
798 -+ int offset = 0;
799 -+ size_t remaining;
800 -
801 - if (!page) {
802 - page = alloc_page(GFP_HIGHUSER);
803 -@@ -595,14 +603,15 @@ redo1:
804 - chars = total_len;
805 -
806 - iov_fault_in_pages_read(iov, chars);
807 -+ remaining = chars;
808 - redo2:
809 - if (atomic)
810 - src = kmap_atomic(page);
811 - else
812 - src = kmap(page);
813 -
814 -- error = pipe_iov_copy_from_user(src, iov, chars,
815 -- atomic);
816 -+ error = pipe_iov_copy_from_user(src, &offset, iov,
817 -+ &remaining, atomic);
818 - if (atomic)
819 - kunmap_atomic(src);
820 - else
821 -diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
822 -index 8a86319..cb347e8 100644
823 ---- a/kernel/trace/trace_events_filter.c
824 -+++ b/kernel/trace/trace_events_filter.c
825 -@@ -1399,19 +1399,24 @@ static int check_preds(struct filter_parse_state *ps)
826 - {
827 - int n_normal_preds = 0, n_logical_preds = 0;
828 - struct postfix_elt *elt;
829 -+ int cnt = 0;
830 -
831 - list_for_each_entry(elt, &ps->postfix, list) {
832 -- if (elt->op == OP_NONE)
833 -+ if (elt->op == OP_NONE) {
834 -+ cnt++;
835 - continue;
836 -+ }
837 -
838 -+ cnt--;
839 - if (elt->op == OP_AND || elt->op == OP_OR) {
840 - n_logical_preds++;
841 - continue;
842 - }
843 - n_normal_preds++;
844 -+ WARN_ON_ONCE(cnt < 0);
845 - }
846 -
847 -- if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
848 -+ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
849 - parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
850 - return -EINVAL;
851 - }
852 -diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
853 -index 4eec2d4..1316e55 100644
854 ---- a/virt/kvm/arm/vgic.c
855 -+++ b/virt/kvm/arm/vgic.c
856 -@@ -1654,7 +1654,7 @@ out:
857 - return ret;
858 - }
859 -
860 --static bool vgic_ioaddr_overlap(struct kvm *kvm)
861 -+static int vgic_ioaddr_overlap(struct kvm *kvm)
862 - {
863 - phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
864 - phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
865
866 diff --git a/3.14.46/0000_README b/3.14.47/0000_README
867 similarity index 93%
868 rename from 3.14.46/0000_README
869 rename to 3.14.47/0000_README
870 index de59c28..b3b9e28 100644
871 --- a/3.14.46/0000_README
872 +++ b/3.14.47/0000_README
873 @@ -2,11 +2,11 @@ README
874 -----------------------------------------------------------------------------
875 Individual Patch Descriptions:
876 -----------------------------------------------------------------------------
877 -Patch: 1045_linux-3.14.46.patch
878 +Patch: 1046_linux-3.14.47.patch
879 From: http://www.kernel.org
880 -Desc: Linux 3.14.46
881 +Desc: Linux 3.14.47
882
883 -Patch: 4420_grsecurity-3.1-3.14.46-201506300711.patch
884 +Patch: 4420_grsecurity-3.1-3.14.47-201507050832.patch
885 From: http://www.grsecurity.net
886 Desc: hardened-sources base patch from upstream grsecurity
887
888
889 diff --git a/3.14.47/1046_linux-3.14.47.patch b/3.14.47/1046_linux-3.14.47.patch
890 new file mode 100644
891 index 0000000..4dc0c5a
892 --- /dev/null
893 +++ b/3.14.47/1046_linux-3.14.47.patch
894 @@ -0,0 +1,1395 @@
895 +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
896 +index 6cd63a9..bc6d617 100644
897 +--- a/Documentation/virtual/kvm/api.txt
898 ++++ b/Documentation/virtual/kvm/api.txt
899 +@@ -2344,7 +2344,8 @@ should be created before this ioctl is invoked.
900 +
901 + Possible features:
902 + - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
903 +- Depends on KVM_CAP_ARM_PSCI.
904 ++ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
905 ++ and execute guest code when KVM_RUN is called.
906 + - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
907 + Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
908 +
909 +diff --git a/Makefile b/Makefile
910 +index def39fd..f9041e6 100644
911 +--- a/Makefile
912 ++++ b/Makefile
913 +@@ -1,6 +1,6 @@
914 + VERSION = 3
915 + PATCHLEVEL = 14
916 +-SUBLEVEL = 46
917 ++SUBLEVEL = 47
918 + EXTRAVERSION =
919 + NAME = Remembering Coco
920 +
921 +diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
922 +index 0fa90c9..853e2be 100644
923 +--- a/arch/arm/include/asm/kvm_emulate.h
924 ++++ b/arch/arm/include/asm/kvm_emulate.h
925 +@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
926 + void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
927 + void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
928 +
929 ++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
930 ++{
931 ++ vcpu->arch.hcr = HCR_GUEST_MASK;
932 ++}
933 ++
934 + static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
935 + {
936 + return 1;
937 +diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
938 +index 0cbdb8e..9f79231 100644
939 +--- a/arch/arm/include/asm/kvm_mmu.h
940 ++++ b/arch/arm/include/asm/kvm_mmu.h
941 +@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
942 + void free_boot_hyp_pgd(void);
943 + void free_hyp_pgds(void);
944 +
945 ++void stage2_unmap_vm(struct kvm *kvm);
946 + int kvm_alloc_stage2_pgd(struct kvm *kvm);
947 + void kvm_free_stage2_pgd(struct kvm *kvm);
948 + int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
949 +@@ -78,17 +79,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
950 + flush_pmd_entry(pte);
951 + }
952 +
953 +-static inline bool kvm_is_write_fault(unsigned long hsr)
954 +-{
955 +- unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
956 +- if (hsr_ec == HSR_EC_IABT)
957 +- return false;
958 +- else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
959 +- return false;
960 +- else
961 +- return true;
962 +-}
963 +-
964 + static inline void kvm_clean_pgd(pgd_t *pgd)
965 + {
966 + clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
967 +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
968 +index df6e75e..2e74a61 100644
969 +--- a/arch/arm/kvm/arm.c
970 ++++ b/arch/arm/kvm/arm.c
971 +@@ -220,6 +220,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
972 + int err;
973 + struct kvm_vcpu *vcpu;
974 +
975 ++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
976 ++ err = -EBUSY;
977 ++ goto out;
978 ++ }
979 ++
980 + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
981 + if (!vcpu) {
982 + err = -ENOMEM;
983 +@@ -427,9 +432,9 @@ static void update_vttbr(struct kvm *kvm)
984 +
985 + /* update vttbr to be used with the new vmid */
986 + pgd_phys = virt_to_phys(kvm->arch.pgd);
987 ++ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
988 + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
989 +- kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
990 +- kvm->arch.vttbr |= vmid;
991 ++ kvm->arch.vttbr = pgd_phys | vmid;
992 +
993 + spin_unlock(&kvm_vmid_lock);
994 + }
995 +@@ -676,10 +681,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
996 + return ret;
997 +
998 + /*
999 ++ * Ensure a rebooted VM will fault in RAM pages and detect if the
1000 ++ * guest MMU is turned off and flush the caches as needed.
1001 ++ */
1002 ++ if (vcpu->arch.has_run_once)
1003 ++ stage2_unmap_vm(vcpu->kvm);
1004 ++
1005 ++ vcpu_reset_hcr(vcpu);
1006 ++
1007 ++ /*
1008 + * Handle the "start in power-off" case by marking the VCPU as paused.
1009 + */
1010 +- if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
1011 ++ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
1012 + vcpu->arch.pause = true;
1013 ++ else
1014 ++ vcpu->arch.pause = false;
1015 +
1016 + return 0;
1017 + }
1018 +@@ -825,7 +841,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
1019 + switch (action) {
1020 + case CPU_STARTING:
1021 + case CPU_STARTING_FROZEN:
1022 +- cpu_init_hyp_mode(NULL);
1023 ++ if (__hyp_get_vectors() == hyp_default_vectors)
1024 ++ cpu_init_hyp_mode(NULL);
1025 + break;
1026 + }
1027 +
1028 +diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
1029 +index b23a59c..2786eae 100644
1030 +--- a/arch/arm/kvm/guest.c
1031 ++++ b/arch/arm/kvm/guest.c
1032 +@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
1033 +
1034 + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1035 + {
1036 +- vcpu->arch.hcr = HCR_GUEST_MASK;
1037 + return 0;
1038 + }
1039 +
1040 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
1041 +index 70ed2c1..524b4b5 100644
1042 +--- a/arch/arm/kvm/mmu.c
1043 ++++ b/arch/arm/kvm/mmu.c
1044 +@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
1045 + pgd = pgdp + pgd_index(addr);
1046 + do {
1047 + next = kvm_pgd_addr_end(addr, end);
1048 +- unmap_puds(kvm, pgd, addr, next);
1049 ++ if (!pgd_none(*pgd))
1050 ++ unmap_puds(kvm, pgd, addr, next);
1051 + } while (pgd++, addr = next, addr != end);
1052 + }
1053 +
1054 +@@ -555,6 +556,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
1055 + unmap_range(kvm, kvm->arch.pgd, start, size);
1056 + }
1057 +
1058 ++static void stage2_unmap_memslot(struct kvm *kvm,
1059 ++ struct kvm_memory_slot *memslot)
1060 ++{
1061 ++ hva_t hva = memslot->userspace_addr;
1062 ++ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
1063 ++ phys_addr_t size = PAGE_SIZE * memslot->npages;
1064 ++ hva_t reg_end = hva + size;
1065 ++
1066 ++ /*
1067 ++ * A memory region could potentially cover multiple VMAs, and any holes
1068 ++ * between them, so iterate over all of them to find out if we should
1069 ++ * unmap any of them.
1070 ++ *
1071 ++ * +--------------------------------------------+
1072 ++ * +---------------+----------------+ +----------------+
1073 ++ * | : VMA 1 | VMA 2 | | VMA 3 : |
1074 ++ * +---------------+----------------+ +----------------+
1075 ++ * | memory region |
1076 ++ * +--------------------------------------------+
1077 ++ */
1078 ++ do {
1079 ++ struct vm_area_struct *vma = find_vma(current->mm, hva);
1080 ++ hva_t vm_start, vm_end;
1081 ++
1082 ++ if (!vma || vma->vm_start >= reg_end)
1083 ++ break;
1084 ++
1085 ++ /*
1086 ++ * Take the intersection of this VMA with the memory region
1087 ++ */
1088 ++ vm_start = max(hva, vma->vm_start);
1089 ++ vm_end = min(reg_end, vma->vm_end);
1090 ++
1091 ++ if (!(vma->vm_flags & VM_PFNMAP)) {
1092 ++ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
1093 ++ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
1094 ++ }
1095 ++ hva = vm_end;
1096 ++ } while (hva < reg_end);
1097 ++}
1098 ++
1099 ++/**
1100 ++ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
1101 ++ * @kvm: The struct kvm pointer
1102 ++ *
1103 ++ * Go through the memregions and unmap any regular RAM
1104 ++ * backing memory already mapped to the VM.
1105 ++ */
1106 ++void stage2_unmap_vm(struct kvm *kvm)
1107 ++{
1108 ++ struct kvm_memslots *slots;
1109 ++ struct kvm_memory_slot *memslot;
1110 ++ int idx;
1111 ++
1112 ++ idx = srcu_read_lock(&kvm->srcu);
1113 ++ spin_lock(&kvm->mmu_lock);
1114 ++
1115 ++ slots = kvm_memslots(kvm);
1116 ++ kvm_for_each_memslot(memslot, slots)
1117 ++ stage2_unmap_memslot(kvm, memslot);
1118 ++
1119 ++ spin_unlock(&kvm->mmu_lock);
1120 ++ srcu_read_unlock(&kvm->srcu, idx);
1121 ++}
1122 ++
1123 + /**
1124 + * kvm_free_stage2_pgd - free all stage-2 tables
1125 + * @kvm: The KVM struct pointer for the VM.
1126 +@@ -746,6 +812,19 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
1127 + return false;
1128 + }
1129 +
1130 ++static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1131 ++{
1132 ++ if (kvm_vcpu_trap_is_iabt(vcpu))
1133 ++ return false;
1134 ++
1135 ++ return kvm_vcpu_dabt_iswrite(vcpu);
1136 ++}
1137 ++
1138 ++static bool kvm_is_device_pfn(unsigned long pfn)
1139 ++{
1140 ++ return !pfn_valid(pfn);
1141 ++}
1142 ++
1143 + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1144 + struct kvm_memory_slot *memslot,
1145 + unsigned long fault_status)
1146 +@@ -761,7 +840,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1147 + pfn_t pfn;
1148 + pgprot_t mem_type = PAGE_S2;
1149 +
1150 +- write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
1151 ++ write_fault = kvm_is_write_fault(vcpu);
1152 + if (fault_status == FSC_PERM && !write_fault) {
1153 + kvm_err("Unexpected L2 read permission error\n");
1154 + return -EFAULT;
1155 +@@ -770,6 +849,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1156 + /* Let's check if we will get back a huge page backed by hugetlbfs */
1157 + down_read(&current->mm->mmap_sem);
1158 + vma = find_vma_intersection(current->mm, hva, hva + 1);
1159 ++ if (unlikely(!vma)) {
1160 ++ kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1161 ++ up_read(&current->mm->mmap_sem);
1162 ++ return -EFAULT;
1163 ++ }
1164 ++
1165 + if (is_vm_hugetlb_page(vma)) {
1166 + hugetlb = true;
1167 + gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1168 +@@ -810,7 +895,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1169 + if (is_error_pfn(pfn))
1170 + return -EFAULT;
1171 +
1172 +- if (kvm_is_mmio_pfn(pfn))
1173 ++ if (kvm_is_device_pfn(pfn))
1174 + mem_type = PAGE_S2_DEVICE;
1175 +
1176 + spin_lock(&kvm->mmu_lock);
1177 +@@ -836,7 +921,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1178 + }
1179 + coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
1180 + ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
1181 +- mem_type == PAGE_S2_DEVICE);
1182 ++ pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
1183 + }
1184 +
1185 +
1186 +@@ -912,6 +997,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1187 +
1188 + memslot = gfn_to_memslot(vcpu->kvm, gfn);
1189 +
1190 ++ /* Userspace should not be able to register out-of-bounds IPAs */
1191 ++ VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1192 ++
1193 + ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
1194 + if (ret == 0)
1195 + ret = 1;
1196 +@@ -1136,6 +1224,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1197 + struct kvm_userspace_memory_region *mem,
1198 + enum kvm_mr_change change)
1199 + {
1200 ++ /*
1201 ++ * Prevent userspace from creating a memory region outside of the IPA
1202 ++ * space addressable by the KVM guest IPA space.
1203 ++ */
1204 ++ if (memslot->base_gfn + memslot->npages >=
1205 ++ (KVM_PHYS_SIZE >> PAGE_SHIFT))
1206 ++ return -EFAULT;
1207 ++
1208 + return 0;
1209 + }
1210 +
1211 +diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
1212 +index 49fa9ab..7a7a09a5 100644
1213 +--- a/arch/arm/mach-dove/board-dt.c
1214 ++++ b/arch/arm/mach-dove/board-dt.c
1215 +@@ -26,7 +26,7 @@ static void __init dove_dt_init(void)
1216 + #ifdef CONFIG_CACHE_TAUROS2
1217 + tauros2_init(0);
1218 + #endif
1219 +- BUG_ON(mvebu_mbus_dt_init());
1220 ++ BUG_ON(mvebu_mbus_dt_init(false));
1221 + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
1222 + }
1223 +
1224 +diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
1225 +index 01a5765..b509556 100644
1226 +--- a/arch/arm/mach-imx/clk-imx6q.c
1227 ++++ b/arch/arm/mach-imx/clk-imx6q.c
1228 +@@ -406,7 +406,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
1229 + clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
1230 + clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
1231 + clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
1232 +- clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
1233 ++ clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
1234 + clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
1235 + clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
1236 + clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
1237 +diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
1238 +index 7818815..79e629d 100644
1239 +--- a/arch/arm/mach-kirkwood/board-dt.c
1240 ++++ b/arch/arm/mach-kirkwood/board-dt.c
1241 +@@ -116,7 +116,7 @@ static void __init kirkwood_dt_init(void)
1242 + */
1243 + writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
1244 +
1245 +- BUG_ON(mvebu_mbus_dt_init());
1246 ++ BUG_ON(mvebu_mbus_dt_init(false));
1247 +
1248 + kirkwood_l2_init();
1249 +
1250 +diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
1251 +index f6c9d1d..79c3766a 100644
1252 +--- a/arch/arm/mach-mvebu/armada-370-xp.c
1253 ++++ b/arch/arm/mach-mvebu/armada-370-xp.c
1254 +@@ -41,7 +41,7 @@ static void __init armada_370_xp_timer_and_clk_init(void)
1255 + of_clk_init(NULL);
1256 + clocksource_of_init();
1257 + coherency_init();
1258 +- BUG_ON(mvebu_mbus_dt_init());
1259 ++ BUG_ON(mvebu_mbus_dt_init(coherency_available()));
1260 + #ifdef CONFIG_CACHE_L2X0
1261 + l2x0_of_init(0, ~0UL);
1262 + #endif
1263 +diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
1264 +index c295c10..49bad4d 100644
1265 +--- a/arch/arm/mach-mvebu/coherency.c
1266 ++++ b/arch/arm/mach-mvebu/coherency.c
1267 +@@ -121,6 +121,20 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
1268 + .notifier_call = mvebu_hwcc_platform_notifier,
1269 + };
1270 +
1271 ++/*
1272 ++ * Keep track of whether we have IO hardware coherency enabled or not.
1273 ++ * On Armada 370's we will not be using it for example. We need to make
1274 ++ * that available [through coherency_available()] so the mbus controller
1275 ++ * doesn't enable the IO coherency bit in the attribute bits of the
1276 ++ * chip selects.
1277 ++ */
1278 ++static int coherency_enabled;
1279 ++
1280 ++int coherency_available(void)
1281 ++{
1282 ++ return coherency_enabled;
1283 ++}
1284 ++
1285 + int __init coherency_init(void)
1286 + {
1287 + struct device_node *np;
1288 +@@ -164,6 +178,7 @@ int __init coherency_init(void)
1289 + coherency_base = of_iomap(np, 0);
1290 + coherency_cpu_base = of_iomap(np, 1);
1291 + set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
1292 ++ coherency_enabled = 1;
1293 + of_node_put(np);
1294 + }
1295 +
1296 +diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
1297 +index 760226c..63e18c6 100644
1298 +--- a/arch/arm/mach-mvebu/coherency.h
1299 ++++ b/arch/arm/mach-mvebu/coherency.h
1300 +@@ -17,6 +17,7 @@
1301 + extern unsigned long coherency_phys_base;
1302 +
1303 + int set_cpu_coherent(unsigned int cpu_id, int smp_group_id);
1304 ++int coherency_available(void);
1305 + int coherency_init(void);
1306 +
1307 + #endif /* __MACH_370_XP_COHERENCY_H */
1308 +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
1309 +index 00fbaa7..ea68925 100644
1310 +--- a/arch/arm64/include/asm/kvm_arm.h
1311 ++++ b/arch/arm64/include/asm/kvm_arm.h
1312 +@@ -18,6 +18,7 @@
1313 + #ifndef __ARM64_KVM_ARM_H__
1314 + #define __ARM64_KVM_ARM_H__
1315 +
1316 ++#include <asm/memory.h>
1317 + #include <asm/types.h>
1318 +
1319 + /* Hyp Configuration Register (HCR) bits */
1320 +@@ -122,6 +123,17 @@
1321 + #define VTCR_EL2_T0SZ_MASK 0x3f
1322 + #define VTCR_EL2_T0SZ_40B 24
1323 +
1324 ++/*
1325 ++ * We configure the Stage-2 page tables to always restrict the IPA space to be
1326 ++ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
1327 ++ * not known to exist and will break with this configuration.
1328 ++ *
1329 ++ * Note that when using 4K pages, we concatenate two first level page tables
1330 ++ * together.
1331 ++ *
1332 ++ * The magic numbers used for VTTBR_X in this patch can be found in Tables
1333 ++ * D4-23 and D4-25 in ARM DDI 0487A.b.
1334 ++ */
1335 + #ifdef CONFIG_ARM64_64K_PAGES
1336 + /*
1337 + * Stage2 translation configuration:
1338 +@@ -151,9 +163,9 @@
1339 + #endif
1340 +
1341 + #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
1342 +-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
1343 +-#define VTTBR_VMID_SHIFT (48LLU)
1344 +-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
1345 ++#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
1346 ++#define VTTBR_VMID_SHIFT (UL(48))
1347 ++#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
1348 +
1349 + /* Hyp System Trap Register */
1350 + #define HSTR_EL2_TTEE (1 << 16)
1351 +@@ -176,13 +188,13 @@
1352 +
1353 + /* Exception Syndrome Register (ESR) bits */
1354 + #define ESR_EL2_EC_SHIFT (26)
1355 +-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
1356 +-#define ESR_EL2_IL (1U << 25)
1357 ++#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
1358 ++#define ESR_EL2_IL (UL(1) << 25)
1359 + #define ESR_EL2_ISS (ESR_EL2_IL - 1)
1360 + #define ESR_EL2_ISV_SHIFT (24)
1361 +-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
1362 ++#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
1363 + #define ESR_EL2_SAS_SHIFT (22)
1364 +-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
1365 ++#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
1366 + #define ESR_EL2_SSE (1 << 21)
1367 + #define ESR_EL2_SRT_SHIFT (16)
1368 + #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
1369 +@@ -196,16 +208,16 @@
1370 + #define ESR_EL2_FSC_TYPE (0x3c)
1371 +
1372 + #define ESR_EL2_CV_SHIFT (24)
1373 +-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
1374 ++#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
1375 + #define ESR_EL2_COND_SHIFT (20)
1376 +-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
1377 ++#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
1378 +
1379 +
1380 + #define FSC_FAULT (0x04)
1381 + #define FSC_PERM (0x0c)
1382 +
1383 + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
1384 +-#define HPFAR_MASK (~0xFUL)
1385 ++#define HPFAR_MASK (~UL(0xf))
1386 +
1387 + #define ESR_EL2_EC_UNKNOWN (0x00)
1388 + #define ESR_EL2_EC_WFI (0x01)
1389 +diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
1390 +index dd8ecfc3..681cb90 100644
1391 +--- a/arch/arm64/include/asm/kvm_emulate.h
1392 ++++ b/arch/arm64/include/asm/kvm_emulate.h
1393 +@@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
1394 + void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
1395 + void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
1396 +
1397 ++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
1398 ++{
1399 ++ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
1400 ++}
1401 ++
1402 + static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
1403 + {
1404 + return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
1405 +diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
1406 +index 8e138c7..0d51874 100644
1407 +--- a/arch/arm64/include/asm/kvm_mmu.h
1408 ++++ b/arch/arm64/include/asm/kvm_mmu.h
1409 +@@ -59,10 +59,9 @@
1410 + #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
1411 +
1412 + /*
1413 +- * Align KVM with the kernel's view of physical memory. Should be
1414 +- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
1415 ++ * We currently only support a 40bit IPA.
1416 + */
1417 +-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
1418 ++#define KVM_PHYS_SHIFT (40)
1419 + #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
1420 + #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
1421 +
1422 +@@ -75,6 +74,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
1423 + void free_boot_hyp_pgd(void);
1424 + void free_hyp_pgds(void);
1425 +
1426 ++void stage2_unmap_vm(struct kvm *kvm);
1427 + int kvm_alloc_stage2_pgd(struct kvm *kvm);
1428 + void kvm_free_stage2_pgd(struct kvm *kvm);
1429 + int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1430 +@@ -93,19 +93,6 @@ void kvm_clear_hyp_idmap(void);
1431 + #define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
1432 + #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
1433 +
1434 +-static inline bool kvm_is_write_fault(unsigned long esr)
1435 +-{
1436 +- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
1437 +-
1438 +- if (esr_ec == ESR_EL2_EC_IABT)
1439 +- return false;
1440 +-
1441 +- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
1442 +- return false;
1443 +-
1444 +- return true;
1445 +-}
1446 +-
1447 + static inline void kvm_clean_pgd(pgd_t *pgd) {}
1448 + static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
1449 + static inline void kvm_clean_pte(pte_t *pte) {}
1450 +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
1451 +index 0874557..a8d81fa 100644
1452 +--- a/arch/arm64/kvm/guest.c
1453 ++++ b/arch/arm64/kvm/guest.c
1454 +@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
1455 +
1456 + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1457 + {
1458 +- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
1459 + return 0;
1460 + }
1461 +
1462 +diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
1463 +index 3974881..b76159a 100644
1464 +--- a/arch/arm64/mm/dma-mapping.c
1465 ++++ b/arch/arm64/mm/dma-mapping.c
1466 +@@ -54,8 +54,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1467 +
1468 + *dma_handle = phys_to_dma(dev, page_to_phys(page));
1469 + addr = page_address(page);
1470 +- if (flags & __GFP_ZERO)
1471 +- memset(addr, 0, size);
1472 ++ memset(addr, 0, size);
1473 + return addr;
1474 + } else {
1475 + return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
1476 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
1477 +index 2f645c9..5dab54a 100644
1478 +--- a/arch/x86/Kconfig
1479 ++++ b/arch/x86/Kconfig
1480 +@@ -160,7 +160,7 @@ config SBUS
1481 +
1482 + config NEED_DMA_MAP_STATE
1483 + def_bool y
1484 +- depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
1485 ++ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
1486 +
1487 + config NEED_SG_DMA_LENGTH
1488 + def_bool y
1489 +diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
1490 +index 18f7391..43a07bf 100644
1491 +--- a/arch/x86/kernel/cpu/microcode/intel_early.c
1492 ++++ b/arch/x86/kernel/cpu/microcode/intel_early.c
1493 +@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
1494 + unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
1495 + int i;
1496 +
1497 +- while (leftover) {
1498 ++ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
1499 + mc_header = (struct microcode_header_intel *)ucode_ptr;
1500 +
1501 + mc_size = get_totalsize(mc_header);
1502 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
1503 +index a1f5b18..490fee1 100644
1504 +--- a/arch/x86/kernel/kprobes/core.c
1505 ++++ b/arch/x86/kernel/kprobes/core.c
1506 +@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
1507 + {
1508 + struct insn insn;
1509 + kprobe_opcode_t buf[MAX_INSN_SIZE];
1510 ++ int length;
1511 +
1512 + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
1513 + insn_get_length(&insn);
1514 ++ length = insn.length;
1515 ++
1516 + /* Another subsystem puts a breakpoint, failed to recover */
1517 + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
1518 + return 0;
1519 +- memcpy(dest, insn.kaddr, insn.length);
1520 ++ memcpy(dest, insn.kaddr, length);
1521 +
1522 + #ifdef CONFIG_X86_64
1523 + if (insn_rip_relative(&insn)) {
1524 +@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
1525 + *(s32 *) disp = (s32) newdisp;
1526 + }
1527 + #endif
1528 +- return insn.length;
1529 ++ return length;
1530 + }
1531 +
1532 + static int __kprobes arch_copy_kprobe(struct kprobe *p)
1533 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1534 +index 9643eda6..0746334 100644
1535 +--- a/arch/x86/kvm/svm.c
1536 ++++ b/arch/x86/kvm/svm.c
1537 +@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
1538 + {
1539 + struct vcpu_svm *svm = to_svm(vcpu);
1540 +
1541 +- if (svm->vmcb->control.next_rip != 0)
1542 ++ if (svm->vmcb->control.next_rip != 0) {
1543 ++ WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
1544 + svm->next_rip = svm->vmcb->control.next_rip;
1545 ++ }
1546 +
1547 + if (!svm->next_rip) {
1548 + if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
1549 +@@ -4246,7 +4248,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
1550 + break;
1551 + }
1552 +
1553 +- vmcb->control.next_rip = info->next_rip;
1554 ++ /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
1555 ++ if (static_cpu_has(X86_FEATURE_NRIPS))
1556 ++ vmcb->control.next_rip = info->next_rip;
1557 + vmcb->control.exit_code = icpt_info.exit_code;
1558 + vmexit = nested_svm_exit_handled(svm);
1559 +
1560 +diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
1561 +index e990dee..1aa0130 100644
1562 +--- a/drivers/bus/mvebu-mbus.c
1563 ++++ b/drivers/bus/mvebu-mbus.c
1564 +@@ -701,7 +701,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
1565 + phys_addr_t sdramwins_phys_base,
1566 + size_t sdramwins_size)
1567 + {
1568 +- struct device_node *np;
1569 + int win;
1570 +
1571 + mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
1572 +@@ -714,12 +713,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
1573 + return -ENOMEM;
1574 + }
1575 +
1576 +- np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
1577 +- if (np) {
1578 +- mbus->hw_io_coherency = 1;
1579 +- of_node_put(np);
1580 +- }
1581 +-
1582 + for (win = 0; win < mbus->soc->num_wins; win++)
1583 + mvebu_mbus_disable_window(mbus, win);
1584 +
1585 +@@ -889,7 +882,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
1586 + }
1587 + }
1588 +
1589 +-int __init mvebu_mbus_dt_init(void)
1590 ++int __init mvebu_mbus_dt_init(bool is_coherent)
1591 + {
1592 + struct resource mbuswins_res, sdramwins_res;
1593 + struct device_node *np, *controller;
1594 +@@ -928,6 +921,8 @@ int __init mvebu_mbus_dt_init(void)
1595 + return -EINVAL;
1596 + }
1597 +
1598 ++ mbus_state.hw_io_coherency = is_coherent;
1599 ++
1600 + /* Get optional pcie-{mem,io}-aperture properties */
1601 + mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
1602 + &mbus_state.pcie_io_aperture);
1603 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1604 +index c611bcc..3e623ab 100644
1605 +--- a/drivers/edac/sb_edac.c
1606 ++++ b/drivers/edac/sb_edac.c
1607 +@@ -765,7 +765,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1608 + u32 reg;
1609 + u64 limit, prv = 0;
1610 + u64 tmp_mb;
1611 +- u32 mb, kb;
1612 ++ u32 gb, mb;
1613 + u32 rir_way;
1614 +
1615 + /*
1616 +@@ -775,15 +775,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1617 + pvt->tolm = pvt->info.get_tolm(pvt);
1618 + tmp_mb = (1 + pvt->tolm) >> 20;
1619 +
1620 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1621 +- edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
1622 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1623 ++ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1624 ++ gb, (mb*1000)/1024, (u64)pvt->tolm);
1625 +
1626 + /* Address range is already 45:25 */
1627 + pvt->tohm = pvt->info.get_tohm(pvt);
1628 + tmp_mb = (1 + pvt->tohm) >> 20;
1629 +
1630 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1631 +- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
1632 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1633 ++ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1634 ++ gb, (mb*1000)/1024, (u64)pvt->tohm);
1635 +
1636 + /*
1637 + * Step 2) Get SAD range and SAD Interleave list
1638 +@@ -805,11 +807,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1639 + break;
1640 +
1641 + tmp_mb = (limit + 1) >> 20;
1642 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1643 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1644 + edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1645 + n_sads,
1646 + get_dram_attr(reg),
1647 +- mb, kb,
1648 ++ gb, (mb*1000)/1024,
1649 + ((u64)tmp_mb) << 20L,
1650 + INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
1651 + reg);
1652 +@@ -840,9 +842,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1653 + break;
1654 + tmp_mb = (limit + 1) >> 20;
1655 +
1656 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1657 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1658 + edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1659 +- n_tads, mb, kb,
1660 ++ n_tads, gb, (mb*1000)/1024,
1661 + ((u64)tmp_mb) << 20L,
1662 + (u32)TAD_SOCK(reg),
1663 + (u32)TAD_CH(reg),
1664 +@@ -865,10 +867,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1665 + tad_ch_nilv_offset[j],
1666 + &reg);
1667 + tmp_mb = TAD_OFFSET(reg) >> 20;
1668 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1669 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1670 + edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1671 + i, j,
1672 +- mb, kb,
1673 ++ gb, (mb*1000)/1024,
1674 + ((u64)tmp_mb) << 20L,
1675 + reg);
1676 + }
1677 +@@ -890,10 +892,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1678 +
1679 + tmp_mb = RIR_LIMIT(reg) >> 20;
1680 + rir_way = 1 << RIR_WAY(reg);
1681 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1682 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1683 + edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1684 + i, j,
1685 +- mb, kb,
1686 ++ gb, (mb*1000)/1024,
1687 + ((u64)tmp_mb) << 20L,
1688 + rir_way,
1689 + reg);
1690 +@@ -904,10 +906,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1691 + &reg);
1692 + tmp_mb = RIR_OFFSET(reg) << 6;
1693 +
1694 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
1695 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
1696 + edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1697 + i, j, k,
1698 +- mb, kb,
1699 ++ gb, (mb*1000)/1024,
1700 + ((u64)tmp_mb) << 20L,
1701 + (u32)RIR_RNK_TGT(reg),
1702 + reg);
1703 +@@ -945,7 +947,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1704 + u8 ch_way, sck_way, pkg, sad_ha = 0;
1705 + u32 tad_offset;
1706 + u32 rir_way;
1707 +- u32 mb, kb;
1708 ++ u32 mb, gb;
1709 + u64 ch_addr, offset, limit = 0, prv = 0;
1710 +
1711 +
1712 +@@ -1183,10 +1185,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1713 + continue;
1714 +
1715 + limit = RIR_LIMIT(reg);
1716 +- mb = div_u64_rem(limit >> 20, 1000, &kb);
1717 ++ gb = div_u64_rem(limit >> 20, 1024, &mb);
1718 + edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
1719 + n_rir,
1720 +- mb, kb,
1721 ++ gb, (mb*1000)/1024,
1722 + limit,
1723 + 1 << RIR_WAY(reg));
1724 + if (ch_addr <= limit)
1725 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1726 +index 019a04a..a467261 100644
1727 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1728 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
1729 +@@ -810,8 +810,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
1730 + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
1731 + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
1732 + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1733 +- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
1734 +- MLX4_WQE_CTRL_TCP_UDP_CSUM);
1735 ++ if (!skb->encapsulation)
1736 ++ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
1737 ++ MLX4_WQE_CTRL_TCP_UDP_CSUM);
1738 ++ else
1739 ++ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
1740 + ring->tx_csum++;
1741 + }
1742 +
1743 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
1744 +index 528bff5..85d370e 100644
1745 +--- a/drivers/scsi/hpsa.c
1746 ++++ b/drivers/scsi/hpsa.c
1747 +@@ -3984,10 +3984,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
1748 +
1749 + /* Save the PCI command register */
1750 + pci_read_config_word(pdev, 4, &command_register);
1751 +- /* Turn the board off. This is so that later pci_restore_state()
1752 +- * won't turn the board on before the rest of config space is ready.
1753 +- */
1754 +- pci_disable_device(pdev);
1755 + pci_save_state(pdev);
1756 +
1757 + /* find the first memory BAR, so we can find the cfg table */
1758 +@@ -4035,11 +4031,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
1759 + goto unmap_cfgtable;
1760 +
1761 + pci_restore_state(pdev);
1762 +- rc = pci_enable_device(pdev);
1763 +- if (rc) {
1764 +- dev_warn(&pdev->dev, "failed to enable device.\n");
1765 +- goto unmap_cfgtable;
1766 +- }
1767 + pci_write_config_word(pdev, 4, command_register);
1768 +
1769 + /* Some devices (notably the HP Smart Array 5i Controller)
1770 +@@ -4525,6 +4516,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
1771 + if (!reset_devices)
1772 + return 0;
1773 +
1774 ++ /* kdump kernel is loading, we don't know in which state is
1775 ++ * the pci interface. The dev->enable_cnt is equal zero
1776 ++ * so we call enable+disable, wait a while and switch it on.
1777 ++ */
1778 ++ rc = pci_enable_device(pdev);
1779 ++ if (rc) {
1780 ++ dev_warn(&pdev->dev, "Failed to enable PCI device\n");
1781 ++ return -ENODEV;
1782 ++ }
1783 ++ pci_disable_device(pdev);
1784 ++ msleep(260); /* a randomly chosen number */
1785 ++ rc = pci_enable_device(pdev);
1786 ++ if (rc) {
1787 ++ dev_warn(&pdev->dev, "failed to enable device.\n");
1788 ++ return -ENODEV;
1789 ++ }
1790 ++ pci_set_master(pdev);
1791 + /* Reset the controller with a PCI power-cycle or via doorbell */
1792 + rc = hpsa_kdump_hard_reset_controller(pdev);
1793 +
1794 +@@ -4533,10 +4541,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
1795 + * "performant mode". Or, it might be 640x, which can't reset
1796 + * due to concerns about shared bbwc between 6402/6404 pair.
1797 + */
1798 +- if (rc == -ENOTSUPP)
1799 +- return rc; /* just try to do the kdump anyhow. */
1800 +- if (rc)
1801 +- return -ENODEV;
1802 ++ if (rc) {
1803 ++ if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
1804 ++ rc = -ENODEV;
1805 ++ goto out_disable;
1806 ++ }
1807 +
1808 + /* Now try to get the controller to respond to a no-op */
1809 + dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
1810 +@@ -4547,7 +4556,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
1811 + dev_warn(&pdev->dev, "no-op failed%s\n",
1812 + (i < 11 ? "; re-trying" : ""));
1813 + }
1814 +- return 0;
1815 ++
1816 ++out_disable:
1817 ++
1818 ++ pci_disable_device(pdev);
1819 ++ return rc;
1820 + }
1821 +
1822 + static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
1823 +@@ -4690,6 +4703,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
1824 + iounmap(h->transtable);
1825 + if (h->cfgtable)
1826 + iounmap(h->cfgtable);
1827 ++ pci_disable_device(h->pdev);
1828 + pci_release_regions(h->pdev);
1829 + kfree(h);
1830 + }
1831 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
1832 +index 93de3ba..f8ffee4 100644
1833 +--- a/fs/btrfs/ctree.c
1834 ++++ b/fs/btrfs/ctree.c
1835 +@@ -2963,7 +2963,7 @@ done:
1836 + */
1837 + if (!p->leave_spinning)
1838 + btrfs_set_path_blocking(p);
1839 +- if (ret < 0)
1840 ++ if (ret < 0 && !p->skip_release_on_error)
1841 + btrfs_release_path(p);
1842 + return ret;
1843 + }
1844 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1845 +index d3511cc..3b39eb4 100644
1846 +--- a/fs/btrfs/ctree.h
1847 ++++ b/fs/btrfs/ctree.h
1848 +@@ -608,6 +608,7 @@ struct btrfs_path {
1849 + unsigned int skip_locking:1;
1850 + unsigned int leave_spinning:1;
1851 + unsigned int search_commit_root:1;
1852 ++ unsigned int skip_release_on_error:1;
1853 + };
1854 +
1855 + /*
1856 +@@ -3609,6 +3610,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1857 + int verify_dir_item(struct btrfs_root *root,
1858 + struct extent_buffer *leaf,
1859 + struct btrfs_dir_item *dir_item);
1860 ++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1861 ++ struct btrfs_path *path,
1862 ++ const char *name,
1863 ++ int name_len);
1864 +
1865 + /* orphan.c */
1866 + int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
1867 +diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
1868 +index a0691df..9521a93 100644
1869 +--- a/fs/btrfs/dir-item.c
1870 ++++ b/fs/btrfs/dir-item.c
1871 +@@ -21,10 +21,6 @@
1872 + #include "hash.h"
1873 + #include "transaction.h"
1874 +
1875 +-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1876 +- struct btrfs_path *path,
1877 +- const char *name, int name_len);
1878 +-
1879 + /*
1880 + * insert a name into a directory, doing overflow properly if there is a hash
1881 + * collision. data_size indicates how big the item inserted should be. On
1882 +@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1883 + * this walks through all the entries in a dir item and finds one
1884 + * for a specific name.
1885 + */
1886 +-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1887 +- struct btrfs_path *path,
1888 +- const char *name, int name_len)
1889 ++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1890 ++ struct btrfs_path *path,
1891 ++ const char *name, int name_len)
1892 + {
1893 + struct btrfs_dir_item *dir_item;
1894 + unsigned long name_ptr;
1895 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
1896 +index 488e987..618e86c 100644
1897 +--- a/fs/btrfs/xattr.c
1898 ++++ b/fs/btrfs/xattr.c
1899 +@@ -29,6 +29,7 @@
1900 + #include "xattr.h"
1901 + #include "disk-io.h"
1902 + #include "props.h"
1903 ++#include "locking.h"
1904 +
1905 +
1906 + ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
1907 +@@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
1908 + struct inode *inode, const char *name,
1909 + const void *value, size_t size, int flags)
1910 + {
1911 +- struct btrfs_dir_item *di;
1912 ++ struct btrfs_dir_item *di = NULL;
1913 + struct btrfs_root *root = BTRFS_I(inode)->root;
1914 + struct btrfs_path *path;
1915 + size_t name_len = strlen(name);
1916 +@@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
1917 + path = btrfs_alloc_path();
1918 + if (!path)
1919 + return -ENOMEM;
1920 ++ path->skip_release_on_error = 1;
1921 ++
1922 ++ if (!value) {
1923 ++ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
1924 ++ name, name_len, -1);
1925 ++ if (!di && (flags & XATTR_REPLACE))
1926 ++ ret = -ENODATA;
1927 ++ else if (di)
1928 ++ ret = btrfs_delete_one_dir_name(trans, root, path, di);
1929 ++ goto out;
1930 ++ }
1931 +
1932 ++ /*
1933 ++ * For a replace we can't just do the insert blindly.
1934 ++ * Do a lookup first (read-only btrfs_search_slot), and return if xattr
1935 ++ * doesn't exist. If it exists, fall down below to the insert/replace
1936 ++ * path - we can't race with a concurrent xattr delete, because the VFS
1937 ++ * locks the inode's i_mutex before calling setxattr or removexattr.
1938 ++ */
1939 + if (flags & XATTR_REPLACE) {
1940 +- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
1941 +- name_len, -1);
1942 +- if (IS_ERR(di)) {
1943 +- ret = PTR_ERR(di);
1944 +- goto out;
1945 +- } else if (!di) {
1946 ++ ASSERT(mutex_is_locked(&inode->i_mutex));
1947 ++ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
1948 ++ name, name_len, 0);
1949 ++ if (!di) {
1950 + ret = -ENODATA;
1951 + goto out;
1952 + }
1953 +- ret = btrfs_delete_one_dir_name(trans, root, path, di);
1954 +- if (ret)
1955 +- goto out;
1956 + btrfs_release_path(path);
1957 ++ di = NULL;
1958 ++ }
1959 +
1960 ++ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
1961 ++ name, name_len, value, size);
1962 ++ if (ret == -EOVERFLOW) {
1963 + /*
1964 +- * remove the attribute
1965 ++ * We have an existing item in a leaf, split_leaf couldn't
1966 ++ * expand it. That item might have or not a dir_item that
1967 ++ * matches our target xattr, so lets check.
1968 + */
1969 +- if (!value)
1970 +- goto out;
1971 +- } else {
1972 +- di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
1973 +- name, name_len, 0);
1974 +- if (IS_ERR(di)) {
1975 +- ret = PTR_ERR(di);
1976 ++ ret = 0;
1977 ++ btrfs_assert_tree_locked(path->nodes[0]);
1978 ++ di = btrfs_match_dir_item_name(root, path, name, name_len);
1979 ++ if (!di && !(flags & XATTR_REPLACE)) {
1980 ++ ret = -ENOSPC;
1981 + goto out;
1982 + }
1983 +- if (!di && !value)
1984 +- goto out;
1985 +- btrfs_release_path(path);
1986 ++ } else if (ret == -EEXIST) {
1987 ++ ret = 0;
1988 ++ di = btrfs_match_dir_item_name(root, path, name, name_len);
1989 ++ ASSERT(di); /* logic error */
1990 ++ } else if (ret) {
1991 ++ goto out;
1992 + }
1993 +
1994 +-again:
1995 +- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
1996 +- name, name_len, value, size);
1997 +- /*
1998 +- * If we're setting an xattr to a new value but the new value is say
1999 +- * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
2000 +- * back from split_leaf. This is because it thinks we'll be extending
2001 +- * the existing item size, but we're asking for enough space to add the
2002 +- * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
2003 +- * the rest of the function figure it out.
2004 +- */
2005 +- if (ret == -EOVERFLOW)
2006 ++ if (di && (flags & XATTR_CREATE)) {
2007 + ret = -EEXIST;
2008 ++ goto out;
2009 ++ }
2010 +
2011 +- if (ret == -EEXIST) {
2012 +- if (flags & XATTR_CREATE)
2013 +- goto out;
2014 ++ if (di) {
2015 + /*
2016 +- * We can't use the path we already have since we won't have the
2017 +- * proper locking for a delete, so release the path and
2018 +- * re-lookup to delete the thing.
2019 ++ * We're doing a replace, and it must be atomic, that is, at
2020 ++ * any point in time we have either the old or the new xattr
2021 ++ * value in the tree. We don't want readers (getxattr and
2022 ++ * listxattrs) to miss a value, this is specially important
2023 ++ * for ACLs.
2024 + */
2025 +- btrfs_release_path(path);
2026 +- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
2027 +- name, name_len, -1);
2028 +- if (IS_ERR(di)) {
2029 +- ret = PTR_ERR(di);
2030 +- goto out;
2031 +- } else if (!di) {
2032 +- /* Shouldn't happen but just in case... */
2033 +- btrfs_release_path(path);
2034 +- goto again;
2035 ++ const int slot = path->slots[0];
2036 ++ struct extent_buffer *leaf = path->nodes[0];
2037 ++ const u16 old_data_len = btrfs_dir_data_len(leaf, di);
2038 ++ const u32 item_size = btrfs_item_size_nr(leaf, slot);
2039 ++ const u32 data_size = sizeof(*di) + name_len + size;
2040 ++ struct btrfs_item *item;
2041 ++ unsigned long data_ptr;
2042 ++ char *ptr;
2043 ++
2044 ++ if (size > old_data_len) {
2045 ++ if (btrfs_leaf_free_space(root, leaf) <
2046 ++ (size - old_data_len)) {
2047 ++ ret = -ENOSPC;
2048 ++ goto out;
2049 ++ }
2050 + }
2051 +
2052 +- ret = btrfs_delete_one_dir_name(trans, root, path, di);
2053 +- if (ret)
2054 +- goto out;
2055 ++ if (old_data_len + name_len + sizeof(*di) == item_size) {
2056 ++ /* No other xattrs packed in the same leaf item. */
2057 ++ if (size > old_data_len)
2058 ++ btrfs_extend_item(root, path,
2059 ++ size - old_data_len);
2060 ++ else if (size < old_data_len)
2061 ++ btrfs_truncate_item(root, path, data_size, 1);
2062 ++ } else {
2063 ++ /* There are other xattrs packed in the same item. */
2064 ++ ret = btrfs_delete_one_dir_name(trans, root, path, di);
2065 ++ if (ret)
2066 ++ goto out;
2067 ++ btrfs_extend_item(root, path, data_size);
2068 ++ }
2069 +
2070 ++ item = btrfs_item_nr(slot);
2071 ++ ptr = btrfs_item_ptr(leaf, slot, char);
2072 ++ ptr += btrfs_item_size(leaf, item) - data_size;
2073 ++ di = (struct btrfs_dir_item *)ptr;
2074 ++ btrfs_set_dir_data_len(leaf, di, size);
2075 ++ data_ptr = ((unsigned long)(di + 1)) + name_len;
2076 ++ write_extent_buffer(leaf, value, data_ptr, size);
2077 ++ btrfs_mark_buffer_dirty(leaf);
2078 ++ } else {
2079 + /*
2080 +- * We have a value to set, so go back and try to insert it now.
2081 ++ * Insert, and we had space for the xattr, so path->slots[0] is
2082 ++ * where our xattr dir_item is and btrfs_insert_xattr_item()
2083 ++ * filled it.
2084 + */
2085 +- if (value) {
2086 +- btrfs_release_path(path);
2087 +- goto again;
2088 +- }
2089 + }
2090 + out:
2091 + btrfs_free_path(path);
2092 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
2093 +index 7fe30f6..35f54bc 100644
2094 +--- a/fs/ocfs2/file.c
2095 ++++ b/fs/ocfs2/file.c
2096 +@@ -2478,9 +2478,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2097 + struct address_space *mapping = out->f_mapping;
2098 + struct inode *inode = mapping->host;
2099 + struct splice_desc sd = {
2100 +- .total_len = len,
2101 + .flags = flags,
2102 +- .pos = *ppos,
2103 + .u.file = out,
2104 + };
2105 +
2106 +@@ -2490,6 +2488,12 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2107 + out->f_path.dentry->d_name.len,
2108 + out->f_path.dentry->d_name.name, len);
2109 +
2110 ++ ret = generic_write_checks(out, ppos, &len, 0);
2111 ++ if (ret)
2112 ++ return ret;
2113 ++ sd.total_len = len;
2114 ++ sd.pos = *ppos;
2115 ++
2116 + pipe_lock(pipe);
2117 +
2118 + splice_from_pipe_begin(&sd);
2119 +diff --git a/fs/splice.c b/fs/splice.c
2120 +index 12028fa..f345d53 100644
2121 +--- a/fs/splice.c
2122 ++++ b/fs/splice.c
2123 +@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
2124 + struct address_space *mapping = out->f_mapping;
2125 + struct inode *inode = mapping->host;
2126 + struct splice_desc sd = {
2127 +- .total_len = len,
2128 + .flags = flags,
2129 +- .pos = *ppos,
2130 + .u.file = out,
2131 + };
2132 + ssize_t ret;
2133 +
2134 ++ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
2135 ++ if (ret)
2136 ++ return ret;
2137 ++ sd.total_len = len;
2138 ++ sd.pos = *ppos;
2139 ++
2140 + pipe_lock(pipe);
2141 +
2142 + splice_from_pipe_begin(&sd);
2143 +diff --git a/include/linux/mbus.h b/include/linux/mbus.h
2144 +index 345b8c5..550c88f 100644
2145 +--- a/include/linux/mbus.h
2146 ++++ b/include/linux/mbus.h
2147 +@@ -73,6 +73,6 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size);
2148 + int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
2149 + size_t mbus_size, phys_addr_t sdram_phys_base,
2150 + size_t sdram_size);
2151 +-int mvebu_mbus_dt_init(void);
2152 ++int mvebu_mbus_dt_init(bool is_coherent);
2153 +
2154 + #endif /* __LINUX_MBUS_H */
2155 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2156 +index c68e5e0..99de240 100644
2157 +--- a/net/netfilter/nf_tables_api.c
2158 ++++ b/net/netfilter/nf_tables_api.c
2159 +@@ -855,7 +855,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
2160 +
2161 + if (nla[NFTA_CHAIN_POLICY]) {
2162 + if ((chain != NULL &&
2163 +- !(chain->flags & NFT_BASE_CHAIN)) ||
2164 ++ !(chain->flags & NFT_BASE_CHAIN)))
2165 ++ return -EOPNOTSUPP;
2166 ++
2167 ++ if (chain == NULL &&
2168 + nla[NFTA_CHAIN_HOOK] == NULL)
2169 + return -EOPNOTSUPP;
2170 +
2171 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
2172 +index 9e287cb..54330fb 100644
2173 +--- a/net/netfilter/nfnetlink_cthelper.c
2174 ++++ b/net/netfilter/nfnetlink_cthelper.c
2175 +@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
2176 + if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
2177 + return -EINVAL;
2178 +
2179 ++ /* Not all fields are initialized so first zero the tuple */
2180 ++ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
2181 ++
2182 + tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
2183 + tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
2184 +
2185 +@@ -86,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
2186 + static int
2187 + nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
2188 + {
2189 +- const struct nf_conn_help *help = nfct_help(ct);
2190 ++ struct nf_conn_help *help = nfct_help(ct);
2191 +
2192 + if (attr == NULL)
2193 + return -EINVAL;
2194 +@@ -94,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
2195 + if (help->helper->data_len == 0)
2196 + return -EINVAL;
2197 +
2198 +- memcpy(&help->data, nla_data(attr), help->helper->data_len);
2199 ++ memcpy(help->data, nla_data(attr), help->helper->data_len);
2200 + return 0;
2201 + }
2202 +
2203 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
2204 +index 7350723..9695895 100644
2205 +--- a/net/netfilter/nft_compat.c
2206 ++++ b/net/netfilter/nft_compat.c
2207 +@@ -82,6 +82,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
2208 + entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
2209 + break;
2210 + case AF_INET6:
2211 ++ if (proto)
2212 ++ entry->e6.ipv6.flags |= IP6T_F_PROTO;
2213 ++
2214 + entry->e6.ipv6.proto = proto;
2215 + entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
2216 + break;
2217 +@@ -313,6 +316,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
2218 + entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
2219 + break;
2220 + case AF_INET6:
2221 ++ if (proto)
2222 ++ entry->e6.ipv6.flags |= IP6T_F_PROTO;
2223 ++
2224 + entry->e6.ipv6.proto = proto;
2225 + entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
2226 + break;
2227 +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
2228 +index 1316e55..c324a52 100644
2229 +--- a/virt/kvm/arm/vgic.c
2230 ++++ b/virt/kvm/arm/vgic.c
2231 +@@ -674,7 +674,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
2232 + {
2233 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
2234 + int sgi;
2235 +- int min_sgi = (offset & ~0x3) * 4;
2236 ++ int min_sgi = (offset & ~0x3);
2237 + int max_sgi = min_sgi + 3;
2238 + int vcpu_id = vcpu->vcpu_id;
2239 + u32 reg = 0;
2240 +@@ -695,7 +695,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
2241 + {
2242 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
2243 + int sgi;
2244 +- int min_sgi = (offset & ~0x3) * 4;
2245 ++ int min_sgi = (offset & ~0x3);
2246 + int max_sgi = min_sgi + 3;
2247 + int vcpu_id = vcpu->vcpu_id;
2248 + u32 reg;
2249 +@@ -1387,7 +1387,8 @@ out:
2250 + int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
2251 + bool level)
2252 + {
2253 +- if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
2254 ++ if (likely(vgic_initialized(kvm)) &&
2255 ++ vgic_update_irq_state(kvm, cpuid, irq_num, level))
2256 + vgic_kick_vcpus(kvm);
2257 +
2258 + return 0;
2259 +@@ -1610,7 +1611,7 @@ out:
2260 +
2261 + int kvm_vgic_create(struct kvm *kvm)
2262 + {
2263 +- int i, vcpu_lock_idx = -1, ret = 0;
2264 ++ int i, vcpu_lock_idx = -1, ret;
2265 + struct kvm_vcpu *vcpu;
2266 +
2267 + mutex_lock(&kvm->lock);
2268 +@@ -1625,6 +1626,7 @@ int kvm_vgic_create(struct kvm *kvm)
2269 + * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
2270 + * that no other VCPUs are run while we create the vgic.
2271 + */
2272 ++ ret = -EBUSY;
2273 + kvm_for_each_vcpu(i, vcpu, kvm) {
2274 + if (!mutex_trylock(&vcpu->mutex))
2275 + goto out_unlock;
2276 +@@ -1632,11 +1634,10 @@ int kvm_vgic_create(struct kvm *kvm)
2277 + }
2278 +
2279 + kvm_for_each_vcpu(i, vcpu, kvm) {
2280 +- if (vcpu->arch.has_run_once) {
2281 +- ret = -EBUSY;
2282 ++ if (vcpu->arch.has_run_once)
2283 + goto out_unlock;
2284 +- }
2285 + }
2286 ++ ret = 0;
2287 +
2288 + spin_lock_init(&kvm->arch.vgic.lock);
2289 + kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
2290
2291 diff --git a/3.14.46/4420_grsecurity-3.1-3.14.46-201506300711.patch b/3.14.47/4420_grsecurity-3.1-3.14.47-201507050832.patch
2292 similarity index 99%
2293 rename from 3.14.46/4420_grsecurity-3.1-3.14.46-201506300711.patch
2294 rename to 3.14.47/4420_grsecurity-3.1-3.14.47-201507050832.patch
2295 index 008971f..f646996 100644
2296 --- a/3.14.46/4420_grsecurity-3.1-3.14.46-201506300711.patch
2297 +++ b/3.14.47/4420_grsecurity-3.1-3.14.47-201507050832.patch
2298 @@ -295,7 +295,7 @@ index 5d91ba1..ef1d374 100644
2299
2300 pcd. [PARIDE]
2301 diff --git a/Makefile b/Makefile
2302 -index def39fd..4636aea 100644
2303 +index f9041e6..46bcf1d 100644
2304 --- a/Makefile
2305 +++ b/Makefile
2306 @@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
2307 @@ -3307,7 +3307,7 @@ index 7bcee5c..e2f3249 100644
2308 __data_loc = .;
2309 #endif
2310 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
2311 -index df6e75e..1858aa0 100644
2312 +index 2e74a61..14d0a66 100644
2313 --- a/arch/arm/kvm/arm.c
2314 +++ b/arch/arm/kvm/arm.c
2315 @@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
2316 @@ -3319,7 +3319,7 @@ index df6e75e..1858aa0 100644
2317 static u8 kvm_next_vmid;
2318 static DEFINE_SPINLOCK(kvm_vmid_lock);
2319
2320 -@@ -371,7 +371,7 @@ void force_vm_exit(const cpumask_t *mask)
2321 +@@ -376,7 +376,7 @@ void force_vm_exit(const cpumask_t *mask)
2322 */
2323 static bool need_new_vmid_gen(struct kvm *kvm)
2324 {
2325 @@ -3328,7 +3328,7 @@ index df6e75e..1858aa0 100644
2326 }
2327
2328 /**
2329 -@@ -404,7 +404,7 @@ static void update_vttbr(struct kvm *kvm)
2330 +@@ -409,7 +409,7 @@ static void update_vttbr(struct kvm *kvm)
2331
2332 /* First user of a new VMID generation? */
2333 if (unlikely(kvm_next_vmid == 0)) {
2334 @@ -3337,7 +3337,7 @@ index df6e75e..1858aa0 100644
2335 kvm_next_vmid = 1;
2336
2337 /*
2338 -@@ -421,7 +421,7 @@ static void update_vttbr(struct kvm *kvm)
2339 +@@ -426,7 +426,7 @@ static void update_vttbr(struct kvm *kvm)
2340 kvm_call_hyp(__kvm_flush_vm_context);
2341 }
2342
2343 @@ -3346,7 +3346,7 @@ index df6e75e..1858aa0 100644
2344 kvm->arch.vmid = kvm_next_vmid;
2345 kvm_next_vmid++;
2346
2347 -@@ -996,7 +996,7 @@ static void check_kvm_target_cpu(void *ret)
2348 +@@ -1013,7 +1013,7 @@ static void check_kvm_target_cpu(void *ret)
2349 /**
2350 * Initialize Hyp-mode and memory mappings on all CPUs.
2351 */
2352 @@ -12396,7 +12396,7 @@ index ad8f795..2c7eec6 100644
2353 /*
2354 * Memory returned by kmalloc() may be used for DMA, so we must make
2355 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
2356 -index 2f645c9..7e2933c 100644
2357 +index 5dab54a..a20467d 100644
2358 --- a/arch/x86/Kconfig
2359 +++ b/arch/x86/Kconfig
2360 @@ -22,6 +22,7 @@ config X86_64
2361 @@ -21769,15 +21769,13 @@ index a276fa7..3ef18f0 100644
2362
2363 static void microcode_fini_cpu(int cpu)
2364 diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
2365 -index 18f7391..8c5225d 100644
2366 +index 43a07bf..8c5225d 100644
2367 --- a/arch/x86/kernel/cpu/microcode/intel_early.c
2368 +++ b/arch/x86/kernel/cpu/microcode/intel_early.c
2369 -@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
2370 - unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
2371 +@@ -322,6 +322,10 @@ get_matching_model_microcode(int cpu, unsigned long start,
2372 int i;
2373
2374 -- while (leftover) {
2375 -+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
2376 + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
2377 +
2378 + if (leftover < sizeof(mc_header))
2379 + break;
2380 @@ -25816,7 +25814,7 @@ index 7ec1d5f..5a7d130 100644
2381 }
2382
2383 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
2384 -index a1f5b18..9d9e077 100644
2385 +index 490fee1..d7eb482 100644
2386 --- a/arch/x86/kernel/kprobes/core.c
2387 +++ b/arch/x86/kernel/kprobes/core.c
2388 @@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
2389 @@ -25854,17 +25852,17 @@ index a1f5b18..9d9e077 100644
2390 }
2391
2392 /*
2393 -@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
2394 +@@ -335,7 +338,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
2395 /* Another subsystem puts a breakpoint, failed to recover */
2396 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
2397 return 0;
2398 + pax_open_kernel();
2399 - memcpy(dest, insn.kaddr, insn.length);
2400 + memcpy(dest, insn.kaddr, length);
2401 + pax_close_kernel();
2402
2403 #ifdef CONFIG_X86_64
2404 if (insn_rip_relative(&insn)) {
2405 -@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
2406 +@@ -362,7 +367,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
2407 return 0;
2408 }
2409 disp = (u8 *) dest + insn_offset_displacement(&insn);
2410 @@ -25873,8 +25871,8 @@ index a1f5b18..9d9e077 100644
2411 + pax_close_kernel();
2412 }
2413 #endif
2414 - return insn.length;
2415 -@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
2416 + return length;
2417 +@@ -501,7 +508,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
2418 * nor set current_kprobe, because it doesn't use single
2419 * stepping.
2420 */
2421 @@ -25883,7 +25881,7 @@ index a1f5b18..9d9e077 100644
2422 preempt_enable_no_resched();
2423 return;
2424 }
2425 -@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
2426 +@@ -518,9 +525,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
2427 regs->flags &= ~X86_EFLAGS_IF;
2428 /* single step inline if the instruction is an int3 */
2429 if (p->opcode == BREAKPOINT_INSTRUCTION)
2430 @@ -25895,7 +25893,7 @@ index a1f5b18..9d9e077 100644
2431 }
2432
2433 /*
2434 -@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
2435 +@@ -599,7 +606,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
2436 setup_singlestep(p, regs, kcb, 0);
2437 return 1;
2438 }
2439 @@ -25904,7 +25902,7 @@ index a1f5b18..9d9e077 100644
2440 /*
2441 * The breakpoint instruction was removed right
2442 * after we hit it. Another cpu has removed
2443 -@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
2444 +@@ -645,6 +652,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
2445 " movq %rax, 152(%rsp)\n"
2446 RESTORE_REGS_STRING
2447 " popfq\n"
2448 @@ -25914,7 +25912,7 @@ index a1f5b18..9d9e077 100644
2449 #else
2450 " pushf\n"
2451 SAVE_REGS_STRING
2452 -@@ -779,7 +789,7 @@ static void __kprobes
2453 +@@ -782,7 +792,7 @@ static void __kprobes
2454 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
2455 {
2456 unsigned long *tos = stack_addr(regs);
2457 @@ -25923,7 +25921,7 @@ index a1f5b18..9d9e077 100644
2458 unsigned long orig_ip = (unsigned long)p->addr;
2459 kprobe_opcode_t *insn = p->ainsn.insn;
2460
2461 -@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
2462 +@@ -964,7 +974,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
2463 struct die_args *args = data;
2464 int ret = NOTIFY_DONE;
2465
2466 @@ -28893,10 +28891,10 @@ index cba218a..1cc1bed 100644
2467 goto error;
2468 walker->ptep_user[walker->level - 1] = ptep_user;
2469 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
2470 -index 9643eda6..c9cb765 100644
2471 +index 0746334..f6c04e8 100644
2472 --- a/arch/x86/kvm/svm.c
2473 +++ b/arch/x86/kvm/svm.c
2474 -@@ -3508,7 +3508,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
2475 +@@ -3510,7 +3510,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
2476 int cpu = raw_smp_processor_id();
2477
2478 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2479 @@ -28908,7 +28906,7 @@ index 9643eda6..c9cb765 100644
2480 load_TR_desc();
2481 }
2482
2483 -@@ -3911,6 +3915,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2484 +@@ -3913,6 +3917,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2485 #endif
2486 #endif
2487
2488 @@ -51585,7 +51583,7 @@ index f28ea07..34b16d3 100644
2489
2490 /* These three are default values which can be overridden */
2491 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
2492 -index 528bff5..84963854 100644
2493 +index 85d370e..7374c8c 100644
2494 --- a/drivers/scsi/hpsa.c
2495 +++ b/drivers/scsi/hpsa.c
2496 @@ -571,7 +571,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
2497 @@ -51636,7 +51634,7 @@ index 528bff5..84963854 100644
2498 (h->interrupts_enabled == 0);
2499 }
2500
2501 -@@ -4442,7 +4442,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
2502 +@@ -4433,7 +4433,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
2503 if (prod_index < 0)
2504 return -ENODEV;
2505 h->product_name = products[prod_index].product_name;
2506 @@ -51645,7 +51643,7 @@ index 528bff5..84963854 100644
2507
2508 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
2509 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
2510 -@@ -4712,7 +4712,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
2511 +@@ -4726,7 +4726,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
2512 {
2513 unsigned long flags;
2514
2515 @@ -51654,7 +51652,7 @@ index 528bff5..84963854 100644
2516 spin_lock_irqsave(&h->lock, flags);
2517 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
2518 spin_unlock_irqrestore(&h->lock, flags);
2519 -@@ -4843,7 +4843,7 @@ reinit_after_soft_reset:
2520 +@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
2521 }
2522
2523 /* make sure the board interrupts are off */
2524 @@ -51663,7 +51661,7 @@ index 528bff5..84963854 100644
2525
2526 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
2527 goto clean2;
2528 -@@ -4877,7 +4877,7 @@ reinit_after_soft_reset:
2529 +@@ -4891,7 +4891,7 @@ reinit_after_soft_reset:
2530 * fake ones to scoop up any residual completions.
2531 */
2532 spin_lock_irqsave(&h->lock, flags);
2533 @@ -51672,7 +51670,7 @@ index 528bff5..84963854 100644
2534 spin_unlock_irqrestore(&h->lock, flags);
2535 free_irqs(h);
2536 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
2537 -@@ -4896,9 +4896,9 @@ reinit_after_soft_reset:
2538 +@@ -4910,9 +4910,9 @@ reinit_after_soft_reset:
2539 dev_info(&h->pdev->dev, "Board READY.\n");
2540 dev_info(&h->pdev->dev,
2541 "Waiting for stale completions to drain.\n");
2542 @@ -51684,7 +51682,7 @@ index 528bff5..84963854 100644
2543
2544 rc = controller_reset_failed(h->cfgtable);
2545 if (rc)
2546 -@@ -4919,7 +4919,7 @@ reinit_after_soft_reset:
2547 +@@ -4933,7 +4933,7 @@ reinit_after_soft_reset:
2548 }
2549
2550 /* Turn the interrupts on so we can service requests */
2551 @@ -51693,7 +51691,7 @@ index 528bff5..84963854 100644
2552
2553 hpsa_hba_inquiry(h);
2554 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
2555 -@@ -4988,7 +4988,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
2556 +@@ -5002,7 +5002,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
2557 * To write all data in the battery backed cache to disks
2558 */
2559 hpsa_flush_cache(h);
2560 @@ -51702,7 +51700,7 @@ index 528bff5..84963854 100644
2561 hpsa_free_irqs_and_disable_msix(h);
2562 }
2563
2564 -@@ -5162,7 +5162,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
2565 +@@ -5176,7 +5176,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
2566 return;
2567 }
2568 /* Change the access methods to the performant access methods */
2569 @@ -55240,6 +55238,19 @@ index 8cfc319..4868255 100644
2570
2571 return 0;
2572 }
2573 +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
2574 +index 3740a3f..466ec15 100644
2575 +--- a/drivers/usb/gadget/configfs.c
2576 ++++ b/drivers/usb/gadget/configfs.c
2577 +@@ -560,7 +560,7 @@ static struct config_group *function_make(
2578 + if (IS_ERR(fi))
2579 + return ERR_CAST(fi);
2580 +
2581 +- ret = config_item_set_name(&fi->group.cg_item, name);
2582 ++ ret = config_item_set_name(&fi->group.cg_item, "%s", name);
2583 + if (ret) {
2584 + usb_put_function_instance(fi);
2585 + return ERR_PTR(ret);
2586 diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
2587 index 2b4c82d..06a8ee6 100644
2588 --- a/drivers/usb/gadget/f_uac1.c
2589 @@ -66906,7 +66917,7 @@ index 1e86823..8e34695 100644
2590 else if (whole->bd_holder != NULL)
2591 return false; /* is a partition of a held device */
2592 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
2593 -index 93de3ba..0e1cf23 100644
2594 +index f8ffee4..b82e697 100644
2595 --- a/fs/btrfs/ctree.c
2596 +++ b/fs/btrfs/ctree.c
2597 @@ -1216,9 +1216,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
2598 @@ -67968,6 +67979,28 @@ index e081acb..911df21 100644
2599
2600 /*
2601 * We'll have a dentry and an inode for
2602 +diff --git a/fs/configfs/item.c b/fs/configfs/item.c
2603 +index 50cee7f..8238ebd 100644
2604 +--- a/fs/configfs/item.c
2605 ++++ b/fs/configfs/item.c
2606 +@@ -116,7 +116,7 @@ void config_item_init_type_name(struct config_item *item,
2607 + const char *name,
2608 + struct config_item_type *type)
2609 + {
2610 +- config_item_set_name(item, name);
2611 ++ config_item_set_name(item, "%s", name);
2612 + item->ci_type = type;
2613 + config_item_init(item);
2614 + }
2615 +@@ -125,7 +125,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
2616 + void config_group_init_type_name(struct config_group *group, const char *name,
2617 + struct config_item_type *type)
2618 + {
2619 +- config_item_set_name(&group->cg_item, name);
2620 ++ config_item_set_name(&group->cg_item, "%s", name);
2621 + group->cg_item.ci_type = type;
2622 + config_group_init(group);
2623 + }
2624 diff --git a/fs/coredump.c b/fs/coredump.c
2625 index a93f7e6..d58bcbe 100644
2626 --- a/fs/coredump.c
2627 @@ -75409,7 +75442,7 @@ index 1d641bb..9ca7f61 100644
2628 {
2629 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
2630 diff --git a/fs/splice.c b/fs/splice.c
2631 -index 12028fa..2cde9b2 100644
2632 +index f345d53..f6e7484 100644
2633 --- a/fs/splice.c
2634 +++ b/fs/splice.c
2635 @@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
2636 @@ -75491,7 +75524,7 @@ index 12028fa..2cde9b2 100644
2637 return 0;
2638
2639 if (sd->flags & SPLICE_F_NONBLOCK)
2640 -@@ -1171,7 +1171,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2641 +@@ -1175,7 +1175,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2642 long ret, bytes;
2643 umode_t i_mode;
2644 size_t len;
2645 @@ -75500,7 +75533,7 @@ index 12028fa..2cde9b2 100644
2646
2647 /*
2648 * We require the input being a regular file, as we don't want to
2649 -@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2650 +@@ -1201,7 +1201,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2651 * out of the pipe right after the splice_to_pipe(). So set
2652 * PIPE_READERS appropriately.
2653 */
2654 @@ -75509,7 +75542,7 @@ index 12028fa..2cde9b2 100644
2655
2656 current->splice_pipe = pipe;
2657 }
2658 -@@ -1214,6 +1214,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2659 +@@ -1218,6 +1218,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2660 * Don't block on output, we have to drain the direct pipe.
2661 */
2662 sd->flags &= ~SPLICE_F_NONBLOCK;
2663 @@ -75517,7 +75550,7 @@ index 12028fa..2cde9b2 100644
2664
2665 while (len) {
2666 size_t read_len;
2667 -@@ -1227,6 +1228,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2668 +@@ -1231,6 +1232,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
2669 sd->total_len = read_len;
2670
2671 /*
2672 @@ -75533,7 +75566,7 @@ index 12028fa..2cde9b2 100644
2673 * NOTE: nonblocking mode only applies to the input. We
2674 * must not do the output in nonblocking mode as then we
2675 * could get stuck data in the internal pipe:
2676 -@@ -1493,6 +1503,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
2677 +@@ -1497,6 +1507,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
2678
2679 partial[buffers].offset = off;
2680 partial[buffers].len = plen;
2681 @@ -75541,7 +75574,7 @@ index 12028fa..2cde9b2 100644
2682
2683 off = 0;
2684 len -= plen;
2685 -@@ -1795,9 +1806,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
2686 +@@ -1799,9 +1810,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
2687 ret = -ERESTARTSYS;
2688 break;
2689 }
2690 @@ -75553,7 +75586,7 @@ index 12028fa..2cde9b2 100644
2691 if (flags & SPLICE_F_NONBLOCK) {
2692 ret = -EAGAIN;
2693 break;
2694 -@@ -1829,7 +1840,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
2695 +@@ -1833,7 +1844,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
2696 pipe_lock(pipe);
2697
2698 while (pipe->nrbufs >= pipe->buffers) {
2699 @@ -75562,7 +75595,7 @@ index 12028fa..2cde9b2 100644
2700 send_sig(SIGPIPE, current, 0);
2701 ret = -EPIPE;
2702 break;
2703 -@@ -1842,9 +1853,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
2704 +@@ -1846,9 +1857,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
2705 ret = -ERESTARTSYS;
2706 break;
2707 }
2708 @@ -75574,7 +75607,7 @@ index 12028fa..2cde9b2 100644
2709 }
2710
2711 pipe_unlock(pipe);
2712 -@@ -1880,14 +1891,14 @@ retry:
2713 +@@ -1884,14 +1895,14 @@ retry:
2714 pipe_double_lock(ipipe, opipe);
2715
2716 do {
2717 @@ -75591,7 +75624,7 @@ index 12028fa..2cde9b2 100644
2718 break;
2719
2720 /*
2721 -@@ -1984,7 +1995,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2722 +@@ -1988,7 +1999,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2723 pipe_double_lock(ipipe, opipe);
2724
2725 do {
2726 @@ -75600,7 +75633,7 @@ index 12028fa..2cde9b2 100644
2727 send_sig(SIGPIPE, current, 0);
2728 if (!ret)
2729 ret = -EPIPE;
2730 -@@ -2029,7 +2040,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2731 +@@ -2033,7 +2044,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2732 * return EAGAIN if we have the potential of some data in the
2733 * future, otherwise just return 0
2734 */
2735 @@ -81750,7 +81783,7 @@ index 0000000..4c7e00a
2736 +}
2737 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
2738 new file mode 100644
2739 -index 0000000..f056b81
2740 +index 0000000..ed6ee43
2741 --- /dev/null
2742 +++ b/grsecurity/gracl_ip.c
2743 @@ -0,0 +1,386 @@
2744 @@ -81824,7 +81857,7 @@ index 0000000..f056b81
2745 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
2746 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
2747 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
2748 -+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
2749 ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg", "nfc", "vsock"
2750 + };
2751 +
2752 +const char *
2753 @@ -89322,9 +89355,18 @@ index 5d5aaae..0ea9b84 100644
2754 extern bool completion_done(struct completion *x);
2755
2756 diff --git a/include/linux/configfs.h b/include/linux/configfs.h
2757 -index 34025df..d94bbbc 100644
2758 +index 34025df..2a6ee32 100644
2759 --- a/include/linux/configfs.h
2760 +++ b/include/linux/configfs.h
2761 +@@ -64,7 +64,7 @@ struct config_item {
2762 + struct dentry *ci_dentry;
2763 + };
2764 +
2765 +-extern int config_item_set_name(struct config_item *, const char *, ...);
2766 ++extern __printf(2, 3) int config_item_set_name(struct config_item *, const char *, ...);
2767 +
2768 + static inline char *config_item_name(struct config_item * item)
2769 + {
2770 @@ -125,7 +125,7 @@ struct configfs_attribute {
2771 const char *ca_name;
2772 struct module *ca_owner;
2773 @@ -114514,7 +114556,7 @@ index f042ae5..30ea486 100644
2774 }
2775 EXPORT_SYMBOL(nf_unregister_sockopt);
2776 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2777 -index c68e5e0..3bed3f0 100644
2778 +index 99de240..539632e 100644
2779 --- a/net/netfilter/nf_tables_api.c
2780 +++ b/net/netfilter/nf_tables_api.c
2781 @@ -152,8 +152,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
2782 @@ -114576,10 +114618,10 @@ index 108120f..5b169db 100644
2783 queued = 0;
2784 err = 0;
2785 diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
2786 -index 7350723..af7fa0d 100644
2787 +index 9695895..f0a05e6 100644
2788 --- a/net/netfilter/nft_compat.c
2789 +++ b/net/netfilter/nft_compat.c
2790 -@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
2791 +@@ -219,7 +219,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
2792 /* We want to reuse existing compat_to_user */
2793 old_fs = get_fs();
2794 set_fs(KERNEL_DS);
2795 @@ -114588,7 +114630,7 @@ index 7350723..af7fa0d 100644
2796 set_fs(old_fs);
2797 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
2798 kfree(out);
2799 -@@ -283,14 +283,7 @@ static void nft_match_eval(const struct nft_expr *expr,
2800 +@@ -286,14 +286,7 @@ static void nft_match_eval(const struct nft_expr *expr,
2801 return;
2802 }
2803
2804 @@ -114604,7 +114646,7 @@ index 7350723..af7fa0d 100644
2805 }
2806
2807 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
2808 -@@ -403,7 +396,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
2809 +@@ -409,7 +402,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
2810 /* We want to reuse existing compat_to_user */
2811 old_fs = get_fs();
2812 set_fs(KERNEL_DS);
2813
2814 diff --git a/3.14.46/4425_grsec_remove_EI_PAX.patch b/3.14.47/4425_grsec_remove_EI_PAX.patch
2815 similarity index 100%
2816 rename from 3.14.46/4425_grsec_remove_EI_PAX.patch
2817 rename to 3.14.47/4425_grsec_remove_EI_PAX.patch
2818
2819 diff --git a/3.14.46/4427_force_XATTR_PAX_tmpfs.patch b/3.14.47/4427_force_XATTR_PAX_tmpfs.patch
2820 similarity index 100%
2821 rename from 3.14.46/4427_force_XATTR_PAX_tmpfs.patch
2822 rename to 3.14.47/4427_force_XATTR_PAX_tmpfs.patch
2823
2824 diff --git a/3.14.46/4430_grsec-remove-localversion-grsec.patch b/3.14.47/4430_grsec-remove-localversion-grsec.patch
2825 similarity index 100%
2826 rename from 3.14.46/4430_grsec-remove-localversion-grsec.patch
2827 rename to 3.14.47/4430_grsec-remove-localversion-grsec.patch
2828
2829 diff --git a/3.14.46/4435_grsec-mute-warnings.patch b/3.14.47/4435_grsec-mute-warnings.patch
2830 similarity index 100%
2831 rename from 3.14.46/4435_grsec-mute-warnings.patch
2832 rename to 3.14.47/4435_grsec-mute-warnings.patch
2833
2834 diff --git a/3.14.46/4440_grsec-remove-protected-paths.patch b/3.14.47/4440_grsec-remove-protected-paths.patch
2835 similarity index 100%
2836 rename from 3.14.46/4440_grsec-remove-protected-paths.patch
2837 rename to 3.14.47/4440_grsec-remove-protected-paths.patch
2838
2839 diff --git a/3.14.46/4450_grsec-kconfig-default-gids.patch b/3.14.47/4450_grsec-kconfig-default-gids.patch
2840 similarity index 100%
2841 rename from 3.14.46/4450_grsec-kconfig-default-gids.patch
2842 rename to 3.14.47/4450_grsec-kconfig-default-gids.patch
2843
2844 diff --git a/3.14.46/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.47/4465_selinux-avc_audit-log-curr_ip.patch
2845 similarity index 100%
2846 rename from 3.14.46/4465_selinux-avc_audit-log-curr_ip.patch
2847 rename to 3.14.47/4465_selinux-avc_audit-log-curr_ip.patch
2848
2849 diff --git a/3.14.46/4470_disable-compat_vdso.patch b/3.14.47/4470_disable-compat_vdso.patch
2850 similarity index 100%
2851 rename from 3.14.46/4470_disable-compat_vdso.patch
2852 rename to 3.14.47/4470_disable-compat_vdso.patch
2853
2854 diff --git a/3.14.46/4475_emutramp_default_on.patch b/3.14.47/4475_emutramp_default_on.patch
2855 similarity index 100%
2856 rename from 3.14.46/4475_emutramp_default_on.patch
2857 rename to 3.14.47/4475_emutramp_default_on.patch
2858
2859 diff --git a/3.2.69/0000_README b/3.2.69/0000_README
2860 index d006716..6773701 100644
2861 --- a/3.2.69/0000_README
2862 +++ b/3.2.69/0000_README
2863 @@ -194,7 +194,7 @@ Patch: 1068_linux-3.2.69.patch
2864 From: http://www.kernel.org
2865 Desc: Linux 3.2.69
2866
2867 -Patch: 4420_grsecurity-3.1-3.2.69-201506300708.patch
2868 +Patch: 4420_grsecurity-3.1-3.2.69-201507050830.patch
2869 From: http://www.grsecurity.net
2870 Desc: hardened-sources base patch from upstream grsecurity
2871
2872
2873 diff --git a/3.2.69/4420_grsecurity-3.1-3.2.69-201506300708.patch b/3.2.69/4420_grsecurity-3.1-3.2.69-201507050830.patch
2874 similarity index 99%
2875 rename from 3.2.69/4420_grsecurity-3.1-3.2.69-201506300708.patch
2876 rename to 3.2.69/4420_grsecurity-3.1-3.2.69-201507050830.patch
2877 index e8aabfa..57ddd0b 100644
2878 --- a/3.2.69/4420_grsecurity-3.1-3.2.69-201506300708.patch
2879 +++ b/3.2.69/4420_grsecurity-3.1-3.2.69-201507050830.patch
2880 @@ -59119,6 +59119,28 @@ index 5ef72c8..0c72810 100644
2881
2882 /*
2883 * We'll have a dentry and an inode for
2884 +diff --git a/fs/configfs/item.c b/fs/configfs/item.c
2885 +index 50cee7f..8238ebd 100644
2886 +--- a/fs/configfs/item.c
2887 ++++ b/fs/configfs/item.c
2888 +@@ -116,7 +116,7 @@ void config_item_init_type_name(struct config_item *item,
2889 + const char *name,
2890 + struct config_item_type *type)
2891 + {
2892 +- config_item_set_name(item, name);
2893 ++ config_item_set_name(item, "%s", name);
2894 + item->ci_type = type;
2895 + config_item_init(item);
2896 + }
2897 +@@ -125,7 +125,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
2898 + void config_group_init_type_name(struct config_group *group, const char *name,
2899 + struct config_item_type *type)
2900 + {
2901 +- config_item_set_name(&group->cg_item, name);
2902 ++ config_item_set_name(&group->cg_item, "%s", name);
2903 + group->cg_item.ci_type = type;
2904 + config_group_init(group);
2905 + }
2906 diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
2907 index 276e15c..aeac324 100644
2908 --- a/fs/configfs/mount.c
2909 @@ -73326,7 +73348,7 @@ index 0000000..b916759
2910 +}
2911 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
2912 new file mode 100644
2913 -index 0000000..35f8064
2914 +index 0000000..e0bbcf4
2915 --- /dev/null
2916 +++ b/grsecurity/gracl_ip.c
2917 @@ -0,0 +1,386 @@
2918 @@ -73400,7 +73422,7 @@ index 0000000..35f8064
2919 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
2920 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
2921 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
2922 -+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
2923 ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg", "nfc"
2924 + };
2925 +
2926 +const char *
2927 @@ -81044,9 +81066,18 @@ index 51494e6..340575ab 100644
2928 extern bool completion_done(struct completion *x);
2929
2930 diff --git a/include/linux/configfs.h b/include/linux/configfs.h
2931 -index 3081c58..7714c00 100644
2932 +index 3081c58..5a0b545 100644
2933 --- a/include/linux/configfs.h
2934 +++ b/include/linux/configfs.h
2935 +@@ -64,7 +64,7 @@ struct config_item {
2936 + struct dentry *ci_dentry;
2937 + };
2938 +
2939 +-extern int config_item_set_name(struct config_item *, const char *, ...);
2940 ++extern __printf(2, 3) int config_item_set_name(struct config_item *, const char *, ...);
2941 +
2942 + static inline char *config_item_name(struct config_item * item)
2943 + {
2944 @@ -125,7 +125,7 @@ struct configfs_attribute {
2945 const char *ca_name;
2946 struct module *ca_owner;
2947
2948 diff --git a/4.0.7/0000_README b/4.0.7/0000_README
2949 index 1c85007..fc634e5 100644
2950 --- a/4.0.7/0000_README
2951 +++ b/4.0.7/0000_README
2952 @@ -2,11 +2,7 @@ README
2953 -----------------------------------------------------------------------------
2954 Individual Patch Descriptions:
2955 -----------------------------------------------------------------------------
2956 -Patch: 1006_linux-4.0.7.patch
2957 -From: http://www.kernel.org
2958 -Desc: Linux 4.0.7
2959 -
2960 -Patch: 4420_grsecurity-3.1-4.0.7-201506300712.patch
2961 +Patch: 4420_grsecurity-3.1-4.0.7-201507050833.patch
2962 From: http://www.grsecurity.net
2963 Desc: hardened-sources base patch from upstream grsecurity
2964
2965
2966 diff --git a/4.0.7/1006_linux-4.0.7.patch b/4.0.7/1006_linux-4.0.7.patch
2967 deleted file mode 100644
2968 index 0b9b646..0000000
2969 --- a/4.0.7/1006_linux-4.0.7.patch
2970 +++ /dev/null
2971 @@ -1,707 +0,0 @@
2972 -diff --git a/Makefile b/Makefile
2973 -index af6da04..bd76a8e 100644
2974 ---- a/Makefile
2975 -+++ b/Makefile
2976 -@@ -1,6 +1,6 @@
2977 - VERSION = 4
2978 - PATCHLEVEL = 0
2979 --SUBLEVEL = 6
2980 -+SUBLEVEL = 7
2981 - EXTRAVERSION =
2982 - NAME = Hurr durr I'ma sheep
2983 -
2984 -diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
2985 -index f70eca7..0ef8d4b 100644
2986 ---- a/arch/arm/mach-exynos/common.h
2987 -+++ b/arch/arm/mach-exynos/common.h
2988 -@@ -153,6 +153,8 @@ extern void exynos_enter_aftr(void);
2989 -
2990 - extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
2991 -
2992 -+extern void exynos_set_delayed_reset_assertion(bool enable);
2993 -+
2994 - extern void s5p_init_cpu(void __iomem *cpuid_addr);
2995 - extern unsigned int samsung_rev(void);
2996 - extern void __iomem *cpu_boot_reg_base(void);
2997 -diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
2998 -index 9e9dfdf..1081ff1 100644
2999 ---- a/arch/arm/mach-exynos/exynos.c
3000 -+++ b/arch/arm/mach-exynos/exynos.c
3001 -@@ -166,6 +166,33 @@ static void __init exynos_init_io(void)
3002 - exynos_map_io();
3003 - }
3004 -
3005 -+/*
3006 -+ * Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by smp code
3007 -+ * and suspend.
3008 -+ *
3009 -+ * This is necessary only on Exynos4 SoCs. When system is running
3010 -+ * USE_DELAYED_RESET_ASSERTION should be set so the ARM CLK clock down
3011 -+ * feature could properly detect global idle state when secondary CPU is
3012 -+ * powered down.
3013 -+ *
3014 -+ * However this should not be set when such system is going into suspend.
3015 -+ */
3016 -+void exynos_set_delayed_reset_assertion(bool enable)
3017 -+{
3018 -+ if (soc_is_exynos4()) {
3019 -+ unsigned int tmp, core_id;
3020 -+
3021 -+ for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
3022 -+ tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
3023 -+ if (enable)
3024 -+ tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
3025 -+ else
3026 -+ tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
3027 -+ pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
3028 -+ }
3029 -+ }
3030 -+}
3031 -+
3032 - static const struct of_device_id exynos_dt_pmu_match[] = {
3033 - { .compatible = "samsung,exynos3250-pmu" },
3034 - { .compatible = "samsung,exynos4210-pmu" },
3035 -diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
3036 -index d2e9f12..d45e8cd 100644
3037 ---- a/arch/arm/mach-exynos/platsmp.c
3038 -+++ b/arch/arm/mach-exynos/platsmp.c
3039 -@@ -34,30 +34,6 @@
3040 -
3041 - extern void exynos4_secondary_startup(void);
3042 -
3043 --/*
3044 -- * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
3045 -- * during hot-(un)plugging CPUx.
3046 -- *
3047 -- * The feature can be cleared safely during first boot of secondary CPU.
3048 -- *
3049 -- * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
3050 -- * down a CPU so the CPU idle clock down feature could properly detect global
3051 -- * idle state when CPUx is off.
3052 -- */
3053 --static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
3054 --{
3055 -- if (soc_is_exynos4()) {
3056 -- unsigned int tmp;
3057 --
3058 -- tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
3059 -- if (enable)
3060 -- tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
3061 -- else
3062 -- tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
3063 -- pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
3064 -- }
3065 --}
3066 --
3067 - #ifdef CONFIG_HOTPLUG_CPU
3068 - static inline void cpu_leave_lowpower(u32 core_id)
3069 - {
3070 -@@ -73,8 +49,6 @@ static inline void cpu_leave_lowpower(u32 core_id)
3071 - : "=&r" (v)
3072 - : "Ir" (CR_C), "Ir" (0x40)
3073 - : "cc");
3074 --
3075 -- exynos_set_delayed_reset_assertion(core_id, false);
3076 - }
3077 -
3078 - static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
3079 -@@ -87,14 +61,6 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
3080 - /* Turn the CPU off on next WFI instruction. */
3081 - exynos_cpu_power_down(core_id);
3082 -
3083 -- /*
3084 -- * Exynos4 SoCs require setting
3085 -- * USE_DELAYED_RESET_ASSERTION so the CPU idle
3086 -- * clock down feature could properly detect
3087 -- * global idle state when CPUx is off.
3088 -- */
3089 -- exynos_set_delayed_reset_assertion(core_id, true);
3090 --
3091 - wfi();
3092 -
3093 - if (pen_release == core_id) {
3094 -@@ -354,9 +320,6 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
3095 - udelay(10);
3096 - }
3097 -
3098 -- /* No harm if this is called during first boot of secondary CPU */
3099 -- exynos_set_delayed_reset_assertion(core_id, false);
3100 --
3101 - /*
3102 - * now the secondary core is starting up let it run its
3103 - * calibrations, then wait for it to finish
3104 -@@ -403,6 +366,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
3105 -
3106 - exynos_sysram_init();
3107 -
3108 -+ exynos_set_delayed_reset_assertion(true);
3109 -+
3110 - if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
3111 - scu_enable(scu_base_addr());
3112 -
3113 -diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3114 -index 318d127..582ef2d 100644
3115 ---- a/arch/arm/mach-exynos/suspend.c
3116 -+++ b/arch/arm/mach-exynos/suspend.c
3117 -@@ -235,6 +235,8 @@ static void exynos_pm_enter_sleep_mode(void)
3118 -
3119 - static void exynos_pm_prepare(void)
3120 - {
3121 -+ exynos_set_delayed_reset_assertion(false);
3122 -+
3123 - /* Set wake-up mask registers */
3124 - exynos_pm_set_wakeup_mask();
3125 -
3126 -@@ -383,6 +385,7 @@ early_wakeup:
3127 -
3128 - /* Clear SLEEP mode set in INFORM1 */
3129 - pmu_raw_writel(0x0, S5P_INFORM1);
3130 -+ exynos_set_delayed_reset_assertion(true);
3131 - }
3132 -
3133 - static void exynos3250_pm_resume(void)
3134 -diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
3135 -index 05adc8b..401d8d0 100644
3136 ---- a/arch/powerpc/kernel/idle_power7.S
3137 -+++ b/arch/powerpc/kernel/idle_power7.S
3138 -@@ -500,9 +500,11 @@ BEGIN_FTR_SECTION
3139 - CHECK_HMI_INTERRUPT
3140 - END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
3141 - ld r1,PACAR1(r13)
3142 -+ ld r6,_CCR(r1)
3143 - ld r4,_MSR(r1)
3144 - ld r5,_NIP(r1)
3145 - addi r1,r1,INT_FRAME_SIZE
3146 -+ mtcr r6
3147 - mtspr SPRN_SRR1,r4
3148 - mtspr SPRN_SRR0,r5
3149 - rfid
3150 -diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
3151 -index 4e3d5a9..03189d8 100644
3152 ---- a/arch/x86/kernel/kprobes/core.c
3153 -+++ b/arch/x86/kernel/kprobes/core.c
3154 -@@ -354,6 +354,7 @@ int __copy_instruction(u8 *dest, u8 *src)
3155 - {
3156 - struct insn insn;
3157 - kprobe_opcode_t buf[MAX_INSN_SIZE];
3158 -+ int length;
3159 - unsigned long recovered_insn =
3160 - recover_probed_instruction(buf, (unsigned long)src);
3161 -
3162 -@@ -361,16 +362,18 @@ int __copy_instruction(u8 *dest, u8 *src)
3163 - return 0;
3164 - kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
3165 - insn_get_length(&insn);
3166 -+ length = insn.length;
3167 -+
3168 - /* Another subsystem puts a breakpoint, failed to recover */
3169 - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
3170 - return 0;
3171 -- memcpy(dest, insn.kaddr, insn.length);
3172 -+ memcpy(dest, insn.kaddr, length);
3173 -
3174 - #ifdef CONFIG_X86_64
3175 - if (insn_rip_relative(&insn)) {
3176 - s64 newdisp;
3177 - u8 *disp;
3178 -- kernel_insn_init(&insn, dest, insn.length);
3179 -+ kernel_insn_init(&insn, dest, length);
3180 - insn_get_displacement(&insn);
3181 - /*
3182 - * The copied instruction uses the %rip-relative addressing
3183 -@@ -394,7 +397,7 @@ int __copy_instruction(u8 *dest, u8 *src)
3184 - *(s32 *) disp = (s32) newdisp;
3185 - }
3186 - #endif
3187 -- return insn.length;
3188 -+ return length;
3189 - }
3190 -
3191 - static int arch_copy_kprobe(struct kprobe *p)
3192 -diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
3193 -index 4ee827d..3cb2b58 100644
3194 ---- a/arch/x86/kvm/lapic.c
3195 -+++ b/arch/x86/kvm/lapic.c
3196 -@@ -1064,6 +1064,17 @@ static void update_divide_count(struct kvm_lapic *apic)
3197 - apic->divide_count);
3198 - }
3199 -
3200 -+static void apic_update_lvtt(struct kvm_lapic *apic)
3201 -+{
3202 -+ u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) &
3203 -+ apic->lapic_timer.timer_mode_mask;
3204 -+
3205 -+ if (apic->lapic_timer.timer_mode != timer_mode) {
3206 -+ apic->lapic_timer.timer_mode = timer_mode;
3207 -+ hrtimer_cancel(&apic->lapic_timer.timer);
3208 -+ }
3209 -+}
3210 -+
3211 - static void apic_timer_expired(struct kvm_lapic *apic)
3212 - {
3213 - struct kvm_vcpu *vcpu = apic->vcpu;
3214 -@@ -1272,6 +1283,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
3215 - apic_set_reg(apic, APIC_LVTT + 0x10 * i,
3216 - lvt_val | APIC_LVT_MASKED);
3217 - }
3218 -+ apic_update_lvtt(apic);
3219 - atomic_set(&apic->lapic_timer.pending, 0);
3220 -
3221 - }
3222 -@@ -1304,20 +1316,13 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
3223 -
3224 - break;
3225 -
3226 -- case APIC_LVTT: {
3227 -- u32 timer_mode = val & apic->lapic_timer.timer_mode_mask;
3228 --
3229 -- if (apic->lapic_timer.timer_mode != timer_mode) {
3230 -- apic->lapic_timer.timer_mode = timer_mode;
3231 -- hrtimer_cancel(&apic->lapic_timer.timer);
3232 -- }
3233 --
3234 -+ case APIC_LVTT:
3235 - if (!kvm_apic_sw_enabled(apic))
3236 - val |= APIC_LVT_MASKED;
3237 - val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
3238 - apic_set_reg(apic, APIC_LVTT, val);
3239 -+ apic_update_lvtt(apic);
3240 - break;
3241 -- }
3242 -
3243 - case APIC_TMICT:
3244 - if (apic_lvtt_tscdeadline(apic))
3245 -@@ -1552,7 +1557,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
3246 -
3247 - for (i = 0; i < APIC_LVT_NUM; i++)
3248 - apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
3249 -- apic->lapic_timer.timer_mode = 0;
3250 -+ apic_update_lvtt(apic);
3251 - apic_set_reg(apic, APIC_LVT0,
3252 - SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
3253 -
3254 -@@ -1778,6 +1783,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
3255 -
3256 - apic_update_ppr(apic);
3257 - hrtimer_cancel(&apic->lapic_timer.timer);
3258 -+ apic_update_lvtt(apic);
3259 - update_divide_count(apic);
3260 - start_apic_timer(apic);
3261 - apic->irr_pending = true;
3262 -diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
3263 -index 288547a..f26ebc5 100644
3264 ---- a/drivers/bluetooth/ath3k.c
3265 -+++ b/drivers/bluetooth/ath3k.c
3266 -@@ -80,6 +80,7 @@ static const struct usb_device_id ath3k_table[] = {
3267 - { USB_DEVICE(0x0489, 0xe057) },
3268 - { USB_DEVICE(0x0489, 0xe056) },
3269 - { USB_DEVICE(0x0489, 0xe05f) },
3270 -+ { USB_DEVICE(0x0489, 0xe076) },
3271 - { USB_DEVICE(0x0489, 0xe078) },
3272 - { USB_DEVICE(0x04c5, 0x1330) },
3273 - { USB_DEVICE(0x04CA, 0x3004) },
3274 -@@ -111,6 +112,7 @@ static const struct usb_device_id ath3k_table[] = {
3275 - { USB_DEVICE(0x13d3, 0x3408) },
3276 - { USB_DEVICE(0x13d3, 0x3423) },
3277 - { USB_DEVICE(0x13d3, 0x3432) },
3278 -+ { USB_DEVICE(0x13d3, 0x3474) },
3279 -
3280 - /* Atheros AR5BBU12 with sflash firmware */
3281 - { USB_DEVICE(0x0489, 0xE02C) },
3282 -@@ -135,6 +137,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
3283 - { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
3284 - { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
3285 - { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
3286 -+ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
3287 - { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
3288 - { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
3289 - { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
3290 -@@ -166,6 +169,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
3291 - { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
3292 - { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
3293 - { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
3294 -+ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
3295 -
3296 - /* Atheros AR5BBU22 with sflash firmware */
3297 - { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
3298 -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
3299 -index 2c527da..4fc4157 100644
3300 ---- a/drivers/bluetooth/btusb.c
3301 -+++ b/drivers/bluetooth/btusb.c
3302 -@@ -174,6 +174,7 @@ static const struct usb_device_id blacklist_table[] = {
3303 - { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
3304 - { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
3305 - { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
3306 -+ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
3307 - { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
3308 - { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
3309 - { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
3310 -@@ -205,6 +206,7 @@ static const struct usb_device_id blacklist_table[] = {
3311 - { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
3312 - { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
3313 - { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
3314 -+ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
3315 -
3316 - /* Atheros AR5BBU12 with sflash firmware */
3317 - { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
3318 -diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
3319 -index 6ec79db..cbbe403 100644
3320 ---- a/drivers/clk/at91/clk-pll.c
3321 -+++ b/drivers/clk/at91/clk-pll.c
3322 -@@ -173,8 +173,7 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
3323 - int i = 0;
3324 -
3325 - /* Check if parent_rate is a valid input rate */
3326 -- if (parent_rate < characteristics->input.min ||
3327 -- parent_rate > characteristics->input.max)
3328 -+ if (parent_rate < characteristics->input.min)
3329 - return -ERANGE;
3330 -
3331 - /*
3332 -@@ -187,6 +186,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
3333 - if (!mindiv)
3334 - mindiv = 1;
3335 -
3336 -+ if (parent_rate > characteristics->input.max) {
3337 -+ tmpdiv = DIV_ROUND_UP(parent_rate, characteristics->input.max);
3338 -+ if (tmpdiv > PLL_DIV_MAX)
3339 -+ return -ERANGE;
3340 -+
3341 -+ if (tmpdiv > mindiv)
3342 -+ mindiv = tmpdiv;
3343 -+ }
3344 -+
3345 - /*
3346 - * Calculate the maximum divider which is limited by PLL register
3347 - * layout (limited by the MUL or DIV field size).
3348 -diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
3349 -index 69abb08..eb8e5dc 100644
3350 ---- a/drivers/clk/at91/pmc.h
3351 -+++ b/drivers/clk/at91/pmc.h
3352 -@@ -121,7 +121,7 @@ extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
3353 - struct at91_pmc *pmc);
3354 - #endif
3355 -
3356 --#if defined(CONFIG_HAVE_AT91_SMD)
3357 -+#if defined(CONFIG_HAVE_AT91_H32MX)
3358 - extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
3359 - struct at91_pmc *pmc);
3360 - #endif
3361 -diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
3362 -index f347ab7..08b0da2 100644
3363 ---- a/drivers/crypto/caam/caamhash.c
3364 -+++ b/drivers/crypto/caam/caamhash.c
3365 -@@ -1543,6 +1543,8 @@ static int ahash_init(struct ahash_request *req)
3366 -
3367 - state->current_buf = 0;
3368 - state->buf_dma = 0;
3369 -+ state->buflen_0 = 0;
3370 -+ state->buflen_1 = 0;
3371 -
3372 - return 0;
3373 - }
3374 -diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
3375 -index ae31e55..a48dc25 100644
3376 ---- a/drivers/crypto/caam/caamrng.c
3377 -+++ b/drivers/crypto/caam/caamrng.c
3378 -@@ -56,7 +56,7 @@
3379 -
3380 - /* Buffer, its dma address and lock */
3381 - struct buf_data {
3382 -- u8 buf[RN_BUF_SIZE];
3383 -+ u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
3384 - dma_addr_t addr;
3385 - struct completion filled;
3386 - u32 hw_desc[DESC_JOB_O_LEN];
3387 -diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
3388 -index ec4d932..169123a 100644
3389 ---- a/drivers/gpu/drm/i915/i915_drv.c
3390 -+++ b/drivers/gpu/drm/i915/i915_drv.c
3391 -@@ -693,6 +693,16 @@ static int i915_drm_resume(struct drm_device *dev)
3392 - intel_init_pch_refclk(dev);
3393 - drm_mode_config_reset(dev);
3394 -
3395 -+ /*
3396 -+ * Interrupts have to be enabled before any batches are run.
3397 -+ * If not the GPU will hang. i915_gem_init_hw() will initiate
3398 -+ * batches to update/restore the context.
3399 -+ *
3400 -+ * Modeset enabling in intel_modeset_init_hw() also needs
3401 -+ * working interrupts.
3402 -+ */
3403 -+ intel_runtime_pm_enable_interrupts(dev_priv);
3404 -+
3405 - mutex_lock(&dev->struct_mutex);
3406 - if (i915_gem_init_hw(dev)) {
3407 - DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
3408 -@@ -700,9 +710,6 @@ static int i915_drm_resume(struct drm_device *dev)
3409 - }
3410 - mutex_unlock(&dev->struct_mutex);
3411 -
3412 -- /* We need working interrupts for modeset enabling ... */
3413 -- intel_runtime_pm_enable_interrupts(dev_priv);
3414 --
3415 - intel_modeset_init_hw(dev);
3416 -
3417 - spin_lock_irq(&dev_priv->irq_lock);
3418 -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
3419 -index 7a628e4..9536ec3 100644
3420 ---- a/drivers/gpu/drm/i915/i915_gem.c
3421 -+++ b/drivers/gpu/drm/i915/i915_gem.c
3422 -@@ -2732,6 +2732,9 @@ void i915_gem_reset(struct drm_device *dev)
3423 - void
3424 - i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
3425 - {
3426 -+ if (list_empty(&ring->request_list))
3427 -+ return;
3428 -+
3429 - WARN_ON(i915_verify_lists(ring->dev));
3430 -
3431 - /* Retire requests first as we use it above for the early return.
3432 -@@ -3088,8 +3091,8 @@ int i915_vma_unbind(struct i915_vma *vma)
3433 - } else if (vma->ggtt_view.pages) {
3434 - sg_free_table(vma->ggtt_view.pages);
3435 - kfree(vma->ggtt_view.pages);
3436 -- vma->ggtt_view.pages = NULL;
3437 - }
3438 -+ vma->ggtt_view.pages = NULL;
3439 - }
3440 -
3441 - drm_mm_remove_node(&vma->node);
3442 -diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
3443 -index 9872ba9..2ffeda3 100644
3444 ---- a/drivers/gpu/drm/mgag200/mgag200_mode.c
3445 -+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
3446 -@@ -1526,6 +1526,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
3447 - return MODE_BANDWIDTH;
3448 - }
3449 -
3450 -+ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
3451 -+ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
3452 -+ return MODE_H_ILLEGAL;
3453 -+ }
3454 -+
3455 - if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
3456 - mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
3457 - mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
3458 -diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
3459 -index 686411e..b82f2dd 100644
3460 ---- a/drivers/gpu/drm/radeon/radeon_kms.c
3461 -+++ b/drivers/gpu/drm/radeon/radeon_kms.c
3462 -@@ -547,6 +547,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
3463 - else
3464 - *value = 1;
3465 - break;
3466 -+ case RADEON_INFO_VA_UNMAP_WORKING:
3467 -+ *value = true;
3468 -+ break;
3469 - default:
3470 - DRM_DEBUG_KMS("Invalid request %d\n", info->request);
3471 - return -EINVAL;
3472 -diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
3473 -index 147029a..ac72ece 100644
3474 ---- a/drivers/infiniband/ulp/isert/ib_isert.c
3475 -+++ b/drivers/infiniband/ulp/isert/ib_isert.c
3476 -@@ -2316,7 +2316,6 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
3477 - page_off = offset % PAGE_SIZE;
3478 -
3479 - send_wr->sg_list = ib_sge;
3480 -- send_wr->num_sge = sg_nents;
3481 - send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
3482 - /*
3483 - * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
3484 -@@ -2336,14 +2335,17 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
3485 - ib_sge->addr, ib_sge->length, ib_sge->lkey);
3486 - page_off = 0;
3487 - data_left -= ib_sge->length;
3488 -+ if (!data_left)
3489 -+ break;
3490 - ib_sge++;
3491 - isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
3492 - }
3493 -
3494 -+ send_wr->num_sge = ++i;
3495 - isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
3496 - send_wr->sg_list, send_wr->num_sge);
3497 -
3498 -- return sg_nents;
3499 -+ return send_wr->num_sge;
3500 - }
3501 -
3502 - static int
3503 -@@ -3311,6 +3313,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
3504 - {
3505 - struct isert_conn *isert_conn = conn->context;
3506 -
3507 -+ isert_wait4flush(isert_conn);
3508 - isert_put_conn(isert_conn);
3509 - }
3510 -
3511 -diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3512 -index 9b4e30a..beda011 100644
3513 ---- a/drivers/md/dm.c
3514 -+++ b/drivers/md/dm.c
3515 -@@ -1889,8 +1889,8 @@ static int map_request(struct dm_target *ti, struct request *rq,
3516 - dm_kill_unmapped_request(rq, r);
3517 - return r;
3518 - }
3519 -- if (IS_ERR(clone))
3520 -- return DM_MAPIO_REQUEUE;
3521 -+ if (r != DM_MAPIO_REMAPPED)
3522 -+ return r;
3523 - if (setup_clone(clone, rq, tio, GFP_KERNEL)) {
3524 - /* -ENOMEM */
3525 - ti->type->release_clone_rq(clone);
3526 -diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
3527 -index 75345c1..5c91df5 100644
3528 ---- a/drivers/net/wireless/b43/main.c
3529 -+++ b/drivers/net/wireless/b43/main.c
3530 -@@ -5365,6 +5365,10 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
3531 - *have_5ghz_phy = true;
3532 - return;
3533 - case 0x4321: /* BCM4306 */
3534 -+ /* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
3535 -+ if (dev->phy.type != B43_PHYTYPE_G)
3536 -+ break;
3537 -+ /* fall through */
3538 - case 0x4313: /* BCM4311 */
3539 - case 0x431a: /* BCM4318 */
3540 - case 0x432a: /* BCM4321 */
3541 -diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3542 -index 220c0fd..50faef4 100644
3543 ---- a/drivers/usb/class/cdc-acm.c
3544 -+++ b/drivers/usb/class/cdc-acm.c
3545 -@@ -1468,6 +1468,11 @@ skip_countries:
3546 - goto alloc_fail8;
3547 - }
3548 -
3549 -+ if (quirks & CLEAR_HALT_CONDITIONS) {
3550 -+ usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
3551 -+ usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
3552 -+ }
3553 -+
3554 - return 0;
3555 - alloc_fail8:
3556 - if (acm->country_codes) {
3557 -@@ -1747,6 +1752,10 @@ static const struct usb_device_id acm_ids[] = {
3558 - .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
3559 - },
3560 -
3561 -+ { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
3562 -+ .driver_info = CLEAR_HALT_CONDITIONS,
3563 -+ },
3564 -+
3565 - /* Nokia S60 phones expose two ACM channels. The first is
3566 - * a modem and is picked up by the standard AT-command
3567 - * information below. The second is 'vendor-specific' but
3568 -diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
3569 -index ffeb3c8..b3b6c9d 100644
3570 ---- a/drivers/usb/class/cdc-acm.h
3571 -+++ b/drivers/usb/class/cdc-acm.h
3572 -@@ -133,3 +133,4 @@ struct acm {
3573 - #define NO_DATA_INTERFACE BIT(4)
3574 - #define IGNORE_DEVICE BIT(5)
3575 - #define QUIRK_CONTROL_LINE_STATE BIT(6)
3576 -+#define CLEAR_HALT_CONDITIONS BIT(7)
3577 -diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
3578 -index 50d0fb4..76d2ede 100644
3579 ---- a/include/uapi/drm/radeon_drm.h
3580 -+++ b/include/uapi/drm/radeon_drm.h
3581 -@@ -1034,6 +1034,7 @@ struct drm_radeon_cs {
3582 - #define RADEON_INFO_VRAM_USAGE 0x1e
3583 - #define RADEON_INFO_GTT_USAGE 0x1f
3584 - #define RADEON_INFO_ACTIVE_CU_COUNT 0x20
3585 -+#define RADEON_INFO_VA_UNMAP_WORKING 0x25
3586 -
3587 - struct drm_radeon_info {
3588 - uint32_t request;
3589 -diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
3590 -index ced69da..7f2e97c 100644
3591 ---- a/kernel/trace/trace_events_filter.c
3592 -+++ b/kernel/trace/trace_events_filter.c
3593 -@@ -1369,19 +1369,26 @@ static int check_preds(struct filter_parse_state *ps)
3594 - {
3595 - int n_normal_preds = 0, n_logical_preds = 0;
3596 - struct postfix_elt *elt;
3597 -+ int cnt = 0;
3598 -
3599 - list_for_each_entry(elt, &ps->postfix, list) {
3600 -- if (elt->op == OP_NONE)
3601 -+ if (elt->op == OP_NONE) {
3602 -+ cnt++;
3603 - continue;
3604 -+ }
3605 -
3606 - if (elt->op == OP_AND || elt->op == OP_OR) {
3607 - n_logical_preds++;
3608 -+ cnt--;
3609 - continue;
3610 - }
3611 -+ if (elt->op != OP_NOT)
3612 -+ cnt--;
3613 - n_normal_preds++;
3614 -+ WARN_ON_ONCE(cnt < 0);
3615 - }
3616 -
3617 -- if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
3618 -+ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
3619 - parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
3620 - return -EINVAL;
3621 - }
3622 -diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3623 -index 87eff31..60b3100 100644
3624 ---- a/sound/pci/hda/patch_sigmatel.c
3625 -+++ b/sound/pci/hda/patch_sigmatel.c
3626 -@@ -100,6 +100,7 @@ enum {
3627 - STAC_HP_ENVY_BASS,
3628 - STAC_HP_BNB13_EQ,
3629 - STAC_HP_ENVY_TS_BASS,
3630 -+ STAC_HP_ENVY_TS_DAC_BIND,
3631 - STAC_92HD83XXX_GPIO10_EAPD,
3632 - STAC_92HD83XXX_MODELS
3633 - };
3634 -@@ -2170,6 +2171,22 @@ static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec,
3635 - spec->eapd_switch = 0;
3636 - }
3637 -
3638 -+static void hp_envy_ts_fixup_dac_bind(struct hda_codec *codec,
3639 -+ const struct hda_fixup *fix,
3640 -+ int action)
3641 -+{
3642 -+ struct sigmatel_spec *spec = codec->spec;
3643 -+ static hda_nid_t preferred_pairs[] = {
3644 -+ 0xd, 0x13,
3645 -+ 0
3646 -+ };
3647 -+
3648 -+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
3649 -+ return;
3650 -+
3651 -+ spec->gen.preferred_dacs = preferred_pairs;
3652 -+}
3653 -+
3654 - static const struct hda_verb hp_bnb13_eq_verbs[] = {
3655 - /* 44.1KHz base */
3656 - { 0x22, 0x7A6, 0x3E },
3657 -@@ -2685,6 +2702,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
3658 - {}
3659 - },
3660 - },
3661 -+ [STAC_HP_ENVY_TS_DAC_BIND] = {
3662 -+ .type = HDA_FIXUP_FUNC,
3663 -+ .v.func = hp_envy_ts_fixup_dac_bind,
3664 -+ .chained = true,
3665 -+ .chain_id = STAC_HP_ENVY_TS_BASS,
3666 -+ },
3667 - [STAC_92HD83XXX_GPIO10_EAPD] = {
3668 - .type = HDA_FIXUP_FUNC,
3669 - .v.func = stac92hd83xxx_fixup_gpio10_eapd,
3670 -@@ -2763,6 +2786,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
3671 - "HP bNB13", STAC_HP_BNB13_EQ),
3672 - SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190e,
3673 - "HP ENVY TS", STAC_HP_ENVY_TS_BASS),
3674 -+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1967,
3675 -+ "HP ENVY TS", STAC_HP_ENVY_TS_DAC_BIND),
3676 - SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1940,
3677 - "HP bNB13", STAC_HP_BNB13_EQ),
3678 - SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1941,
3679
3680 diff --git a/4.0.7/4420_grsecurity-3.1-4.0.7-201506300712.patch b/4.0.7/4420_grsecurity-3.1-4.0.7-201507050833.patch
3681 similarity index 99%
3682 rename from 4.0.7/4420_grsecurity-3.1-4.0.7-201506300712.patch
3683 rename to 4.0.7/4420_grsecurity-3.1-4.0.7-201507050833.patch
3684 index 37bee2c..c471dac 100644
3685 --- a/4.0.7/4420_grsecurity-3.1-4.0.7-201506300712.patch
3686 +++ b/4.0.7/4420_grsecurity-3.1-4.0.7-201507050833.patch
3687 @@ -50607,10 +50607,59 @@ index ce2e2cf..f81e500 100644
3688 __u32 protocols;
3689
3690 diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
3691 -index 24d3d24..ff70d28 100644
3692 +index 24d3d24..b662ba0 100644
3693 --- a/drivers/nfc/st21nfca/st21nfca.c
3694 +++ b/drivers/nfc/st21nfca/st21nfca.c
3695 -@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
3696 +@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
3697 + ST21NFCA_DEVICE_MGNT_GATE,
3698 + ST21NFCA_DEVICE_MGNT_PIPE);
3699 + if (r < 0)
3700 +- goto free_info;
3701 ++ return r;
3702 +
3703 + /* Get pipe list */
3704 + r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
3705 + ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
3706 + &skb_pipe_list);
3707 + if (r < 0)
3708 +- goto free_info;
3709 ++ return r;
3710 +
3711 + /* Complete the existing gate_pipe table */
3712 + for (i = 0; i < skb_pipe_list->len; i++) {
3713 +@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
3714 + info->src_host_id != ST21NFCA_ESE_HOST_ID) {
3715 + pr_err("Unexpected apdu_reader pipe on host %x\n",
3716 + info->src_host_id);
3717 ++ kfree_skb(skb_pipe_info);
3718 + continue;
3719 + }
3720 +
3721 +@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
3722 + hdev->pipes[st21nfca_gates[j].pipe].dest_host =
3723 + info->src_host_id;
3724 + }
3725 ++ kfree_skb(skb_pipe_info);
3726 + }
3727 +
3728 + /*
3729 +@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
3730 + st21nfca_gates[i].gate,
3731 + st21nfca_gates[i].pipe);
3732 + if (r < 0)
3733 +- goto free_info;
3734 ++ goto free_list;
3735 + }
3736 + }
3737 +
3738 + memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
3739 +-free_info:
3740 +- kfree_skb(skb_pipe_info);
3741 ++free_list:
3742 + kfree_skb(skb_pipe_list);
3743 + return r;
3744 + }
3745 +@@ -588,7 +589,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
3746 goto exit;
3747 }
3748
3749 @@ -55636,6 +55685,19 @@ index 8cfc319..4868255 100644
3750
3751 return 0;
3752 }
3753 +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
3754 +index 0495c94..289e201 100644
3755 +--- a/drivers/usb/gadget/configfs.c
3756 ++++ b/drivers/usb/gadget/configfs.c
3757 +@@ -571,7 +571,7 @@ static struct config_group *function_make(
3758 + if (IS_ERR(fi))
3759 + return ERR_CAST(fi);
3760 +
3761 +- ret = config_item_set_name(&fi->group.cg_item, name);
3762 ++ ret = config_item_set_name(&fi->group.cg_item, "%s", name);
3763 + if (ret) {
3764 + usb_put_function_instance(fi);
3765 + return ERR_PTR(ret);
3766 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
3767 index 9719abf..789d5d9 100644
3768 --- a/drivers/usb/gadget/function/f_uac1.c
3769 @@ -68286,6 +68348,28 @@ index cf0db00..c7f70e8 100644
3770
3771 /*
3772 * We'll have a dentry and an inode for
3773 +diff --git a/fs/configfs/item.c b/fs/configfs/item.c
3774 +index e65f9ff..3ed264d 100644
3775 +--- a/fs/configfs/item.c
3776 ++++ b/fs/configfs/item.c
3777 +@@ -116,7 +116,7 @@ void config_item_init_type_name(struct config_item *item,
3778 + const char *name,
3779 + struct config_item_type *type)
3780 + {
3781 +- config_item_set_name(item, name);
3782 ++ config_item_set_name(item, "%s", name);
3783 + item->ci_type = type;
3784 + config_item_init(item);
3785 + }
3786 +@@ -125,7 +125,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
3787 + void config_group_init_type_name(struct config_group *group, const char *name,
3788 + struct config_item_type *type)
3789 + {
3790 +- config_item_set_name(&group->cg_item, name);
3791 ++ config_item_set_name(&group->cg_item, "%s", name);
3792 + group->cg_item.ci_type = type;
3793 + config_group_init(group);
3794 + }
3795 diff --git a/fs/coredump.c b/fs/coredump.c
3796 index bbbe139..b76fae5 100644
3797 --- a/fs/coredump.c
3798 @@ -81317,7 +81401,7 @@ index 0000000..8ee8e4f
3799 +}
3800 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
3801 new file mode 100644
3802 -index 0000000..f056b81
3803 +index 0000000..ed6ee43
3804 --- /dev/null
3805 +++ b/grsecurity/gracl_ip.c
3806 @@ -0,0 +1,386 @@
3807 @@ -81391,7 +81475,7 @@ index 0000000..f056b81
3808 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
3809 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
3810 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
3811 -+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
3812 ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg", "nfc", "vsock"
3813 + };
3814 +
3815 +const char *
3816 @@ -89034,9 +89118,18 @@ index 5d5aaae..0ea9b84 100644
3817 extern bool completion_done(struct completion *x);
3818
3819 diff --git a/include/linux/configfs.h b/include/linux/configfs.h
3820 -index 34025df..d94bbbc 100644
3821 +index 34025df..2a6ee32 100644
3822 --- a/include/linux/configfs.h
3823 +++ b/include/linux/configfs.h
3824 +@@ -64,7 +64,7 @@ struct config_item {
3825 + struct dentry *ci_dentry;
3826 + };
3827 +
3828 +-extern int config_item_set_name(struct config_item *, const char *, ...);
3829 ++extern __printf(2, 3) int config_item_set_name(struct config_item *, const char *, ...);
3830 +
3831 + static inline char *config_item_name(struct config_item * item)
3832 + {
3833 @@ -125,7 +125,7 @@ struct configfs_attribute {
3834 const char *ca_name;
3835 struct module *ca_owner;