Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: /
Date: Tue, 07 Jul 2015 00:45:00
Message-Id: 1436229879.0c2198a08e7e9db57b392782a846baf554f1d862.mpagano@gentoo
1 commit: 0c2198a08e7e9db57b392782a846baf554f1d862
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jul 7 00:44:39 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jul 7 00:44:39 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0c2198a0
7
8 Linux patch 3.14.47
9
10 0000_README | 4 +
11 1046_linux-3.14.47.patch | 1395 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1399 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index a04d242..7bd96c3 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -226,6 +226,10 @@ Patch: 1045_linux-3.14.46.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.14.46
21
22 +Patch: 1046_linux-3.14.47.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.14.47
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1046_linux-3.14.47.patch b/1046_linux-3.14.47.patch
31 new file mode 100644
32 index 0000000..862f253
33 --- /dev/null
34 +++ b/1046_linux-3.14.47.patch
35 @@ -0,0 +1,1395 @@
36 +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
37 +index 6cd63a9010fb..bc6d61773ee2 100644
38 +--- a/Documentation/virtual/kvm/api.txt
39 ++++ b/Documentation/virtual/kvm/api.txt
40 +@@ -2344,7 +2344,8 @@ should be created before this ioctl is invoked.
41 +
42 + Possible features:
43 + - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
44 +- Depends on KVM_CAP_ARM_PSCI.
45 ++ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
46 ++ and execute guest code when KVM_RUN is called.
47 + - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
48 + Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
49 +
50 +diff --git a/Makefile b/Makefile
51 +index def39fdd9df4..f9041e6d4d19 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 3
56 + PATCHLEVEL = 14
57 +-SUBLEVEL = 46
58 ++SUBLEVEL = 47
59 + EXTRAVERSION =
60 + NAME = Remembering Coco
61 +
62 +diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
63 +index 0fa90c962ac8..853e2becad18 100644
64 +--- a/arch/arm/include/asm/kvm_emulate.h
65 ++++ b/arch/arm/include/asm/kvm_emulate.h
66 +@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
67 + void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
68 + void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
69 +
70 ++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
71 ++{
72 ++ vcpu->arch.hcr = HCR_GUEST_MASK;
73 ++}
74 ++
75 + static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
76 + {
77 + return 1;
78 +diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
79 +index 0cbdb8ed71cf..9f7923193cda 100644
80 +--- a/arch/arm/include/asm/kvm_mmu.h
81 ++++ b/arch/arm/include/asm/kvm_mmu.h
82 +@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
83 + void free_boot_hyp_pgd(void);
84 + void free_hyp_pgds(void);
85 +
86 ++void stage2_unmap_vm(struct kvm *kvm);
87 + int kvm_alloc_stage2_pgd(struct kvm *kvm);
88 + void kvm_free_stage2_pgd(struct kvm *kvm);
89 + int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
90 +@@ -78,17 +79,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
91 + flush_pmd_entry(pte);
92 + }
93 +
94 +-static inline bool kvm_is_write_fault(unsigned long hsr)
95 +-{
96 +- unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
97 +- if (hsr_ec == HSR_EC_IABT)
98 +- return false;
99 +- else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
100 +- return false;
101 +- else
102 +- return true;
103 +-}
104 +-
105 + static inline void kvm_clean_pgd(pgd_t *pgd)
106 + {
107 + clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
108 +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
109 +index df6e75e47ae0..2e74a617147d 100644
110 +--- a/arch/arm/kvm/arm.c
111 ++++ b/arch/arm/kvm/arm.c
112 +@@ -220,6 +220,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
113 + int err;
114 + struct kvm_vcpu *vcpu;
115 +
116 ++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
117 ++ err = -EBUSY;
118 ++ goto out;
119 ++ }
120 ++
121 + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
122 + if (!vcpu) {
123 + err = -ENOMEM;
124 +@@ -427,9 +432,9 @@ static void update_vttbr(struct kvm *kvm)
125 +
126 + /* update vttbr to be used with the new vmid */
127 + pgd_phys = virt_to_phys(kvm->arch.pgd);
128 ++ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
129 + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
130 +- kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
131 +- kvm->arch.vttbr |= vmid;
132 ++ kvm->arch.vttbr = pgd_phys | vmid;
133 +
134 + spin_unlock(&kvm_vmid_lock);
135 + }
136 +@@ -676,10 +681,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
137 + return ret;
138 +
139 + /*
140 ++ * Ensure a rebooted VM will fault in RAM pages and detect if the
141 ++ * guest MMU is turned off and flush the caches as needed.
142 ++ */
143 ++ if (vcpu->arch.has_run_once)
144 ++ stage2_unmap_vm(vcpu->kvm);
145 ++
146 ++ vcpu_reset_hcr(vcpu);
147 ++
148 ++ /*
149 + * Handle the "start in power-off" case by marking the VCPU as paused.
150 + */
151 +- if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
152 ++ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
153 + vcpu->arch.pause = true;
154 ++ else
155 ++ vcpu->arch.pause = false;
156 +
157 + return 0;
158 + }
159 +@@ -825,7 +841,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
160 + switch (action) {
161 + case CPU_STARTING:
162 + case CPU_STARTING_FROZEN:
163 +- cpu_init_hyp_mode(NULL);
164 ++ if (__hyp_get_vectors() == hyp_default_vectors)
165 ++ cpu_init_hyp_mode(NULL);
166 + break;
167 + }
168 +
169 +diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
170 +index b23a59c1c522..2786eae10c0d 100644
171 +--- a/arch/arm/kvm/guest.c
172 ++++ b/arch/arm/kvm/guest.c
173 +@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
174 +
175 + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
176 + {
177 +- vcpu->arch.hcr = HCR_GUEST_MASK;
178 + return 0;
179 + }
180 +
181 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
182 +index 70ed2c1f57b0..524b4b57f650 100644
183 +--- a/arch/arm/kvm/mmu.c
184 ++++ b/arch/arm/kvm/mmu.c
185 +@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
186 + pgd = pgdp + pgd_index(addr);
187 + do {
188 + next = kvm_pgd_addr_end(addr, end);
189 +- unmap_puds(kvm, pgd, addr, next);
190 ++ if (!pgd_none(*pgd))
191 ++ unmap_puds(kvm, pgd, addr, next);
192 + } while (pgd++, addr = next, addr != end);
193 + }
194 +
195 +@@ -555,6 +556,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
196 + unmap_range(kvm, kvm->arch.pgd, start, size);
197 + }
198 +
199 ++static void stage2_unmap_memslot(struct kvm *kvm,
200 ++ struct kvm_memory_slot *memslot)
201 ++{
202 ++ hva_t hva = memslot->userspace_addr;
203 ++ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
204 ++ phys_addr_t size = PAGE_SIZE * memslot->npages;
205 ++ hva_t reg_end = hva + size;
206 ++
207 ++ /*
208 ++ * A memory region could potentially cover multiple VMAs, and any holes
209 ++ * between them, so iterate over all of them to find out if we should
210 ++ * unmap any of them.
211 ++ *
212 ++ *     +--------------------------------------------+
213 ++ * +---------------+----------------+   +----------------+
214 ++ * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
215 ++ * +---------------+----------------+   +----------------+
216 ++ * |               memory region                |
217 ++ * +--------------------------------------------+
218 ++ */
219 ++ do {
220 ++ struct vm_area_struct *vma = find_vma(current->mm, hva);
221 ++ hva_t vm_start, vm_end;
222 ++
223 ++ if (!vma || vma->vm_start >= reg_end)
224 ++ break;
225 ++
226 ++ /*
227 ++ * Take the intersection of this VMA with the memory region
228 ++ */
229 ++ vm_start = max(hva, vma->vm_start);
230 ++ vm_end = min(reg_end, vma->vm_end);
231 ++
232 ++ if (!(vma->vm_flags & VM_PFNMAP)) {
233 ++ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
234 ++ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
235 ++ }
236 ++ hva = vm_end;
237 ++ } while (hva < reg_end);
238 ++}
239 ++
240 ++/**
241 ++ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
242 ++ * @kvm: The struct kvm pointer
243 ++ *
244 ++ * Go through the memregions and unmap any regular RAM
245 ++ * backing memory already mapped to the VM.
246 ++ */
247 ++void stage2_unmap_vm(struct kvm *kvm)
248 ++{
249 ++ struct kvm_memslots *slots;
250 ++ struct kvm_memory_slot *memslot;
251 ++ int idx;
252 ++
253 ++ idx = srcu_read_lock(&kvm->srcu);
254 ++ spin_lock(&kvm->mmu_lock);
255 ++
256 ++ slots = kvm_memslots(kvm);
257 ++ kvm_for_each_memslot(memslot, slots)
258 ++ stage2_unmap_memslot(kvm, memslot);
259 ++
260 ++ spin_unlock(&kvm->mmu_lock);
261 ++ srcu_read_unlock(&kvm->srcu, idx);
262 ++}
263 ++
264 + /**
265 + * kvm_free_stage2_pgd - free all stage-2 tables
266 + * @kvm: The KVM struct pointer for the VM.
267 +@@ -746,6 +812,19 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
268 + return false;
269 + }
270 +
271 ++static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
272 ++{
273 ++ if (kvm_vcpu_trap_is_iabt(vcpu))
274 ++ return false;
275 ++
276 ++ return kvm_vcpu_dabt_iswrite(vcpu);
277 ++}
278 ++
279 ++static bool kvm_is_device_pfn(unsigned long pfn)
280 ++{
281 ++ return !pfn_valid(pfn);
282 ++}
283 ++
284 + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
285 + struct kvm_memory_slot *memslot,
286 + unsigned long fault_status)
287 +@@ -761,7 +840,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
288 + pfn_t pfn;
289 + pgprot_t mem_type = PAGE_S2;
290 +
291 +- write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
292 ++ write_fault = kvm_is_write_fault(vcpu);
293 + if (fault_status == FSC_PERM && !write_fault) {
294 + kvm_err("Unexpected L2 read permission error\n");
295 + return -EFAULT;
296 +@@ -770,6 +849,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
297 + /* Let's check if we will get back a huge page backed by hugetlbfs */
298 + down_read(&current->mm->mmap_sem);
299 + vma = find_vma_intersection(current->mm, hva, hva + 1);
300 ++ if (unlikely(!vma)) {
301 ++ kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
302 ++ up_read(&current->mm->mmap_sem);
303 ++ return -EFAULT;
304 ++ }
305 ++
306 + if (is_vm_hugetlb_page(vma)) {
307 + hugetlb = true;
308 + gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
309 +@@ -810,7 +895,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
310 + if (is_error_pfn(pfn))
311 + return -EFAULT;
312 +
313 +- if (kvm_is_mmio_pfn(pfn))
314 ++ if (kvm_is_device_pfn(pfn))
315 + mem_type = PAGE_S2_DEVICE;
316 +
317 + spin_lock(&kvm->mmu_lock);
318 +@@ -836,7 +921,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
319 + }
320 + coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
321 + ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
322 +- mem_type == PAGE_S2_DEVICE);
323 ++ pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
324 + }
325 +
326 +
327 +@@ -912,6 +997,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
328 +
329 + memslot = gfn_to_memslot(vcpu->kvm, gfn);
330 +
331 ++ /* Userspace should not be able to register out-of-bounds IPAs */
332 ++ VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
333 ++
334 + ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
335 + if (ret == 0)
336 + ret = 1;
337 +@@ -1136,6 +1224,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
338 + struct kvm_userspace_memory_region *mem,
339 + enum kvm_mr_change change)
340 + {
341 ++ /*
342 ++ * Prevent userspace from creating a memory region outside of the IPA
343 ++ * space addressable by the KVM guest IPA space.
344 ++ */
345 ++ if (memslot->base_gfn + memslot->npages >=
346 ++ (KVM_PHYS_SIZE >> PAGE_SHIFT))
347 ++ return -EFAULT;
348 ++
349 + return 0;
350 + }
351 +
352 +diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
353 +index 49fa9abd09da..7a7a09a5d5ff 100644
354 +--- a/arch/arm/mach-dove/board-dt.c
355 ++++ b/arch/arm/mach-dove/board-dt.c
356 +@@ -26,7 +26,7 @@ static void __init dove_dt_init(void)
357 + #ifdef CONFIG_CACHE_TAUROS2
358 + tauros2_init(0);
359 + #endif
360 +- BUG_ON(mvebu_mbus_dt_init());
361 ++ BUG_ON(mvebu_mbus_dt_init(false));
362 + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
363 + }
364 +
365 +diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
366 +index 01a5765a8b26..b509556f6cfd 100644
367 +--- a/arch/arm/mach-imx/clk-imx6q.c
368 ++++ b/arch/arm/mach-imx/clk-imx6q.c
369 +@@ -406,7 +406,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
370 + clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
371 + clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
372 + clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
373 +- clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
374 ++ clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
375 + clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
376 + clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
377 + clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
378 +diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
379 +index 78188159484d..79e629da1c92 100644
380 +--- a/arch/arm/mach-kirkwood/board-dt.c
381 ++++ b/arch/arm/mach-kirkwood/board-dt.c
382 +@@ -116,7 +116,7 @@ static void __init kirkwood_dt_init(void)
383 + */
384 + writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
385 +
386 +- BUG_ON(mvebu_mbus_dt_init());
387 ++ BUG_ON(mvebu_mbus_dt_init(false));
388 +
389 + kirkwood_l2_init();
390 +
391 +diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
392 +index f6c9d1d85c14..79c3766a56fd 100644
393 +--- a/arch/arm/mach-mvebu/armada-370-xp.c
394 ++++ b/arch/arm/mach-mvebu/armada-370-xp.c
395 +@@ -41,7 +41,7 @@ static void __init armada_370_xp_timer_and_clk_init(void)
396 + of_clk_init(NULL);
397 + clocksource_of_init();
398 + coherency_init();
399 +- BUG_ON(mvebu_mbus_dt_init());
400 ++ BUG_ON(mvebu_mbus_dt_init(coherency_available()));
401 + #ifdef CONFIG_CACHE_L2X0
402 + l2x0_of_init(0, ~0UL);
403 + #endif
404 +diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
405 +index c295c10f9217..49bad4d66fa2 100644
406 +--- a/arch/arm/mach-mvebu/coherency.c
407 ++++ b/arch/arm/mach-mvebu/coherency.c
408 +@@ -121,6 +121,20 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
409 + .notifier_call = mvebu_hwcc_platform_notifier,
410 + };
411 +
412 ++/*
413 ++ * Keep track of whether we have IO hardware coherency enabled or not.
414 ++ * On Armada 370's we will not be using it for example. We need to make
415 ++ * that available [through coherency_available()] so the mbus controller
416 ++ * doesn't enable the IO coherency bit in the attribute bits of the
417 ++ * chip selects.
418 ++ */
419 ++static int coherency_enabled;
420 ++
421 ++int coherency_available(void)
422 ++{
423 ++ return coherency_enabled;
424 ++}
425 ++
426 + int __init coherency_init(void)
427 + {
428 + struct device_node *np;
429 +@@ -164,6 +178,7 @@ int __init coherency_init(void)
430 + coherency_base = of_iomap(np, 0);
431 + coherency_cpu_base = of_iomap(np, 1);
432 + set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
433 ++ coherency_enabled = 1;
434 + of_node_put(np);
435 + }
436 +
437 +diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
438 +index 760226c41353..63e18c64a8e3 100644
439 +--- a/arch/arm/mach-mvebu/coherency.h
440 ++++ b/arch/arm/mach-mvebu/coherency.h
441 +@@ -17,6 +17,7 @@
442 + extern unsigned long coherency_phys_base;
443 +
444 + int set_cpu_coherent(unsigned int cpu_id, int smp_group_id);
445 ++int coherency_available(void);
446 + int coherency_init(void);
447 +
448 + #endif /* __MACH_370_XP_COHERENCY_H */
449 +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
450 +index 00fbaa75dc7b..ea68925a4480 100644
451 +--- a/arch/arm64/include/asm/kvm_arm.h
452 ++++ b/arch/arm64/include/asm/kvm_arm.h
453 +@@ -18,6 +18,7 @@
454 + #ifndef __ARM64_KVM_ARM_H__
455 + #define __ARM64_KVM_ARM_H__
456 +
457 ++#include <asm/memory.h>
458 + #include <asm/types.h>
459 +
460 + /* Hyp Configuration Register (HCR) bits */
461 +@@ -122,6 +123,17 @@
462 + #define VTCR_EL2_T0SZ_MASK 0x3f
463 + #define VTCR_EL2_T0SZ_40B 24
464 +
465 ++/*
466 ++ * We configure the Stage-2 page tables to always restrict the IPA space to be
467 ++ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
468 ++ * not known to exist and will break with this configuration.
469 ++ *
470 ++ * Note that when using 4K pages, we concatenate two first level page tables
471 ++ * together.
472 ++ *
473 ++ * The magic numbers used for VTTBR_X in this patch can be found in Tables
474 ++ * D4-23 and D4-25 in ARM DDI 0487A.b.
475 ++ */
476 + #ifdef CONFIG_ARM64_64K_PAGES
477 + /*
478 + * Stage2 translation configuration:
479 +@@ -151,9 +163,9 @@
480 + #endif
481 +
482 + #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
483 +-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
484 +-#define VTTBR_VMID_SHIFT (48LLU)
485 +-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
486 ++#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
487 ++#define VTTBR_VMID_SHIFT (UL(48))
488 ++#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
489 +
490 + /* Hyp System Trap Register */
491 + #define HSTR_EL2_TTEE (1 << 16)
492 +@@ -176,13 +188,13 @@
493 +
494 + /* Exception Syndrome Register (ESR) bits */
495 + #define ESR_EL2_EC_SHIFT (26)
496 +-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
497 +-#define ESR_EL2_IL (1U << 25)
498 ++#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
499 ++#define ESR_EL2_IL (UL(1) << 25)
500 + #define ESR_EL2_ISS (ESR_EL2_IL - 1)
501 + #define ESR_EL2_ISV_SHIFT (24)
502 +-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
503 ++#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
504 + #define ESR_EL2_SAS_SHIFT (22)
505 +-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
506 ++#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
507 + #define ESR_EL2_SSE (1 << 21)
508 + #define ESR_EL2_SRT_SHIFT (16)
509 + #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
510 +@@ -196,16 +208,16 @@
511 + #define ESR_EL2_FSC_TYPE (0x3c)
512 +
513 + #define ESR_EL2_CV_SHIFT (24)
514 +-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
515 ++#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
516 + #define ESR_EL2_COND_SHIFT (20)
517 +-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
518 ++#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
519 +
520 +
521 + #define FSC_FAULT (0x04)
522 + #define FSC_PERM (0x0c)
523 +
524 + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
525 +-#define HPFAR_MASK (~0xFUL)
526 ++#define HPFAR_MASK (~UL(0xf))
527 +
528 + #define ESR_EL2_EC_UNKNOWN (0x00)
529 + #define ESR_EL2_EC_WFI (0x01)
530 +diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
531 +index dd8ecfc3f995..681cb9080100 100644
532 +--- a/arch/arm64/include/asm/kvm_emulate.h
533 ++++ b/arch/arm64/include/asm/kvm_emulate.h
534 +@@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
535 + void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
536 + void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
537 +
538 ++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
539 ++{
540 ++ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
541 ++}
542 ++
543 + static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
544 + {
545 + return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
546 +diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
547 +index 8e138c7c53ac..0d51874c838f 100644
548 +--- a/arch/arm64/include/asm/kvm_mmu.h
549 ++++ b/arch/arm64/include/asm/kvm_mmu.h
550 +@@ -59,10 +59,9 @@
551 + #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
552 +
553 + /*
554 +- * Align KVM with the kernel's view of physical memory. Should be
555 +- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
556 ++ * We currently only support a 40bit IPA.
557 + */
558 +-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
559 ++#define KVM_PHYS_SHIFT (40)
560 + #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
561 + #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
562 +
563 +@@ -75,6 +74,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
564 + void free_boot_hyp_pgd(void);
565 + void free_hyp_pgds(void);
566 +
567 ++void stage2_unmap_vm(struct kvm *kvm);
568 + int kvm_alloc_stage2_pgd(struct kvm *kvm);
569 + void kvm_free_stage2_pgd(struct kvm *kvm);
570 + int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
571 +@@ -93,19 +93,6 @@ void kvm_clear_hyp_idmap(void);
572 + #define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
573 + #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
574 +
575 +-static inline bool kvm_is_write_fault(unsigned long esr)
576 +-{
577 +- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
578 +-
579 +- if (esr_ec == ESR_EL2_EC_IABT)
580 +- return false;
581 +-
582 +- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
583 +- return false;
584 +-
585 +- return true;
586 +-}
587 +-
588 + static inline void kvm_clean_pgd(pgd_t *pgd) {}
589 + static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
590 + static inline void kvm_clean_pte(pte_t *pte) {}
591 +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
592 +index 08745578d54d..a8d81fa8c527 100644
593 +--- a/arch/arm64/kvm/guest.c
594 ++++ b/arch/arm64/kvm/guest.c
595 +@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
596 +
597 + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
598 + {
599 +- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
600 + return 0;
601 + }
602 +
603 +diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
604 +index 3974881388bb..b76159a153a5 100644
605 +--- a/arch/arm64/mm/dma-mapping.c
606 ++++ b/arch/arm64/mm/dma-mapping.c
607 +@@ -54,8 +54,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
608 +
609 + *dma_handle = phys_to_dma(dev, page_to_phys(page));
610 + addr = page_address(page);
611 +- if (flags & __GFP_ZERO)
612 +- memset(addr, 0, size);
613 ++ memset(addr, 0, size);
614 + return addr;
615 + } else {
616 + return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
617 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
618 +index 2f645c90e4d8..5dab54accc56 100644
619 +--- a/arch/x86/Kconfig
620 ++++ b/arch/x86/Kconfig
621 +@@ -160,7 +160,7 @@ config SBUS
622 +
623 + config NEED_DMA_MAP_STATE
624 + def_bool y
625 +- depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
626 ++ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
627 +
628 + config NEED_SG_DMA_LENGTH
629 + def_bool y
630 +diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
631 +index 18f739129e72..43a07bf48dea 100644
632 +--- a/arch/x86/kernel/cpu/microcode/intel_early.c
633 ++++ b/arch/x86/kernel/cpu/microcode/intel_early.c
634 +@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
635 + unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
636 + int i;
637 +
638 +- while (leftover) {
639 ++ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
640 + mc_header = (struct microcode_header_intel *)ucode_ptr;
641 +
642 + mc_size = get_totalsize(mc_header);
643 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
644 +index a1f5b1866cbe..490fee15fea5 100644
645 +--- a/arch/x86/kernel/kprobes/core.c
646 ++++ b/arch/x86/kernel/kprobes/core.c
647 +@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
648 + {
649 + struct insn insn;
650 + kprobe_opcode_t buf[MAX_INSN_SIZE];
651 ++ int length;
652 +
653 + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
654 + insn_get_length(&insn);
655 ++ length = insn.length;
656 ++
657 + /* Another subsystem puts a breakpoint, failed to recover */
658 + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
659 + return 0;
660 +- memcpy(dest, insn.kaddr, insn.length);
661 ++ memcpy(dest, insn.kaddr, length);
662 +
663 + #ifdef CONFIG_X86_64
664 + if (insn_rip_relative(&insn)) {
665 +@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
666 + *(s32 *) disp = (s32) newdisp;
667 + }
668 + #endif
669 +- return insn.length;
670 ++ return length;
671 + }
672 +
673 + static int __kprobes arch_copy_kprobe(struct kprobe *p)
674 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
675 +index 9643eda60a52..074633411ea8 100644
676 +--- a/arch/x86/kvm/svm.c
677 ++++ b/arch/x86/kvm/svm.c
678 +@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
679 + {
680 + struct vcpu_svm *svm = to_svm(vcpu);
681 +
682 +- if (svm->vmcb->control.next_rip != 0)
683 ++ if (svm->vmcb->control.next_rip != 0) {
684 ++ WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
685 + svm->next_rip = svm->vmcb->control.next_rip;
686 ++ }
687 +
688 + if (!svm->next_rip) {
689 + if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
690 +@@ -4246,7 +4248,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
691 + break;
692 + }
693 +
694 +- vmcb->control.next_rip = info->next_rip;
695 ++ /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
696 ++ if (static_cpu_has(X86_FEATURE_NRIPS))
697 ++ vmcb->control.next_rip = info->next_rip;
698 + vmcb->control.exit_code = icpt_info.exit_code;
699 + vmexit = nested_svm_exit_handled(svm);
700 +
701 +diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
702 +index e990deed2d33..1aa0130a63d5 100644
703 +--- a/drivers/bus/mvebu-mbus.c
704 ++++ b/drivers/bus/mvebu-mbus.c
705 +@@ -701,7 +701,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
706 + phys_addr_t sdramwins_phys_base,
707 + size_t sdramwins_size)
708 + {
709 +- struct device_node *np;
710 + int win;
711 +
712 + mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
713 +@@ -714,12 +713,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
714 + return -ENOMEM;
715 + }
716 +
717 +- np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
718 +- if (np) {
719 +- mbus->hw_io_coherency = 1;
720 +- of_node_put(np);
721 +- }
722 +-
723 + for (win = 0; win < mbus->soc->num_wins; win++)
724 + mvebu_mbus_disable_window(mbus, win);
725 +
726 +@@ -889,7 +882,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
727 + }
728 + }
729 +
730 +-int __init mvebu_mbus_dt_init(void)
731 ++int __init mvebu_mbus_dt_init(bool is_coherent)
732 + {
733 + struct resource mbuswins_res, sdramwins_res;
734 + struct device_node *np, *controller;
735 +@@ -928,6 +921,8 @@ int __init mvebu_mbus_dt_init(void)
736 + return -EINVAL;
737 + }
738 +
739 ++ mbus_state.hw_io_coherency = is_coherent;
740 ++
741 + /* Get optional pcie-{mem,io}-aperture properties */
742 + mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
743 + &mbus_state.pcie_io_aperture);
744 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
745 +index c611bcc01f7e..3e623ab5e315 100644
746 +--- a/drivers/edac/sb_edac.c
747 ++++ b/drivers/edac/sb_edac.c
748 +@@ -765,7 +765,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
749 + u32 reg;
750 + u64 limit, prv = 0;
751 + u64 tmp_mb;
752 +- u32 mb, kb;
753 ++ u32 gb, mb;
754 + u32 rir_way;
755 +
756 + /*
757 +@@ -775,15 +775,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
758 + pvt->tolm = pvt->info.get_tolm(pvt);
759 + tmp_mb = (1 + pvt->tolm) >> 20;
760 +
761 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
762 +- edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
763 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
764 ++ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
765 ++ gb, (mb*1000)/1024, (u64)pvt->tolm);
766 +
767 + /* Address range is already 45:25 */
768 + pvt->tohm = pvt->info.get_tohm(pvt);
769 + tmp_mb = (1 + pvt->tohm) >> 20;
770 +
771 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
772 +- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
773 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
774 ++ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
775 ++ gb, (mb*1000)/1024, (u64)pvt->tohm);
776 +
777 + /*
778 + * Step 2) Get SAD range and SAD Interleave list
779 +@@ -805,11 +807,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
780 + break;
781 +
782 + tmp_mb = (limit + 1) >> 20;
783 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
784 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
785 + edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
786 + n_sads,
787 + get_dram_attr(reg),
788 +- mb, kb,
789 ++ gb, (mb*1000)/1024,
790 + ((u64)tmp_mb) << 20L,
791 + INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
792 + reg);
793 +@@ -840,9 +842,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
794 + break;
795 + tmp_mb = (limit + 1) >> 20;
796 +
797 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
798 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
799 + edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
800 +- n_tads, mb, kb,
801 ++ n_tads, gb, (mb*1000)/1024,
802 + ((u64)tmp_mb) << 20L,
803 + (u32)TAD_SOCK(reg),
804 + (u32)TAD_CH(reg),
805 +@@ -865,10 +867,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
806 + tad_ch_nilv_offset[j],
807 + &reg);
808 + tmp_mb = TAD_OFFSET(reg) >> 20;
809 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
810 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
811 + edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
812 + i, j,
813 +- mb, kb,
814 ++ gb, (mb*1000)/1024,
815 + ((u64)tmp_mb) << 20L,
816 + reg);
817 + }
818 +@@ -890,10 +892,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
819 +
820 + tmp_mb = RIR_LIMIT(reg) >> 20;
821 + rir_way = 1 << RIR_WAY(reg);
822 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
823 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
824 + edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
825 + i, j,
826 +- mb, kb,
827 ++ gb, (mb*1000)/1024,
828 + ((u64)tmp_mb) << 20L,
829 + rir_way,
830 + reg);
831 +@@ -904,10 +906,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
832 + &reg);
833 + tmp_mb = RIR_OFFSET(reg) << 6;
834 +
835 +- mb = div_u64_rem(tmp_mb, 1000, &kb);
836 ++ gb = div_u64_rem(tmp_mb, 1024, &mb);
837 + edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
838 + i, j, k,
839 +- mb, kb,
840 ++ gb, (mb*1000)/1024,
841 + ((u64)tmp_mb) << 20L,
842 + (u32)RIR_RNK_TGT(reg),
843 + reg);
844 +@@ -945,7 +947,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
845 + u8 ch_way, sck_way, pkg, sad_ha = 0;
846 + u32 tad_offset;
847 + u32 rir_way;
848 +- u32 mb, kb;
849 ++ u32 mb, gb;
850 + u64 ch_addr, offset, limit = 0, prv = 0;
851 +
852 +
853 +@@ -1183,10 +1185,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
854 + continue;
855 +
856 + limit = RIR_LIMIT(reg);
857 +- mb = div_u64_rem(limit >> 20, 1000, &kb);
858 ++ gb = div_u64_rem(limit >> 20, 1024, &mb);
859 + edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
860 + n_rir,
861 +- mb, kb,
862 ++ gb, (mb*1000)/1024,
863 + limit,
864 + 1 << RIR_WAY(reg));
865 + if (ch_addr <= limit)
866 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
867 +index 019a04a31384..a467261b10b9 100644
868 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
869 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
870 +@@ -810,8 +810,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
871 + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
872 + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
873 + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
874 +- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
875 +- MLX4_WQE_CTRL_TCP_UDP_CSUM);
876 ++ if (!skb->encapsulation)
877 ++ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
878 ++ MLX4_WQE_CTRL_TCP_UDP_CSUM);
879 ++ else
880 ++ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
881 + ring->tx_csum++;
882 + }
883 +
884 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
885 +index 528bff5ec91f..85d370e1ca79 100644
886 +--- a/drivers/scsi/hpsa.c
887 ++++ b/drivers/scsi/hpsa.c
888 +@@ -3984,10 +3984,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
889 +
890 + /* Save the PCI command register */
891 + pci_read_config_word(pdev, 4, &command_register);
892 +- /* Turn the board off. This is so that later pci_restore_state()
893 +- * won't turn the board on before the rest of config space is ready.
894 +- */
895 +- pci_disable_device(pdev);
896 + pci_save_state(pdev);
897 +
898 + /* find the first memory BAR, so we can find the cfg table */
899 +@@ -4035,11 +4031,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
900 + goto unmap_cfgtable;
901 +
902 + pci_restore_state(pdev);
903 +- rc = pci_enable_device(pdev);
904 +- if (rc) {
905 +- dev_warn(&pdev->dev, "failed to enable device.\n");
906 +- goto unmap_cfgtable;
907 +- }
908 + pci_write_config_word(pdev, 4, command_register);
909 +
910 + /* Some devices (notably the HP Smart Array 5i Controller)
911 +@@ -4525,6 +4516,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
912 + if (!reset_devices)
913 + return 0;
914 +
915 ++ /* kdump kernel is loading, we don't know in which state is
916 ++ * the pci interface. The dev->enable_cnt is equal zero
917 ++ * so we call enable+disable, wait a while and switch it on.
918 ++ */
919 ++ rc = pci_enable_device(pdev);
920 ++ if (rc) {
921 ++ dev_warn(&pdev->dev, "Failed to enable PCI device\n");
922 ++ return -ENODEV;
923 ++ }
924 ++ pci_disable_device(pdev);
925 ++ msleep(260); /* a randomly chosen number */
926 ++ rc = pci_enable_device(pdev);
927 ++ if (rc) {
928 ++ dev_warn(&pdev->dev, "failed to enable device.\n");
929 ++ return -ENODEV;
930 ++ }
931 ++ pci_set_master(pdev);
932 + /* Reset the controller with a PCI power-cycle or via doorbell */
933 + rc = hpsa_kdump_hard_reset_controller(pdev);
934 +
935 +@@ -4533,10 +4541,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
936 + * "performant mode". Or, it might be 640x, which can't reset
937 + * due to concerns about shared bbwc between 6402/6404 pair.
938 + */
939 +- if (rc == -ENOTSUPP)
940 +- return rc; /* just try to do the kdump anyhow. */
941 +- if (rc)
942 +- return -ENODEV;
943 ++ if (rc) {
944 ++ if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
945 ++ rc = -ENODEV;
946 ++ goto out_disable;
947 ++ }
948 +
949 + /* Now try to get the controller to respond to a no-op */
950 + dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
951 +@@ -4547,7 +4556,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
952 + dev_warn(&pdev->dev, "no-op failed%s\n",
953 + (i < 11 ? "; re-trying" : ""));
954 + }
955 +- return 0;
956 ++
957 ++out_disable:
958 ++
959 ++ pci_disable_device(pdev);
960 ++ return rc;
961 + }
962 +
963 + static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
964 +@@ -4690,6 +4703,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
965 + iounmap(h->transtable);
966 + if (h->cfgtable)
967 + iounmap(h->cfgtable);
968 ++ pci_disable_device(h->pdev);
969 + pci_release_regions(h->pdev);
970 + kfree(h);
971 + }
972 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
973 +index 93de3ba994e7..f8ffee4562d3 100644
974 +--- a/fs/btrfs/ctree.c
975 ++++ b/fs/btrfs/ctree.c
976 +@@ -2963,7 +2963,7 @@ done:
977 + */
978 + if (!p->leave_spinning)
979 + btrfs_set_path_blocking(p);
980 +- if (ret < 0)
981 ++ if (ret < 0 && !p->skip_release_on_error)
982 + btrfs_release_path(p);
983 + return ret;
984 + }
985 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
986 +index d3511cc17091..3b39eb4cb309 100644
987 +--- a/fs/btrfs/ctree.h
988 ++++ b/fs/btrfs/ctree.h
989 +@@ -608,6 +608,7 @@ struct btrfs_path {
990 + unsigned int skip_locking:1;
991 + unsigned int leave_spinning:1;
992 + unsigned int search_commit_root:1;
993 ++ unsigned int skip_release_on_error:1;
994 + };
995 +
996 + /*
997 +@@ -3609,6 +3610,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
998 + int verify_dir_item(struct btrfs_root *root,
999 + struct extent_buffer *leaf,
1000 + struct btrfs_dir_item *dir_item);
1001 ++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1002 ++ struct btrfs_path *path,
1003 ++ const char *name,
1004 ++ int name_len);
1005 +
1006 + /* orphan.c */
1007 + int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
1008 +diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
1009 +index a0691df5dcea..9521a93b5303 100644
1010 +--- a/fs/btrfs/dir-item.c
1011 ++++ b/fs/btrfs/dir-item.c
1012 +@@ -21,10 +21,6 @@
1013 + #include "hash.h"
1014 + #include "transaction.h"
1015 +
1016 +-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1017 +- struct btrfs_path *path,
1018 +- const char *name, int name_len);
1019 +-
1020 + /*
1021 + * insert a name into a directory, doing overflow properly if there is a hash
1022 + * collision. data_size indicates how big the item inserted should be. On
1023 +@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1024 + * this walks through all the entries in a dir item and finds one
1025 + * for a specific name.
1026 + */
1027 +-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1028 +- struct btrfs_path *path,
1029 +- const char *name, int name_len)
1030 ++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1031 ++ struct btrfs_path *path,
1032 ++ const char *name, int name_len)
1033 + {
1034 + struct btrfs_dir_item *dir_item;
1035 + unsigned long name_ptr;
1036 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
1037 +index 488e987c3374..618e86ceede7 100644
1038 +--- a/fs/btrfs/xattr.c
1039 ++++ b/fs/btrfs/xattr.c
1040 +@@ -29,6 +29,7 @@
1041 + #include "xattr.h"
1042 + #include "disk-io.h"
1043 + #include "props.h"
1044 ++#include "locking.h"
1045 +
1046 +
1047 + ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
1048 +@@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
1049 + struct inode *inode, const char *name,
1050 + const void *value, size_t size, int flags)
1051 + {
1052 +- struct btrfs_dir_item *di;
1053 ++ struct btrfs_dir_item *di = NULL;
1054 + struct btrfs_root *root = BTRFS_I(inode)->root;
1055 + struct btrfs_path *path;
1056 + size_t name_len = strlen(name);
1057 +@@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
1058 + path = btrfs_alloc_path();
1059 + if (!path)
1060 + return -ENOMEM;
1061 ++ path->skip_release_on_error = 1;
1062 ++
1063 ++ if (!value) {
1064 ++ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
1065 ++ name, name_len, -1);
1066 ++ if (!di && (flags & XATTR_REPLACE))
1067 ++ ret = -ENODATA;
1068 ++ else if (di)
1069 ++ ret = btrfs_delete_one_dir_name(trans, root, path, di);
1070 ++ goto out;
1071 ++ }
1072 +
1073 ++ /*
1074 ++ * For a replace we can't just do the insert blindly.
1075 ++ * Do a lookup first (read-only btrfs_search_slot), and return if xattr
1076 ++ * doesn't exist. If it exists, fall down below to the insert/replace
1077 ++ * path - we can't race with a concurrent xattr delete, because the VFS
1078 ++ * locks the inode's i_mutex before calling setxattr or removexattr.
1079 ++ */
1080 + if (flags & XATTR_REPLACE) {
1081 +- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
1082 +- name_len, -1);
1083 +- if (IS_ERR(di)) {
1084 +- ret = PTR_ERR(di);
1085 +- goto out;
1086 +- } else if (!di) {
1087 ++ ASSERT(mutex_is_locked(&inode->i_mutex));
1088 ++ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
1089 ++ name, name_len, 0);
1090 ++ if (!di) {
1091 + ret = -ENODATA;
1092 + goto out;
1093 + }
1094 +- ret = btrfs_delete_one_dir_name(trans, root, path, di);
1095 +- if (ret)
1096 +- goto out;
1097 + btrfs_release_path(path);
1098 ++ di = NULL;
1099 ++ }
1100 +
1101 ++ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
1102 ++ name, name_len, value, size);
1103 ++ if (ret == -EOVERFLOW) {
1104 + /*
1105 +- * remove the attribute
1106 ++ * We have an existing item in a leaf, split_leaf couldn't
1107 ++ * expand it. That item might have or not a dir_item that
1108 ++ * matches our target xattr, so lets check.
1109 + */
1110 +- if (!value)
1111 +- goto out;
1112 +- } else {
1113 +- di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
1114 +- name, name_len, 0);
1115 +- if (IS_ERR(di)) {
1116 +- ret = PTR_ERR(di);
1117 ++ ret = 0;
1118 ++ btrfs_assert_tree_locked(path->nodes[0]);
1119 ++ di = btrfs_match_dir_item_name(root, path, name, name_len);
1120 ++ if (!di && !(flags & XATTR_REPLACE)) {
1121 ++ ret = -ENOSPC;
1122 + goto out;
1123 + }
1124 +- if (!di && !value)
1125 +- goto out;
1126 +- btrfs_release_path(path);
1127 ++ } else if (ret == -EEXIST) {
1128 ++ ret = 0;
1129 ++ di = btrfs_match_dir_item_name(root, path, name, name_len);
1130 ++ ASSERT(di); /* logic error */
1131 ++ } else if (ret) {
1132 ++ goto out;
1133 + }
1134 +
1135 +-again:
1136 +- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
1137 +- name, name_len, value, size);
1138 +- /*
1139 +- * If we're setting an xattr to a new value but the new value is say
1140 +- * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
1141 +- * back from split_leaf. This is because it thinks we'll be extending
1142 +- * the existing item size, but we're asking for enough space to add the
1143 +- * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
1144 +- * the rest of the function figure it out.
1145 +- */
1146 +- if (ret == -EOVERFLOW)
1147 ++ if (di && (flags & XATTR_CREATE)) {
1148 + ret = -EEXIST;
1149 ++ goto out;
1150 ++ }
1151 +
1152 +- if (ret == -EEXIST) {
1153 +- if (flags & XATTR_CREATE)
1154 +- goto out;
1155 ++ if (di) {
1156 + /*
1157 +- * We can't use the path we already have since we won't have the
1158 +- * proper locking for a delete, so release the path and
1159 +- * re-lookup to delete the thing.
1160 ++ * We're doing a replace, and it must be atomic, that is, at
1161 ++ * any point in time we have either the old or the new xattr
1162 ++ * value in the tree. We don't want readers (getxattr and
1163 ++ * listxattrs) to miss a value, this is specially important
1164 ++ * for ACLs.
1165 + */
1166 +- btrfs_release_path(path);
1167 +- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
1168 +- name, name_len, -1);
1169 +- if (IS_ERR(di)) {
1170 +- ret = PTR_ERR(di);
1171 +- goto out;
1172 +- } else if (!di) {
1173 +- /* Shouldn't happen but just in case... */
1174 +- btrfs_release_path(path);
1175 +- goto again;
1176 ++ const int slot = path->slots[0];
1177 ++ struct extent_buffer *leaf = path->nodes[0];
1178 ++ const u16 old_data_len = btrfs_dir_data_len(leaf, di);
1179 ++ const u32 item_size = btrfs_item_size_nr(leaf, slot);
1180 ++ const u32 data_size = sizeof(*di) + name_len + size;
1181 ++ struct btrfs_item *item;
1182 ++ unsigned long data_ptr;
1183 ++ char *ptr;
1184 ++
1185 ++ if (size > old_data_len) {
1186 ++ if (btrfs_leaf_free_space(root, leaf) <
1187 ++ (size - old_data_len)) {
1188 ++ ret = -ENOSPC;
1189 ++ goto out;
1190 ++ }
1191 + }
1192 +
1193 +- ret = btrfs_delete_one_dir_name(trans, root, path, di);
1194 +- if (ret)
1195 +- goto out;
1196 ++ if (old_data_len + name_len + sizeof(*di) == item_size) {
1197 ++ /* No other xattrs packed in the same leaf item. */
1198 ++ if (size > old_data_len)
1199 ++ btrfs_extend_item(root, path,
1200 ++ size - old_data_len);
1201 ++ else if (size < old_data_len)
1202 ++ btrfs_truncate_item(root, path, data_size, 1);
1203 ++ } else {
1204 ++ /* There are other xattrs packed in the same item. */
1205 ++ ret = btrfs_delete_one_dir_name(trans, root, path, di);
1206 ++ if (ret)
1207 ++ goto out;
1208 ++ btrfs_extend_item(root, path, data_size);
1209 ++ }
1210 +
1211 ++ item = btrfs_item_nr(slot);
1212 ++ ptr = btrfs_item_ptr(leaf, slot, char);
1213 ++ ptr += btrfs_item_size(leaf, item) - data_size;
1214 ++ di = (struct btrfs_dir_item *)ptr;
1215 ++ btrfs_set_dir_data_len(leaf, di, size);
1216 ++ data_ptr = ((unsigned long)(di + 1)) + name_len;
1217 ++ write_extent_buffer(leaf, value, data_ptr, size);
1218 ++ btrfs_mark_buffer_dirty(leaf);
1219 ++ } else {
1220 + /*
1221 +- * We have a value to set, so go back and try to insert it now.
1222 ++ * Insert, and we had space for the xattr, so path->slots[0] is
1223 ++ * where our xattr dir_item is and btrfs_insert_xattr_item()
1224 ++ * filled it.
1225 + */
1226 +- if (value) {
1227 +- btrfs_release_path(path);
1228 +- goto again;
1229 +- }
1230 + }
1231 + out:
1232 + btrfs_free_path(path);
1233 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1234 +index 7fe30f655aa5..35f54bc96519 100644
1235 +--- a/fs/ocfs2/file.c
1236 ++++ b/fs/ocfs2/file.c
1237 +@@ -2478,9 +2478,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1238 + struct address_space *mapping = out->f_mapping;
1239 + struct inode *inode = mapping->host;
1240 + struct splice_desc sd = {
1241 +- .total_len = len,
1242 + .flags = flags,
1243 +- .pos = *ppos,
1244 + .u.file = out,
1245 + };
1246 +
1247 +@@ -2490,6 +2488,12 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1248 + out->f_path.dentry->d_name.len,
1249 + out->f_path.dentry->d_name.name, len);
1250 +
1251 ++ ret = generic_write_checks(out, ppos, &len, 0);
1252 ++ if (ret)
1253 ++ return ret;
1254 ++ sd.total_len = len;
1255 ++ sd.pos = *ppos;
1256 ++
1257 + pipe_lock(pipe);
1258 +
1259 + splice_from_pipe_begin(&sd);
1260 +diff --git a/fs/splice.c b/fs/splice.c
1261 +index 12028fa41def..f345d53f94da 100644
1262 +--- a/fs/splice.c
1263 ++++ b/fs/splice.c
1264 +@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
1265 + struct address_space *mapping = out->f_mapping;
1266 + struct inode *inode = mapping->host;
1267 + struct splice_desc sd = {
1268 +- .total_len = len,
1269 + .flags = flags,
1270 +- .pos = *ppos,
1271 + .u.file = out,
1272 + };
1273 + ssize_t ret;
1274 +
1275 ++ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
1276 ++ if (ret)
1277 ++ return ret;
1278 ++ sd.total_len = len;
1279 ++ sd.pos = *ppos;
1280 ++
1281 + pipe_lock(pipe);
1282 +
1283 + splice_from_pipe_begin(&sd);
1284 +diff --git a/include/linux/mbus.h b/include/linux/mbus.h
1285 +index 345b8c53b897..550c88fb0267 100644
1286 +--- a/include/linux/mbus.h
1287 ++++ b/include/linux/mbus.h
1288 +@@ -73,6 +73,6 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size);
1289 + int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
1290 + size_t mbus_size, phys_addr_t sdram_phys_base,
1291 + size_t sdram_size);
1292 +-int mvebu_mbus_dt_init(void);
1293 ++int mvebu_mbus_dt_init(bool is_coherent);
1294 +
1295 + #endif /* __LINUX_MBUS_H */
1296 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
1297 +index c68e5e0628df..99de2409f731 100644
1298 +--- a/net/netfilter/nf_tables_api.c
1299 ++++ b/net/netfilter/nf_tables_api.c
1300 +@@ -855,7 +855,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1301 +
1302 + if (nla[NFTA_CHAIN_POLICY]) {
1303 + if ((chain != NULL &&
1304 +- !(chain->flags & NFT_BASE_CHAIN)) ||
1305 ++ !(chain->flags & NFT_BASE_CHAIN)))
1306 ++ return -EOPNOTSUPP;
1307 ++
1308 ++ if (chain == NULL &&
1309 + nla[NFTA_CHAIN_HOOK] == NULL)
1310 + return -EOPNOTSUPP;
1311 +
1312 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
1313 +index 9e287cb56a04..54330fb5efaf 100644
1314 +--- a/net/netfilter/nfnetlink_cthelper.c
1315 ++++ b/net/netfilter/nfnetlink_cthelper.c
1316 +@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
1317 + if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
1318 + return -EINVAL;
1319 +
1320 ++ /* Not all fields are initialized so first zero the tuple */
1321 ++ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
1322 ++
1323 + tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
1324 + tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
1325 +
1326 +@@ -86,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
1327 + static int
1328 + nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
1329 + {
1330 +- const struct nf_conn_help *help = nfct_help(ct);
1331 ++ struct nf_conn_help *help = nfct_help(ct);
1332 +
1333 + if (attr == NULL)
1334 + return -EINVAL;
1335 +@@ -94,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
1336 + if (help->helper->data_len == 0)
1337 + return -EINVAL;
1338 +
1339 +- memcpy(&help->data, nla_data(attr), help->helper->data_len);
1340 ++ memcpy(help->data, nla_data(attr), help->helper->data_len);
1341 + return 0;
1342 + }
1343 +
1344 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
1345 +index 7350723aeb15..969589590814 100644
1346 +--- a/net/netfilter/nft_compat.c
1347 ++++ b/net/netfilter/nft_compat.c
1348 +@@ -82,6 +82,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
1349 + entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
1350 + break;
1351 + case AF_INET6:
1352 ++ if (proto)
1353 ++ entry->e6.ipv6.flags |= IP6T_F_PROTO;
1354 ++
1355 + entry->e6.ipv6.proto = proto;
1356 + entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
1357 + break;
1358 +@@ -313,6 +316,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
1359 + entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
1360 + break;
1361 + case AF_INET6:
1362 ++ if (proto)
1363 ++ entry->e6.ipv6.flags |= IP6T_F_PROTO;
1364 ++
1365 + entry->e6.ipv6.proto = proto;
1366 + entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
1367 + break;
1368 +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
1369 +index 1316e558db64..c324a52bb407 100644
1370 +--- a/virt/kvm/arm/vgic.c
1371 ++++ b/virt/kvm/arm/vgic.c
1372 +@@ -674,7 +674,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
1373 + {
1374 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1375 + int sgi;
1376 +- int min_sgi = (offset & ~0x3) * 4;
1377 ++ int min_sgi = (offset & ~0x3);
1378 + int max_sgi = min_sgi + 3;
1379 + int vcpu_id = vcpu->vcpu_id;
1380 + u32 reg = 0;
1381 +@@ -695,7 +695,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
1382 + {
1383 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1384 + int sgi;
1385 +- int min_sgi = (offset & ~0x3) * 4;
1386 ++ int min_sgi = (offset & ~0x3);
1387 + int max_sgi = min_sgi + 3;
1388 + int vcpu_id = vcpu->vcpu_id;
1389 + u32 reg;
1390 +@@ -1387,7 +1387,8 @@ out:
1391 + int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1392 + bool level)
1393 + {
1394 +- if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
1395 ++ if (likely(vgic_initialized(kvm)) &&
1396 ++ vgic_update_irq_state(kvm, cpuid, irq_num, level))
1397 + vgic_kick_vcpus(kvm);
1398 +
1399 + return 0;
1400 +@@ -1610,7 +1611,7 @@ out:
1401 +
1402 + int kvm_vgic_create(struct kvm *kvm)
1403 + {
1404 +- int i, vcpu_lock_idx = -1, ret = 0;
1405 ++ int i, vcpu_lock_idx = -1, ret;
1406 + struct kvm_vcpu *vcpu;
1407 +
1408 + mutex_lock(&kvm->lock);
1409 +@@ -1625,6 +1626,7 @@ int kvm_vgic_create(struct kvm *kvm)
1410 + * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
1411 + * that no other VCPUs are run while we create the vgic.
1412 + */
1413 ++ ret = -EBUSY;
1414 + kvm_for_each_vcpu(i, vcpu, kvm) {
1415 + if (!mutex_trylock(&vcpu->mutex))
1416 + goto out_unlock;
1417 +@@ -1632,11 +1634,10 @@ int kvm_vgic_create(struct kvm *kvm)
1418 + }
1419 +
1420 + kvm_for_each_vcpu(i, vcpu, kvm) {
1421 +- if (vcpu->arch.has_run_once) {
1422 +- ret = -EBUSY;
1423 ++ if (vcpu->arch.has_run_once)
1424 + goto out_unlock;
1425 +- }
1426 + }
1427 ++ ret = 0;
1428 +
1429 + spin_lock_init(&kvm->arch.vgic.lock);
1430 + kvm->arch.vgic.vctrl_base = vgic_vctrl_base;