commit:     99e4cd48b7fc7ffe5e9689983a0f607689471d1a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 14 23:33:59 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 14 23:33:59 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=99e4cd48

Linux patch 4.4.214

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1213_linux-4.4.214.patch | 11112 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11116 insertions(+)

diff --git a/0000_README b/0000_README
index fb1c3ff..a0335a4 100644
--- a/0000_README
+++ b/0000_README
@@ -895,6 +895,10 @@ Patch: 1212_linux-4.4.213.patch
 From: http://www.kernel.org
 Desc: Linux 4.4.213
 
+Patch: 1213_linux-4.4.214.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.214
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1213_linux-4.4.214.patch b/1213_linux-4.4.214.patch
new file mode 100644
index 0000000..d9f280f
--- /dev/null
+++ b/1213_linux-4.4.214.patch
@@ -0,0 +1,11112 @@
+diff --git a/Makefile b/Makefile
+index 6e86896525d9..89f09ef4c552 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 213
++SUBLEVEL = 214
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
+index 44a578c10732..2f52e584f3f7 100644
+--- a/arch/arc/boot/dts/axs10x_mb.dtsi
++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
+@@ -44,6 +44,7 @@
+ interrupt-names = "macirq";
+ phy-mode = "rgmii";
+ snps,pbl = < 32 >;
++ snps,multicast-filter-bins = <256>;
+ clocks = <&apbclk>;
+ clock-names = "stmmaceth";
+ max-speed = <100>;
+diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
+index a53279160f98..6b1894400ccc 100644
+--- a/arch/arm/boot/dts/sama5d3.dtsi
++++ b/arch/arm/boot/dts/sama5d3.dtsi
+@@ -1106,49 +1106,49 @@
+ usart0_clk: usart0_clk {
+ #clock-cells = <0>;
+ reg = <12>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ usart1_clk: usart1_clk {
+ #clock-cells = <0>;
+ reg = <13>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ usart2_clk: usart2_clk {
+ #clock-cells = <0>;
+ reg = <14>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ usart3_clk: usart3_clk {
+ #clock-cells = <0>;
+ reg = <15>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ uart0_clk: uart0_clk {
+ #clock-cells = <0>;
+ reg = <16>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ twi0_clk: twi0_clk {
+ reg = <18>;
+ #clock-cells = <0>;
+- atmel,clk-output-range = <0 16625000>;
++ atmel,clk-output-range = <0 41500000>;
+ };
+
+ twi1_clk: twi1_clk {
+ #clock-cells = <0>;
+ reg = <19>;
+- atmel,clk-output-range = <0 16625000>;
++ atmel,clk-output-range = <0 41500000>;
+ };
+
+ twi2_clk: twi2_clk {
+ #clock-cells = <0>;
+ reg = <20>;
+- atmel,clk-output-range = <0 16625000>;
++ atmel,clk-output-range = <0 41500000>;
+ };
+
+ mci0_clk: mci0_clk {
+@@ -1164,19 +1164,19 @@
+ spi0_clk: spi0_clk {
+ #clock-cells = <0>;
+ reg = <24>;
+- atmel,clk-output-range = <0 133000000>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+
+ spi1_clk: spi1_clk {
+ #clock-cells = <0>;
+ reg = <25>;
+- atmel,clk-output-range = <0 133000000>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+
+ tcb0_clk: tcb0_clk {
+ #clock-cells = <0>;
+ reg = <26>;
+- atmel,clk-output-range = <0 133000000>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+
+ pwm_clk: pwm_clk {
+@@ -1187,7 +1187,7 @@
+ adc_clk: adc_clk {
+ #clock-cells = <0>;
+ reg = <29>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ dma0_clk: dma0_clk {
+@@ -1218,13 +1218,13 @@
+ ssc0_clk: ssc0_clk {
+ #clock-cells = <0>;
+ reg = <38>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ ssc1_clk: ssc1_clk {
+ #clock-cells = <0>;
+ reg = <39>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ sha_clk: sha_clk {
172 |
+diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi |
173 |
+index c5a3772741bf..0fac79f75c06 100644 |
174 |
+--- a/arch/arm/boot/dts/sama5d3_can.dtsi |
175 |
++++ b/arch/arm/boot/dts/sama5d3_can.dtsi |
176 |
+@@ -37,13 +37,13 @@ |
177 |
+ can0_clk: can0_clk { |
178 |
+ #clock-cells = <0>; |
179 |
+ reg = <40>; |
180 |
+- atmel,clk-output-range = <0 66000000>; |
181 |
++ atmel,clk-output-range = <0 83000000>; |
182 |
+ }; |
183 |
+ |
184 |
+ can1_clk: can1_clk { |
185 |
+ #clock-cells = <0>; |
186 |
+ reg = <41>; |
187 |
+- atmel,clk-output-range = <0 66000000>; |
188 |
++ atmel,clk-output-range = <0 83000000>; |
189 |
+ }; |
190 |
+ }; |
191 |
+ }; |
192 |
+diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi |
193 |
+index 801f9745e82f..b80dbc45a3c2 100644 |
194 |
+--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi |
195 |
++++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi |
196 |
+@@ -23,6 +23,7 @@ |
197 |
+ tcb1_clk: tcb1_clk { |
198 |
+ #clock-cells = <0>; |
199 |
+ reg = <27>; |
200 |
++ atmel,clk-output-range = <0 166000000>; |
201 |
+ }; |
202 |
+ }; |
203 |
+ }; |
204 |
+diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi |
205 |
+index 2511d748867b..71818c7bfb67 100644 |
206 |
+--- a/arch/arm/boot/dts/sama5d3_uart.dtsi |
207 |
++++ b/arch/arm/boot/dts/sama5d3_uart.dtsi |
208 |
+@@ -42,13 +42,13 @@ |
209 |
+ uart0_clk: uart0_clk { |
210 |
+ #clock-cells = <0>; |
211 |
+ reg = <16>; |
212 |
+- atmel,clk-output-range = <0 66000000>; |
213 |
++ atmel,clk-output-range = <0 83000000>; |
214 |
+ }; |
215 |
+ |
216 |
+ uart1_clk: uart1_clk { |
217 |
+ #clock-cells = <0>; |
218 |
+ reg = <17>; |
219 |
+- atmel,clk-output-range = <0 66000000>; |
220 |
++ atmel,clk-output-range = <0 83000000>; |
221 |
+ }; |
222 |
+ }; |
223 |
+ }; |
224 |
+diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S |
225 |
+index 9a2f0b051e10..c6cf775975a2 100644 |
226 |
+--- a/arch/arm/mach-tegra/sleep-tegra30.S |
227 |
++++ b/arch/arm/mach-tegra/sleep-tegra30.S |
228 |
+@@ -379,6 +379,14 @@ _pll_m_c_x_done: |
229 |
+ pll_locked r1, r0, CLK_RESET_PLLC_BASE |
230 |
+ pll_locked r1, r0, CLK_RESET_PLLX_BASE |
231 |
+ |
232 |
++ tegra_get_soc_id TEGRA_APB_MISC_BASE, r1 |
233 |
++ cmp r1, #TEGRA30 |
234 |
++ beq 1f |
235 |
++ ldr r1, [r0, #CLK_RESET_PLLP_BASE] |
236 |
++ bic r1, r1, #(1<<31) @ disable PllP bypass |
237 |
++ str r1, [r0, #CLK_RESET_PLLP_BASE] |
238 |
++1: |
239 |
++ |
240 |
+ mov32 r7, TEGRA_TMRUS_BASE |
241 |
+ ldr r1, [r7] |
242 |
+ add r1, r1, #LOCK_DELAY |
243 |
+@@ -638,7 +646,10 @@ tegra30_switch_cpu_to_clk32k: |
244 |
+ str r0, [r4, #PMC_PLLP_WB0_OVERRIDE] |
245 |
+ |
246 |
+ /* disable PLLP, PLLA, PLLC and PLLX */ |
247 |
++ tegra_get_soc_id TEGRA_APB_MISC_BASE, r1 |
248 |
++ cmp r1, #TEGRA30 |
249 |
+ ldr r0, [r5, #CLK_RESET_PLLP_BASE] |
250 |
++ orrne r0, r0, #(1 << 31) @ enable PllP bypass on fast cluster |
251 |
+ bic r0, r0, #(1 << 30) |
252 |
+ str r0, [r5, #CLK_RESET_PLLP_BASE] |
253 |
+ ldr r0, [r5, #CLK_RESET_PLLA_BASE] |
254 |
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
255 |
+index 01b6c00a7060..4ece20178145 100644 |
256 |
+--- a/arch/powerpc/Kconfig |
257 |
++++ b/arch/powerpc/Kconfig |
258 |
+@@ -93,6 +93,7 @@ config PPC |
259 |
+ select BINFMT_ELF |
260 |
+ select ARCH_HAS_ELF_RANDOMIZE |
261 |
+ select OF |
262 |
++ select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE |
263 |
+ select OF_EARLY_FLATTREE |
264 |
+ select OF_RESERVED_MEM |
265 |
+ select HAVE_FTRACE_MCOUNT_RECORD |
266 |
+diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c |
267 |
+index 9d3bd4c45a24..1c4354f922fd 100644 |
268 |
+--- a/arch/powerpc/boot/4xx.c |
269 |
++++ b/arch/powerpc/boot/4xx.c |
270 |
+@@ -232,7 +232,7 @@ void ibm4xx_denali_fixup_memsize(void) |
271 |
+ dpath = 8; /* 64 bits */ |
272 |
+ |
273 |
+ /* get address pins (rows) */ |
274 |
+- val = SDRAM0_READ(DDR0_42); |
275 |
++ val = SDRAM0_READ(DDR0_42); |
276 |
+ |
277 |
+ row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT); |
278 |
+ if (row > max_row) |
279 |
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
280 |
+index 767ac1572c02..54c6ba87a25a 100644 |
281 |
+--- a/arch/powerpc/kvm/book3s_hv.c |
282 |
++++ b/arch/powerpc/kvm/book3s_hv.c |
283 |
+@@ -1669,7 +1669,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
284 |
+ mutex_unlock(&kvm->lock); |
285 |
+ |
286 |
+ if (!vcore) |
287 |
+- goto free_vcpu; |
288 |
++ goto uninit_vcpu; |
289 |
+ |
290 |
+ spin_lock(&vcore->lock); |
291 |
+ ++vcore->num_threads; |
292 |
+@@ -1685,6 +1685,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
293 |
+ |
294 |
+ return vcpu; |
295 |
+ |
296 |
++uninit_vcpu: |
297 |
++ kvm_vcpu_uninit(vcpu); |
298 |
+ free_vcpu: |
299 |
+ kmem_cache_free(kvm_vcpu_cache, vcpu); |
300 |
+ out: |
301 |
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c |
302 |
+index 81313844d81c..91db2852aa6e 100644 |
303 |
+--- a/arch/powerpc/kvm/book3s_pr.c |
304 |
++++ b/arch/powerpc/kvm/book3s_pr.c |
305 |
+@@ -1434,10 +1434,12 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, |
306 |
+ |
307 |
+ err = kvmppc_mmu_init(vcpu); |
308 |
+ if (err < 0) |
309 |
+- goto uninit_vcpu; |
310 |
++ goto free_shared_page; |
311 |
+ |
312 |
+ return vcpu; |
313 |
+ |
314 |
++free_shared_page: |
315 |
++ free_page((unsigned long)vcpu->arch.shared); |
316 |
+ uninit_vcpu: |
317 |
+ kvm_vcpu_uninit(vcpu); |
318 |
+ free_shadow_vcpu: |
319 |
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c |
320 |
+index e8b1027e1b5b..0e65d52eb56d 100644 |
321 |
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c |
322 |
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c |
323 |
+@@ -205,8 +205,10 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb) |
324 |
+ |
325 |
+ for (i = 0; i < scns_per_block; i++) { |
326 |
+ pfn = PFN_DOWN(phys_addr); |
327 |
+- if (!pfn_present(pfn)) |
328 |
++ if (!pfn_present(pfn)) { |
329 |
++ phys_addr += MIN_MEMORY_BLOCK_SIZE; |
330 |
+ continue; |
331 |
++ } |
332 |
+ |
333 |
+ rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION); |
334 |
+ phys_addr += MIN_MEMORY_BLOCK_SIZE; |
335 |
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c |
336 |
+index 3e8865b187de..17b322e8b799 100644 |
337 |
+--- a/arch/powerpc/platforms/pseries/iommu.c |
338 |
++++ b/arch/powerpc/platforms/pseries/iommu.c |
339 |
+@@ -202,10 +202,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) |
340 |
+ return be64_to_cpu(*tcep); |
341 |
+ } |
342 |
+ |
343 |
+-static void tce_free_pSeriesLP(struct iommu_table*, long, long); |
344 |
++static void tce_free_pSeriesLP(unsigned long liobn, long, long); |
345 |
+ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); |
346 |
+ |
347 |
+-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, |
348 |
++static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, |
349 |
+ long npages, unsigned long uaddr, |
350 |
+ enum dma_data_direction direction, |
351 |
+ struct dma_attrs *attrs) |
352 |
+@@ -216,25 +216,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, |
353 |
+ int ret = 0; |
354 |
+ long tcenum_start = tcenum, npages_start = npages; |
355 |
+ |
356 |
+- rpn = __pa(uaddr) >> TCE_SHIFT; |
357 |
++ rpn = __pa(uaddr) >> tceshift; |
358 |
+ proto_tce = TCE_PCI_READ; |
359 |
+ if (direction != DMA_TO_DEVICE) |
360 |
+ proto_tce |= TCE_PCI_WRITE; |
361 |
+ |
362 |
+ while (npages--) { |
363 |
+- tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; |
364 |
+- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce); |
365 |
++ tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift; |
366 |
++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce); |
367 |
+ |
368 |
+ if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { |
369 |
+ ret = (int)rc; |
370 |
+- tce_free_pSeriesLP(tbl, tcenum_start, |
371 |
++ tce_free_pSeriesLP(liobn, tcenum_start, |
372 |
+ (npages_start - (npages + 1))); |
373 |
+ break; |
374 |
+ } |
375 |
+ |
376 |
+ if (rc && printk_ratelimit()) { |
377 |
+ printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); |
378 |
+- printk("\tindex = 0x%llx\n", (u64)tbl->it_index); |
379 |
++ printk("\tindex = 0x%llx\n", (u64)liobn); |
380 |
+ printk("\ttcenum = 0x%llx\n", (u64)tcenum); |
381 |
+ printk("\ttce val = 0x%llx\n", tce ); |
382 |
+ dump_stack(); |
383 |
+@@ -263,7 +263,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
384 |
+ unsigned long flags; |
385 |
+ |
386 |
+ if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) { |
387 |
+- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, |
388 |
++ return tce_build_pSeriesLP(tbl->it_index, tcenum, |
389 |
++ tbl->it_page_shift, npages, uaddr, |
390 |
+ direction, attrs); |
391 |
+ } |
392 |
+ |
393 |
+@@ -279,8 +280,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
394 |
+ /* If allocation fails, fall back to the loop implementation */ |
395 |
+ if (!tcep) { |
396 |
+ local_irq_restore(flags); |
397 |
+- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, |
398 |
+- direction, attrs); |
399 |
++ return tce_build_pSeriesLP(tbl->it_index, tcenum, |
400 |
++ tbl->it_page_shift, |
401 |
++ npages, uaddr, direction, attrs); |
402 |
+ } |
403 |
+ __this_cpu_write(tce_page, tcep); |
404 |
+ } |
405 |
+@@ -331,16 +333,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
406 |
+ return ret; |
407 |
+ } |
408 |
+ |
409 |
+-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) |
410 |
++static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages) |
411 |
+ { |
412 |
+ u64 rc; |
413 |
+ |
414 |
+ while (npages--) { |
415 |
+- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0); |
416 |
++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0); |
417 |
+ |
418 |
+ if (rc && printk_ratelimit()) { |
419 |
+ printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); |
420 |
+- printk("\tindex = 0x%llx\n", (u64)tbl->it_index); |
421 |
++ printk("\tindex = 0x%llx\n", (u64)liobn); |
422 |
+ printk("\ttcenum = 0x%llx\n", (u64)tcenum); |
423 |
+ dump_stack(); |
424 |
+ } |
425 |
+@@ -355,7 +357,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n |
426 |
+ u64 rc; |
427 |
+ |
428 |
+ if (!firmware_has_feature(FW_FEATURE_MULTITCE)) |
429 |
+- return tce_free_pSeriesLP(tbl, tcenum, npages); |
430 |
++ return tce_free_pSeriesLP(tbl->it_index, tcenum, npages); |
431 |
+ |
432 |
+ rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages); |
433 |
+ |
434 |
+@@ -470,6 +472,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, |
435 |
+ u64 rc = 0; |
436 |
+ long l, limit; |
437 |
+ |
438 |
++ if (!firmware_has_feature(FW_FEATURE_MULTITCE)) { |
439 |
++ unsigned long tceshift = be32_to_cpu(maprange->tce_shift); |
440 |
++ unsigned long dmastart = (start_pfn << PAGE_SHIFT) + |
441 |
++ be64_to_cpu(maprange->dma_base); |
442 |
++ unsigned long tcenum = dmastart >> tceshift; |
443 |
++ unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift; |
444 |
++ void *uaddr = __va(start_pfn << PAGE_SHIFT); |
445 |
++ |
446 |
++ return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn), |
447 |
++ tcenum, tceshift, npages, (unsigned long) uaddr, |
448 |
++ DMA_BIDIRECTIONAL, 0); |
449 |
++ } |
450 |
++ |
451 |
+ local_irq_disable(); /* to protect tcep and the page behind it */ |
452 |
+ tcep = __this_cpu_read(tce_page); |
453 |
+ |
454 |
+diff --git a/arch/sparc/include/uapi/asm/ipcbuf.h b/arch/sparc/include/uapi/asm/ipcbuf.h |
455 |
+index 66013b4fe10d..58da9c4addb2 100644 |
456 |
+--- a/arch/sparc/include/uapi/asm/ipcbuf.h |
457 |
++++ b/arch/sparc/include/uapi/asm/ipcbuf.h |
458 |
+@@ -14,19 +14,19 @@ |
459 |
+ |
460 |
+ struct ipc64_perm |
461 |
+ { |
462 |
+- __kernel_key_t key; |
463 |
+- __kernel_uid_t uid; |
464 |
+- __kernel_gid_t gid; |
465 |
+- __kernel_uid_t cuid; |
466 |
+- __kernel_gid_t cgid; |
467 |
++ __kernel_key_t key; |
468 |
++ __kernel_uid32_t uid; |
469 |
++ __kernel_gid32_t gid; |
470 |
++ __kernel_uid32_t cuid; |
471 |
++ __kernel_gid32_t cgid; |
472 |
+ #ifndef __arch64__ |
473 |
+- unsigned short __pad0; |
474 |
++ unsigned short __pad0; |
475 |
+ #endif |
476 |
+- __kernel_mode_t mode; |
477 |
+- unsigned short __pad1; |
478 |
+- unsigned short seq; |
479 |
+- unsigned long long __unused1; |
480 |
+- unsigned long long __unused2; |
481 |
++ __kernel_mode_t mode; |
482 |
++ unsigned short __pad1; |
483 |
++ unsigned short seq; |
484 |
++ unsigned long long __unused1; |
485 |
++ unsigned long long __unused2; |
486 |
+ }; |
487 |
+ |
488 |
+ #endif /* __SPARC_IPCBUF_H */ |
489 |
+diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c |
490 |
+index c2a9dd816c5c..9a7983968ba8 100644 |
491 |
+--- a/arch/x86/kernel/cpu/tsx.c |
492 |
++++ b/arch/x86/kernel/cpu/tsx.c |
493 |
+@@ -115,11 +115,12 @@ void __init tsx_init(void) |
494 |
+ tsx_disable(); |
495 |
+ |
496 |
+ /* |
497 |
+- * tsx_disable() will change the state of the |
498 |
+- * RTM CPUID bit. Clear it here since it is now |
499 |
+- * expected to be not set. |
500 |
++ * tsx_disable() will change the state of the RTM and HLE CPUID |
501 |
++ * bits. Clear them here since they are now expected to be not |
502 |
++ * set. |
503 |
+ */ |
504 |
+ setup_clear_cpu_cap(X86_FEATURE_RTM); |
505 |
++ setup_clear_cpu_cap(X86_FEATURE_HLE); |
506 |
+ } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) { |
507 |
+ |
508 |
+ /* |
509 |
+@@ -131,10 +132,10 @@ void __init tsx_init(void) |
510 |
+ tsx_enable(); |
511 |
+ |
512 |
+ /* |
513 |
+- * tsx_enable() will change the state of the |
514 |
+- * RTM CPUID bit. Force it here since it is now |
515 |
+- * expected to be set. |
516 |
++ * tsx_enable() will change the state of the RTM and HLE CPUID |
517 |
++ * bits. Force them here since they are now expected to be set. |
518 |
+ */ |
519 |
+ setup_force_cpu_cap(X86_FEATURE_RTM); |
520 |
++ setup_force_cpu_cap(X86_FEATURE_HLE); |
521 |
+ } |
522 |
+ } |
523 |
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
524 |
+index 6c7847b3aa2d..ffbdd201c1f1 100644 |
525 |
+--- a/arch/x86/kvm/emulate.c |
526 |
++++ b/arch/x86/kvm/emulate.c |
527 |
+@@ -23,6 +23,7 @@ |
528 |
+ #include <linux/kvm_host.h> |
529 |
+ #include "kvm_cache_regs.h" |
530 |
+ #include <linux/module.h> |
531 |
++#include <linux/nospec.h> |
532 |
+ #include <asm/kvm_emulate.h> |
533 |
+ #include <linux/stringify.h> |
534 |
+ #include <asm/debugreg.h> |
535 |
+@@ -5041,16 +5042,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) |
536 |
+ ctxt->ad_bytes = def_ad_bytes ^ 6; |
537 |
+ break; |
538 |
+ case 0x26: /* ES override */ |
539 |
++ has_seg_override = true; |
540 |
++ ctxt->seg_override = VCPU_SREG_ES; |
541 |
++ break; |
542 |
+ case 0x2e: /* CS override */ |
543 |
++ has_seg_override = true; |
544 |
++ ctxt->seg_override = VCPU_SREG_CS; |
545 |
++ break; |
546 |
+ case 0x36: /* SS override */ |
547 |
++ has_seg_override = true; |
548 |
++ ctxt->seg_override = VCPU_SREG_SS; |
549 |
++ break; |
550 |
+ case 0x3e: /* DS override */ |
551 |
+ has_seg_override = true; |
552 |
+- ctxt->seg_override = (ctxt->b >> 3) & 3; |
553 |
++ ctxt->seg_override = VCPU_SREG_DS; |
554 |
+ break; |
555 |
+ case 0x64: /* FS override */ |
556 |
++ has_seg_override = true; |
557 |
++ ctxt->seg_override = VCPU_SREG_FS; |
558 |
++ break; |
559 |
+ case 0x65: /* GS override */ |
560 |
+ has_seg_override = true; |
561 |
+- ctxt->seg_override = ctxt->b & 7; |
562 |
++ ctxt->seg_override = VCPU_SREG_GS; |
563 |
+ break; |
564 |
+ case 0x40 ... 0x4f: /* REX */ |
565 |
+ if (mode != X86EMUL_MODE_PROT64) |
566 |
+@@ -5134,10 +5147,15 @@ done_prefixes: |
567 |
+ } |
568 |
+ break; |
569 |
+ case Escape: |
570 |
+- if (ctxt->modrm > 0xbf) |
571 |
+- opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; |
572 |
+- else |
573 |
++ if (ctxt->modrm > 0xbf) { |
574 |
++ size_t size = ARRAY_SIZE(opcode.u.esc->high); |
575 |
++ u32 index = array_index_nospec( |
576 |
++ ctxt->modrm - 0xc0, size); |
577 |
++ |
578 |
++ opcode = opcode.u.esc->high[index]; |
579 |
++ } else { |
580 |
+ opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; |
581 |
++ } |
582 |
+ break; |
583 |
+ case InstrDual: |
584 |
+ if ((ctxt->modrm >> 6) == 3) |
585 |
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c |
586 |
+index 62cf8c915e95..fce6fa012d30 100644 |
587 |
+--- a/arch/x86/kvm/hyperv.c |
588 |
++++ b/arch/x86/kvm/hyperv.c |
589 |
+@@ -26,6 +26,7 @@ |
590 |
+ #include "hyperv.h" |
591 |
+ |
592 |
+ #include <linux/kvm_host.h> |
593 |
++#include <linux/nospec.h> |
594 |
+ #include <trace/events/kvm.h> |
595 |
+ |
596 |
+ #include "trace.h" |
597 |
+@@ -53,11 +54,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, |
598 |
+ u32 index, u64 *pdata) |
599 |
+ { |
600 |
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; |
601 |
++ size_t size = ARRAY_SIZE(hv->hv_crash_param); |
602 |
+ |
603 |
+- if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) |
604 |
++ if (WARN_ON_ONCE(index >= size)) |
605 |
+ return -EINVAL; |
606 |
+ |
607 |
+- *pdata = hv->hv_crash_param[index]; |
608 |
++ *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; |
609 |
+ return 0; |
610 |
+ } |
611 |
+ |
612 |
+@@ -96,11 +98,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, |
613 |
+ u32 index, u64 data) |
614 |
+ { |
615 |
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; |
616 |
++ size_t size = ARRAY_SIZE(hv->hv_crash_param); |
617 |
+ |
618 |
+- if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) |
619 |
++ if (WARN_ON_ONCE(index >= size)) |
620 |
+ return -EINVAL; |
621 |
+ |
622 |
+- hv->hv_crash_param[index] = data; |
623 |
++ hv->hv_crash_param[array_index_nospec(index, size)] = data; |
624 |
+ return 0; |
625 |
+ } |
626 |
+ |
627 |
+diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c |
628 |
+index 7cc2360f1848..791850bfc981 100644 |
629 |
+--- a/arch/x86/kvm/i8259.c |
630 |
++++ b/arch/x86/kvm/i8259.c |
631 |
+@@ -456,46 +456,37 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1) |
632 |
+ return s->elcr; |
633 |
+ } |
634 |
+ |
635 |
+-static int picdev_in_range(gpa_t addr) |
636 |
+-{ |
637 |
+- switch (addr) { |
638 |
+- case 0x20: |
639 |
+- case 0x21: |
640 |
+- case 0xa0: |
641 |
+- case 0xa1: |
642 |
+- case 0x4d0: |
643 |
+- case 0x4d1: |
644 |
+- return 1; |
645 |
+- default: |
646 |
+- return 0; |
647 |
+- } |
648 |
+-} |
649 |
+- |
650 |
+ static int picdev_write(struct kvm_pic *s, |
651 |
+ gpa_t addr, int len, const void *val) |
652 |
+ { |
653 |
+ unsigned char data = *(unsigned char *)val; |
654 |
+- if (!picdev_in_range(addr)) |
655 |
+- return -EOPNOTSUPP; |
656 |
+ |
657 |
+ if (len != 1) { |
658 |
+ pr_pic_unimpl("non byte write\n"); |
659 |
+ return 0; |
660 |
+ } |
661 |
+- pic_lock(s); |
662 |
+ switch (addr) { |
663 |
+ case 0x20: |
664 |
+ case 0x21: |
665 |
++ pic_lock(s); |
666 |
++ pic_ioport_write(&s->pics[0], addr, data); |
667 |
++ pic_unlock(s); |
668 |
++ break; |
669 |
+ case 0xa0: |
670 |
+ case 0xa1: |
671 |
+- pic_ioport_write(&s->pics[addr >> 7], addr, data); |
672 |
++ pic_lock(s); |
673 |
++ pic_ioport_write(&s->pics[1], addr, data); |
674 |
++ pic_unlock(s); |
675 |
+ break; |
676 |
+ case 0x4d0: |
677 |
+ case 0x4d1: |
678 |
++ pic_lock(s); |
679 |
+ elcr_ioport_write(&s->pics[addr & 1], addr, data); |
680 |
++ pic_unlock(s); |
681 |
+ break; |
682 |
++ default: |
683 |
++ return -EOPNOTSUPP; |
684 |
+ } |
685 |
+- pic_unlock(s); |
686 |
+ return 0; |
687 |
+ } |
688 |
+ |
689 |
+@@ -503,29 +494,31 @@ static int picdev_read(struct kvm_pic *s, |
690 |
+ gpa_t addr, int len, void *val) |
691 |
+ { |
692 |
+ unsigned char data = 0; |
693 |
+- if (!picdev_in_range(addr)) |
694 |
+- return -EOPNOTSUPP; |
695 |
+ |
696 |
+ if (len != 1) { |
697 |
+ memset(val, 0, len); |
698 |
+ pr_pic_unimpl("non byte read\n"); |
699 |
+ return 0; |
700 |
+ } |
701 |
+- pic_lock(s); |
702 |
+ switch (addr) { |
703 |
+ case 0x20: |
704 |
+ case 0x21: |
705 |
+ case 0xa0: |
706 |
+ case 0xa1: |
707 |
++ pic_lock(s); |
708 |
+ data = pic_ioport_read(&s->pics[addr >> 7], addr); |
709 |
++ pic_unlock(s); |
710 |
+ break; |
711 |
+ case 0x4d0: |
712 |
+ case 0x4d1: |
713 |
++ pic_lock(s); |
714 |
+ data = elcr_ioport_read(&s->pics[addr & 1], addr); |
715 |
++ pic_unlock(s); |
716 |
+ break; |
717 |
++ default: |
718 |
++ return -EOPNOTSUPP; |
719 |
+ } |
720 |
+ *(unsigned char *)val = data; |
721 |
+- pic_unlock(s); |
722 |
+ return 0; |
723 |
+ } |
724 |
+ |
725 |
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c |
726 |
+index d380111351c0..086833ecb9f2 100644 |
727 |
+--- a/arch/x86/kvm/ioapic.c |
728 |
++++ b/arch/x86/kvm/ioapic.c |
729 |
+@@ -36,6 +36,7 @@ |
730 |
+ #include <linux/io.h> |
731 |
+ #include <linux/slab.h> |
732 |
+ #include <linux/export.h> |
733 |
++#include <linux/nospec.h> |
734 |
+ #include <asm/processor.h> |
735 |
+ #include <asm/page.h> |
736 |
+ #include <asm/current.h> |
737 |
+@@ -73,13 +74,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, |
738 |
+ default: |
739 |
+ { |
740 |
+ u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; |
741 |
+- u64 redir_content; |
742 |
++ u64 redir_content = ~0ULL; |
743 |
+ |
744 |
+- if (redir_index < IOAPIC_NUM_PINS) |
745 |
+- redir_content = |
746 |
+- ioapic->redirtbl[redir_index].bits; |
747 |
+- else |
748 |
+- redir_content = ~0ULL; |
749 |
++ if (redir_index < IOAPIC_NUM_PINS) { |
750 |
++ u32 index = array_index_nospec( |
751 |
++ redir_index, IOAPIC_NUM_PINS); |
752 |
++ |
753 |
++ redir_content = ioapic->redirtbl[index].bits; |
754 |
++ } |
755 |
+ |
756 |
+ result = (ioapic->ioregsel & 0x1) ? |
757 |
+ (redir_content >> 32) & 0xffffffff : |
758 |
+@@ -289,6 +291,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
759 |
+ ioapic_debug("change redir index %x val %x\n", index, val); |
760 |
+ if (index >= IOAPIC_NUM_PINS) |
761 |
+ return; |
762 |
++ index = array_index_nospec(index, IOAPIC_NUM_PINS); |
763 |
+ e = &ioapic->redirtbl[index]; |
764 |
+ mask_before = e->fields.mask; |
765 |
+ /* Preserve read-only fields */ |
766 |
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c |
767 |
+index 3c70f6c76d3a..ce8c4ae25c15 100644 |
768 |
+--- a/arch/x86/kvm/lapic.c |
769 |
++++ b/arch/x86/kvm/lapic.c |
770 |
+@@ -36,6 +36,7 @@ |
771 |
+ #include <asm/delay.h> |
772 |
+ #include <linux/atomic.h> |
773 |
+ #include <linux/jump_label.h> |
774 |
++#include <linux/nospec.h> |
775 |
+ #include "kvm_cache_regs.h" |
776 |
+ #include "irq.h" |
777 |
+ #include "trace.h" |
778 |
+@@ -1432,15 +1433,21 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) |
779 |
+ case APIC_LVTTHMR: |
780 |
+ case APIC_LVTPC: |
781 |
+ case APIC_LVT1: |
782 |
+- case APIC_LVTERR: |
783 |
++ case APIC_LVTERR: { |
784 |
+ /* TODO: Check vector */ |
785 |
++ size_t size; |
786 |
++ u32 index; |
787 |
++ |
788 |
+ if (!kvm_apic_sw_enabled(apic)) |
789 |
+ val |= APIC_LVT_MASKED; |
790 |
+ |
791 |
+- val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4]; |
792 |
++ size = ARRAY_SIZE(apic_lvt_mask); |
793 |
++ index = array_index_nospec( |
794 |
++ (reg - APIC_LVTT) >> 4, size); |
795 |
++ val &= apic_lvt_mask[index]; |
796 |
+ apic_set_reg(apic, reg, val); |
797 |
+- |
798 |
+ break; |
799 |
++ } |
800 |
+ |
801 |
+ case APIC_LVTT: |
802 |
+ if (!kvm_apic_sw_enabled(apic)) |
803 |
+diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c |
804 |
+index 0149ac59c273..3e3016411020 100644 |
805 |
+--- a/arch/x86/kvm/mtrr.c |
806 |
++++ b/arch/x86/kvm/mtrr.c |
807 |
+@@ -17,6 +17,7 @@ |
808 |
+ */ |
809 |
+ |
810 |
+ #include <linux/kvm_host.h> |
811 |
++#include <linux/nospec.h> |
812 |
+ #include <asm/mtrr.h> |
813 |
+ |
814 |
+ #include "cpuid.h" |
815 |
+@@ -202,11 +203,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) |
816 |
+ break; |
817 |
+ case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000: |
818 |
+ *seg = 1; |
819 |
+- *unit = msr - MSR_MTRRfix16K_80000; |
820 |
++ *unit = array_index_nospec( |
821 |
++ msr - MSR_MTRRfix16K_80000, |
822 |
++ MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1); |
823 |
+ break; |
824 |
+ case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: |
825 |
+ *seg = 2; |
826 |
+- *unit = msr - MSR_MTRRfix4K_C0000; |
827 |
++ *unit = array_index_nospec( |
828 |
++ msr - MSR_MTRRfix4K_C0000, |
829 |
++ MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1); |
830 |
+ break; |
831 |
+ default: |
832 |
+ return false; |
833 |
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h |
834 |
+index f96e1f962587..fbf3d25af765 100644 |
835 |
+--- a/arch/x86/kvm/pmu.h |
836 |
++++ b/arch/x86/kvm/pmu.h |
837 |
+@@ -1,6 +1,8 @@ |
838 |
+ #ifndef __KVM_X86_PMU_H |
839 |
+ #define __KVM_X86_PMU_H |
840 |
+ |
841 |
++#include <linux/nospec.h> |
842 |
++ |
843 |
+ #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu) |
844 |
+ #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu)) |
845 |
+ #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu) |
846 |
+@@ -80,8 +82,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc) |
847 |
+ static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, |
848 |
+ u32 base) |
849 |
+ { |
850 |
+- if (msr >= base && msr < base + pmu->nr_arch_gp_counters) |
851 |
+- return &pmu->gp_counters[msr - base]; |
852 |
++ if (msr >= base && msr < base + pmu->nr_arch_gp_counters) { |
853 |
++ u32 index = array_index_nospec(msr - base, |
854 |
++ pmu->nr_arch_gp_counters); |
855 |
++ |
856 |
++ return &pmu->gp_counters[index]; |
857 |
++ } |
858 |
+ |
859 |
+ return NULL; |
860 |
+ } |
861 |
+@@ -91,8 +97,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) |
862 |
+ { |
863 |
+ int base = MSR_CORE_PERF_FIXED_CTR0; |
864 |
+ |
865 |
+- if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) |
866 |
+- return &pmu->fixed_counters[msr - base]; |
867 |
++ if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) { |
868 |
++ u32 index = array_index_nospec(msr - base, |
869 |
++ pmu->nr_arch_fixed_counters); |
870 |
++ |
871 |
++ return &pmu->fixed_counters[index]; |
872 |
++ } |
873 |
+ |
874 |
+ return NULL; |
875 |
+ } |
876 |
+diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c |
877 |
+index 8fc07ea23344..822829f00590 100644 |
878 |
+--- a/arch/x86/kvm/pmu_intel.c |
879 |
++++ b/arch/x86/kvm/pmu_intel.c |
880 |
+@@ -87,10 +87,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu, |
881 |
+ |
882 |
+ static unsigned intel_find_fixed_event(int idx) |
883 |
+ { |
884 |
+- if (idx >= ARRAY_SIZE(fixed_pmc_events)) |
885 |
++ u32 event; |
886 |
++ size_t size = ARRAY_SIZE(fixed_pmc_events); |
887 |
++ |
888 |
++ if (idx >= size) |
889 |
+ return PERF_COUNT_HW_MAX; |
890 |
+ |
891 |
+- return intel_arch_events[fixed_pmc_events[idx]].event_type; |
892 |
++ event = fixed_pmc_events[array_index_nospec(idx, size)]; |
893 |
++ return intel_arch_events[event].event_type; |
894 |
+ } |
895 |
+ |
896 |
+ /* check if a PMC is enabled by comparising it with globl_ctrl bits. */ |
897 |
+@@ -131,15 +135,19 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, |
898 |
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
899 |
+ bool fixed = idx & (1u << 30); |
900 |
+ struct kvm_pmc *counters; |
901 |
++ unsigned int num_counters; |
902 |
+ |
903 |
+ idx &= ~(3u << 30); |
904 |
+- if (!fixed && idx >= pmu->nr_arch_gp_counters) |
905 |
+- return NULL; |
906 |
+- if (fixed && idx >= pmu->nr_arch_fixed_counters) |
907 |
++ if (fixed) { |
908 |
++ counters = pmu->fixed_counters; |
909 |
++ num_counters = pmu->nr_arch_fixed_counters; |
910 |
++ } else { |
911 |
++ counters = pmu->gp_counters; |
912 |
++ num_counters = pmu->nr_arch_gp_counters; |
913 |
++ } |
914 |
++ if (idx >= num_counters) |
915 |
+ return NULL; |
916 |
+- counters = fixed ? pmu->fixed_counters : pmu->gp_counters; |
917 |
+- |
918 |
+- return &counters[idx]; |
919 |
++ return &counters[array_index_nospec(idx, num_counters)]; |
920 |
+ } |
921 |
+ |
922 |
+ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) |
923 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
924 |
+index 9344ac6b4f99..6c2b45f5d501 100644 |
925 |
+--- a/arch/x86/kvm/vmx.c |
926 |
++++ b/arch/x86/kvm/vmx.c |
927 |
+@@ -7261,8 +7261,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu) |
928 |
+ /* _system ok, as nested_vmx_check_permission verified cpl=0 */ |
929 |
+ if (kvm_write_guest_virt_system(vcpu, gva, &field_value, |
930 |
+ (is_long_mode(vcpu) ? 8 : 4), |
931 |
+- &e)) |
932 |
++ &e)) { |
933 |
+ kvm_inject_page_fault(vcpu, &e); |
934 |
++ return 1; |
935 |
++ } |
936 |
+ } |
937 |
+ |
938 |
+ nested_vmx_succeed(vcpu); |
939 |
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c |
940 |
+new file mode 100644 |
941 |
+index 000000000000..3791ce8d269e |
942 |
+--- /dev/null |
943 |
++++ b/arch/x86/kvm/vmx/vmx.c |
944 |
+@@ -0,0 +1,8033 @@ |
945 |
++// SPDX-License-Identifier: GPL-2.0-only |
946 |
++/* |
947 |
++ * Kernel-based Virtual Machine driver for Linux |
948 |
++ * |
949 |
++ * This module enables machines with Intel VT-x extensions to run virtual |
950 |
++ * machines without emulation or binary translation. |
951 |
++ * |
952 |
++ * Copyright (C) 2006 Qumranet, Inc. |
953 |
++ * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
954 |
++ * |
955 |
++ * Authors: |
956 |
++ * Avi Kivity <avi@××××××××.com> |
957 |
++ * Yaniv Kamay <yaniv@××××××××.com> |
958 |
++ */ |
959 |
++ |
960 |
++#include <linux/frame.h> |
961 |
++#include <linux/highmem.h> |
962 |
++#include <linux/hrtimer.h> |
963 |
++#include <linux/kernel.h> |
964 |
++#include <linux/kvm_host.h> |
965 |
++#include <linux/module.h> |
966 |
++#include <linux/moduleparam.h> |
967 |
++#include <linux/mod_devicetable.h> |
968 |
++#include <linux/mm.h> |
969 |
++#include <linux/sched.h> |
970 |
++#include <linux/sched/smt.h> |
971 |
++#include <linux/slab.h> |
972 |
++#include <linux/tboot.h> |
973 |
++#include <linux/trace_events.h> |
974 |
++ |
975 |
++#include <asm/apic.h> |
976 |
++#include <asm/asm.h> |
977 |
++#include <asm/cpu.h> |
978 |
++#include <asm/debugreg.h> |
979 |
++#include <asm/desc.h> |
980 |
++#include <asm/fpu/internal.h> |
981 |
++#include <asm/io.h> |
982 |
++#include <asm/irq_remapping.h> |
983 |
++#include <asm/kexec.h> |
984 |
++#include <asm/perf_event.h> |
985 |
++#include <asm/mce.h> |
986 |
++#include <asm/mmu_context.h> |
987 |
++#include <asm/mshyperv.h> |
988 |
++#include <asm/spec-ctrl.h> |
989 |
++#include <asm/virtext.h> |
990 |
++#include <asm/vmx.h> |
991 |
++ |
992 |
++#include "capabilities.h" |
993 |
++#include "cpuid.h" |
994 |
++#include "evmcs.h" |
995 |
++#include "irq.h" |
996 |
++#include "kvm_cache_regs.h" |
997 |
++#include "lapic.h" |
998 |
++#include "mmu.h" |
999 |
++#include "nested.h" |
1000 |
++#include "ops.h" |
1001 |
++#include "pmu.h" |
1002 |
++#include "trace.h" |
1003 |
++#include "vmcs.h" |
1004 |
++#include "vmcs12.h" |
1005 |
++#include "vmx.h" |
1006 |
++#include "x86.h" |
1007 |
++ |
1008 |
++MODULE_AUTHOR("Qumranet"); |
1009 |
++MODULE_LICENSE("GPL"); |
1010 |
++ |
1011 |
++static const struct x86_cpu_id vmx_cpu_id[] = { |
1012 |
++ X86_FEATURE_MATCH(X86_FEATURE_VMX), |
1013 |
++ {} |
1014 |
++}; |
1015 |
++MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); |
1016 |
++ |
1017 |
++bool __read_mostly enable_vpid = 1; |
1018 |
++module_param_named(vpid, enable_vpid, bool, 0444); |
1019 |
++ |
1020 |
++static bool __read_mostly enable_vnmi = 1; |
1021 |
++module_param_named(vnmi, enable_vnmi, bool, S_IRUGO); |
1022 |
++ |
1023 |
++bool __read_mostly flexpriority_enabled = 1; |
1024 |
++module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); |
1025 |
++ |
1026 |
++bool __read_mostly enable_ept = 1; |
1027 |
++module_param_named(ept, enable_ept, bool, S_IRUGO); |
1028 |
++ |
1029 |
++bool __read_mostly enable_unrestricted_guest = 1; |
1030 |
++module_param_named(unrestricted_guest, |
1031 |
++ enable_unrestricted_guest, bool, S_IRUGO); |
1032 |
++ |
1033 |
++bool __read_mostly enable_ept_ad_bits = 1; |
1034 |
++module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); |
1035 |
++ |
1036 |
++static bool __read_mostly emulate_invalid_guest_state = true; |
1037 |
++module_param(emulate_invalid_guest_state, bool, S_IRUGO); |
1038 |
++ |
1039 |
++static bool __read_mostly fasteoi = 1; |
1040 |
++module_param(fasteoi, bool, S_IRUGO); |
1041 |
++ |
1042 |
++static bool __read_mostly enable_apicv = 1; |
1043 |
++module_param(enable_apicv, bool, S_IRUGO); |
1044 |
++ |
1045 |
++/* |
1046 |
++ * If nested=1, nested virtualization is supported, i.e., guests may use |
1047 |
++ * VMX and be a hypervisor for its own guests. If nested=0, guests may not |
1048 |
++ * use VMX instructions. |
1049 |
++ */ |
1050 |
++static bool __read_mostly nested = 1; |
1051 |
++module_param(nested, bool, S_IRUGO); |
1052 |
++ |
1053 |
++bool __read_mostly enable_pml = 1; |
1054 |
++module_param_named(pml, enable_pml, bool, S_IRUGO); |
1055 |
++ |
1056 |
++static bool __read_mostly dump_invalid_vmcs = 0; |
1057 |
++module_param(dump_invalid_vmcs, bool, 0644); |
1058 |
++ |
1059 |
++#define MSR_BITMAP_MODE_X2APIC 1 |
1060 |
++#define MSR_BITMAP_MODE_X2APIC_APICV 2 |
1061 |
++ |
1062 |
++#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL |
1063 |
++ |
1064 |
++/* Guest_tsc -> host_tsc conversion requires 64-bit division. */ |
1065 |
++static int __read_mostly cpu_preemption_timer_multi; |
1066 |
++static bool __read_mostly enable_preemption_timer = 1; |
1067 |
++#ifdef CONFIG_X86_64 |
1068 |
++module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); |
1069 |
++#endif |
1070 |
++ |
1071 |
++#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) |
1072 |
++#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE |
1073 |
++#define KVM_VM_CR0_ALWAYS_ON \ |
1074 |
++ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ |
1075 |
++ X86_CR0_WP | X86_CR0_PG | X86_CR0_PE) |
1076 |
++#define KVM_CR4_GUEST_OWNED_BITS \ |
1077 |
++ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ |
1078 |
++ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) |
1079 |
++ |
1080 |
++#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE |
1081 |
++#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) |
1082 |
++#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) |
1083 |
++ |
1084 |
++#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) |
1085 |
++ |
1086 |
++#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \ |
1087 |
++ RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \ |
1088 |
++ RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \ |
1089 |
++ RTIT_STATUS_BYTECNT)) |
1090 |
++ |
1091 |
++#define MSR_IA32_RTIT_OUTPUT_BASE_MASK \ |
1092 |
++ (~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f) |
1093 |
++ |
1094 |
++/* |
1095 |
++ * These 2 parameters are used to config the controls for Pause-Loop Exiting: |
1096 |
++ * ple_gap: upper bound on the amount of time between two successive |
1097 |
++ * executions of PAUSE in a loop. Also indicate if ple enabled. |
1098 |
++ * According to test, this time is usually smaller than 128 cycles. |
1099 |
++ * ple_window: upper bound on the amount of time a guest is allowed to execute |
1100 |
++ * in a PAUSE loop. Tests indicate that most spinlocks are held for |
1101 |
++ * less than 2^12 cycles |
1102 |
++ * Time is measured based on a counter that runs at the same rate as the TSC, |
1103 |
++ * refer SDM volume 3b section 21.6.13 & 22.1.3. |
1104 |
++ */ |
1105 |
++static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; |
1106 |
++module_param(ple_gap, uint, 0444); |
1107 |
++ |
1108 |
++static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; |
1109 |
++module_param(ple_window, uint, 0444); |
1110 |
++ |
1111 |
++/* Default doubles per-vcpu window every exit. */ |
1112 |
++static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW; |
1113 |
++module_param(ple_window_grow, uint, 0444); |
1114 |
++ |
1115 |
++/* Default resets per-vcpu window every exit to ple_window. */ |
1116 |
++static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; |
1117 |
++module_param(ple_window_shrink, uint, 0444); |
1118 |
++ |
1119 |
++/* Default is to compute the maximum so we can never overflow. */ |
1120 |
++static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; |
1121 |
++module_param(ple_window_max, uint, 0444); |
1122 |
++ |
1123 |
++/* Default is SYSTEM mode, 1 for host-guest mode */ |
1124 |
++int __read_mostly pt_mode = PT_MODE_SYSTEM; |
1125 |
++module_param(pt_mode, int, S_IRUGO); |
1126 |
++ |
1127 |
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); |
1128 |
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); |
1129 |
++static DEFINE_MUTEX(vmx_l1d_flush_mutex); |
1130 |
++ |
1131 |
++/* Storage for pre module init parameter parsing */ |
1132 |
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; |
1133 |
++ |
1134 |
++static const struct { |
1135 |
++ const char *option; |
1136 |
++ bool for_parse; |
1137 |
++} vmentry_l1d_param[] = { |
1138 |
++ [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, |
1139 |
++ [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, |
1140 |
++ [VMENTER_L1D_FLUSH_COND] = {"cond", true}, |
1141 |
++ [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, |
1142 |
++ [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, |
1143 |
++ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, |
1144 |
++}; |
1145 |
++ |
1146 |
++#define L1D_CACHE_ORDER 4 |
1147 |
++static void *vmx_l1d_flush_pages; |
1148 |
++ |
1149 |
++static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) |
1150 |
++{ |
1151 |
++ struct page *page; |
1152 |
++ unsigned int i; |
1153 |
++ |
1154 |
++ if (!boot_cpu_has_bug(X86_BUG_L1TF)) { |
1155 |
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; |
1156 |
++ return 0; |
1157 |
++ } |
1158 |
++ |
1159 |
++ if (!enable_ept) { |
1160 |
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; |
1161 |
++ return 0; |
1162 |
++ } |
1163 |
++ |
1164 |
++ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { |
1165 |
++ u64 msr; |
1166 |
++ |
1167 |
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); |
1168 |
++ if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { |
1169 |
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; |
1170 |
++ return 0; |
1171 |
++ } |
1172 |
++ } |
1173 |
++ |
1174 |
++ /* If set to auto use the default l1tf mitigation method */ |
1175 |
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO) { |
1176 |
++ switch (l1tf_mitigation) { |
1177 |
++ case L1TF_MITIGATION_OFF: |
1178 |
++ l1tf = VMENTER_L1D_FLUSH_NEVER; |
1179 |
++ break; |
1180 |
++ case L1TF_MITIGATION_FLUSH_NOWARN: |
1181 |
++ case L1TF_MITIGATION_FLUSH: |
1182 |
++ case L1TF_MITIGATION_FLUSH_NOSMT: |
1183 |
++ l1tf = VMENTER_L1D_FLUSH_COND; |
1184 |
++ break; |
1185 |
++ case L1TF_MITIGATION_FULL: |
1186 |
++ case L1TF_MITIGATION_FULL_FORCE: |
1187 |
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS; |
1188 |
++ break; |
1189 |
++ } |
1190 |
++ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { |
1191 |
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS; |
1192 |
++ } |
1193 |
++ |
1194 |
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && |
1195 |
++ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { |
1196 |
++ /* |
1197 |
++ * This allocation for vmx_l1d_flush_pages is not tied to a VM |
1198 |
++ * lifetime and so should not be charged to a memcg. |
1199 |
++ */ |
1200 |
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); |
1201 |
++ if (!page) |
1202 |
++ return -ENOMEM; |
1203 |
++ vmx_l1d_flush_pages = page_address(page); |
1204 |
++ |
1205 |
++ /* |
1206 |
++ * Initialize each page with a different pattern in |
1207 |
++ * order to protect against KSM in the nested |
1208 |
++ * virtualization case. |
1209 |
++ */ |
1210 |
++ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { |
1211 |
++ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, |
1212 |
++ PAGE_SIZE); |
1213 |
++ } |
1214 |
++ } |
1215 |
++ |
1216 |
++ l1tf_vmx_mitigation = l1tf; |
1217 |
++ |
1218 |
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER) |
1219 |
++ static_branch_enable(&vmx_l1d_should_flush); |
1220 |
++ else |
1221 |
++ static_branch_disable(&vmx_l1d_should_flush); |
1222 |
++ |
1223 |
++ if (l1tf == VMENTER_L1D_FLUSH_COND) |
1224 |
++ static_branch_enable(&vmx_l1d_flush_cond); |
1225 |
++ else |
1226 |
++ static_branch_disable(&vmx_l1d_flush_cond); |
1227 |
++ return 0; |
1228 |
++} |
1229 |
++ |
1230 |
++static int vmentry_l1d_flush_parse(const char *s) |
1231 |
++{ |
1232 |
++ unsigned int i; |
1233 |
++ |
1234 |
++ if (s) { |
1235 |
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { |
1236 |
++ if (vmentry_l1d_param[i].for_parse && |
1237 |
++ sysfs_streq(s, vmentry_l1d_param[i].option)) |
1238 |
++ return i; |
1239 |
++ } |
1240 |
++ } |
1241 |
++ return -EINVAL; |
1242 |
++} |
1243 |
++ |
1244 |
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) |
1245 |
++{ |
1246 |
++ int l1tf, ret; |
1247 |
++ |
1248 |
++ l1tf = vmentry_l1d_flush_parse(s); |
1249 |
++ if (l1tf < 0) |
1250 |
++ return l1tf; |
1251 |
++ |
1252 |
++ if (!boot_cpu_has(X86_BUG_L1TF)) |
1253 |
++ return 0; |
1254 |
++ |
1255 |
++ /* |
1256 |
++ * Has vmx_init() run already? If not then this is the pre init |
1257 |
++ * parameter parsing. In that case just store the value and let |
1258 |
++ * vmx_init() do the proper setup after enable_ept has been |
1259 |
++ * established. |
1260 |
++ */ |
1261 |
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { |
1262 |
++ vmentry_l1d_flush_param = l1tf; |
1263 |
++ return 0; |
1264 |
++ } |
1265 |
++ |
1266 |
++ mutex_lock(&vmx_l1d_flush_mutex); |
1267 |
++ ret = vmx_setup_l1d_flush(l1tf); |
1268 |
++ mutex_unlock(&vmx_l1d_flush_mutex); |
1269 |
++ return ret; |
1270 |
++} |
1271 |
++ |
1272 |
++static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) |
1273 |
++{ |
1274 |
++ if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) |
1275 |
++ return sprintf(s, "???\n"); |
1276 |
++ |
1277 |
++ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); |
1278 |
++} |
1279 |
++ |
1280 |
++static const struct kernel_param_ops vmentry_l1d_flush_ops = { |
1281 |
++ .set = vmentry_l1d_flush_set, |
1282 |
++ .get = vmentry_l1d_flush_get, |
1283 |
++}; |
1284 |
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); |
1285 |
++ |
1286 |
++static bool guest_state_valid(struct kvm_vcpu *vcpu); |
1287 |
++static u32 vmx_segment_access_rights(struct kvm_segment *var); |
1288 |
++static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, |
1289 |
++ u32 msr, int type); |
1290 |
++ |
1291 |
++void vmx_vmexit(void); |
1292 |
++ |
1293 |
++#define vmx_insn_failed(fmt...) \ |
1294 |
++do { \ |
1295 |
++ WARN_ONCE(1, fmt); \ |
1296 |
++ pr_warn_ratelimited(fmt); \ |
1297 |
++} while (0) |
1298 |
++ |
1299 |
++asmlinkage void vmread_error(unsigned long field, bool fault) |
1300 |
++{ |
1301 |
++ if (fault) |
1302 |
++ kvm_spurious_fault(); |
1303 |
++ else |
1304 |
++ vmx_insn_failed("kvm: vmread failed: field=%lx\n", field); |
1305 |
++} |
1306 |
++ |
1307 |
++noinline void vmwrite_error(unsigned long field, unsigned long value) |
1308 |
++{ |
1309 |
++ vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n", |
1310 |
++ field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); |
1311 |
++} |
1312 |
++ |
1313 |
++noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr) |
1314 |
++{ |
1315 |
++ vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr); |
1316 |
++} |
1317 |
++ |
1318 |
++noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr) |
1319 |
++{ |
1320 |
++ vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr); |
1321 |
++} |
1322 |
++ |
1323 |
++noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva) |
1324 |
++{ |
1325 |
++ vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n", |
1326 |
++ ext, vpid, gva); |
1327 |
++} |
1328 |
++ |
1329 |
++noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa) |
1330 |
++{ |
1331 |
++ vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n", |
1332 |
++ ext, eptp, gpa); |
1333 |
++} |
1334 |
++ |
1335 |
++static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
1336 |
++DEFINE_PER_CPU(struct vmcs *, current_vmcs); |
1337 |
++/* |
1338 |
++ * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed |
1339 |
++ * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. |
1340 |
++ */ |
1341 |
++static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); |
1342 |
++ |
1343 |
++/* |
1344 |
++ * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we |
1345 |
++ * can find which vCPU should be waken up. |
1346 |
++ */ |
1347 |
++static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); |
1348 |
++static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); |
1349 |
++ |
1350 |
++static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); |
1351 |
++static DEFINE_SPINLOCK(vmx_vpid_lock); |
1352 |
++ |
1353 |
++struct vmcs_config vmcs_config; |
1354 |
++struct vmx_capability vmx_capability; |
1355 |
++ |
1356 |
++#define VMX_SEGMENT_FIELD(seg) \ |
1357 |
++ [VCPU_SREG_##seg] = { \ |
1358 |
++ .selector = GUEST_##seg##_SELECTOR, \ |
1359 |
++ .base = GUEST_##seg##_BASE, \ |
1360 |
++ .limit = GUEST_##seg##_LIMIT, \ |
1361 |
++ .ar_bytes = GUEST_##seg##_AR_BYTES, \ |
1362 |
++ } |
1363 |
++ |
1364 |
++static const struct kvm_vmx_segment_field { |
1365 |
++ unsigned selector; |
1366 |
++ unsigned base; |
1367 |
++ unsigned limit; |
1368 |
++ unsigned ar_bytes; |
1369 |
++} kvm_vmx_segment_fields[] = { |
1370 |
++ VMX_SEGMENT_FIELD(CS), |
1371 |
++ VMX_SEGMENT_FIELD(DS), |
1372 |
++ VMX_SEGMENT_FIELD(ES), |
1373 |
++ VMX_SEGMENT_FIELD(FS), |
1374 |
++ VMX_SEGMENT_FIELD(GS), |
1375 |
++ VMX_SEGMENT_FIELD(SS), |
1376 |
++ VMX_SEGMENT_FIELD(TR), |
1377 |
++ VMX_SEGMENT_FIELD(LDTR), |
1378 |
++}; |
1379 |
++ |
1380 |
++u64 host_efer; |
1381 |
++static unsigned long host_idt_base; |
1382 |
++ |
1383 |
++/* |
1384 |
++ * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm |
1385 |
++ * will emulate SYSCALL in legacy mode if the vendor string in guest |
1386 |
++ * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To |
1387 |
++ * support this emulation, IA32_STAR must always be included in |
1388 |
++ * vmx_msr_index[], even in i386 builds. |
1389 |
++ */ |
1390 |
++const u32 vmx_msr_index[] = { |
1391 |
++#ifdef CONFIG_X86_64 |
1392 |
++ MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, |
1393 |
++#endif |
1394 |
++ MSR_EFER, MSR_TSC_AUX, MSR_STAR, |
1395 |
++ MSR_IA32_TSX_CTRL, |
1396 |
++}; |
1397 |
++ |
1398 |
++#if IS_ENABLED(CONFIG_HYPERV) |
1399 |
++static bool __read_mostly enlightened_vmcs = true; |
1400 |
++module_param(enlightened_vmcs, bool, 0444); |
1401 |
++ |
1402 |
++/* check_ept_pointer() should be under protection of ept_pointer_lock. */ |
1403 |
++static void check_ept_pointer_match(struct kvm *kvm) |
1404 |
++{ |
1405 |
++ struct kvm_vcpu *vcpu; |
1406 |
++ u64 tmp_eptp = INVALID_PAGE; |
1407 |
++ int i; |
1408 |
++ |
1409 |
++ kvm_for_each_vcpu(i, vcpu, kvm) { |
1410 |
++ if (!VALID_PAGE(tmp_eptp)) { |
1411 |
++ tmp_eptp = to_vmx(vcpu)->ept_pointer; |
1412 |
++ } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { |
1413 |
++ to_kvm_vmx(kvm)->ept_pointers_match |
1414 |
++ = EPT_POINTERS_MISMATCH; |
1415 |
++ return; |
1416 |
++ } |
1417 |
++ } |
1418 |
++ |
1419 |
++ to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; |
1420 |
++} |
1421 |
++ |
1422 |
++static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, |
1423 |
++ void *data) |
1424 |
++{ |
1425 |
++ struct kvm_tlb_range *range = data; |
1426 |
++ |
1427 |
++ return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, |
1428 |
++ range->pages); |
1429 |
++} |
1430 |
++ |
1431 |
++static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm, |
1432 |
++ struct kvm_vcpu *vcpu, struct kvm_tlb_range *range) |
1433 |
++{ |
1434 |
++ u64 ept_pointer = to_vmx(vcpu)->ept_pointer; |
1435 |
++ |
1436 |
++ /* |
1437 |
++ * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address |
1438 |
++ * of the base of EPT PML4 table, strip off EPT configuration |
1439 |
++ * information. |
1440 |
++ */ |
1441 |
++ if (range) |
1442 |
++ return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK, |
1443 |
++ kvm_fill_hv_flush_list_func, (void *)range); |
1444 |
++ else |
1445 |
++ return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK); |
1446 |
++} |
1447 |
++ |
1448 |
++static int hv_remote_flush_tlb_with_range(struct kvm *kvm, |
1449 |
++ struct kvm_tlb_range *range) |
1450 |
++{ |
1451 |
++ struct kvm_vcpu *vcpu; |
1452 |
++ int ret = 0, i; |
1453 |
++ |
1454 |
++ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); |
1455 |
++ |
1456 |
++ if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) |
1457 |
++ check_ept_pointer_match(kvm); |
1458 |
++ |
1459 |
++ if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { |
1460 |
++ kvm_for_each_vcpu(i, vcpu, kvm) { |
1461 |
++ /* If ept_pointer is invalid pointer, bypass flush request. */ |
1462 |
++ if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) |
1463 |
++ ret |= __hv_remote_flush_tlb_with_range( |
1464 |
++ kvm, vcpu, range); |
1465 |
++ } |
1466 |
++ } else { |
1467 |
++ ret = __hv_remote_flush_tlb_with_range(kvm, |
1468 |
++ kvm_get_vcpu(kvm, 0), range); |
1469 |
++ } |
1470 |
++ |
1471 |
++ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); |
1472 |
++ return ret; |
1473 |
++} |
1474 |
++static int hv_remote_flush_tlb(struct kvm *kvm) |
1475 |
++{ |
1476 |
++ return hv_remote_flush_tlb_with_range(kvm, NULL); |
1477 |
++} |
1478 |
++ |
1479 |
++static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) |
1480 |
++{ |
1481 |
++ struct hv_enlightened_vmcs *evmcs; |
1482 |
++ struct hv_partition_assist_pg **p_hv_pa_pg = |
1483 |
++ &vcpu->kvm->arch.hyperv.hv_pa_pg; |
1484 |
++ /* |
1485 |
++ * Synthetic VM-Exit is not enabled in current code and so All |
1486 |
++ * evmcs in singe VM shares same assist page. |
1487 |
++ */ |
1488 |
++ if (!*p_hv_pa_pg) |
1489 |
++ *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL); |
1490 |
++ |
1491 |
++ if (!*p_hv_pa_pg) |
1492 |
++ return -ENOMEM; |
1493 |
++ |
1494 |
++ evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs; |
1495 |
++ |
1496 |
++ evmcs->partition_assist_page = |
1497 |
++ __pa(*p_hv_pa_pg); |
1498 |
++ evmcs->hv_vm_id = (unsigned long)vcpu->kvm; |
1499 |
++ evmcs->hv_enlightenments_control.nested_flush_hypercall = 1; |
1500 |
++ |
1501 |
++ return 0; |
1502 |
++} |
1503 |
++ |
1504 |
++#endif /* IS_ENABLED(CONFIG_HYPERV) */ |
1505 |
++ |
1506 |
++/* |
1507 |
++ * Comment's format: document - errata name - stepping - processor name. |
1508 |
++ * Taken from |
1509 |
++ * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp |
1510 |
++ */ |
1511 |
++static u32 vmx_preemption_cpu_tfms[] = { |
1512 |
++/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ |
1513 |
++0x000206E6, |
1514 |
++/* 323056.pdf - AAX65 - C2 - Xeon L3406 */ |
1515 |
++/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ |
1516 |
++/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ |
1517 |
++0x00020652, |
1518 |
++/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ |
1519 |
++0x00020655, |
1520 |
++/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ |
1521 |
++/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ |
1522 |
++/* |
1523 |
++ * 320767.pdf - AAP86 - B1 - |
1524 |
++ * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile |
1525 |
++ */ |
1526 |
++0x000106E5, |
1527 |
++/* 321333.pdf - AAM126 - C0 - Xeon 3500 */ |
1528 |
++0x000106A0, |
1529 |
++/* 321333.pdf - AAM126 - C1 - Xeon 3500 */ |
1530 |
++0x000106A1, |
1531 |
++/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ |
1532 |
++0x000106A4, |
1533 |
++ /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ |
1534 |
++ /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ |
1535 |
++ /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ |
1536 |
++0x000106A5, |
1537 |
++ /* Xeon E3-1220 V2 */ |
1538 |
++0x000306A8, |
1539 |
++}; |
1540 |
++ |
1541 |
++static inline bool cpu_has_broken_vmx_preemption_timer(void) |
1542 |
++{ |
1543 |
++ u32 eax = cpuid_eax(0x00000001), i; |
1544 |
++ |
1545 |
++ /* Clear the reserved bits */ |
1546 |
++ eax &= ~(0x3U << 14 | 0xfU << 28); |
1547 |
++ for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) |
1548 |
++ if (eax == vmx_preemption_cpu_tfms[i]) |
1549 |
++ return true; |
1550 |
++ |
1551 |
++ return false; |
1552 |
++} |
1553 |
++ |
1554 |
++static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) |
1555 |
++{ |
1556 |
++ return flexpriority_enabled && lapic_in_kernel(vcpu); |
1557 |
++} |
1558 |
++ |
1559 |
++static inline bool report_flexpriority(void) |
1560 |
++{ |
1561 |
++ return flexpriority_enabled; |
1562 |
++} |
1563 |
++ |
1564 |
++static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) |
1565 |
++{ |
1566 |
++ int i; |
1567 |
++ |
1568 |
++ for (i = 0; i < vmx->nmsrs; ++i) |
1569 |
++ if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) |
1570 |
++ return i; |
1571 |
++ return -1; |
1572 |
++} |
1573 |
++ |
1574 |
++struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) |
1575 |
++{ |
1576 |
++ int i; |
1577 |
++ |
1578 |
++ i = __find_msr_index(vmx, msr); |
1579 |
++ if (i >= 0) |
1580 |
++ return &vmx->guest_msrs[i]; |
1581 |
++ return NULL; |
1582 |
++} |
1583 |
++ |
1584 |
++static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data) |
1585 |
++{ |
1586 |
++ int ret = 0; |
1587 |
++ |
1588 |
++ u64 old_msr_data = msr->data; |
1589 |
++ msr->data = data; |
1590 |
++ if (msr - vmx->guest_msrs < vmx->save_nmsrs) { |
1591 |
++ preempt_disable(); |
1592 |
++ ret = kvm_set_shared_msr(msr->index, msr->data, |
1593 |
++ msr->mask); |
1594 |
++ preempt_enable(); |
1595 |
++ if (ret) |
1596 |
++ msr->data = old_msr_data; |
1597 |
++ } |
1598 |
++ return ret; |
1599 |
++} |
1600 |
++ |
1601 |
++void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) |
1602 |
++{ |
1603 |
++ vmcs_clear(loaded_vmcs->vmcs); |
1604 |
++ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) |
1605 |
++ vmcs_clear(loaded_vmcs->shadow_vmcs); |
1606 |
++ loaded_vmcs->cpu = -1; |
1607 |
++ loaded_vmcs->launched = 0; |
1608 |
++} |
1609 |
++ |
1610 |
++#ifdef CONFIG_KEXEC_CORE |
1611 |
++/* |
1612 |
++ * This bitmap is used to indicate whether the vmclear |
1613 |
++ * operation is enabled on each cpu. All are disabled by |
1614 |
++ * default. |
1615 |
++ */ |
1616 |
++static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; |
1617 |
++ |
1618 |
++static inline void crash_enable_local_vmclear(int cpu) |
1619 |
++{ |
1620 |
++ cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); |
1621 |
++} |
1622 |
++ |
1623 |
++static inline void crash_disable_local_vmclear(int cpu) |
1624 |
++{ |
1625 |
++ cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); |
1626 |
++} |
1627 |
++ |
1628 |
++static inline int crash_local_vmclear_enabled(int cpu) |
1629 |
++{ |
1630 |
++ return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); |
1631 |
++} |
1632 |
++ |
1633 |
++static void crash_vmclear_local_loaded_vmcss(void) |
1634 |
++{ |
1635 |
++ int cpu = raw_smp_processor_id(); |
1636 |
++ struct loaded_vmcs *v; |
1637 |
++ |
1638 |
++ if (!crash_local_vmclear_enabled(cpu)) |
1639 |
++ return; |
1640 |
++ |
1641 |
++ list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), |
1642 |
++ loaded_vmcss_on_cpu_link) |
1643 |
++ vmcs_clear(v->vmcs); |
1644 |
++} |
1645 |
++#else |
1646 |
++static inline void crash_enable_local_vmclear(int cpu) { } |
1647 |
++static inline void crash_disable_local_vmclear(int cpu) { } |
1648 |
++#endif /* CONFIG_KEXEC_CORE */ |
1649 |
++ |
1650 |
++static void __loaded_vmcs_clear(void *arg) |
1651 |
++{ |
1652 |
++ struct loaded_vmcs *loaded_vmcs = arg; |
1653 |
++ int cpu = raw_smp_processor_id(); |
1654 |
++ |
1655 |
++ if (loaded_vmcs->cpu != cpu) |
1656 |
++ return; /* vcpu migration can race with cpu offline */ |
1657 |
++ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) |
1658 |
++ per_cpu(current_vmcs, cpu) = NULL; |
1659 |
++ crash_disable_local_vmclear(cpu); |
1660 |
++ list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); |
1661 |
++ |
1662 |
++ /* |
1663 |
++ * We should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link |
1664 |
++ * happens before setting loaded_vmcs->cpu to -1, which is done in |
1665 |
++ * loaded_vmcs_init. Otherwise, another cpu could see cpu == -1 first |
1666 |
++ * and then add the vmcs to the percpu list before it is deleted. |
1667 |
++ */ |
1668 |
++ smp_wmb(); |
1669 |
++ |
1670 |
++ loaded_vmcs_init(loaded_vmcs); |
1671 |
++ crash_enable_local_vmclear(cpu); |
1672 |
++} |
1673 |
++ |
1674 |
++void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) |
1675 |
++{ |
1676 |
++ int cpu = loaded_vmcs->cpu; |
1677 |
++ |
1678 |
++ if (cpu != -1) |
1679 |
++ smp_call_function_single(cpu, |
1680 |
++ __loaded_vmcs_clear, loaded_vmcs, 1); |
1681 |
++} |
1682 |
++ |
1683 |
++static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, |
1684 |
++ unsigned field) |
1685 |
++{ |
1686 |
++ bool ret; |
1687 |
++ u32 mask = 1 << (seg * SEG_FIELD_NR + field); |
1688 |
++ |
1689 |
++ if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { |
1690 |
++ kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); |
1691 |
++ vmx->segment_cache.bitmask = 0; |
1692 |
++ } |
1693 |
++ ret = vmx->segment_cache.bitmask & mask; |
1694 |
++ vmx->segment_cache.bitmask |= mask; |
1695 |
++ return ret; |
1696 |
++} |
1697 |
++ |
1698 |
++static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) |
1699 |
++{ |
1700 |
++ u16 *p = &vmx->segment_cache.seg[seg].selector; |
1701 |
++ |
1702 |
++ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) |
1703 |
++ *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); |
1704 |
++ return *p; |
1705 |
++} |
1706 |
++ |
1707 |
++static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) |
1708 |
++{ |
1709 |
++ ulong *p = &vmx->segment_cache.seg[seg].base; |
1710 |
++ |
1711 |
++ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) |
1712 |
++ *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); |
1713 |
++ return *p; |
1714 |
++} |
1715 |
++ |
1716 |
++static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) |
1717 |
++{ |
1718 |
++ u32 *p = &vmx->segment_cache.seg[seg].limit; |
1719 |
++ |
1720 |
++ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) |
1721 |
++ *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); |
1722 |
++ return *p; |
1723 |
++} |
1724 |
++ |
1725 |
++static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) |
1726 |
++{ |
1727 |
++ u32 *p = &vmx->segment_cache.seg[seg].ar; |
1728 |
++ |
1729 |
++ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) |
1730 |
++ *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); |
1731 |
++ return *p; |
1732 |
++} |
1733 |
++ |
1734 |
++void update_exception_bitmap(struct kvm_vcpu *vcpu) |
1735 |
++{ |
1736 |
++ u32 eb; |
1737 |
++ |
1738 |
++ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | |
1739 |
++ (1u << DB_VECTOR) | (1u << AC_VECTOR); |
1740 |
++ /* |
1741 |
++ * Guest access to VMware backdoor ports could legitimately |
1742 |
++ * trigger #GP because of TSS I/O permission bitmap. |
1743 |
++ * We intercept those #GP and allow access to them anyway |
1744 |
++ * as VMware does. |
1745 |
++ */ |
1746 |
++ if (enable_vmware_backdoor) |
1747 |
++ eb |= (1u << GP_VECTOR); |
1748 |
++ if ((vcpu->guest_debug & |
1749 |
++ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == |
1750 |
++ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) |
1751 |
++ eb |= 1u << BP_VECTOR; |
1752 |
++ if (to_vmx(vcpu)->rmode.vm86_active) |
1753 |
++ eb = ~0; |
1754 |
++ if (enable_ept) |
1755 |
++ eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ |
1756 |
++ |
1757 |
++ /* When we are running a nested L2 guest and L1 specified for it a |
1758 |
++ * certain exception bitmap, we must trap the same exceptions and pass |
1759 |
++ * them to L1. When running L2, we will only handle the exceptions |
1760 |
++ * specified above if L1 did not want them. |
1761 |
++ */ |
1762 |
++ if (is_guest_mode(vcpu)) |
1763 |
++ eb |= get_vmcs12(vcpu)->exception_bitmap; |
1764 |
++ |
1765 |
++ vmcs_write32(EXCEPTION_BITMAP, eb); |
1766 |
++} |
1767 |
++ |
1768 |
++/* |
1769 |
++ * Check if MSR is intercepted for currently loaded MSR bitmap. |
1770 |
++ */ |
1771 |
++static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) |
1772 |
++{ |
1773 |
++ unsigned long *msr_bitmap; |
1774 |
++ int f = sizeof(unsigned long); |
1775 |
++ |
1776 |
++ if (!cpu_has_vmx_msr_bitmap()) |
1777 |
++ return true; |
1778 |
++ |
1779 |
++ msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; |
1780 |
++ |
1781 |
++ if (msr <= 0x1fff) { |
1782 |
++ return !!test_bit(msr, msr_bitmap + 0x800 / f); |
1783 |
++ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { |
1784 |
++ msr &= 0x1fff; |
1785 |
++ return !!test_bit(msr, msr_bitmap + 0xc00 / f); |
1786 |
++ } |
1787 |
++ |
1788 |
++ return true; |
1789 |
++} |
1790 |
++ |
1791 |
++static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, |
1792 |
++ unsigned long entry, unsigned long exit) |
1793 |
++{ |
1794 |
++ vm_entry_controls_clearbit(vmx, entry); |
1795 |
++ vm_exit_controls_clearbit(vmx, exit); |
1796 |
++} |
1797 |
++ |
1798 |
++int vmx_find_msr_index(struct vmx_msrs *m, u32 msr) |
1799 |
++{ |
1800 |
++ unsigned int i; |
1801 |
++ |
1802 |
++ for (i = 0; i < m->nr; ++i) { |
1803 |
++ if (m->val[i].index == msr) |
1804 |
++ return i; |
1805 |
++ } |
1806 |
++ return -ENOENT; |
1807 |
++} |
1808 |
++ |
1809 |
++static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) |
1810 |
++{ |
1811 |
++ int i; |
1812 |
++ struct msr_autoload *m = &vmx->msr_autoload; |
1813 |
++ |
1814 |
++ switch (msr) { |
1815 |
++ case MSR_EFER: |
1816 |
++ if (cpu_has_load_ia32_efer()) { |
1817 |
++ clear_atomic_switch_msr_special(vmx, |
1818 |
++ VM_ENTRY_LOAD_IA32_EFER, |
1819 |
++ VM_EXIT_LOAD_IA32_EFER); |
1820 |
++ return; |
1821 |
++ } |
1822 |
++ break; |
1823 |
++ case MSR_CORE_PERF_GLOBAL_CTRL: |
1824 |
++ if (cpu_has_load_perf_global_ctrl()) { |
1825 |
++ clear_atomic_switch_msr_special(vmx, |
1826 |
++ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, |
1827 |
++ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); |
1828 |
++ return; |
1829 |
++ } |
1830 |
++ break; |
1831 |
++ } |
1832 |
++ i = vmx_find_msr_index(&m->guest, msr); |
1833 |
++ if (i < 0) |
1834 |
++ goto skip_guest; |
1835 |
++ --m->guest.nr; |
1836 |
++ m->guest.val[i] = m->guest.val[m->guest.nr]; |
1837 |
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); |
1838 |
++ |
1839 |
++skip_guest: |
1840 |
++ i = vmx_find_msr_index(&m->host, msr); |
1841 |
++ if (i < 0) |
1842 |
++ return; |
1843 |
++ |
1844 |
++ --m->host.nr; |
1845 |
++ m->host.val[i] = m->host.val[m->host.nr]; |
1846 |
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); |
1847 |
++} |
1848 |
++ |
1849 |
++static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, |
1850 |
++ unsigned long entry, unsigned long exit, |
1851 |
++ unsigned long guest_val_vmcs, unsigned long host_val_vmcs, |
1852 |
++ u64 guest_val, u64 host_val) |
1853 |
++{ |
1854 |
++ vmcs_write64(guest_val_vmcs, guest_val); |
1855 |
++ if (host_val_vmcs != HOST_IA32_EFER) |
1856 |
++ vmcs_write64(host_val_vmcs, host_val); |
1857 |
++ vm_entry_controls_setbit(vmx, entry); |
1858 |
++ vm_exit_controls_setbit(vmx, exit); |
1859 |
++} |
1860 |
++ |
1861 |
++static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, |
1862 |
++ u64 guest_val, u64 host_val, bool entry_only) |
1863 |
++{ |
1864 |
++ int i, j = 0; |
1865 |
++ struct msr_autoload *m = &vmx->msr_autoload; |
1866 |
++ |
1867 |
++ switch (msr) { |
1868 |
++ case MSR_EFER: |
1869 |
++ if (cpu_has_load_ia32_efer()) { |
1870 |
++ add_atomic_switch_msr_special(vmx, |
1871 |
++ VM_ENTRY_LOAD_IA32_EFER, |
1872 |
++ VM_EXIT_LOAD_IA32_EFER, |
1873 |
++ GUEST_IA32_EFER, |
1874 |
++ HOST_IA32_EFER, |
1875 |
++ guest_val, host_val); |
1876 |
++ return; |
1877 |
++ } |
1878 |
++ break; |
1879 |
++ case MSR_CORE_PERF_GLOBAL_CTRL: |
1880 |
++ if (cpu_has_load_perf_global_ctrl()) { |
1881 |
++ add_atomic_switch_msr_special(vmx, |
1882 |
++ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, |
1883 |
++ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, |
1884 |
++ GUEST_IA32_PERF_GLOBAL_CTRL, |
1885 |
++ HOST_IA32_PERF_GLOBAL_CTRL, |
1886 |
++ guest_val, host_val); |
1887 |
++ return; |
1888 |
++ } |
1889 |
++ break; |
1890 |
++ case MSR_IA32_PEBS_ENABLE: |
1891 |
++ /* PEBS needs a quiescent period after being disabled (to write |
1892 |
++ * a record). Disabling PEBS through VMX MSR swapping doesn't |
1893 |
++ * provide that period, so a CPU could write host's record into |
1894 |
++ * guest's memory. |
1895 |
++ */ |
1896 |
++ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); |
1897 |
++ } |
1898 |
++ |
1899 |
++ i = vmx_find_msr_index(&m->guest, msr); |
1900 |
++ if (!entry_only) |
1901 |
++ j = vmx_find_msr_index(&m->host, msr); |
1902 |
++ |
1903 |
++ if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) || |
1904 |
++ (j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) { |
1905 |
++ printk_once(KERN_WARNING "Not enough msr switch entries. " |
1906 |
++ "Can't add msr %x\n", msr); |
1907 |
++ return; |
1908 |
++ } |
1909 |
++ if (i < 0) { |
1910 |
++ i = m->guest.nr++; |
1911 |
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); |
1912 |
++ } |
1913 |
++ m->guest.val[i].index = msr; |
1914 |
++ m->guest.val[i].value = guest_val; |
1915 |
++ |
1916 |
++ if (entry_only) |
1917 |
++ return; |
1918 |
++ |
1919 |
++ if (j < 0) { |
1920 |
++ j = m->host.nr++; |
1921 |
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); |
1922 |
++ } |
1923 |
++ m->host.val[j].index = msr; |
1924 |
++ m->host.val[j].value = host_val; |
1925 |
++} |
1926 |
++ |
1927 |
++static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) |
1928 |
++{ |
1929 |
++ u64 guest_efer = vmx->vcpu.arch.efer; |
1930 |
++ u64 ignore_bits = 0; |
1931 |
++ |
1932 |
++ /* Shadow paging assumes NX to be available. */ |
1933 |
++ if (!enable_ept) |
1934 |
++ guest_efer |= EFER_NX; |
1935 |
++ |
1936 |
++ /* |
1937 |
++ * LMA and LME handled by hardware; SCE meaningless outside long mode. |
1938 |
++ */ |
1939 |
++ ignore_bits |= EFER_SCE; |
1940 |
++#ifdef CONFIG_X86_64 |
1941 |
++ ignore_bits |= EFER_LMA | EFER_LME; |
1942 |
++ /* SCE is meaningful only in long mode on Intel */ |
1943 |
++ if (guest_efer & EFER_LMA) |
1944 |
++ ignore_bits &= ~(u64)EFER_SCE; |
1945 |
++#endif |
1946 |
++ |
1947 |
++ /* |
1948 |
++ * On EPT, we can't emulate NX, so we must switch EFER atomically. |
1949 |
++ * On CPUs that support "load IA32_EFER", always switch EFER |
1950 |
++ * atomically, since it's faster than switching it manually. |
1951 |
++ */ |
1952 |
++ if (cpu_has_load_ia32_efer() || |
1953 |
++ (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { |
1954 |
++ if (!(guest_efer & EFER_LMA)) |
1955 |
++ guest_efer &= ~EFER_LME; |
1956 |
++ if (guest_efer != host_efer) |
1957 |
++ add_atomic_switch_msr(vmx, MSR_EFER, |
1958 |
++ guest_efer, host_efer, false); |
1959 |
++ else |
1960 |
++ clear_atomic_switch_msr(vmx, MSR_EFER); |
1961 |
++ return false; |
1962 |
++ } else { |
1963 |
++ clear_atomic_switch_msr(vmx, MSR_EFER); |
1964 |
++ |
1965 |
++ guest_efer &= ~ignore_bits; |
1966 |
++ guest_efer |= host_efer & ignore_bits; |
1967 |
++ |
1968 |
++ vmx->guest_msrs[efer_offset].data = guest_efer; |
1969 |
++ vmx->guest_msrs[efer_offset].mask = ~ignore_bits; |
1970 |
++ |
1971 |
++ return true; |
1972 |
++ } |
1973 |
++} |
1974 |
++ |
1975 |
++#ifdef CONFIG_X86_32 |
1976 |
++/* |
1977 |
++ * On 32-bit kernels, VM exits still load the FS and GS bases from the |
1978 |
++ * VMCS rather than the segment table. KVM uses this helper to figure |
1979 |
++ * out the current bases to poke them into the VMCS before entry. |
1980 |
++ */ |
1981 |
++static unsigned long segment_base(u16 selector) |
1982 |
++{ |
1983 |
++ struct desc_struct *table; |
1984 |
++ unsigned long v; |
1985 |
++ |
1986 |
++ if (!(selector & ~SEGMENT_RPL_MASK)) |
1987 |
++ return 0; |
1988 |
++ |
1989 |
++ table = get_current_gdt_ro(); |
1990 |
++ |
1991 |
++ if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { |
1992 |
++ u16 ldt_selector = kvm_read_ldt(); |
1993 |
++ |
1994 |
++ if (!(ldt_selector & ~SEGMENT_RPL_MASK)) |
1995 |
++ return 0; |
1996 |
++ |
1997 |
++ table = (struct desc_struct *)segment_base(ldt_selector); |
1998 |
++ } |
1999 |
++ v = get_desc_base(&table[selector >> 3]); |
2000 |
++ return v; |
2001 |
++} |
2002 |
++#endif |
2003 |
++ |
2004 |
++static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) |
2005 |
++{ |
2006 |
++ u32 i; |
2007 |
++ |
2008 |
++ wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); |
2009 |
++ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); |
2010 |
++ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); |
2011 |
++ wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); |
2012 |
++ for (i = 0; i < addr_range; i++) { |
2013 |
++ wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); |
2014 |
++ wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); |
2015 |
++ } |
2016 |
++} |
2017 |
++ |
2018 |
++static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range) |
2019 |
++{ |
2020 |
++ u32 i; |
2021 |
++ |
2022 |
++ rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); |
2023 |
++ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); |
2024 |
++ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); |
2025 |
++ rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); |
2026 |
++ for (i = 0; i < addr_range; i++) { |
2027 |
++ rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); |
2028 |
++ rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); |
2029 |
++ } |
2030 |
++} |
2031 |
++ |
2032 |
++static void pt_guest_enter(struct vcpu_vmx *vmx) |
2033 |
++{ |
2034 |
++ if (pt_mode == PT_MODE_SYSTEM) |
2035 |
++ return; |
2036 |
++ |
2037 |
++ /* |
2038 |
++ * GUEST_IA32_RTIT_CTL is already set in the VMCS. |
2039 |
++ * Save host state before VM entry. |
2040 |
++ */ |
2041 |
++ rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); |
2042 |
++ if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { |
2043 |
++ wrmsrl(MSR_IA32_RTIT_CTL, 0); |
2044 |
++ pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); |
2045 |
++ pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); |
2046 |
++ } |
2047 |
++} |
2048 |
++ |
2049 |
++static void pt_guest_exit(struct vcpu_vmx *vmx) |
2050 |
++{ |
2051 |
++ if (pt_mode == PT_MODE_SYSTEM) |
2052 |
++ return; |
2053 |
++ |
2054 |
++ if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { |
2055 |
++ pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); |
2056 |
++ pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); |
2057 |
++ } |
2058 |
++ |
2059 |
++ /* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */ |
2060 |
++ wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); |
2061 |
++} |
2062 |
++ |
2063 |
++void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, |
2064 |
++ unsigned long fs_base, unsigned long gs_base) |
2065 |
++{ |
2066 |
++ if (unlikely(fs_sel != host->fs_sel)) { |
2067 |
++ if (!(fs_sel & 7)) |
2068 |
++ vmcs_write16(HOST_FS_SELECTOR, fs_sel); |
2069 |
++ else |
2070 |
++ vmcs_write16(HOST_FS_SELECTOR, 0); |
2071 |
++ host->fs_sel = fs_sel; |
2072 |
++ } |
2073 |
++ if (unlikely(gs_sel != host->gs_sel)) { |
2074 |
++ if (!(gs_sel & 7)) |
2075 |
++ vmcs_write16(HOST_GS_SELECTOR, gs_sel); |
2076 |
++ else |
2077 |
++ vmcs_write16(HOST_GS_SELECTOR, 0); |
2078 |
++ host->gs_sel = gs_sel; |
2079 |
++ } |
2080 |
++ if (unlikely(fs_base != host->fs_base)) { |
2081 |
++ vmcs_writel(HOST_FS_BASE, fs_base); |
2082 |
++ host->fs_base = fs_base; |
2083 |
++ } |
2084 |
++ if (unlikely(gs_base != host->gs_base)) { |
2085 |
++ vmcs_writel(HOST_GS_BASE, gs_base); |
2086 |
++ host->gs_base = gs_base; |
2087 |
++ } |
2088 |
++} |
2089 |
++ |
2090 |
++void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) |
2091 |
++{ |
2092 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2093 |
++ struct vmcs_host_state *host_state; |
2094 |
++#ifdef CONFIG_X86_64 |
2095 |
++ int cpu = raw_smp_processor_id(); |
2096 |
++#endif |
2097 |
++ unsigned long fs_base, gs_base; |
2098 |
++ u16 fs_sel, gs_sel; |
2099 |
++ int i; |
2100 |
++ |
2101 |
++ vmx->req_immediate_exit = false; |
2102 |
++ |
2103 |
++ /* |
2104 |
++ * Note that guest MSRs to be saved/restored can also be changed |
2105 |
++ * when guest state is loaded. This happens when guest transitions |
2106 |
++ * to/from long-mode by setting MSR_EFER.LMA. |
2107 |
++ */ |
2108 |
++ if (!vmx->guest_msrs_ready) { |
2109 |
++ vmx->guest_msrs_ready = true; |
2110 |
++ for (i = 0; i < vmx->save_nmsrs; ++i) |
2111 |
++ kvm_set_shared_msr(vmx->guest_msrs[i].index, |
2112 |
++ vmx->guest_msrs[i].data, |
2113 |
++ vmx->guest_msrs[i].mask); |
2114 |
++ |
2115 |
++ } |
2116 |
++ if (vmx->guest_state_loaded) |
2117 |
++ return; |
2118 |
++ |
2119 |
++ host_state = &vmx->loaded_vmcs->host_state; |
2120 |
++ |
2121 |
++ /* |
2122 |
++ * Set host fs and gs selectors. Unfortunately, 22.2.3 does not |
2123 |
++ * allow segment selectors with cpl > 0 or ti == 1. |
2124 |
++ */ |
2125 |
++ host_state->ldt_sel = kvm_read_ldt(); |
2126 |
++ |
2127 |
++#ifdef CONFIG_X86_64 |
2128 |
++ savesegment(ds, host_state->ds_sel); |
2129 |
++ savesegment(es, host_state->es_sel); |
2130 |
++ |
2131 |
++ gs_base = cpu_kernelmode_gs_base(cpu); |
2132 |
++ if (likely(is_64bit_mm(current->mm))) { |
2133 |
++ save_fsgs_for_kvm(); |
2134 |
++ fs_sel = current->thread.fsindex; |
2135 |
++ gs_sel = current->thread.gsindex; |
2136 |
++ fs_base = current->thread.fsbase; |
2137 |
++ vmx->msr_host_kernel_gs_base = current->thread.gsbase; |
2138 |
++ } else { |
2139 |
++ savesegment(fs, fs_sel); |
2140 |
++ savesegment(gs, gs_sel); |
2141 |
++ fs_base = read_msr(MSR_FS_BASE); |
2142 |
++ vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); |
2143 |
++ } |
2144 |
++ |
2145 |
++ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
2146 |
++#else |
2147 |
++ savesegment(fs, fs_sel); |
2148 |
++ savesegment(gs, gs_sel); |
2149 |
++ fs_base = segment_base(fs_sel); |
2150 |
++ gs_base = segment_base(gs_sel); |
2151 |
++#endif |
2152 |
++ |
2153 |
++ vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base); |
2154 |
++ vmx->guest_state_loaded = true; |
2155 |
++} |
2156 |
++ |
2157 |
++static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) |
2158 |
++{ |
2159 |
++ struct vmcs_host_state *host_state; |
2160 |
++ |
2161 |
++ if (!vmx->guest_state_loaded) |
2162 |
++ return; |
2163 |
++ |
2164 |
++ host_state = &vmx->loaded_vmcs->host_state; |
2165 |
++ |
2166 |
++ ++vmx->vcpu.stat.host_state_reload; |
2167 |
++ |
2168 |
++#ifdef CONFIG_X86_64 |
2169 |
++ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
2170 |
++#endif |
2171 |
++ if (host_state->ldt_sel || (host_state->gs_sel & 7)) { |
2172 |
++ kvm_load_ldt(host_state->ldt_sel); |
2173 |
++#ifdef CONFIG_X86_64 |
2174 |
++ load_gs_index(host_state->gs_sel); |
2175 |
++#else |
2176 |
++ loadsegment(gs, host_state->gs_sel); |
2177 |
++#endif |
2178 |
++ } |
2179 |
++ if (host_state->fs_sel & 7) |
2180 |
++ loadsegment(fs, host_state->fs_sel); |
2181 |
++#ifdef CONFIG_X86_64 |
2182 |
++ if (unlikely(host_state->ds_sel | host_state->es_sel)) { |
2183 |
++ loadsegment(ds, host_state->ds_sel); |
2184 |
++ loadsegment(es, host_state->es_sel); |
2185 |
++ } |
2186 |
++#endif |
2187 |
++ invalidate_tss_limit(); |
2188 |
++#ifdef CONFIG_X86_64 |
2189 |
++ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
2190 |
++#endif |
2191 |
++ load_fixmap_gdt(raw_smp_processor_id()); |
2192 |
++ vmx->guest_state_loaded = false; |
2193 |
++ vmx->guest_msrs_ready = false; |
2194 |
++} |
2195 |
++ |
2196 |
++#ifdef CONFIG_X86_64 |
2197 |
++static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) |
2198 |
++{ |
2199 |
++ preempt_disable(); |
2200 |
++ if (vmx->guest_state_loaded) |
2201 |
++ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
2202 |
++ preempt_enable(); |
2203 |
++ return vmx->msr_guest_kernel_gs_base; |
2204 |
++} |
2205 |
++ |
2206 |
++static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) |
2207 |
++{ |
2208 |
++ preempt_disable(); |
2209 |
++ if (vmx->guest_state_loaded) |
2210 |
++ wrmsrl(MSR_KERNEL_GS_BASE, data); |
2211 |
++ preempt_enable(); |
2212 |
++ vmx->msr_guest_kernel_gs_base = data; |
2213 |
++} |
2214 |
++#endif |
2215 |
++ |
2216 |
++static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) |
2217 |
++{ |
2218 |
++ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
2219 |
++ struct pi_desc old, new; |
2220 |
++ unsigned int dest; |
2221 |
++ |
2222 |
++ /* |
2223 |
++ * In case of hot-plug or hot-unplug, we may have to undo |
2224 |
++ * vmx_vcpu_pi_put even if there is no assigned device. And we |
2225 |
++ * always keep PI.NDST up to date for simplicity: it makes the |
2226 |
++ * code easier, and CPU migration is not a fast path. |
2227 |
++ */ |
2228 |
++ if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) |
2229 |
++ return; |
2230 |
++ |
2231 |
++ /* |
2232 |
++ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change |
2233 |
++ * PI.NDST: pi_post_block is the one expected to change PID.NDST and the |
2234 |
++ * wakeup handler expects the vCPU to be on the blocked_vcpu_list that |
2235 |
++ * matches PI.NDST. Otherwise, a vcpu may not be able to be woken up |
2236 |
++ * correctly. |
2237 |
++ */ |
2238 |
++ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) { |
2239 |
++ pi_clear_sn(pi_desc); |
2240 |
++ goto after_clear_sn; |
2241 |
++ } |
2242 |
++ |
2243 |
++ /* The full case. */ |
2244 |
++ do { |
2245 |
++ old.control = new.control = pi_desc->control; |
2246 |
++ |
2247 |
++ dest = cpu_physical_id(cpu); |
2248 |
++ |
2249 |
++ if (x2apic_enabled()) |
2250 |
++ new.ndst = dest; |
2251 |
++ else |
2252 |
++ new.ndst = (dest << 8) & 0xFF00; |
2253 |
++ |
2254 |
++ new.sn = 0; |
2255 |
++ } while (cmpxchg64(&pi_desc->control, old.control, |
2256 |
++ new.control) != old.control); |
2257 |
++ |
2258 |
++after_clear_sn: |
2259 |
++ |
2260 |
++ /* |
2261 |
++ * Clear SN before reading the bitmap. The VT-d firmware |
2262 |
++ * writes the bitmap and reads SN atomically (5.2.3 in the |
2263 |
++ * spec), so it doesn't really have a memory barrier that |
2264 |
++ * pairs with this, but we cannot do that and we need one. |
2265 |
++ */ |
2266 |
++ smp_mb__after_atomic(); |
2267 |
++ |
2268 |
++ if (!pi_is_pir_empty(pi_desc)) |
2269 |
++ pi_set_on(pi_desc); |
2270 |
++} |
2271 |
++ |
2272 |
++void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) |
2273 |
++{ |
2274 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2275 |
++ bool already_loaded = vmx->loaded_vmcs->cpu == cpu; |
2276 |
++ |
2277 |
++ if (!already_loaded) { |
2278 |
++ loaded_vmcs_clear(vmx->loaded_vmcs); |
2279 |
++ local_irq_disable(); |
2280 |
++ crash_disable_local_vmclear(cpu); |
2281 |
++ |
2282 |
++ /* |
2283 |
++ * The read of loaded_vmcs->cpu should happen before fetching |
2284 |
++ * loaded_vmcs->loaded_vmcss_on_cpu_link. |
2285 |
++ * See the comments in __loaded_vmcs_clear(). |
2286 |
++ */ |
2287 |
++ smp_rmb(); |
2288 |
++ |
2289 |
++ list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, |
2290 |
++ &per_cpu(loaded_vmcss_on_cpu, cpu)); |
2291 |
++ crash_enable_local_vmclear(cpu); |
2292 |
++ local_irq_enable(); |
2293 |
++ } |
2294 |
++ |
2295 |
++ if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { |
2296 |
++ per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; |
2297 |
++ vmcs_load(vmx->loaded_vmcs->vmcs); |
2298 |
++ indirect_branch_prediction_barrier(); |
2299 |
++ } |
2300 |
++ |
2301 |
++ if (!already_loaded) { |
2302 |
++ void *gdt = get_current_gdt_ro(); |
2303 |
++ unsigned long sysenter_esp; |
2304 |
++ |
2305 |
++ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
2306 |
++ |
2307 |
++ /* |
2308 |
++ * Linux uses per-cpu TSS and GDT, so set these when switching |
2309 |
++ * processors. See 22.2.4. |
2310 |
++ */ |
2311 |
++ vmcs_writel(HOST_TR_BASE, |
2312 |
++ (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); |
2313 |
++ vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ |
2314 |
++ |
2315 |
++ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); |
2316 |
++ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ |
2317 |
++ |
2318 |
++ vmx->loaded_vmcs->cpu = cpu; |
2319 |
++ } |
2320 |
++ |
2321 |
++ /* Setup TSC multiplier */ |
2322 |
++ if (kvm_has_tsc_control && |
2323 |
++ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) |
2324 |
++ decache_tsc_multiplier(vmx); |
2325 |
++} |
2326 |
++ |
2327 |
++/* |
2328 |
++ * Switches to specified vcpu, until a matching vcpu_put(), but assumes |
2329 |
++ * vcpu mutex is already taken. |
2330 |
++ */ |
2331 |
++void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
2332 |
++{ |
2333 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2334 |
++ |
2335 |
++ vmx_vcpu_load_vmcs(vcpu, cpu); |
2336 |
++ |
2337 |
++ vmx_vcpu_pi_load(vcpu, cpu); |
2338 |
++ |
2339 |
++ vmx->host_pkru = read_pkru(); |
2340 |
++ vmx->host_debugctlmsr = get_debugctlmsr(); |
2341 |
++} |
2342 |
++ |
2343 |
++static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) |
2344 |
++{ |
2345 |
++ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
2346 |
++ |
2347 |
++ if (!kvm_arch_has_assigned_device(vcpu->kvm) || |
2348 |
++ !irq_remapping_cap(IRQ_POSTING_CAP) || |
2349 |
++ !kvm_vcpu_apicv_active(vcpu)) |
2350 |
++ return; |
2351 |
++ |
2352 |
++ /* Set SN when the vCPU is preempted */ |
2353 |
++ if (vcpu->preempted) |
2354 |
++ pi_set_sn(pi_desc); |
2355 |
++} |
2356 |
++ |
2357 |
++static void vmx_vcpu_put(struct kvm_vcpu *vcpu) |
2358 |
++{ |
2359 |
++ vmx_vcpu_pi_put(vcpu); |
2360 |
++ |
2361 |
++ vmx_prepare_switch_to_host(to_vmx(vcpu)); |
2362 |
++} |
2363 |
++ |
2364 |
++static bool emulation_required(struct kvm_vcpu *vcpu) |
2365 |
++{ |
2366 |
++ return emulate_invalid_guest_state && !guest_state_valid(vcpu); |
2367 |
++} |
2368 |
++ |
2369 |
++static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); |
2370 |
++ |
2371 |
++unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) |
2372 |
++{ |
2373 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2374 |
++ unsigned long rflags, save_rflags; |
2375 |
++ |
2376 |
++ if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) { |
2377 |
++ kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); |
2378 |
++ rflags = vmcs_readl(GUEST_RFLAGS); |
2379 |
++ if (vmx->rmode.vm86_active) { |
2380 |
++ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
2381 |
++ save_rflags = vmx->rmode.save_rflags; |
2382 |
++ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
2383 |
++ } |
2384 |
++ vmx->rflags = rflags; |
2385 |
++ } |
2386 |
++ return vmx->rflags; |
2387 |
++} |
2388 |
++ |
2389 |
++void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
2390 |
++{ |
2391 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2392 |
++ unsigned long old_rflags; |
2393 |
++ |
2394 |
++ if (enable_unrestricted_guest) { |
2395 |
++ kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); |
2396 |
++ vmx->rflags = rflags; |
2397 |
++ vmcs_writel(GUEST_RFLAGS, rflags); |
2398 |
++ return; |
2399 |
++ } |
2400 |
++ |
2401 |
++ old_rflags = vmx_get_rflags(vcpu); |
2402 |
++ vmx->rflags = rflags; |
2403 |
++ if (vmx->rmode.vm86_active) { |
2404 |
++ vmx->rmode.save_rflags = rflags; |
2405 |
++ rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
2406 |
++ } |
2407 |
++ vmcs_writel(GUEST_RFLAGS, rflags); |
2408 |
++ |
2409 |
++ if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) |
2410 |
++ vmx->emulation_required = emulation_required(vcpu); |
2411 |
++} |
2412 |
++ |
2413 |
++u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
2414 |
++{ |
2415 |
++ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
2416 |
++ int ret = 0; |
2417 |
++ |
2418 |
++ if (interruptibility & GUEST_INTR_STATE_STI) |
2419 |
++ ret |= KVM_X86_SHADOW_INT_STI; |
2420 |
++ if (interruptibility & GUEST_INTR_STATE_MOV_SS) |
2421 |
++ ret |= KVM_X86_SHADOW_INT_MOV_SS; |
2422 |
++ |
2423 |
++ return ret; |
2424 |
++} |
2425 |
++ |
2426 |
++void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
2427 |
++{ |
2428 |
++ u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
2429 |
++ u32 interruptibility = interruptibility_old; |
2430 |
++ |
2431 |
++ interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); |
2432 |
++ |
2433 |
++ if (mask & KVM_X86_SHADOW_INT_MOV_SS) |
2434 |
++ interruptibility |= GUEST_INTR_STATE_MOV_SS; |
2435 |
++ else if (mask & KVM_X86_SHADOW_INT_STI) |
2436 |
++ interruptibility |= GUEST_INTR_STATE_STI; |
2437 |
++ |
2438 |
++ if ((interruptibility != interruptibility_old)) |
2439 |
++ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); |
2440 |
++} |
2441 |
++ |
2442 |
++static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) |
2443 |
++{ |
2444 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2445 |
++ unsigned long value; |
2446 |
++ |
2447 |
++ /* |
2448 |
++ * Any MSR write that attempts to change bits marked reserved will |
2449 |
++ * cause a #GP fault. |
2450 |
++ */ |
2451 |
++ if (data & vmx->pt_desc.ctl_bitmask) |
2452 |
++ return 1; |
2453 |
++ |
2454 |
++ /* |
2455 |
++ * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will |
2456 |
++ * result in a #GP unless the same write also clears TraceEn. |
2457 |
++ */ |
2458 |
++ if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && |
2459 |
++ ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN)) |
2460 |
++ return 1; |
2461 |
++ |
2462 |
++ /* |
2463 |
++ * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit |
2464 |
++ * and FabricEn would cause #GP, if |
2465 |
++ * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0 |
2466 |
++ */ |
2467 |
++ if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) && |
2468 |
++ !(data & RTIT_CTL_FABRIC_EN) && |
2469 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
2470 |
++ PT_CAP_single_range_output)) |
2471 |
++ return 1; |
2472 |
++ |
2473 |
++ /* |
2474 |
++ * Check the MTCFreq, CycThresh and PSBFreq encodings; any MSR write |
2475 |
++ * that utilizes encodings marked reserved will cause a #GP fault. |
2476 |
++ */ |
2477 |
++ value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); |
2478 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && |
2479 |
++ !test_bit((data & RTIT_CTL_MTC_RANGE) >> |
2480 |
++ RTIT_CTL_MTC_RANGE_OFFSET, &value)) |
2481 |
++ return 1; |
2482 |
++ value = intel_pt_validate_cap(vmx->pt_desc.caps, |
2483 |
++ PT_CAP_cycle_thresholds); |
2484 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && |
2485 |
++ !test_bit((data & RTIT_CTL_CYC_THRESH) >> |
2486 |
++ RTIT_CTL_CYC_THRESH_OFFSET, &value)) |
2487 |
++ return 1; |
2488 |
++ value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); |
2489 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && |
2490 |
++ !test_bit((data & RTIT_CTL_PSB_FREQ) >> |
2491 |
++ RTIT_CTL_PSB_FREQ_OFFSET, &value)) |
2492 |
++ return 1; |
2493 |
++ |
2494 |
++ /* |
2495 |
++ * Writing a reserved ADDRx_CFG value or an encoding > 2 will |
2496 |
++ * cause a #GP fault. |
2497 |
++ */ |
2498 |
++ value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET; |
2499 |
++ if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) |
2500 |
++ return 1; |
2501 |
++ value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET; |
2502 |
++ if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) |
2503 |
++ return 1; |
2504 |
++ value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET; |
2505 |
++ if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) |
2506 |
++ return 1; |
2507 |
++ value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET; |
2508 |
++ if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) |
2509 |
++ return 1; |
2510 |
++ |
2511 |
++ return 0; |
2512 |
++} |
2513 |
++ |
2514 |
++static int skip_emulated_instruction(struct kvm_vcpu *vcpu) |
2515 |
++{ |
2516 |
++ unsigned long rip; |
2517 |
++ |
2518 |
++ /* |
2519 |
++ * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on |
2520 |
++ * undefined behavior: Intel's SDM doesn't mandate the VMCS field be |
2521 |
++ * set when EPT misconfig occurs. In practice, real hardware updates |
2522 |
++ * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors |
2523 |
++ * (namely Hyper-V) don't set it due to it being undefined behavior, |
2524 |
++ * i.e. we end up advancing IP with some random value. |
2525 |
++ */ |
2526 |
++ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || |
2527 |
++ to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { |
2528 |
++ rip = kvm_rip_read(vcpu); |
2529 |
++ rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
2530 |
++ kvm_rip_write(vcpu, rip); |
2531 |
++ } else { |
2532 |
++ if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) |
2533 |
++ return 0; |
2534 |
++ } |
2535 |
++ |
2536 |
++ /* skipping an emulated instruction also counts */ |
2537 |
++ vmx_set_interrupt_shadow(vcpu, 0); |
2538 |
++ |
2539 |
++ return 1; |
2540 |
++} |
2541 |
++ |
2542 |
++static void vmx_clear_hlt(struct kvm_vcpu *vcpu) |
2543 |
++{ |
2544 |
++ /* |
2545 |
++ * Ensure that we clear the HLT state in the VMCS. We don't need to |
2546 |
++ * explicitly skip the instruction because if the HLT state is set, |
2547 |
++ * then the instruction is already executing and RIP has already been |
2548 |
++ * advanced. |
2549 |
++ */ |
2550 |
++ if (kvm_hlt_in_guest(vcpu->kvm) && |
2551 |
++ vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) |
2552 |
++ vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); |
2553 |
++} |
2554 |
++ |
2555 |
++static void vmx_queue_exception(struct kvm_vcpu *vcpu) |
2556 |
++{ |
2557 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2558 |
++ unsigned nr = vcpu->arch.exception.nr; |
2559 |
++ bool has_error_code = vcpu->arch.exception.has_error_code; |
2560 |
++ u32 error_code = vcpu->arch.exception.error_code; |
2561 |
++ u32 intr_info = nr | INTR_INFO_VALID_MASK; |
2562 |
++ |
2563 |
++ kvm_deliver_exception_payload(vcpu); |
2564 |
++ |
2565 |
++ if (has_error_code) { |
2566 |
++ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); |
2567 |
++ intr_info |= INTR_INFO_DELIVER_CODE_MASK; |
2568 |
++ } |
2569 |
++ |
2570 |
++ if (vmx->rmode.vm86_active) { |
2571 |
++ int inc_eip = 0; |
2572 |
++ if (kvm_exception_is_soft(nr)) |
2573 |
++ inc_eip = vcpu->arch.event_exit_inst_len; |
2574 |
++ kvm_inject_realmode_interrupt(vcpu, nr, inc_eip); |
2575 |
++ return; |
2576 |
++ } |
2577 |
++ |
2578 |
++ WARN_ON_ONCE(vmx->emulation_required); |
2579 |
++ |
2580 |
++ if (kvm_exception_is_soft(nr)) { |
2581 |
++ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, |
2582 |
++ vmx->vcpu.arch.event_exit_inst_len); |
2583 |
++ intr_info |= INTR_TYPE_SOFT_EXCEPTION; |
2584 |
++ } else |
2585 |
++ intr_info |= INTR_TYPE_HARD_EXCEPTION; |
2586 |
++ |
2587 |
++ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); |
2588 |
++ |
2589 |
++ vmx_clear_hlt(vcpu); |
2590 |
++} |
2591 |
++ |
2592 |
++static bool vmx_rdtscp_supported(void) |
2593 |
++{ |
2594 |
++ return cpu_has_vmx_rdtscp(); |
2595 |
++} |
2596 |
++ |
2597 |
++static bool vmx_invpcid_supported(void) |
2598 |
++{ |
2599 |
++ return cpu_has_vmx_invpcid(); |
2600 |
++} |
2601 |
++ |
2602 |
++/* |
2603 |
++ * Swap MSR entry in host/guest MSR entry array. |
2604 |
++ */ |
2605 |
++static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) |
2606 |
++{ |
2607 |
++ struct shared_msr_entry tmp; |
2608 |
++ |
2609 |
++ tmp = vmx->guest_msrs[to]; |
2610 |
++ vmx->guest_msrs[to] = vmx->guest_msrs[from]; |
2611 |
++ vmx->guest_msrs[from] = tmp; |
2612 |
++} |
2613 |
++ |
2614 |
++/* |
2615 |
++ * Set up the vmcs to automatically save and restore system |
2616 |
++ * msrs. Don't touch the 64-bit msrs if the guest is in legacy |
2617 |
++ * mode, as fiddling with msrs is very expensive. |
2618 |
++ */ |
2619 |
++static void setup_msrs(struct vcpu_vmx *vmx) |
2620 |
++{ |
2621 |
++ int save_nmsrs, index; |
2622 |
++ |
2623 |
++ save_nmsrs = 0; |
2624 |
++#ifdef CONFIG_X86_64 |
2625 |
++ /* |
2626 |
++ * The SYSCALL MSRs are only needed on long mode guests, and only |
2627 |
++ * when EFER.SCE is set. |
2628 |
++ */ |
2629 |
++ if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) { |
2630 |
++ index = __find_msr_index(vmx, MSR_STAR); |
2631 |
++ if (index >= 0) |
2632 |
++ move_msr_up(vmx, index, save_nmsrs++); |
2633 |
++ index = __find_msr_index(vmx, MSR_LSTAR); |
2634 |
++ if (index >= 0) |
2635 |
++ move_msr_up(vmx, index, save_nmsrs++); |
2636 |
++ index = __find_msr_index(vmx, MSR_SYSCALL_MASK); |
2637 |
++ if (index >= 0) |
2638 |
++ move_msr_up(vmx, index, save_nmsrs++); |
2639 |
++ } |
2640 |
++#endif |
2641 |
++ index = __find_msr_index(vmx, MSR_EFER); |
2642 |
++ if (index >= 0 && update_transition_efer(vmx, index)) |
2643 |
++ move_msr_up(vmx, index, save_nmsrs++); |
2644 |
++ index = __find_msr_index(vmx, MSR_TSC_AUX); |
2645 |
++ if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) |
2646 |
++ move_msr_up(vmx, index, save_nmsrs++); |
2647 |
++ index = __find_msr_index(vmx, MSR_IA32_TSX_CTRL); |
2648 |
++ if (index >= 0) |
2649 |
++ move_msr_up(vmx, index, save_nmsrs++); |
2650 |
++ |
2651 |
++ vmx->save_nmsrs = save_nmsrs; |
2652 |
++ vmx->guest_msrs_ready = false; |
2653 |
++ |
2654 |
++ if (cpu_has_vmx_msr_bitmap()) |
2655 |
++ vmx_update_msr_bitmap(&vmx->vcpu); |
2656 |
++} |
2657 |
++ |
2658 |
++static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) |
2659 |
++{ |
2660 |
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
2661 |
++ |
2662 |
++ if (is_guest_mode(vcpu) && |
2663 |
++ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)) |
2664 |
++ return vcpu->arch.tsc_offset - vmcs12->tsc_offset; |
2665 |
++ |
2666 |
++ return vcpu->arch.tsc_offset; |
2667 |
++} |
2668 |
++ |
2669 |
++static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
2670 |
++{ |
2671 |
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
2672 |
++ u64 g_tsc_offset = 0; |
2673 |
++ |
2674 |
++ /* |
2675 |
++ * We're here if L1 chose not to trap WRMSR to TSC. According |
2676 |
++ * to the spec, this should set L1's TSC; the offset that L1 |
2677 |
++ * set for L2 remains unchanged, and still needs to be added |
2678 |
++ * to the newly set TSC to get L2's TSC. |
2679 |
++ */ |
2680 |
++ if (is_guest_mode(vcpu) && |
2681 |
++ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)) |
2682 |
++ g_tsc_offset = vmcs12->tsc_offset; |
2683 |
++ |
2684 |
++ trace_kvm_write_tsc_offset(vcpu->vcpu_id, |
2685 |
++ vcpu->arch.tsc_offset - g_tsc_offset, |
2686 |
++ offset); |
2687 |
++ vmcs_write64(TSC_OFFSET, offset + g_tsc_offset); |
2688 |
++ return offset + g_tsc_offset; |
2689 |
++} |
2690 |
++ |
2691 |
++/* |
2692 |
++ * nested_vmx_allowed() checks whether a guest should be allowed to use VMX |
2693 |
++ * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for |
2694 |
++ * all guests if the "nested" module option is off, and can also be disabled |
2695 |
++ * for a single guest by disabling its VMX cpuid bit. |
2696 |
++ */ |
2697 |
++bool nested_vmx_allowed(struct kvm_vcpu *vcpu) |
2698 |
++{ |
2699 |
++ return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); |
2700 |
++} |
2701 |
++ |
2702 |
++static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, |
2703 |
++ uint64_t val) |
2704 |
++{ |
2705 |
++ uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; |
2706 |
++ |
2707 |
++ return !(val & ~valid_bits); |
2708 |
++} |
2709 |
++ |
2710 |
++static int vmx_get_msr_feature(struct kvm_msr_entry *msr) |
2711 |
++{ |
2712 |
++ switch (msr->index) { |
2713 |
++ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
2714 |
++ if (!nested) |
2715 |
++ return 1; |
2716 |
++ return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); |
2717 |
++ default: |
2718 |
++ return 1; |
2719 |
++ } |
2720 |
++} |
2721 |
++ |
2722 |
++/* |
2723 |
++ * Reads an msr value (of 'msr_index') into 'pdata'. |
2724 |
++ * Returns 0 on success, non-0 otherwise. |
2725 |
++ * Assumes vcpu_load() was already called. |
2726 |
++ */ |
2727 |
++static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
2728 |
++{ |
2729 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2730 |
++ struct shared_msr_entry *msr; |
2731 |
++ u32 index; |
2732 |
++ |
2733 |
++ switch (msr_info->index) { |
2734 |
++#ifdef CONFIG_X86_64 |
2735 |
++ case MSR_FS_BASE: |
2736 |
++ msr_info->data = vmcs_readl(GUEST_FS_BASE); |
2737 |
++ break; |
2738 |
++ case MSR_GS_BASE: |
2739 |
++ msr_info->data = vmcs_readl(GUEST_GS_BASE); |
2740 |
++ break; |
2741 |
++ case MSR_KERNEL_GS_BASE: |
2742 |
++ msr_info->data = vmx_read_guest_kernel_gs_base(vmx); |
2743 |
++ break; |
2744 |
++#endif |
2745 |
++ case MSR_EFER: |
2746 |
++ return kvm_get_msr_common(vcpu, msr_info); |
2747 |
++ case MSR_IA32_TSX_CTRL: |
2748 |
++ if (!msr_info->host_initiated && |
2749 |
++ !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) |
2750 |
++ return 1; |
2751 |
++ goto find_shared_msr; |
2752 |
++ case MSR_IA32_UMWAIT_CONTROL: |
2753 |
++ if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) |
2754 |
++ return 1; |
2755 |
++ |
2756 |
++ msr_info->data = vmx->msr_ia32_umwait_control; |
2757 |
++ break; |
2758 |
++ case MSR_IA32_SPEC_CTRL: |
2759 |
++ if (!msr_info->host_initiated && |
2760 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) |
2761 |
++ return 1; |
2762 |
++ |
2763 |
++ msr_info->data = to_vmx(vcpu)->spec_ctrl; |
2764 |
++ break; |
2765 |
++ case MSR_IA32_SYSENTER_CS: |
2766 |
++ msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); |
2767 |
++ break; |
2768 |
++ case MSR_IA32_SYSENTER_EIP: |
2769 |
++ msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); |
2770 |
++ break; |
2771 |
++ case MSR_IA32_SYSENTER_ESP: |
2772 |
++ msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); |
2773 |
++ break; |
2774 |
++ case MSR_IA32_BNDCFGS: |
2775 |
++ if (!kvm_mpx_supported() || |
2776 |
++ (!msr_info->host_initiated && |
2777 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) |
2778 |
++ return 1; |
2779 |
++ msr_info->data = vmcs_read64(GUEST_BNDCFGS); |
2780 |
++ break; |
2781 |
++ case MSR_IA32_MCG_EXT_CTL: |
2782 |
++ if (!msr_info->host_initiated && |
2783 |
++ !(vmx->msr_ia32_feature_control & |
2784 |
++ FEATURE_CONTROL_LMCE)) |
2785 |
++ return 1; |
2786 |
++ msr_info->data = vcpu->arch.mcg_ext_ctl; |
2787 |
++ break; |
2788 |
++ case MSR_IA32_FEATURE_CONTROL: |
2789 |
++ msr_info->data = vmx->msr_ia32_feature_control; |
2790 |
++ break; |
2791 |
++ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
2792 |
++ if (!nested_vmx_allowed(vcpu)) |
2793 |
++ return 1; |
2794 |
++ return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, |
2795 |
++ &msr_info->data); |
2796 |
++ case MSR_IA32_RTIT_CTL: |
2797 |
++ if (pt_mode != PT_MODE_HOST_GUEST) |
2798 |
++ return 1; |
2799 |
++ msr_info->data = vmx->pt_desc.guest.ctl; |
2800 |
++ break; |
2801 |
++ case MSR_IA32_RTIT_STATUS: |
2802 |
++ if (pt_mode != PT_MODE_HOST_GUEST) |
2803 |
++ return 1; |
2804 |
++ msr_info->data = vmx->pt_desc.guest.status; |
2805 |
++ break; |
2806 |
++ case MSR_IA32_RTIT_CR3_MATCH: |
2807 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
2808 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
2809 |
++ PT_CAP_cr3_filtering)) |
2810 |
++ return 1; |
2811 |
++ msr_info->data = vmx->pt_desc.guest.cr3_match; |
2812 |
++ break; |
2813 |
++ case MSR_IA32_RTIT_OUTPUT_BASE: |
2814 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
2815 |
++ (!intel_pt_validate_cap(vmx->pt_desc.caps, |
2816 |
++ PT_CAP_topa_output) && |
2817 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
2818 |
++ PT_CAP_single_range_output))) |
2819 |
++ return 1; |
2820 |
++ msr_info->data = vmx->pt_desc.guest.output_base; |
2821 |
++ break; |
2822 |
++ case MSR_IA32_RTIT_OUTPUT_MASK: |
2823 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
2824 |
++ (!intel_pt_validate_cap(vmx->pt_desc.caps, |
2825 |
++ PT_CAP_topa_output) && |
2826 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
2827 |
++ PT_CAP_single_range_output))) |
2828 |
++ return 1; |
2829 |
++ msr_info->data = vmx->pt_desc.guest.output_mask; |
2830 |
++ break; |
2831 |
++ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: |
2832 |
++ index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; |
2833 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
2834 |
++ (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, |
2835 |
++ PT_CAP_num_address_ranges))) |
2836 |
++ return 1; |
2837 |
++ if (is_noncanonical_address(data, vcpu)) |
2838 |
++ return 1; |
2839 |
++ if (index % 2) |
2840 |
++ msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; |
2841 |
++ else |
2842 |
++ msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; |
2843 |
++ break; |
2844 |
++ case MSR_TSC_AUX: |
2845 |
++ if (!msr_info->host_initiated && |
2846 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) |
2847 |
++ return 1; |
2848 |
++ goto find_shared_msr; |
2849 |
++ default: |
2850 |
++ find_shared_msr: |
2851 |
++ msr = find_msr_entry(vmx, msr_info->index); |
2852 |
++ if (msr) { |
2853 |
++ msr_info->data = msr->data; |
2854 |
++ break; |
2855 |
++ } |
2856 |
++ return kvm_get_msr_common(vcpu, msr_info); |
2857 |
++ } |
2858 |
++ |
2859 |
++ return 0; |
2860 |
++} |
2861 |
++ |
2862 |
++/* |
2863 |
++ * Writes msr value into the appropriate "register". |
2864 |
++ * Returns 0 on success, non-0 otherwise. |
2865 |
++ * Assumes vcpu_load() was already called. |
2866 |
++ */ |
2867 |
++static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
2868 |
++{ |
2869 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
2870 |
++ struct shared_msr_entry *msr; |
2871 |
++ int ret = 0; |
2872 |
++ u32 msr_index = msr_info->index; |
2873 |
++ u64 data = msr_info->data; |
2874 |
++ u32 index; |
2875 |
++ |
2876 |
++ switch (msr_index) { |
2877 |
++ case MSR_EFER: |
2878 |
++ ret = kvm_set_msr_common(vcpu, msr_info); |
2879 |
++ break; |
2880 |
++#ifdef CONFIG_X86_64 |
2881 |
++ case MSR_FS_BASE: |
2882 |
++ vmx_segment_cache_clear(vmx); |
2883 |
++ vmcs_writel(GUEST_FS_BASE, data); |
2884 |
++ break; |
2885 |
++ case MSR_GS_BASE: |
2886 |
++ vmx_segment_cache_clear(vmx); |
2887 |
++ vmcs_writel(GUEST_GS_BASE, data); |
2888 |
++ break; |
2889 |
++ case MSR_KERNEL_GS_BASE: |
2890 |
++ vmx_write_guest_kernel_gs_base(vmx, data); |
2891 |
++ break; |
2892 |
++#endif |
2893 |
++ case MSR_IA32_SYSENTER_CS: |
2894 |
++ if (is_guest_mode(vcpu)) |
2895 |
++ get_vmcs12(vcpu)->guest_sysenter_cs = data; |
2896 |
++ vmcs_write32(GUEST_SYSENTER_CS, data); |
2897 |
++ break; |
2898 |
++ case MSR_IA32_SYSENTER_EIP: |
2899 |
++ if (is_guest_mode(vcpu)) |
2900 |
++ get_vmcs12(vcpu)->guest_sysenter_eip = data; |
2901 |
++ vmcs_writel(GUEST_SYSENTER_EIP, data); |
2902 |
++ break; |
2903 |
++ case MSR_IA32_SYSENTER_ESP: |
2904 |
++ if (is_guest_mode(vcpu)) |
2905 |
++ get_vmcs12(vcpu)->guest_sysenter_esp = data; |
2906 |
++ vmcs_writel(GUEST_SYSENTER_ESP, data); |
2907 |
++ break; |
2908 |
++ case MSR_IA32_DEBUGCTLMSR: |
2909 |
++ if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & |
2910 |
++ VM_EXIT_SAVE_DEBUG_CONTROLS) |
2911 |
++ get_vmcs12(vcpu)->guest_ia32_debugctl = data; |
2912 |
++ |
2913 |
++ ret = kvm_set_msr_common(vcpu, msr_info); |
2914 |
++ break; |
2915 |
++ |
2916 |
++ case MSR_IA32_BNDCFGS: |
2917 |
++ if (!kvm_mpx_supported() || |
2918 |
++ (!msr_info->host_initiated && |
2919 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) |
2920 |
++ return 1; |
2921 |
++ if (is_noncanonical_address(data & PAGE_MASK, vcpu) || |
2922 |
++ (data & MSR_IA32_BNDCFGS_RSVD)) |
2923 |
++ return 1; |
2924 |
++ vmcs_write64(GUEST_BNDCFGS, data); |
2925 |
++ break; |
2926 |
++ case MSR_IA32_UMWAIT_CONTROL: |
2927 |
++ if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) |
2928 |
++ return 1; |
2929 |
++ |
2930 |
++ /* Reserved bit 1 and the upper bits [63:32] must be zero */ |
2931 |
++ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) |
2932 |
++ return 1; |
2933 |
++ |
2934 |
++ vmx->msr_ia32_umwait_control = data; |
2935 |
++ break; |
2936 |
++ case MSR_IA32_SPEC_CTRL: |
2937 |
++ if (!msr_info->host_initiated && |
2938 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) |
2939 |
++ return 1; |
2940 |
++ |
2941 |
++ /* The STIBP bit doesn't fault even if it's not advertised */ |
2942 |
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) |
2943 |
++ return 1; |
2944 |
++ |
2945 |
++ vmx->spec_ctrl = data; |
2946 |
++ |
2947 |
++ if (!data) |
2948 |
++ break; |
2949 |
++ |
2950 |
++ /* |
2951 |
++ * For non-nested: |
2952 |
++ * When it's written (to non-zero) for the first time, pass |
2953 |
++ * it through. |
2954 |
++ * |
2955 |
++ * For nested: |
2956 |
++ * The handling of the MSR bitmap for L2 guests is done in |
2957 |
++ * nested_vmx_prepare_msr_bitmap. We should not touch the |
2958 |
++ * vmcs02.msr_bitmap here since it gets completely overwritten |
2959 |
++ * in the merging. We update the vmcs01 here for L1 as well |
2960 |
++ * since it will end up touching the MSR anyway now. |
2961 |
++ */ |
2962 |
++ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, |
2963 |
++ MSR_IA32_SPEC_CTRL, |
2964 |
++ MSR_TYPE_RW); |
2965 |
++ break; |
2966 |
++ case MSR_IA32_TSX_CTRL: |
2967 |
++ if (!msr_info->host_initiated && |
2968 |
++ !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) |
2969 |
++ return 1; |
2970 |
++ if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) |
2971 |
++ return 1; |
2972 |
++ goto find_shared_msr; |
2973 |
++ case MSR_IA32_PRED_CMD: |
2974 |
++ if (!msr_info->host_initiated && |
2975 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) |
2976 |
++ return 1; |
2977 |
++ |
2978 |
++ if (data & ~PRED_CMD_IBPB) |
2979 |
++ return 1; |
2980 |
++ |
2981 |
++ if (!data) |
2982 |
++ break; |
2983 |
++ |
2984 |
++ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); |
2985 |
++ |
2986 |
++ /* |
2987 |
++ * For non-nested: |
2988 |
++ * When it's written (to non-zero) for the first time, pass |
2989 |
++ * it through. |
2990 |
++ * |
2991 |
++ * For nested: |
2992 |
++ * The handling of the MSR bitmap for L2 guests is done in |
2993 |
++ * nested_vmx_prepare_msr_bitmap. We should not touch the |
2994 |
++ * vmcs02.msr_bitmap here since it gets completely overwritten |
2995 |
++ * in the merging. |
2996 |
++ */ |
2997 |
++ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, |
2998 |
++ MSR_TYPE_W); |
2999 |
++ break; |
3000 |
++ case MSR_IA32_CR_PAT: |
3001 |
++ if (!kvm_pat_valid(data)) |
3002 |
++ return 1; |
3003 |
++ |
3004 |
++ if (is_guest_mode(vcpu) && |
3005 |
++ get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) |
3006 |
++ get_vmcs12(vcpu)->guest_ia32_pat = data; |
3007 |
++ |
3008 |
++ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { |
3009 |
++ vmcs_write64(GUEST_IA32_PAT, data); |
3010 |
++ vcpu->arch.pat = data; |
3011 |
++ break; |
3012 |
++ } |
3013 |
++ ret = kvm_set_msr_common(vcpu, msr_info); |
3014 |
++ break; |
3015 |
++ case MSR_IA32_TSC_ADJUST: |
3016 |
++ ret = kvm_set_msr_common(vcpu, msr_info); |
3017 |
++ break; |
3018 |
++ case MSR_IA32_MCG_EXT_CTL: |
3019 |
++ if ((!msr_info->host_initiated && |
3020 |
++ !(to_vmx(vcpu)->msr_ia32_feature_control & |
3021 |
++ FEATURE_CONTROL_LMCE)) || |
3022 |
++ (data & ~MCG_EXT_CTL_LMCE_EN)) |
3023 |
++ return 1; |
3024 |
++ vcpu->arch.mcg_ext_ctl = data; |
3025 |
++ break; |
3026 |
++ case MSR_IA32_FEATURE_CONTROL: |
3027 |
++ if (!vmx_feature_control_msr_valid(vcpu, data) || |
3028 |
++ (to_vmx(vcpu)->msr_ia32_feature_control & |
3029 |
++ FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) |
3030 |
++ return 1; |
3031 |
++ vmx->msr_ia32_feature_control = data; |
3032 |
++ if (msr_info->host_initiated && data == 0) |
3033 |
++ vmx_leave_nested(vcpu); |
3034 |
++ break; |
3035 |
++ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
3036 |
++ if (!msr_info->host_initiated) |
3037 |
++ return 1; /* they are read-only */ |
3038 |
++ if (!nested_vmx_allowed(vcpu)) |
3039 |
++ return 1; |
3040 |
++ return vmx_set_vmx_msr(vcpu, msr_index, data); |
3041 |
++ case MSR_IA32_RTIT_CTL: |
3042 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
3043 |
++ vmx_rtit_ctl_check(vcpu, data) || |
3044 |
++ vmx->nested.vmxon) |
3045 |
++ return 1; |
3046 |
++ vmcs_write64(GUEST_IA32_RTIT_CTL, data); |
3047 |
++ vmx->pt_desc.guest.ctl = data; |
3048 |
++ pt_update_intercept_for_msr(vmx); |
3049 |
++ break; |
3050 |
++ case MSR_IA32_RTIT_STATUS: |
3051 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
3052 |
++ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || |
3053 |
++ (data & MSR_IA32_RTIT_STATUS_MASK)) |
3054 |
++ return 1; |
3055 |
++ vmx->pt_desc.guest.status = data; |
3056 |
++ break; |
3057 |
++ case MSR_IA32_RTIT_CR3_MATCH: |
3058 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
3059 |
++ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || |
3060 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
3061 |
++ PT_CAP_cr3_filtering)) |
3062 |
++ return 1; |
3063 |
++ vmx->pt_desc.guest.cr3_match = data; |
3064 |
++ break; |
3065 |
++ case MSR_IA32_RTIT_OUTPUT_BASE: |
3066 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
3067 |
++ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || |
3068 |
++ (!intel_pt_validate_cap(vmx->pt_desc.caps, |
3069 |
++ PT_CAP_topa_output) && |
3070 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
3071 |
++ PT_CAP_single_range_output)) || |
3072 |
++ (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK)) |
3073 |
++ return 1; |
3074 |
++ vmx->pt_desc.guest.output_base = data; |
3075 |
++ break; |
3076 |
++ case MSR_IA32_RTIT_OUTPUT_MASK: |
3077 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
3078 |
++ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || |
3079 |
++ (!intel_pt_validate_cap(vmx->pt_desc.caps, |
3080 |
++ PT_CAP_topa_output) && |
3081 |
++ !intel_pt_validate_cap(vmx->pt_desc.caps, |
3082 |
++ PT_CAP_single_range_output))) |
3083 |
++ return 1; |
3084 |
++ vmx->pt_desc.guest.output_mask = data; |
3085 |
++ break; |
3086 |
++ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: |
3087 |
++ index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; |
3088 |
++ if ((pt_mode != PT_MODE_HOST_GUEST) || |
3089 |
++ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || |
3090 |
++ (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, |
3091 |
++ PT_CAP_num_address_ranges))) |
3092 |
++ return 1; |
3093 |
++ if (is_noncanonical_address(data, vcpu)) |
3094 |
++ return 1; |
3095 |
++ if (index % 2) |
3096 |
++ vmx->pt_desc.guest.addr_b[index / 2] = data; |
3097 |
++ else |
3098 |
++ vmx->pt_desc.guest.addr_a[index / 2] = data; |
3099 |
++ break; |
3100 |
++ case MSR_TSC_AUX: |
3101 |
++ if (!msr_info->host_initiated && |
3102 |
++ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) |
3103 |
++ return 1; |
3104 |
++ /* Check reserved bit, higher 32 bits should be zero */ |
3105 |
++ if ((data >> 32) != 0) |
3106 |
++ return 1; |
3107 |
++ goto find_shared_msr; |
3108 |
++ |
3109 |
++ default: |
3110 |
++ find_shared_msr: |
3111 |
++ msr = find_msr_entry(vmx, msr_index); |
3112 |
++ if (msr) |
3113 |
++ ret = vmx_set_guest_msr(vmx, msr, data); |
3114 |
++ else |
3115 |
++ ret = kvm_set_msr_common(vcpu, msr_info); |
3116 |
++ } |
3117 |
++ |
3118 |
++ return ret; |
3119 |
++} |
3120 |
++ |
3121 |
++static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
3122 |
++{ |
3123 |
++ kvm_register_mark_available(vcpu, reg); |
3124 |
++ |
3125 |
++ switch (reg) { |
3126 |
++ case VCPU_REGS_RSP: |
3127 |
++ vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); |
3128 |
++ break; |
3129 |
++ case VCPU_REGS_RIP: |
3130 |
++ vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); |
3131 |
++ break; |
3132 |
++ case VCPU_EXREG_PDPTR: |
3133 |
++ if (enable_ept) |
3134 |
++ ept_save_pdptrs(vcpu); |
3135 |
++ break; |
3136 |
++ case VCPU_EXREG_CR3: |
3137 |
++ if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) |
3138 |
++ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); |
3139 |
++ break; |
3140 |
++ default: |
3141 |
++ WARN_ON_ONCE(1); |
3142 |
++ break; |
3143 |
++ } |
3144 |
++} |
3145 |
++ |
3146 |
++static __init int cpu_has_kvm_support(void) |
3147 |
++{ |
3148 |
++ return cpu_has_vmx(); |
3149 |
++} |
3150 |
++ |
3151 |
++static __init int vmx_disabled_by_bios(void) |
3152 |
++{ |
3153 |
++ u64 msr; |
3154 |
++ |
3155 |
++ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); |
3156 |
++ if (msr & FEATURE_CONTROL_LOCKED) { |
3157 |
++ /* launched w/ TXT and VMX disabled */ |
3158 |
++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) |
3159 |
++ && tboot_enabled()) |
3160 |
++ return 1; |
3161 |
++ /* launched w/o TXT and VMX only enabled w/ TXT */ |
3162 |
++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) |
3163 |
++ && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) |
3164 |
++ && !tboot_enabled()) { |
3165 |
++ printk(KERN_WARNING "kvm: disable TXT in the BIOS or " |
3166 |
++ "activate TXT before enabling KVM\n"); |
3167 |
++ return 1; |
3168 |
++ } |
3169 |
++ /* launched w/o TXT and VMX disabled */ |
3170 |
++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) |
3171 |
++ && !tboot_enabled()) |
3172 |
++ return 1; |
3173 |
++ } |
3174 |
++ |
3175 |
++ return 0; |
3176 |
++} |
3177 |
++ |
3178 |
++static void kvm_cpu_vmxon(u64 addr) |
3179 |
++{ |
3180 |
++ cr4_set_bits(X86_CR4_VMXE); |
3181 |
++ intel_pt_handle_vmx(1); |
3182 |
++ |
3183 |
++ asm volatile ("vmxon %0" : : "m"(addr)); |
3184 |
++} |
3185 |
++ |
3186 |
++static int hardware_enable(void) |
3187 |
++{ |
3188 |
++ int cpu = raw_smp_processor_id(); |
3189 |
++ u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); |
3190 |
++ u64 old, test_bits; |
3191 |
++ |
3192 |
++ if (cr4_read_shadow() & X86_CR4_VMXE) |
3193 |
++ return -EBUSY; |
3194 |
++ |
3195 |
++ /* |
3196 |
++ * This can happen if we hot-added a CPU but failed to allocate |
3197 |
++ * VP assist page for it. |
3198 |
++ */ |
3199 |
++ if (static_branch_unlikely(&enable_evmcs) && |
3200 |
++ !hv_get_vp_assist_page(cpu)) |
3201 |
++ return -EFAULT; |
3202 |
++ |
3203 |
++ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); |
3204 |
++ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); |
3205 |
++ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); |
3206 |
++ |
3207 |
++ /* |
3208 |
++ * Now we can enable the vmclear operation in kdump |
3209 |
++ * since the loaded_vmcss_on_cpu list on this cpu |
3210 |
++ * has been initialized. |
3211 |
++ * |
3212 |
++ * Though the cpu is not in VMX operation now, there |
3213 |
++ * is no problem to enable the vmclear operation |
3214 |
++ * for the loaded_vmcss_on_cpu list is empty! |
3215 |
++ */ |
3216 |
++ crash_enable_local_vmclear(cpu); |
3217 |
++ |
3218 |
++ rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
3219 |
++ |
3220 |
++ test_bits = FEATURE_CONTROL_LOCKED; |
3221 |
++ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; |
3222 |
++ if (tboot_enabled()) |
3223 |
++ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; |
3224 |
++ |
3225 |
++ if ((old & test_bits) != test_bits) { |
3226 |
++ /* enable and lock */ |
3227 |
++ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); |
3228 |
++ } |
3229 |
++ kvm_cpu_vmxon(phys_addr); |
3230 |
++ if (enable_ept) |
3231 |
++ ept_sync_global(); |
3232 |
++ |
3233 |
++ return 0; |
3234 |
++} |
3235 |
++ |
3236 |
++static void vmclear_local_loaded_vmcss(void) |
3237 |
++{ |
3238 |
++ int cpu = raw_smp_processor_id(); |
3239 |
++ struct loaded_vmcs *v, *n; |
3240 |
++ |
3241 |
++ list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), |
3242 |
++ loaded_vmcss_on_cpu_link) |
3243 |
++ __loaded_vmcs_clear(v); |
3244 |
++} |
3245 |
++ |
3246 |
++ |
3247 |
++/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() |
3248 |
++ * tricks. |
3249 |
++ */ |
3250 |
++static void kvm_cpu_vmxoff(void) |
3251 |
++{ |
3252 |
++ asm volatile (__ex("vmxoff")); |
3253 |
++ |
3254 |
++ intel_pt_handle_vmx(0); |
3255 |
++ cr4_clear_bits(X86_CR4_VMXE); |
3256 |
++} |
3257 |
++ |
3258 |
++static void hardware_disable(void) |
3259 |
++{ |
3260 |
++ vmclear_local_loaded_vmcss(); |
3261 |
++ kvm_cpu_vmxoff(); |
3262 |
++} |
3263 |
++ |
3264 |
++static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, |
3265 |
++ u32 msr, u32 *result) |
3266 |
++{ |
3267 |
++ u32 vmx_msr_low, vmx_msr_high; |
3268 |
++ u32 ctl = ctl_min | ctl_opt; |
3269 |
++ |
3270 |
++ rdmsr(msr, vmx_msr_low, vmx_msr_high); |
3271 |
++ |
3272 |
++ ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ |
3273 |
++ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ |
3274 |
++ |
3275 |
++ /* Ensure minimum (required) set of control bits are supported. */ |
3276 |
++ if (ctl_min & ~ctl) |
3277 |
++ return -EIO; |
3278 |
++ |
3279 |
++ *result = ctl; |
3280 |
++ return 0; |
3281 |
++} |
3282 |
++ |
3283 |
++static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, |
3284 |
++ struct vmx_capability *vmx_cap) |
3285 |
++{ |
3286 |
++ u32 vmx_msr_low, vmx_msr_high; |
3287 |
++ u32 min, opt, min2, opt2; |
3288 |
++ u32 _pin_based_exec_control = 0; |
3289 |
++ u32 _cpu_based_exec_control = 0; |
3290 |
++ u32 _cpu_based_2nd_exec_control = 0; |
3291 |
++ u32 _vmexit_control = 0; |
3292 |
++ u32 _vmentry_control = 0; |
3293 |
++ |
3294 |
++ memset(vmcs_conf, 0, sizeof(*vmcs_conf)); |
3295 |
++ min = CPU_BASED_HLT_EXITING | |
3296 |
++#ifdef CONFIG_X86_64 |
3297 |
++ CPU_BASED_CR8_LOAD_EXITING | |
3298 |
++ CPU_BASED_CR8_STORE_EXITING | |
3299 |
++#endif |
3300 |
++ CPU_BASED_CR3_LOAD_EXITING | |
3301 |
++ CPU_BASED_CR3_STORE_EXITING | |
3302 |
++ CPU_BASED_UNCOND_IO_EXITING | |
3303 |
++ CPU_BASED_MOV_DR_EXITING | |
3304 |
++ CPU_BASED_USE_TSC_OFFSETTING | |
3305 |
++ CPU_BASED_MWAIT_EXITING | |
3306 |
++ CPU_BASED_MONITOR_EXITING | |
3307 |
++ CPU_BASED_INVLPG_EXITING | |
3308 |
++ CPU_BASED_RDPMC_EXITING; |
3309 |
++ |
3310 |
++ opt = CPU_BASED_TPR_SHADOW | |
3311 |
++ CPU_BASED_USE_MSR_BITMAPS | |
3312 |
++ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; |
3313 |
++ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, |
3314 |
++ &_cpu_based_exec_control) < 0) |
3315 |
++ return -EIO; |
3316 |
++#ifdef CONFIG_X86_64 |
3317 |
++ if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) |
3318 |
++ _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & |
3319 |
++ ~CPU_BASED_CR8_STORE_EXITING; |
3320 |
++#endif |
3321 |
++ if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { |
3322 |
++ min2 = 0; |
3323 |
++ opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
3324 |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | |
3325 |
++ SECONDARY_EXEC_WBINVD_EXITING | |
3326 |
++ SECONDARY_EXEC_ENABLE_VPID | |
3327 |
++ SECONDARY_EXEC_ENABLE_EPT | |
3328 |
++ SECONDARY_EXEC_UNRESTRICTED_GUEST | |
3329 |
++ SECONDARY_EXEC_PAUSE_LOOP_EXITING | |
3330 |
++ SECONDARY_EXEC_DESC | |
3331 |
++ SECONDARY_EXEC_RDTSCP | |
3332 |
++ SECONDARY_EXEC_ENABLE_INVPCID | |
3333 |
++ SECONDARY_EXEC_APIC_REGISTER_VIRT | |
3334 |
++ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | |
3335 |
++ SECONDARY_EXEC_SHADOW_VMCS | |
3336 |
++ SECONDARY_EXEC_XSAVES | |
3337 |
++ SECONDARY_EXEC_RDSEED_EXITING | |
3338 |
++ SECONDARY_EXEC_RDRAND_EXITING | |
3339 |
++ SECONDARY_EXEC_ENABLE_PML | |
3340 |
++ SECONDARY_EXEC_TSC_SCALING | |
3341 |
++ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | |
3342 |
++ SECONDARY_EXEC_PT_USE_GPA | |
3343 |
++ SECONDARY_EXEC_PT_CONCEAL_VMX | |
3344 |
++ SECONDARY_EXEC_ENABLE_VMFUNC | |
3345 |
++ SECONDARY_EXEC_ENCLS_EXITING; |
3346 |
++ if (adjust_vmx_controls(min2, opt2, |
3347 |
++ MSR_IA32_VMX_PROCBASED_CTLS2, |
3348 |
++ &_cpu_based_2nd_exec_control) < 0) |
3349 |
++ return -EIO; |
3350 |
++ } |
3351 |
++#ifndef CONFIG_X86_64 |
3352 |
++ if (!(_cpu_based_2nd_exec_control & |
3353 |
++ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) |
3354 |
++ _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; |
3355 |
++#endif |
3356 |
++ |
3357 |
++ if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) |
3358 |
++ _cpu_based_2nd_exec_control &= ~( |
3359 |
++ SECONDARY_EXEC_APIC_REGISTER_VIRT | |
3360 |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | |
3361 |
++ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); |
3362 |
++ |
3363 |
++ rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, |
3364 |
++ &vmx_cap->ept, &vmx_cap->vpid); |
3365 |
++ |
3366 |
++ if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { |
3367 |
++ /* CR3 accesses and invlpg don't need to cause VM Exits when EPT |
3368 |
++ enabled */ |
3369 |
++ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | |
3370 |
++ CPU_BASED_CR3_STORE_EXITING | |
3371 |
++ CPU_BASED_INVLPG_EXITING); |
3372 |
++ } else if (vmx_cap->ept) { |
3373 |
++ vmx_cap->ept = 0; |
3374 |
++ pr_warn_once("EPT CAP should not exist if not support " |
3375 |
++ "1-setting enable EPT VM-execution control\n"); |
3376 |
++ } |
3377 |
++ if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && |
3378 |
++ vmx_cap->vpid) { |
3379 |
++ vmx_cap->vpid = 0; |
3380 |
++ pr_warn_once("VPID CAP should not exist if not support " |
3381 |
++ "1-setting enable VPID VM-execution control\n"); |
3382 |
++ } |
3383 |
++ |
3384 |
++ min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; |
3385 |
++#ifdef CONFIG_X86_64 |
3386 |
++ min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; |
3387 |
++#endif |
3388 |
++ opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | |
3389 |
++ VM_EXIT_LOAD_IA32_PAT | |
3390 |
++ VM_EXIT_LOAD_IA32_EFER | |
3391 |
++ VM_EXIT_CLEAR_BNDCFGS | |
3392 |
++ VM_EXIT_PT_CONCEAL_PIP | |
3393 |
++ VM_EXIT_CLEAR_IA32_RTIT_CTL; |
3394 |
++ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, |
3395 |
++ &_vmexit_control) < 0) |
3396 |
++ return -EIO; |
3397 |
++ |
3398 |
++ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; |
3399 |
++ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | |
3400 |
++ PIN_BASED_VMX_PREEMPTION_TIMER; |
3401 |
++ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, |
3402 |
++ &_pin_based_exec_control) < 0) |
3403 |
++ return -EIO; |
3404 |
++ |
3405 |
++ if (cpu_has_broken_vmx_preemption_timer()) |
3406 |
++ _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; |
3407 |
++ if (!(_cpu_based_2nd_exec_control & |
3408 |
++ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) |
3409 |
++ _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; |
3410 |
++ |
3411 |
++ min = VM_ENTRY_LOAD_DEBUG_CONTROLS; |
3412 |
++ opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | |
3413 |
++ VM_ENTRY_LOAD_IA32_PAT | |
3414 |
++ VM_ENTRY_LOAD_IA32_EFER | |
3415 |
++ VM_ENTRY_LOAD_BNDCFGS | |
3416 |
++ VM_ENTRY_PT_CONCEAL_PIP | |
3417 |
++ VM_ENTRY_LOAD_IA32_RTIT_CTL; |
3418 |
++ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, |
3419 |
++ &_vmentry_control) < 0) |
3420 |
++ return -EIO; |
3421 |
++ |
3422 |
++ /* |
3423 |
++ * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they |
3424 |
++ * can't be used due to an errata where VM Exit may incorrectly clear |
3425 |
++ * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the |
3426 |
++ * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. |
3427 |
++ */ |
3428 |
++ if (boot_cpu_data.x86 == 0x6) { |
3429 |
++ switch (boot_cpu_data.x86_model) { |
3430 |
++ case 26: /* AAK155 */ |
3431 |
++ case 30: /* AAP115 */ |
3432 |
++ case 37: /* AAT100 */ |
3433 |
++ case 44: /* BC86,AAY89,BD102 */ |
3434 |
++ case 46: /* BA97 */ |
3435 |
++ _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; |
3436 |
++ _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; |
3437 |
++ pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " |
3438 |
++ "does not work properly. Using workaround\n"); |
3439 |
++ break; |
3440 |
++ default: |
3441 |
++ break; |
3442 |
++ } |
3443 |
++ } |
3444 |
++ |
3445 |
++ |
3446 |
++ rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); |
3447 |
++ |
3448 |
++ /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ |
3449 |
++ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) |
3450 |
++ return -EIO; |
3451 |
++ |
3452 |
++#ifdef CONFIG_X86_64 |
3453 |
++ /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ |
3454 |
++ if (vmx_msr_high & (1u<<16)) |
3455 |
++ return -EIO; |
3456 |
++#endif |
3457 |
++ |
3458 |
++ /* Require Write-Back (WB) memory type for VMCS accesses. */ |
3459 |
++ if (((vmx_msr_high >> 18) & 15) != 6) |
3460 |
++ return -EIO; |
3461 |
++ |
3462 |
++ vmcs_conf->size = vmx_msr_high & 0x1fff; |
3463 |
++ vmcs_conf->order = get_order(vmcs_conf->size); |
3464 |
++ vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; |
3465 |
++ |
3466 |
++ vmcs_conf->revision_id = vmx_msr_low; |
3467 |
++ |
3468 |
++ vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; |
3469 |
++ vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; |
3470 |
++ vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; |
3471 |
++ vmcs_conf->vmexit_ctrl = _vmexit_control; |
3472 |
++ vmcs_conf->vmentry_ctrl = _vmentry_control; |
3473 |
++ |
3474 |
++ if (static_branch_unlikely(&enable_evmcs)) |
3475 |
++ evmcs_sanitize_exec_ctrls(vmcs_conf); |
3476 |
++ |
3477 |
++ return 0; |
3478 |
++} |
3479 |
++ |
3480 |
++struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) |
3481 |
++{ |
3482 |
++ int node = cpu_to_node(cpu); |
3483 |
++ struct page *pages; |
3484 |
++ struct vmcs *vmcs; |
3485 |
++ |
3486 |
++ pages = __alloc_pages_node(node, flags, vmcs_config.order); |
3487 |
++ if (!pages) |
3488 |
++ return NULL; |
3489 |
++ vmcs = page_address(pages); |
3490 |
++ memset(vmcs, 0, vmcs_config.size); |
3491 |
++ |
3492 |
++ /* KVM supports Enlightened VMCS v1 only */ |
3493 |
++ if (static_branch_unlikely(&enable_evmcs)) |
3494 |
++ vmcs->hdr.revision_id = KVM_EVMCS_VERSION; |
3495 |
++ else |
3496 |
++ vmcs->hdr.revision_id = vmcs_config.revision_id; |
3497 |
++ |
3498 |
++ if (shadow) |
3499 |
++ vmcs->hdr.shadow_vmcs = 1; |
3500 |
++ return vmcs; |
3501 |
++} |
3502 |
++ |
3503 |
++void free_vmcs(struct vmcs *vmcs) |
3504 |
++{ |
3505 |
++ free_pages((unsigned long)vmcs, vmcs_config.order); |
3506 |
++} |
3507 |
++ |
3508 |
++/* |
3509 |
++ * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded |
3510 |
++ */ |
3511 |
++void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) |
3512 |
++{ |
3513 |
++ if (!loaded_vmcs->vmcs) |
3514 |
++ return; |
3515 |
++ loaded_vmcs_clear(loaded_vmcs); |
3516 |
++ free_vmcs(loaded_vmcs->vmcs); |
3517 |
++ loaded_vmcs->vmcs = NULL; |
3518 |
++ if (loaded_vmcs->msr_bitmap) |
3519 |
++ free_page((unsigned long)loaded_vmcs->msr_bitmap); |
3520 |
++ WARN_ON(loaded_vmcs->shadow_vmcs != NULL); |
3521 |
++} |
3522 |
++ |
3523 |
++int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) |
3524 |
++{ |
3525 |
++ loaded_vmcs->vmcs = alloc_vmcs(false); |
3526 |
++ if (!loaded_vmcs->vmcs) |
3527 |
++ return -ENOMEM; |
3528 |
++ |
3529 |
++ loaded_vmcs->shadow_vmcs = NULL; |
3530 |
++ loaded_vmcs->hv_timer_soft_disabled = false; |
3531 |
++ loaded_vmcs_init(loaded_vmcs); |
3532 |
++ |
3533 |
++ if (cpu_has_vmx_msr_bitmap()) { |
3534 |
++ loaded_vmcs->msr_bitmap = (unsigned long *) |
3535 |
++ __get_free_page(GFP_KERNEL_ACCOUNT); |
3536 |
++ if (!loaded_vmcs->msr_bitmap) |
3537 |
++ goto out_vmcs; |
3538 |
++ memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); |
3539 |
++ |
3540 |
++ if (IS_ENABLED(CONFIG_HYPERV) && |
3541 |
++ static_branch_unlikely(&enable_evmcs) && |
3542 |
++ (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { |
3543 |
++ struct hv_enlightened_vmcs *evmcs = |
3544 |
++ (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; |
3545 |
++ |
3546 |
++ evmcs->hv_enlightenments_control.msr_bitmap = 1; |
3547 |
++ } |
3548 |
++ } |
3549 |
++ |
3550 |
++ memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); |
3551 |
++ memset(&loaded_vmcs->controls_shadow, 0, |
3552 |
++ sizeof(struct vmcs_controls_shadow)); |
3553 |
++ |
3554 |
++ return 0; |
3555 |
++ |
3556 |
++out_vmcs: |
3557 |
++ free_loaded_vmcs(loaded_vmcs); |
3558 |
++ return -ENOMEM; |
3559 |
++} |
3560 |
++ |
3561 |
++static void free_kvm_area(void) |
3562 |
++{ |
3563 |
++ int cpu; |
3564 |
++ |
3565 |
++ for_each_possible_cpu(cpu) { |
3566 |
++ free_vmcs(per_cpu(vmxarea, cpu)); |
3567 |
++ per_cpu(vmxarea, cpu) = NULL; |
3568 |
++ } |
3569 |
++} |
3570 |
++ |
3571 |
++static __init int alloc_kvm_area(void) |
3572 |
++{ |
3573 |
++ int cpu; |
3574 |
++ |
3575 |
++ for_each_possible_cpu(cpu) { |
3576 |
++ struct vmcs *vmcs; |
3577 |
++ |
3578 |
++ vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL); |
3579 |
++ if (!vmcs) { |
3580 |
++ free_kvm_area(); |
3581 |
++ return -ENOMEM; |
3582 |
++ } |
3583 |
++ |
3584 |
++ /* |
3585 |
++ * When eVMCS is enabled, alloc_vmcs_cpu() sets |
3586 |
++ * vmcs->revision_id to KVM_EVMCS_VERSION instead of |
3587 |
++ * revision_id reported by MSR_IA32_VMX_BASIC. |
3588 |
++ * |
3589 |
++ * However, even though not explicitly documented by |
3590 |
++ * TLFS, VMXArea passed as VMXON argument should |
3591 |
++ * still be marked with revision_id reported by |
3592 |
++ * physical CPU. |
3593 |
++ */ |
3594 |
++ if (static_branch_unlikely(&enable_evmcs)) |
3595 |
++ vmcs->hdr.revision_id = vmcs_config.revision_id; |
3596 |
++ |
3597 |
++ per_cpu(vmxarea, cpu) = vmcs; |
3598 |
++ } |
3599 |
++ return 0; |
3600 |
++} |
3601 |
++ |
3602 |
++static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, |
3603 |
++ struct kvm_segment *save) |
3604 |
++{ |
3605 |
++ if (!emulate_invalid_guest_state) { |
3606 |
++ /* |
3607 |
++ * CS and SS RPL should be equal during guest entry according |
3608 |
++ * to VMX spec, but in reality it is not always so. Since vcpu |
3609 |
++ * is in the middle of the transition from real mode to |
3610 |
++ * protected mode it is safe to assume that RPL 0 is a good |
3611 |
++ * default value. |
3612 |
++ */ |
3613 |
++ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) |
3614 |
++ save->selector &= ~SEGMENT_RPL_MASK; |
3615 |
++ save->dpl = save->selector & SEGMENT_RPL_MASK; |
3616 |
++ save->s = 1; |
3617 |
++ } |
3618 |
++ vmx_set_segment(vcpu, save, seg); |
3619 |
++} |
3620 |
++ |
3621 |
++static void enter_pmode(struct kvm_vcpu *vcpu) |
3622 |
++{ |
3623 |
++ unsigned long flags; |
3624 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
3625 |
++ |
3626 |
++ /* |
3627 |
++ * Update real mode segment cache. It may be not up-to-date if sement |
3628 |
++ * register was written while vcpu was in a guest mode. |
3629 |
++ */ |
3630 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); |
3631 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); |
3632 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); |
3633 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); |
3634 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); |
3635 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); |
3636 |
++ |
3637 |
++ vmx->rmode.vm86_active = 0; |
3638 |
++ |
3639 |
++ vmx_segment_cache_clear(vmx); |
3640 |
++ |
3641 |
++ vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); |
3642 |
++ |
3643 |
++ flags = vmcs_readl(GUEST_RFLAGS); |
3644 |
++ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
3645 |
++ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
3646 |
++ vmcs_writel(GUEST_RFLAGS, flags); |
3647 |
++ |
3648 |
++ vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | |
3649 |
++ (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); |
3650 |
++ |
3651 |
++ update_exception_bitmap(vcpu); |
3652 |
++ |
3653 |
++ fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); |
3654 |
++ fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); |
3655 |
++ fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); |
3656 |
++ fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); |
3657 |
++ fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); |
3658 |
++ fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); |
3659 |
++} |
3660 |
++ |
3661 |
++static void fix_rmode_seg(int seg, struct kvm_segment *save) |
3662 |
++{ |
3663 |
++ const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
3664 |
++ struct kvm_segment var = *save; |
3665 |
++ |
3666 |
++ var.dpl = 0x3; |
3667 |
++ if (seg == VCPU_SREG_CS) |
3668 |
++ var.type = 0x3; |
3669 |
++ |
3670 |
++ if (!emulate_invalid_guest_state) { |
3671 |
++ var.selector = var.base >> 4; |
3672 |
++ var.base = var.base & 0xffff0; |
3673 |
++ var.limit = 0xffff; |
3674 |
++ var.g = 0; |
3675 |
++ var.db = 0; |
3676 |
++ var.present = 1; |
3677 |
++ var.s = 1; |
3678 |
++ var.l = 0; |
3679 |
++ var.unusable = 0; |
3680 |
++ var.type = 0x3; |
3681 |
++ var.avl = 0; |
3682 |
++ if (save->base & 0xf) |
3683 |
++ printk_once(KERN_WARNING "kvm: segment base is not " |
3684 |
++ "paragraph aligned when entering " |
3685 |
++ "protected mode (seg=%d)", seg); |
3686 |
++ } |
3687 |
++ |
3688 |
++ vmcs_write16(sf->selector, var.selector); |
3689 |
++ vmcs_writel(sf->base, var.base); |
3690 |
++ vmcs_write32(sf->limit, var.limit); |
3691 |
++ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); |
3692 |
++} |
3693 |
++ |
3694 |
++static void enter_rmode(struct kvm_vcpu *vcpu) |
3695 |
++{ |
3696 |
++ unsigned long flags; |
3697 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
3698 |
++ struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); |
3699 |
++ |
3700 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); |
3701 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); |
3702 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); |
3703 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); |
3704 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); |
3705 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); |
3706 |
++ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); |
3707 |
++ |
3708 |
++ vmx->rmode.vm86_active = 1; |
3709 |
++ |
3710 |
++ /* |
3711 |
++ * Very old userspace does not call KVM_SET_TSS_ADDR before entering |
3712 |
++ * vcpu. Warn the user that an update is overdue. |
3713 |
++ */ |
3714 |
++ if (!kvm_vmx->tss_addr) |
3715 |
++ printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " |
3716 |
++ "called before entering vcpu\n"); |
3717 |
++ |
3718 |
++ vmx_segment_cache_clear(vmx); |
3719 |
++ |
3720 |
++ vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); |
3721 |
++ vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); |
3722 |
++ vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
3723 |
++ |
3724 |
++ flags = vmcs_readl(GUEST_RFLAGS); |
3725 |
++ vmx->rmode.save_rflags = flags; |
3726 |
++ |
3727 |
++ flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
3728 |
++ |
3729 |
++ vmcs_writel(GUEST_RFLAGS, flags); |
3730 |
++ vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); |
3731 |
++ update_exception_bitmap(vcpu); |
3732 |
++ |
3733 |
++ fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); |
3734 |
++ fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); |
3735 |
++ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); |
3736 |
++ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); |
3737 |
++ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); |
3738 |
++ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); |
3739 |
++ |
3740 |
++ kvm_mmu_reset_context(vcpu); |
3741 |
++} |
3742 |
++ |
3743 |
++void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
3744 |
++{ |
3745 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
3746 |
++ struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); |
3747 |
++ |
3748 |
++ if (!msr) |
3749 |
++ return; |
3750 |
++ |
3751 |
++ vcpu->arch.efer = efer; |
3752 |
++ if (efer & EFER_LMA) { |
3753 |
++ vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); |
3754 |
++ msr->data = efer; |
3755 |
++ } else { |
3756 |
++ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); |
3757 |
++ |
3758 |
++ msr->data = efer & ~EFER_LME; |
3759 |
++ } |
3760 |
++ setup_msrs(vmx); |
3761 |
++} |
3762 |
++ |
3763 |
++#ifdef CONFIG_X86_64 |
3764 |
++ |
3765 |
++static void enter_lmode(struct kvm_vcpu *vcpu) |
3766 |
++{ |
3767 |
++ u32 guest_tr_ar; |
3768 |
++ |
3769 |
++ vmx_segment_cache_clear(to_vmx(vcpu)); |
3770 |
++ |
3771 |
++ guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); |
3772 |
++ if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { |
3773 |
++ pr_debug_ratelimited("%s: tss fixup for long mode. \n", |
3774 |
++ __func__); |
3775 |
++ vmcs_write32(GUEST_TR_AR_BYTES, |
3776 |
++ (guest_tr_ar & ~VMX_AR_TYPE_MASK) |
3777 |
++ | VMX_AR_TYPE_BUSY_64_TSS); |
3778 |
++ } |
3779 |
++ vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); |
3780 |
++} |
3781 |
++ |
3782 |
++static void exit_lmode(struct kvm_vcpu *vcpu) |
3783 |
++{ |
3784 |
++ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); |
3785 |
++ vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); |
3786 |
++} |
3787 |
++ |
3788 |
++#endif |
3789 |
++ |
3790 |
++static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) |
3791 |
++{ |
3792 |
++ int vpid = to_vmx(vcpu)->vpid; |
3793 |
++ |
3794 |
++ if (!vpid_sync_vcpu_addr(vpid, addr)) |
3795 |
++ vpid_sync_context(vpid); |
3796 |
++ |
3797 |
++ /* |
3798 |
++ * If VPIDs are not supported or enabled, then the above is a no-op. |
3799 |
++ * But we don't really need a TLB flush in that case anyway, because |
3800 |
++ * each VM entry/exit includes an implicit flush when VPID is 0. |
3801 |
++ */ |
3802 |
++} |
3803 |
++ |
3804 |
++static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) |
3805 |
++{ |
3806 |
++ ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; |
3807 |
++ |
3808 |
++ vcpu->arch.cr0 &= ~cr0_guest_owned_bits; |
3809 |
++ vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; |
3810 |
++} |
3811 |
++ |
3812 |
++static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) |
3813 |
++{ |
3814 |
++ ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; |
3815 |
++ |
3816 |
++ vcpu->arch.cr4 &= ~cr4_guest_owned_bits; |
3817 |
++ vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; |
3818 |
++} |
3819 |
++ |
3820 |
++static void ept_load_pdptrs(struct kvm_vcpu *vcpu) |
3821 |
++{ |
3822 |
++ struct kvm_mmu *mmu = vcpu->arch.walk_mmu; |
3823 |
++ |
3824 |
++ if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR)) |
3825 |
++ return; |
3826 |
++ |
3827 |
++ if (is_pae_paging(vcpu)) { |
3828 |
++ vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); |
3829 |
++ vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); |
3830 |
++ vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); |
3831 |
++ vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); |
3832 |
++ } |
3833 |
++} |
3834 |
++ |
3835 |
++void ept_save_pdptrs(struct kvm_vcpu *vcpu) |
3836 |
++{ |
3837 |
++ struct kvm_mmu *mmu = vcpu->arch.walk_mmu; |
3838 |
++ |
3839 |
++ if (is_pae_paging(vcpu)) { |
3840 |
++ mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); |
3841 |
++ mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); |
3842 |
++ mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); |
3843 |
++ mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); |
3844 |
++ } |
3845 |
++ |
3846 |
++ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); |
3847 |
++} |
3848 |
++ |
3849 |
++static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, |
3850 |
++ unsigned long cr0, |
3851 |
++ struct kvm_vcpu *vcpu) |
3852 |
++{ |
3853 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
3854 |
++ |
3855 |
++ if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) |
3856 |
++ vmx_cache_reg(vcpu, VCPU_EXREG_CR3); |
3857 |
++ if (!(cr0 & X86_CR0_PG)) { |
3858 |
++ /* From paging/starting to nonpaging */ |
3859 |
++ exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING | |
3860 |
++ CPU_BASED_CR3_STORE_EXITING); |
3861 |
++ vcpu->arch.cr0 = cr0; |
3862 |
++ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); |
3863 |
++ } else if (!is_paging(vcpu)) { |
3864 |
++ /* From nonpaging to paging */ |
3865 |
++ exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING | |
3866 |
++ CPU_BASED_CR3_STORE_EXITING); |
3867 |
++ vcpu->arch.cr0 = cr0; |
3868 |
++ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); |
3869 |
++ } |
3870 |
++ |
3871 |
++ if (!(cr0 & X86_CR0_WP)) |
3872 |
++ *hw_cr0 &= ~X86_CR0_WP; |
3873 |
++} |
3874 |
++ |
3875 |
++void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
3876 |
++{ |
3877 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
3878 |
++ unsigned long hw_cr0; |
3879 |
++ |
3880 |
++ hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); |
3881 |
++ if (enable_unrestricted_guest) |
3882 |
++ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; |
3883 |
++ else { |
3884 |
++ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; |
3885 |
++ |
3886 |
++ if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) |
3887 |
++ enter_pmode(vcpu); |
3888 |
++ |
3889 |
++ if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) |
3890 |
++ enter_rmode(vcpu); |
3891 |
++ } |
3892 |
++ |
3893 |
++#ifdef CONFIG_X86_64 |
3894 |
++ if (vcpu->arch.efer & EFER_LME) { |
3895 |
++ if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) |
3896 |
++ enter_lmode(vcpu); |
3897 |
++ if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) |
3898 |
++ exit_lmode(vcpu); |
3899 |
++ } |
3900 |
++#endif |
3901 |
++ |
3902 |
++ if (enable_ept && !enable_unrestricted_guest) |
3903 |
++ ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); |
3904 |
++ |
3905 |
++ vmcs_writel(CR0_READ_SHADOW, cr0); |
3906 |
++ vmcs_writel(GUEST_CR0, hw_cr0); |
3907 |
++ vcpu->arch.cr0 = cr0; |
3908 |
++ |
3909 |
++ /* depends on vcpu->arch.cr0 to be set to a new value */ |
3910 |
++ vmx->emulation_required = emulation_required(vcpu); |
3911 |
++} |
3912 |
++ |
3913 |
++static int get_ept_level(struct kvm_vcpu *vcpu) |
3914 |
++{ |
3915 |
++ if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) |
3916 |
++ return 5; |
3917 |
++ return 4; |
3918 |
++} |
3919 |
++ |
3920 |
++u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) |
3921 |
++{ |
3922 |
++ u64 eptp = VMX_EPTP_MT_WB; |
3923 |
++ |
3924 |
++ eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; |
3925 |
++ |
3926 |
++ if (enable_ept_ad_bits && |
3927 |
++ (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) |
3928 |
++ eptp |= VMX_EPTP_AD_ENABLE_BIT; |
3929 |
++ eptp |= (root_hpa & PAGE_MASK); |
3930 |
++ |
3931 |
++ return eptp; |
3932 |
++} |
3933 |
++ |
3934 |
++void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
3935 |
++{ |
3936 |
++ struct kvm *kvm = vcpu->kvm; |
3937 |
++ bool update_guest_cr3 = true; |
3938 |
++ unsigned long guest_cr3; |
3939 |
++ u64 eptp; |
3940 |
++ |
3941 |
++ guest_cr3 = cr3; |
3942 |
++ if (enable_ept) { |
3943 |
++ eptp = construct_eptp(vcpu, cr3); |
3944 |
++ vmcs_write64(EPT_POINTER, eptp); |
3945 |
++ |
3946 |
++ if (kvm_x86_ops->tlb_remote_flush) { |
3947 |
++ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); |
3948 |
++ to_vmx(vcpu)->ept_pointer = eptp; |
3949 |
++ to_kvm_vmx(kvm)->ept_pointers_match |
3950 |
++ = EPT_POINTERS_CHECK; |
3951 |
++ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); |
3952 |
++ } |
3953 |
++ |
3954 |
++ /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */ |
3955 |
++ if (is_guest_mode(vcpu)) |
3956 |
++ update_guest_cr3 = false; |
3957 |
++ else if (!enable_unrestricted_guest && !is_paging(vcpu)) |
3958 |
++ guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; |
3959 |
++ else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) |
3960 |
++ guest_cr3 = vcpu->arch.cr3; |
3961 |
++ else /* vmcs01.GUEST_CR3 is already up-to-date. */ |
3962 |
++ update_guest_cr3 = false; |
3963 |
++ ept_load_pdptrs(vcpu); |
3964 |
++ } |
3965 |
++ |
3966 |
++ if (update_guest_cr3) |
3967 |
++ vmcs_writel(GUEST_CR3, guest_cr3); |
3968 |
++} |
3969 |
++ |
3970 |
++int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
3971 |
++{ |
3972 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
3973 |
++ /* |
3974 |
++ * Pass through host's Machine Check Enable value to hw_cr4, which |
3975 |
++ * is in force while we are in guest mode. Do not let guests control |
3976 |
++ * this bit, even if host CR4.MCE == 0. |
3977 |
++ */ |
3978 |
++ unsigned long hw_cr4; |
3979 |
++ |
3980 |
++ hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); |
3981 |
++ if (enable_unrestricted_guest) |
3982 |
++ hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; |
3983 |
++ else if (vmx->rmode.vm86_active) |
3984 |
++ hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; |
3985 |
++ else |
3986 |
++ hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; |
3987 |
++ |
3988 |
++ if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { |
3989 |
++ if (cr4 & X86_CR4_UMIP) { |
3990 |
++ secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); |
3991 |
++ hw_cr4 &= ~X86_CR4_UMIP; |
3992 |
++ } else if (!is_guest_mode(vcpu) || |
3993 |
++ !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) { |
3994 |
++ secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC); |
3995 |
++ } |
3996 |
++ } |
3997 |
++ |
3998 |
++ if (cr4 & X86_CR4_VMXE) { |
3999 |
++ /* |
4000 |
++ * To use VMXON (and later other VMX instructions), a guest |
4001 |
++ * must first be able to turn on cr4.VMXE (see handle_vmon()). |
4002 |
++ * So basically the check on whether to allow nested VMX |
4003 |
++ * is here. We operate under the default treatment of SMM, |
4004 |
++ * so VMX cannot be enabled under SMM. |
4005 |
++ */ |
4006 |
++ if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) |
4007 |
++ return 1; |
4008 |
++ } |
4009 |
++ |
4010 |
++ if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) |
4011 |
++ return 1; |
4012 |
++ |
4013 |
++ vcpu->arch.cr4 = cr4; |
4014 |
++ |
4015 |
++ if (!enable_unrestricted_guest) { |
4016 |
++ if (enable_ept) { |
4017 |
++ if (!is_paging(vcpu)) { |
4018 |
++ hw_cr4 &= ~X86_CR4_PAE; |
4019 |
++ hw_cr4 |= X86_CR4_PSE; |
4020 |
++ } else if (!(cr4 & X86_CR4_PAE)) { |
4021 |
++ hw_cr4 &= ~X86_CR4_PAE; |
4022 |
++ } |
4023 |
++ } |
4024 |
++ |
4025 |
++ /* |
4026 |
++ * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in |
4027 |
++ * hardware. To emulate this behavior, SMEP/SMAP/PKU needs |
4028 |
++ * to be manually disabled when guest switches to non-paging |
4029 |
++ * mode. |
4030 |
++ * |
4031 |
++ * If !enable_unrestricted_guest, the CPU is always running |
4032 |
++ * with CR0.PG=1 and CR4 needs to be modified. |
4033 |
++ * If enable_unrestricted_guest, the CPU automatically |
4034 |
++ * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. |
4035 |
++ */ |
4036 |
++ if (!is_paging(vcpu)) |
4037 |
++ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); |
4038 |
++ } |
4039 |
++ |
4040 |
++ vmcs_writel(CR4_READ_SHADOW, cr4); |
4041 |
++ vmcs_writel(GUEST_CR4, hw_cr4); |
4042 |
++ return 0; |
4043 |
++} |
4044 |
++ |
4045 |
++void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) |
4046 |
++{ |
4047 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4048 |
++ u32 ar; |
4049 |
++ |
4050 |
++ if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { |
4051 |
++ *var = vmx->rmode.segs[seg]; |
4052 |
++ if (seg == VCPU_SREG_TR |
4053 |
++ || var->selector == vmx_read_guest_seg_selector(vmx, seg)) |
4054 |
++ return; |
4055 |
++ var->base = vmx_read_guest_seg_base(vmx, seg); |
4056 |
++ var->selector = vmx_read_guest_seg_selector(vmx, seg); |
4057 |
++ return; |
4058 |
++ } |
4059 |
++ var->base = vmx_read_guest_seg_base(vmx, seg); |
4060 |
++ var->limit = vmx_read_guest_seg_limit(vmx, seg); |
4061 |
++ var->selector = vmx_read_guest_seg_selector(vmx, seg); |
4062 |
++ ar = vmx_read_guest_seg_ar(vmx, seg); |
4063 |
++ var->unusable = (ar >> 16) & 1; |
4064 |
++ var->type = ar & 15; |
4065 |
++ var->s = (ar >> 4) & 1; |
4066 |
++ var->dpl = (ar >> 5) & 3; |
4067 |
++ /* |
4068 |
++ * Some userspaces do not preserve unusable property. Since usable |
4069 |
++ * segment has to be present according to VMX spec we can use present |
4070 |
++ * property to amend userspace bug by making unusable segment always |
4071 |
++ * nonpresent. vmx_segment_access_rights() already marks nonpresent |
4072 |
++ * segment as unusable. |
4073 |
++ */ |
4074 |
++ var->present = !var->unusable; |
4075 |
++ var->avl = (ar >> 12) & 1; |
4076 |
++ var->l = (ar >> 13) & 1; |
4077 |
++ var->db = (ar >> 14) & 1; |
4078 |
++ var->g = (ar >> 15) & 1; |
4079 |
++} |
4080 |
++ |
4081 |
++static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
4082 |
++{ |
4083 |
++ struct kvm_segment s; |
4084 |
++ |
4085 |
++ if (to_vmx(vcpu)->rmode.vm86_active) { |
4086 |
++ vmx_get_segment(vcpu, &s, seg); |
4087 |
++ return s.base; |
4088 |
++ } |
4089 |
++ return vmx_read_guest_seg_base(to_vmx(vcpu), seg); |
4090 |
++} |
4091 |
++ |
4092 |
++int vmx_get_cpl(struct kvm_vcpu *vcpu) |
4093 |
++{ |
4094 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4095 |
++ |
4096 |
++ if (unlikely(vmx->rmode.vm86_active)) |
4097 |
++ return 0; |
4098 |
++ else { |
4099 |
++ int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); |
4100 |
++ return VMX_AR_DPL(ar); |
4101 |
++ } |
4102 |
++} |
4103 |
++ |
4104 |
++static u32 vmx_segment_access_rights(struct kvm_segment *var) |
4105 |
++{ |
4106 |
++ u32 ar; |
4107 |
++ |
4108 |
++ if (var->unusable || !var->present) |
4109 |
++ ar = 1 << 16; |
4110 |
++ else { |
4111 |
++ ar = var->type & 15; |
4112 |
++ ar |= (var->s & 1) << 4; |
4113 |
++ ar |= (var->dpl & 3) << 5; |
4114 |
++ ar |= (var->present & 1) << 7; |
4115 |
++ ar |= (var->avl & 1) << 12; |
4116 |
++ ar |= (var->l & 1) << 13; |
4117 |
++ ar |= (var->db & 1) << 14; |
4118 |
++ ar |= (var->g & 1) << 15; |
4119 |
++ } |
4120 |
++ |
4121 |
++ return ar; |
4122 |
++} |
4123 |
++ |
4124 |
++void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) |
4125 |
++{ |
4126 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4127 |
++ const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
4128 |
++ |
4129 |
++ vmx_segment_cache_clear(vmx); |
4130 |
++ |
4131 |
++ if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { |
4132 |
++ vmx->rmode.segs[seg] = *var; |
4133 |
++ if (seg == VCPU_SREG_TR) |
4134 |
++ vmcs_write16(sf->selector, var->selector); |
4135 |
++ else if (var->s) |
4136 |
++ fix_rmode_seg(seg, &vmx->rmode.segs[seg]); |
4137 |
++ goto out; |
4138 |
++ } |
4139 |
++ |
4140 |
++ vmcs_writel(sf->base, var->base); |
4141 |
++ vmcs_write32(sf->limit, var->limit); |
4142 |
++ vmcs_write16(sf->selector, var->selector); |
4143 |
++ |
4144 |
++ /* |
4145 |
++ * Fix the "Accessed" bit in AR field of segment registers for older |
4146 |
++ * qemu binaries. |
4147 |
++ * IA32 arch specifies that at the time of processor reset the |
4148 |
++ * "Accessed" bit in the AR field of segment registers is 1. And qemu |
4149 |
++ * is setting it to 0 in the userland code. This causes invalid guest |
4150 |
++ * state vmexit when "unrestricted guest" mode is turned on. |
4151 |
++ * Fix for this setup issue in cpu_reset is being pushed in the qemu |
4152 |
++ * tree. Newer qemu binaries with that qemu fix would not need this |
4153 |
++ * kvm hack. |
4154 |
++ */ |
4155 |
++ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) |
4156 |
++ var->type |= 0x1; /* Accessed */ |
4157 |
++ |
4158 |
++ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); |
4159 |
++ |
4160 |
++out: |
4161 |
++ vmx->emulation_required = emulation_required(vcpu); |
4162 |
++} |
4163 |
++ |
4164 |
++static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) |
4165 |
++{ |
4166 |
++ u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); |
4167 |
++ |
4168 |
++ *db = (ar >> 14) & 1; |
4169 |
++ *l = (ar >> 13) & 1; |
4170 |
++} |
4171 |
++ |
4172 |
++static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
4173 |
++{ |
4174 |
++ dt->size = vmcs_read32(GUEST_IDTR_LIMIT); |
4175 |
++ dt->address = vmcs_readl(GUEST_IDTR_BASE); |
4176 |
++} |
4177 |
++ |
4178 |
++static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
4179 |
++{ |
4180 |
++ vmcs_write32(GUEST_IDTR_LIMIT, dt->size); |
4181 |
++ vmcs_writel(GUEST_IDTR_BASE, dt->address); |
4182 |
++} |
4183 |
++ |
4184 |
++static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
4185 |
++{ |
4186 |
++ dt->size = vmcs_read32(GUEST_GDTR_LIMIT); |
4187 |
++ dt->address = vmcs_readl(GUEST_GDTR_BASE); |
4188 |
++} |
4189 |
++ |
4190 |
++static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
4191 |
++{ |
4192 |
++ vmcs_write32(GUEST_GDTR_LIMIT, dt->size); |
4193 |
++ vmcs_writel(GUEST_GDTR_BASE, dt->address); |
4194 |
++} |
4195 |
++ |
4196 |
++static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) |
4197 |
++{ |
4198 |
++ struct kvm_segment var; |
4199 |
++ u32 ar; |
4200 |
++ |
4201 |
++ vmx_get_segment(vcpu, &var, seg); |
4202 |
++ var.dpl = 0x3; |
4203 |
++ if (seg == VCPU_SREG_CS) |
4204 |
++ var.type = 0x3; |
4205 |
++ ar = vmx_segment_access_rights(&var); |
4206 |
++ |
4207 |
++ if (var.base != (var.selector << 4)) |
4208 |
++ return false; |
4209 |
++ if (var.limit != 0xffff) |
4210 |
++ return false; |
4211 |
++ if (ar != 0xf3) |
4212 |
++ return false; |
4213 |
++ |
4214 |
++ return true; |
4215 |
++} |
4216 |
++ |
4217 |
++static bool code_segment_valid(struct kvm_vcpu *vcpu) |
4218 |
++{ |
4219 |
++ struct kvm_segment cs; |
4220 |
++ unsigned int cs_rpl; |
4221 |
++ |
4222 |
++ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); |
4223 |
++ cs_rpl = cs.selector & SEGMENT_RPL_MASK; |
4224 |
++ |
4225 |
++ if (cs.unusable) |
4226 |
++ return false; |
4227 |
++ if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) |
4228 |
++ return false; |
4229 |
++ if (!cs.s) |
4230 |
++ return false; |
4231 |
++ if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { |
4232 |
++ if (cs.dpl > cs_rpl) |
4233 |
++ return false; |
4234 |
++ } else { |
4235 |
++ if (cs.dpl != cs_rpl) |
4236 |
++ return false; |
4237 |
++ } |
4238 |
++ if (!cs.present) |
4239 |
++ return false; |
4240 |
++ |
4241 |
++ /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ |
4242 |
++ return true; |
4243 |
++} |
4244 |
++ |
4245 |
++static bool stack_segment_valid(struct kvm_vcpu *vcpu) |
4246 |
++{ |
4247 |
++ struct kvm_segment ss; |
4248 |
++ unsigned int ss_rpl; |
4249 |
++ |
4250 |
++ vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); |
4251 |
++ ss_rpl = ss.selector & SEGMENT_RPL_MASK; |
4252 |
++ |
4253 |
++ if (ss.unusable) |
4254 |
++ return true; |
4255 |
++ if (ss.type != 3 && ss.type != 7) |
4256 |
++ return false; |
4257 |
++ if (!ss.s) |
4258 |
++ return false; |
4259 |
++ if (ss.dpl != ss_rpl) /* DPL != RPL */ |
4260 |
++ return false; |
4261 |
++ if (!ss.present) |
4262 |
++ return false; |
4263 |
++ |
4264 |
++ return true; |
4265 |
++} |
4266 |
++ |
4267 |
++static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) |
4268 |
++{ |
4269 |
++ struct kvm_segment var; |
4270 |
++ unsigned int rpl; |
4271 |
++ |
4272 |
++ vmx_get_segment(vcpu, &var, seg); |
4273 |
++ rpl = var.selector & SEGMENT_RPL_MASK; |
4274 |
++ |
4275 |
++ if (var.unusable) |
4276 |
++ return true; |
4277 |
++ if (!var.s) |
4278 |
++ return false; |
4279 |
++ if (!var.present) |
4280 |
++ return false; |
4281 |
++ if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { |
4282 |
++ if (var.dpl < rpl) /* DPL < RPL */ |
4283 |
++ return false; |
4284 |
++ } |
4285 |
++ |
4286 |
++ /* TODO: Add other members to kvm_segment_field to allow checking for other access |
4287 |
++ * rights flags |
4288 |
++ */ |
4289 |
++ return true; |
4290 |
++} |
4291 |
++ |
4292 |
++static bool tr_valid(struct kvm_vcpu *vcpu) |
4293 |
++{ |
4294 |
++ struct kvm_segment tr; |
4295 |
++ |
4296 |
++ vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); |
4297 |
++ |
4298 |
++ if (tr.unusable) |
4299 |
++ return false; |
4300 |
++ if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ |
4301 |
++ return false; |
4302 |
++ if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ |
4303 |
++ return false; |
4304 |
++ if (!tr.present) |
4305 |
++ return false; |
4306 |
++ |
4307 |
++ return true; |
4308 |
++} |
4309 |
++ |
4310 |
++static bool ldtr_valid(struct kvm_vcpu *vcpu) |
4311 |
++{ |
4312 |
++ struct kvm_segment ldtr; |
4313 |
++ |
4314 |
++ vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); |
4315 |
++ |
4316 |
++ if (ldtr.unusable) |
4317 |
++ return true; |
4318 |
++ if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ |
4319 |
++ return false; |
4320 |
++ if (ldtr.type != 2) |
4321 |
++ return false; |
4322 |
++ if (!ldtr.present) |
4323 |
++ return false; |
4324 |
++ |
4325 |
++ return true; |
4326 |
++} |
4327 |
++ |
4328 |
++static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) |
4329 |
++{ |
4330 |
++ struct kvm_segment cs, ss; |
4331 |
++ |
4332 |
++ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); |
4333 |
++ vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); |
4334 |
++ |
4335 |
++ return ((cs.selector & SEGMENT_RPL_MASK) == |
4336 |
++ (ss.selector & SEGMENT_RPL_MASK)); |
4337 |
++} |
4338 |
++ |
4339 |
++/* |
4340 |
++ * Check if guest state is valid. Returns true if valid, false if |
4341 |
++ * not. |
4342 |
++ * We assume that registers are always usable |
4343 |
++ */ |
4344 |
++static bool guest_state_valid(struct kvm_vcpu *vcpu) |
4345 |
++{ |
4346 |
++ if (enable_unrestricted_guest) |
4347 |
++ return true; |
4348 |
++ |
4349 |
++ /* real mode guest state checks */ |
4350 |
++ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { |
4351 |
++ if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) |
4352 |
++ return false; |
4353 |
++ if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) |
4354 |
++ return false; |
4355 |
++ if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) |
4356 |
++ return false; |
4357 |
++ if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) |
4358 |
++ return false; |
4359 |
++ if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) |
4360 |
++ return false; |
4361 |
++ if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) |
4362 |
++ return false; |
4363 |
++ } else { |
4364 |
++ /* protected mode guest state checks */ |
4365 |
++ if (!cs_ss_rpl_check(vcpu)) |
4366 |
++ return false; |
4367 |
++ if (!code_segment_valid(vcpu)) |
4368 |
++ return false; |
4369 |
++ if (!stack_segment_valid(vcpu)) |
4370 |
++ return false; |
4371 |
++ if (!data_segment_valid(vcpu, VCPU_SREG_DS)) |
4372 |
++ return false; |
4373 |
++ if (!data_segment_valid(vcpu, VCPU_SREG_ES)) |
4374 |
++ return false; |
4375 |
++ if (!data_segment_valid(vcpu, VCPU_SREG_FS)) |
4376 |
++ return false; |
4377 |
++ if (!data_segment_valid(vcpu, VCPU_SREG_GS)) |
4378 |
++ return false; |
4379 |
++ if (!tr_valid(vcpu)) |
4380 |
++ return false; |
4381 |
++ if (!ldtr_valid(vcpu)) |
4382 |
++ return false; |
4383 |
++ } |
4384 |
++ /* TODO: |
4385 |
++ * - Add checks on RIP |
4386 |
++ * - Add checks on RFLAGS |
4387 |
++ */ |
4388 |
++ |
4389 |
++ return true; |
4390 |
++} |
4391 |
++ |
4392 |
++static int init_rmode_tss(struct kvm *kvm) |
4393 |
++{ |
4394 |
++ gfn_t fn; |
4395 |
++ u16 data = 0; |
4396 |
++ int idx, r; |
4397 |
++ |
4398 |
++ idx = srcu_read_lock(&kvm->srcu); |
4399 |
++ fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT; |
4400 |
++ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); |
4401 |
++ if (r < 0) |
4402 |
++ goto out; |
4403 |
++ data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; |
4404 |
++ r = kvm_write_guest_page(kvm, fn++, &data, |
4405 |
++ TSS_IOPB_BASE_OFFSET, sizeof(u16)); |
4406 |
++ if (r < 0) |
4407 |
++ goto out; |
4408 |
++ r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); |
4409 |
++ if (r < 0) |
4410 |
++ goto out; |
4411 |
++ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); |
4412 |
++ if (r < 0) |
4413 |
++ goto out; |
4414 |
++ data = ~0; |
4415 |
++ r = kvm_write_guest_page(kvm, fn, &data, |
4416 |
++ RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, |
4417 |
++ sizeof(u8)); |
4418 |
++out: |
4419 |
++ srcu_read_unlock(&kvm->srcu, idx); |
4420 |
++ return r; |
4421 |
++} |
4422 |
++ |
4423 |
++static int init_rmode_identity_map(struct kvm *kvm) |
4424 |
++{ |
4425 |
++ struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); |
4426 |
++ int i, idx, r = 0; |
4427 |
++ kvm_pfn_t identity_map_pfn; |
4428 |
++ u32 tmp; |
4429 |
++ |
4430 |
++ /* Protect kvm_vmx->ept_identity_pagetable_done. */ |
4431 |
++ mutex_lock(&kvm->slots_lock); |
4432 |
++ |
4433 |
++ if (likely(kvm_vmx->ept_identity_pagetable_done)) |
4434 |
++ goto out2; |
4435 |
++ |
4436 |
++ if (!kvm_vmx->ept_identity_map_addr) |
4437 |
++ kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; |
4438 |
++ identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT; |
4439 |
++ |
4440 |
++ r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, |
4441 |
++ kvm_vmx->ept_identity_map_addr, PAGE_SIZE); |
4442 |
++ if (r < 0) |
4443 |
++ goto out2; |
4444 |
++ |
4445 |
++ idx = srcu_read_lock(&kvm->srcu); |
4446 |
++ r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); |
4447 |
++ if (r < 0) |
4448 |
++ goto out; |
4449 |
++ /* Set up identity-mapping pagetable for EPT in real mode */ |
4450 |
++ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { |
4451 |
++ tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | |
4452 |
++ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); |
4453 |
++ r = kvm_write_guest_page(kvm, identity_map_pfn, |
4454 |
++ &tmp, i * sizeof(tmp), sizeof(tmp)); |
4455 |
++ if (r < 0) |
4456 |
++ goto out; |
4457 |
++ } |
4458 |
++ kvm_vmx->ept_identity_pagetable_done = true; |
4459 |
++ |
4460 |
++out: |
4461 |
++ srcu_read_unlock(&kvm->srcu, idx); |
4462 |
++ |
4463 |
++out2: |
4464 |
++ mutex_unlock(&kvm->slots_lock); |
4465 |
++ return r; |
4466 |
++} |
4467 |
++ |
4468 |
++static void seg_setup(int seg) |
4469 |
++{ |
4470 |
++ const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
4471 |
++ unsigned int ar; |
4472 |
++ |
4473 |
++ vmcs_write16(sf->selector, 0); |
4474 |
++ vmcs_writel(sf->base, 0); |
4475 |
++ vmcs_write32(sf->limit, 0xffff); |
4476 |
++ ar = 0x93; |
4477 |
++ if (seg == VCPU_SREG_CS) |
4478 |
++ ar |= 0x08; /* code segment */ |
4479 |
++ |
4480 |
++ vmcs_write32(sf->ar_bytes, ar); |
4481 |
++} |
4482 |
++ |
4483 |
++static int alloc_apic_access_page(struct kvm *kvm) |
4484 |
++{ |
4485 |
++ struct page *page; |
4486 |
++ int r = 0; |
4487 |
++ |
4488 |
++ mutex_lock(&kvm->slots_lock); |
4489 |
++ if (kvm->arch.apic_access_page_done) |
4490 |
++ goto out; |
4491 |
++ r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, |
4492 |
++ APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); |
4493 |
++ if (r) |
4494 |
++ goto out; |
4495 |
++ |
4496 |
++ page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); |
4497 |
++ if (is_error_page(page)) { |
4498 |
++ r = -EFAULT; |
4499 |
++ goto out; |
4500 |
++ } |
4501 |
++ |
4502 |
++ /* |
4503 |
++ * Do not pin the page in memory, so that memory hot-unplug |
4504 |
++ * is able to migrate it. |
4505 |
++ */ |
4506 |
++ put_page(page); |
4507 |
++ kvm->arch.apic_access_page_done = true; |
4508 |
++out: |
4509 |
++ mutex_unlock(&kvm->slots_lock); |
4510 |
++ return r; |
4511 |
++} |
4512 |
++ |
4513 |
++int allocate_vpid(void) |
4514 |
++{ |
4515 |
++ int vpid; |
4516 |
++ |
4517 |
++ if (!enable_vpid) |
4518 |
++ return 0; |
4519 |
++ spin_lock(&vmx_vpid_lock); |
4520 |
++ vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); |
4521 |
++ if (vpid < VMX_NR_VPIDS) |
4522 |
++ __set_bit(vpid, vmx_vpid_bitmap); |
4523 |
++ else |
4524 |
++ vpid = 0; |
4525 |
++ spin_unlock(&vmx_vpid_lock); |
4526 |
++ return vpid; |
4527 |
++} |
4528 |
++ |
4529 |
++void free_vpid(int vpid) |
4530 |
++{ |
4531 |
++ if (!enable_vpid || vpid == 0) |
4532 |
++ return; |
4533 |
++ spin_lock(&vmx_vpid_lock); |
4534 |
++ __clear_bit(vpid, vmx_vpid_bitmap); |
4535 |
++ spin_unlock(&vmx_vpid_lock); |
4536 |
++} |
4537 |
++ |
4538 |
++static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, |
4539 |
++ u32 msr, int type) |
4540 |
++{ |
4541 |
++ int f = sizeof(unsigned long); |
4542 |
++ |
4543 |
++ if (!cpu_has_vmx_msr_bitmap()) |
4544 |
++ return; |
4545 |
++ |
4546 |
++ if (static_branch_unlikely(&enable_evmcs)) |
4547 |
++ evmcs_touch_msr_bitmap(); |
4548 |
++ |
4549 |
++ /* |
4550 |
++ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals |
4551 |
++ * have the write-low and read-high bitmap offsets the wrong way round. |
4552 |
++ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. |
4553 |
++ */ |
4554 |
++ if (msr <= 0x1fff) { |
4555 |
++ if (type & MSR_TYPE_R) |
4556 |
++ /* read-low */ |
4557 |
++ __clear_bit(msr, msr_bitmap + 0x000 / f); |
4558 |
++ |
4559 |
++ if (type & MSR_TYPE_W) |
4560 |
++ /* write-low */ |
4561 |
++ __clear_bit(msr, msr_bitmap + 0x800 / f); |
4562 |
++ |
4563 |
++ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { |
4564 |
++ msr &= 0x1fff; |
4565 |
++ if (type & MSR_TYPE_R) |
4566 |
++ /* read-high */ |
4567 |
++ __clear_bit(msr, msr_bitmap + 0x400 / f); |
4568 |
++ |
4569 |
++ if (type & MSR_TYPE_W) |
4570 |
++ /* write-high */ |
4571 |
++ __clear_bit(msr, msr_bitmap + 0xc00 / f); |
4572 |
++ |
4573 |
++ } |
4574 |
++} |
4575 |
++ |
4576 |
++static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, |
4577 |
++ u32 msr, int type) |
4578 |
++{ |
4579 |
++ int f = sizeof(unsigned long); |
4580 |
++ |
4581 |
++ if (!cpu_has_vmx_msr_bitmap()) |
4582 |
++ return; |
4583 |
++ |
4584 |
++ if (static_branch_unlikely(&enable_evmcs)) |
4585 |
++ evmcs_touch_msr_bitmap(); |
4586 |
++ |
4587 |
++ /* |
4588 |
++ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals |
4589 |
++ * have the write-low and read-high bitmap offsets the wrong way round. |
4590 |
++ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. |
4591 |
++ */ |
4592 |
++ if (msr <= 0x1fff) { |
4593 |
++ if (type & MSR_TYPE_R) |
4594 |
++ /* read-low */ |
4595 |
++ __set_bit(msr, msr_bitmap + 0x000 / f); |
4596 |
++ |
4597 |
++ if (type & MSR_TYPE_W) |
4598 |
++ /* write-low */ |
4599 |
++ __set_bit(msr, msr_bitmap + 0x800 / f); |
4600 |
++ |
4601 |
++ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { |
4602 |
++ msr &= 0x1fff; |
4603 |
++ if (type & MSR_TYPE_R) |
4604 |
++ /* read-high */ |
4605 |
++ __set_bit(msr, msr_bitmap + 0x400 / f); |
4606 |
++ |
4607 |
++ if (type & MSR_TYPE_W) |
4608 |
++ /* write-high */ |
4609 |
++ __set_bit(msr, msr_bitmap + 0xc00 / f); |
4610 |
++ |
4611 |
++ } |
4612 |
++} |
4613 |
++ |
4614 |
++static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, |
4615 |
++ u32 msr, int type, bool value) |
4616 |
++{ |
4617 |
++ if (value) |
4618 |
++ vmx_enable_intercept_for_msr(msr_bitmap, msr, type); |
4619 |
++ else |
4620 |
++ vmx_disable_intercept_for_msr(msr_bitmap, msr, type); |
4621 |
++} |
4622 |
++ |
4623 |
++static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) |
4624 |
++{ |
4625 |
++ u8 mode = 0; |
4626 |
++ |
4627 |
++ if (cpu_has_secondary_exec_ctrls() && |
4628 |
++ (secondary_exec_controls_get(to_vmx(vcpu)) & |
4629 |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { |
4630 |
++ mode |= MSR_BITMAP_MODE_X2APIC; |
4631 |
++ if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) |
4632 |
++ mode |= MSR_BITMAP_MODE_X2APIC_APICV; |
4633 |
++ } |
4634 |
++ |
4635 |
++ return mode; |
4636 |
++} |
4637 |
++ |
4638 |
++static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, |
4639 |
++ u8 mode) |
4640 |
++{ |
4641 |
++ int msr; |
4642 |
++ |
4643 |
++ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { |
4644 |
++ unsigned word = msr / BITS_PER_LONG; |
4645 |
++ msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; |
4646 |
++ msr_bitmap[word + (0x800 / sizeof(long))] = ~0; |
4647 |
++ } |
4648 |
++ |
4649 |
++ if (mode & MSR_BITMAP_MODE_X2APIC) { |
4650 |
++ /* |
4651 |
++ * TPR reads and writes can be virtualized even if virtual interrupt |
4652 |
++ * delivery is not in use. |
4653 |
++ */ |
4654 |
++ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); |
4655 |
++ if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { |
4656 |
++ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); |
4657 |
++ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); |
4658 |
++ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); |
4659 |
++ } |
4660 |
++ } |
4661 |
++} |
4662 |
++ |
4663 |
++void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) |
4664 |
++{ |
4665 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4666 |
++ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; |
4667 |
++ u8 mode = vmx_msr_bitmap_mode(vcpu); |
4668 |
++ u8 changed = mode ^ vmx->msr_bitmap_mode; |
4669 |
++ |
4670 |
++ if (!changed) |
4671 |
++ return; |
4672 |
++ |
4673 |
++ if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) |
4674 |
++ vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); |
4675 |
++ |
4676 |
++ vmx->msr_bitmap_mode = mode; |
4677 |
++} |
4678 |
++ |
4679 |
++void pt_update_intercept_for_msr(struct vcpu_vmx *vmx) |
4680 |
++{ |
4681 |
++ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; |
4682 |
++ bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); |
4683 |
++ u32 i; |
4684 |
++ |
4685 |
++ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS, |
4686 |
++ MSR_TYPE_RW, flag); |
4687 |
++ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE, |
4688 |
++ MSR_TYPE_RW, flag); |
4689 |
++ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK, |
4690 |
++ MSR_TYPE_RW, flag); |
4691 |
++ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH, |
4692 |
++ MSR_TYPE_RW, flag); |
4693 |
++ for (i = 0; i < vmx->pt_desc.addr_range; i++) { |
4694 |
++ vmx_set_intercept_for_msr(msr_bitmap, |
4695 |
++ MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); |
4696 |
++ vmx_set_intercept_for_msr(msr_bitmap, |
4697 |
++ MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); |
4698 |
++ } |
4699 |
++} |
4700 |
++ |
4701 |
++static bool vmx_get_enable_apicv(struct kvm *kvm) |
4702 |
++{ |
4703 |
++ return enable_apicv; |
4704 |
++} |
4705 |
++ |
4706 |
++static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) |
4707 |
++{ |
4708 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4709 |
++ void *vapic_page; |
4710 |
++ u32 vppr; |
4711 |
++ int rvi; |
4712 |
++ |
4713 |
++ if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || |
4714 |
++ !nested_cpu_has_vid(get_vmcs12(vcpu)) || |
4715 |
++ WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) |
4716 |
++ return false; |
4717 |
++ |
4718 |
++ rvi = vmx_get_rvi(); |
4719 |
++ |
4720 |
++ vapic_page = vmx->nested.virtual_apic_map.hva; |
4721 |
++ vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); |
4722 |
++ |
4723 |
++ return ((rvi & 0xf0) > (vppr & 0xf0)); |
4724 |
++} |
4725 |
++ |
4726 |
++static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, |
4727 |
++ bool nested) |
4728 |
++{ |
4729 |
++#ifdef CONFIG_SMP |
4730 |
++ int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; |
4731 |
++ |
4732 |
++ if (vcpu->mode == IN_GUEST_MODE) { |
4733 |
++ /* |
4734 |
++ * The vector of interrupt to be delivered to vcpu had |
4735 |
++ * been set in PIR before this function. |
4736 |
++ * |
4737 |
++ * Following cases will be reached in this block, and |
4738 |
++ * we always send a notification event in all cases as |
4739 |
++ * explained below. |
4740 |
++ * |
4741 |
++ * Case 1: vcpu keeps in non-root mode. Sending a |
4742 |
++ * notification event posts the interrupt to vcpu. |
4743 |
++ * |
4744 |
++ * Case 2: vcpu exits to root mode and is still |
4745 |
++ * runnable. PIR will be synced to vIRR before the |
4746 |
++ * next vcpu entry. Sending a notification event in |
4747 |
++ * this case has no effect, as vcpu is not in root |
4748 |
++ * mode. |
4749 |
++ * |
4750 |
++ * Case 3: vcpu exits to root mode and is blocked. |
4751 |
++ * vcpu_block() has already synced PIR to vIRR and |
4752 |
++ * never blocks vcpu if vIRR is not cleared. Therefore, |
4753 |
++ * a blocked vcpu here does not wait for any requested |
4754 |
++ * interrupts in PIR, and sending a notification event |
4755 |
++ * which has no effect is safe here. |
4756 |
++ */ |
4757 |
++ |
4758 |
++ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); |
4759 |
++ return true; |
4760 |
++ } |
4761 |
++#endif |
4762 |
++ return false; |
4763 |
++} |
4764 |
++ |
4765 |
++static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, |
4766 |
++ int vector) |
4767 |
++{ |
4768 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4769 |
++ |
4770 |
++ if (is_guest_mode(vcpu) && |
4771 |
++ vector == vmx->nested.posted_intr_nv) { |
4772 |
++ /* |
4773 |
++ * If a posted intr is not recognized by hardware, |
4774 |
++ * we will accomplish it in the next vmentry. |
4775 |
++ */ |
4776 |
++ vmx->nested.pi_pending = true; |
4777 |
++ kvm_make_request(KVM_REQ_EVENT, vcpu); |
4778 |
++ /* the PIR and ON have been set by L1. */ |
4779 |
++ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) |
4780 |
++ kvm_vcpu_kick(vcpu); |
4781 |
++ return 0; |
4782 |
++ } |
4783 |
++ return -1; |
4784 |
++} |
4785 |
++/* |
4786 |
++ * Send interrupt to vcpu via posted interrupt way. |
4787 |
++ * 1. If target vcpu is running(non-root mode), send posted interrupt |
4788 |
++ * notification to vcpu and hardware will sync PIR to vIRR atomically. |
4789 |
++ * 2. If target vcpu isn't running(root mode), kick it to pick up the |
4790 |
++ * interrupt from PIR in next vmentry. |
4791 |
++ */ |
4792 |
++static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) |
4793 |
++{ |
4794 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4795 |
++ int r; |
4796 |
++ |
4797 |
++ r = vmx_deliver_nested_posted_interrupt(vcpu, vector); |
4798 |
++ if (!r) |
4799 |
++ return; |
4800 |
++ |
4801 |
++ if (pi_test_and_set_pir(vector, &vmx->pi_desc)) |
4802 |
++ return; |
4803 |
++ |
4804 |
++ /* If a previous notification has sent the IPI, nothing to do. */ |
4805 |
++ if (pi_test_and_set_on(&vmx->pi_desc)) |
4806 |
++ return; |
4807 |
++ |
4808 |
++ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) |
4809 |
++ kvm_vcpu_kick(vcpu); |
4810 |
++} |
4811 |
++ |
4812 |
++/* |
4813 |
++ * Set up the vmcs's constant host-state fields, i.e., host-state fields that |
4814 |
++ * will not change in the lifetime of the guest. |
4815 |
++ * Note that host-state that does change is set elsewhere. E.g., host-state |
4816 |
++ * that is set differently for each CPU is set in vmx_vcpu_load(), not here. |
4817 |
++ */ |
4818 |
++void vmx_set_constant_host_state(struct vcpu_vmx *vmx) |
4819 |
++{ |
4820 |
++ u32 low32, high32; |
4821 |
++ unsigned long tmpl; |
4822 |
++ unsigned long cr0, cr3, cr4; |
4823 |
++ |
4824 |
++ cr0 = read_cr0(); |
4825 |
++ WARN_ON(cr0 & X86_CR0_TS); |
4826 |
++ vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ |
4827 |
++ |
4828 |
++ /* |
4829 |
++ * Save the most likely value for this task's CR3 in the VMCS. |
4830 |
++ * We can't use __get_current_cr3_fast() because we're not atomic. |
4831 |
++ */ |
4832 |
++ cr3 = __read_cr3(); |
4833 |
++ vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ |
4834 |
++ vmx->loaded_vmcs->host_state.cr3 = cr3; |
4835 |
++ |
4836 |
++ /* Save the most likely value for this task's CR4 in the VMCS. */ |
4837 |
++ cr4 = cr4_read_shadow(); |
4838 |
++ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ |
4839 |
++ vmx->loaded_vmcs->host_state.cr4 = cr4; |
4840 |
++ |
4841 |
++ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
4842 |
++#ifdef CONFIG_X86_64 |
4843 |
++ /* |
4844 |
++ * Load null selectors, so we can avoid reloading them in |
4845 |
++ * vmx_prepare_switch_to_host(), in case userspace uses |
4846 |
++ * the null selectors too (the expected case). |
4847 |
++ */ |
4848 |
++ vmcs_write16(HOST_DS_SELECTOR, 0); |
4849 |
++ vmcs_write16(HOST_ES_SELECTOR, 0); |
4850 |
++#else |
4851 |
++ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
4852 |
++ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
4853 |
++#endif |
4854 |
++ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
4855 |
++ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ |
4856 |
++ |
4857 |
++ vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */ |
4858 |
++ |
4859 |
++ vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ |
4860 |
++ |
4861 |
++ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); |
4862 |
++ vmcs_write32(HOST_IA32_SYSENTER_CS, low32); |
4863 |
++ rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); |
4864 |
++ vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ |
4865 |
++ |
4866 |
++ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { |
4867 |
++ rdmsr(MSR_IA32_CR_PAT, low32, high32); |
4868 |
++ vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); |
4869 |
++ } |
4870 |
++ |
4871 |
++ if (cpu_has_load_ia32_efer()) |
4872 |
++ vmcs_write64(HOST_IA32_EFER, host_efer); |
4873 |
++} |
4874 |
++ |
4875 |
++void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) |
4876 |
++{ |
4877 |
++ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; |
4878 |
++ if (enable_ept) |
4879 |
++ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; |
4880 |
++ if (is_guest_mode(&vmx->vcpu)) |
4881 |
++ vmx->vcpu.arch.cr4_guest_owned_bits &= |
4882 |
++ ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; |
4883 |
++ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); |
4884 |
++} |
4885 |
++ |
4886 |
++u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) |
4887 |
++{ |
4888 |
++ u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; |
4889 |
++ |
4890 |
++ if (!kvm_vcpu_apicv_active(&vmx->vcpu)) |
4891 |
++ pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; |
4892 |
++ |
4893 |
++ if (!enable_vnmi) |
4894 |
++ pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; |
4895 |
++ |
4896 |
++ if (!enable_preemption_timer) |
4897 |
++ pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; |
4898 |
++ |
4899 |
++ return pin_based_exec_ctrl; |
4900 |
++} |
4901 |
++ |
4902 |
++static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) |
4903 |
++{ |
4904 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
4905 |
++ |
4906 |
++ pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); |
4907 |
++ if (cpu_has_secondary_exec_ctrls()) { |
4908 |
++ if (kvm_vcpu_apicv_active(vcpu)) |
4909 |
++ secondary_exec_controls_setbit(vmx, |
4910 |
++ SECONDARY_EXEC_APIC_REGISTER_VIRT | |
4911 |
++ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); |
4912 |
++ else |
4913 |
++ secondary_exec_controls_clearbit(vmx, |
4914 |
++ SECONDARY_EXEC_APIC_REGISTER_VIRT | |
4915 |
++ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); |
4916 |
++ } |
4917 |
++ |
4918 |
++ if (cpu_has_vmx_msr_bitmap()) |
4919 |
++ vmx_update_msr_bitmap(vcpu); |
4920 |
++} |
4921 |
++ |
4922 |
++u32 vmx_exec_control(struct vcpu_vmx *vmx) |
4923 |
++{ |
4924 |
++ u32 exec_control = vmcs_config.cpu_based_exec_ctrl; |
4925 |
++ |
4926 |
++ if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) |
4927 |
++ exec_control &= ~CPU_BASED_MOV_DR_EXITING; |
4928 |
++ |
4929 |
++ if (!cpu_need_tpr_shadow(&vmx->vcpu)) { |
4930 |
++ exec_control &= ~CPU_BASED_TPR_SHADOW; |
4931 |
++#ifdef CONFIG_X86_64 |
4932 |
++ exec_control |= CPU_BASED_CR8_STORE_EXITING | |
4933 |
++ CPU_BASED_CR8_LOAD_EXITING; |
4934 |
++#endif |
4935 |
++ } |
4936 |
++ if (!enable_ept) |
4937 |
++ exec_control |= CPU_BASED_CR3_STORE_EXITING | |
4938 |
++ CPU_BASED_CR3_LOAD_EXITING | |
4939 |
++ CPU_BASED_INVLPG_EXITING; |
4940 |
++ if (kvm_mwait_in_guest(vmx->vcpu.kvm)) |
4941 |
++ exec_control &= ~(CPU_BASED_MWAIT_EXITING | |
4942 |
++ CPU_BASED_MONITOR_EXITING); |
4943 |
++ if (kvm_hlt_in_guest(vmx->vcpu.kvm)) |
4944 |
++ exec_control &= ~CPU_BASED_HLT_EXITING; |
4945 |
++ return exec_control; |
4946 |
++} |
4947 |
++ |
4948 |
++ |
4949 |
++static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) |
4950 |
++{ |
4951 |
++ struct kvm_vcpu *vcpu = &vmx->vcpu; |
4952 |
++ |
4953 |
++ u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; |
4954 |
++ |
4955 |
++ if (pt_mode == PT_MODE_SYSTEM) |
4956 |
++ exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX); |
4957 |
++ if (!cpu_need_virtualize_apic_accesses(vcpu)) |
4958 |
++ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; |
4959 |
++ if (vmx->vpid == 0) |
4960 |
++ exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; |
4961 |
++ if (!enable_ept) { |
4962 |
++ exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; |
4963 |
++ enable_unrestricted_guest = 0; |
4964 |
++ } |
4965 |
++ if (!enable_unrestricted_guest) |
4966 |
++ exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; |
4967 |
++ if (kvm_pause_in_guest(vmx->vcpu.kvm)) |
4968 |
++ exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; |
4969 |
++ if (!kvm_vcpu_apicv_active(vcpu)) |
4970 |
++ exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | |
4971 |
++ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); |
4972 |
++ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; |
4973 |
++ |
4974 |
++ /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, |
4975 |
++ * in vmx_set_cr4. */ |
4976 |
++ exec_control &= ~SECONDARY_EXEC_DESC; |
4977 |
++ |
4978 |
++ /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD |
4979 |
++ (handle_vmptrld). |
4980 |
++ We can NOT enable shadow_vmcs here because we don't have yet |
4981 |
++ a current VMCS12 |
4982 |
++ */ |
4983 |
++ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; |
4984 |
++ |
4985 |
++ if (!enable_pml) |
4986 |
++ exec_control &= ~SECONDARY_EXEC_ENABLE_PML; |
4987 |
++ |
4988 |
++ if (vmx_xsaves_supported()) { |
4989 |
++ /* Exposing XSAVES only when XSAVE is exposed */ |
4990 |
++ bool xsaves_enabled = |
4991 |
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && |
4992 |
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); |
4993 |
++ |
4994 |
++ vcpu->arch.xsaves_enabled = xsaves_enabled; |
4995 |
++ |
4996 |
++ if (!xsaves_enabled) |
4997 |
++ exec_control &= ~SECONDARY_EXEC_XSAVES; |
4998 |
++ |
4999 |
++ if (nested) { |
5000 |
++ if (xsaves_enabled) |
5001 |
++ vmx->nested.msrs.secondary_ctls_high |= |
5002 |
++ SECONDARY_EXEC_XSAVES; |
5003 |
++ else |
5004 |
++ vmx->nested.msrs.secondary_ctls_high &= |
5005 |
++ ~SECONDARY_EXEC_XSAVES; |
5006 |
++ } |
5007 |
++ } |
5008 |
++ |
5009 |
++ if (vmx_rdtscp_supported()) { |
5010 |
++ bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); |
5011 |
++ if (!rdtscp_enabled) |
5012 |
++ exec_control &= ~SECONDARY_EXEC_RDTSCP; |
5013 |
++ |
5014 |
++ if (nested) { |
5015 |
++ if (rdtscp_enabled) |
5016 |
++ vmx->nested.msrs.secondary_ctls_high |= |
5017 |
++ SECONDARY_EXEC_RDTSCP; |
5018 |
++ else |
5019 |
++ vmx->nested.msrs.secondary_ctls_high &= |
5020 |
++ ~SECONDARY_EXEC_RDTSCP; |
5021 |
++ } |
5022 |
++ } |
5023 |
++ |
5024 |
++ if (vmx_invpcid_supported()) { |
5025 |
++ /* Exposing INVPCID only when PCID is exposed */ |
5026 |
++ bool invpcid_enabled = |
5027 |
++ guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && |
5028 |
++ guest_cpuid_has(vcpu, X86_FEATURE_PCID); |
5029 |
++ |
5030 |
++ if (!invpcid_enabled) { |
5031 |
++ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; |
5032 |
++ guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); |
5033 |
++ } |
5034 |
++ |
5035 |
++ if (nested) { |
5036 |
++ if (invpcid_enabled) |
5037 |
++ vmx->nested.msrs.secondary_ctls_high |= |
5038 |
++ SECONDARY_EXEC_ENABLE_INVPCID; |
5039 |
++ else |
5040 |
++ vmx->nested.msrs.secondary_ctls_high &= |
5041 |
++ ~SECONDARY_EXEC_ENABLE_INVPCID; |
5042 |
++ } |
5043 |
++ } |
5044 |
++ |
5045 |
++ if (vmx_rdrand_supported()) { |
5046 |
++ bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); |
5047 |
++ if (rdrand_enabled) |
5048 |
++ exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING; |
5049 |
++ |
5050 |
++ if (nested) { |
5051 |
++ if (rdrand_enabled) |
5052 |
++ vmx->nested.msrs.secondary_ctls_high |= |
5053 |
++ SECONDARY_EXEC_RDRAND_EXITING; |
5054 |
++ else |
5055 |
++ vmx->nested.msrs.secondary_ctls_high &= |
5056 |
++ ~SECONDARY_EXEC_RDRAND_EXITING; |
5057 |
++ } |
5058 |
++ } |
5059 |
++ |
5060 |
++ if (vmx_rdseed_supported()) { |
5061 |
++ bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); |
5062 |
++ if (rdseed_enabled) |
5063 |
++ exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING; |
5064 |
++ |
5065 |
++ if (nested) { |
5066 |
++ if (rdseed_enabled) |
5067 |
++ vmx->nested.msrs.secondary_ctls_high |= |
5068 |
++ SECONDARY_EXEC_RDSEED_EXITING; |
5069 |
++ else |
5070 |
++ vmx->nested.msrs.secondary_ctls_high &= |
5071 |
++ ~SECONDARY_EXEC_RDSEED_EXITING; |
5072 |
++ } |
5073 |
++ } |
5074 |
++ |
5075 |
++ if (vmx_waitpkg_supported()) { |
5076 |
++ bool waitpkg_enabled = |
5077 |
++ guest_cpuid_has(vcpu, X86_FEATURE_WAITPKG); |
5078 |
++ |
5079 |
++ if (!waitpkg_enabled) |
5080 |
++ exec_control &= ~SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; |
5081 |
++ |
5082 |
++ if (nested) { |
5083 |
++ if (waitpkg_enabled) |
5084 |
++ vmx->nested.msrs.secondary_ctls_high |= |
5085 |
++ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; |
5086 |
++ else |
5087 |
++ vmx->nested.msrs.secondary_ctls_high &= |
5088 |
++ ~SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; |
5089 |
++ } |
5090 |
++ } |
5091 |
++ |
5092 |
++ vmx->secondary_exec_control = exec_control; |
5093 |
++} |
5094 |
++ |
5095 |
++static void ept_set_mmio_spte_mask(void) |
5096 |
++{ |
5097 |
++ /* |
5098 |
++ * EPT Misconfigurations can be generated if the value of bits 2:0 |
5099 |
++ * of an EPT paging-structure entry is 110b (write/execute). |
5100 |
++ */ |
5101 |
++ kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, |
5102 |
++ VMX_EPT_MISCONFIG_WX_VALUE, 0); |
5103 |
++} |
5104 |
++ |
5105 |
++#define VMX_XSS_EXIT_BITMAP 0 |
5106 |
++ |
5107 |
++/* |
5108 |
++ * Noting that the initialization of Guest-state Area of VMCS is in |
5109 |
++ * vmx_vcpu_reset(). |
5110 |
++ */ |
5111 |
++static void init_vmcs(struct vcpu_vmx *vmx) |
5112 |
++{ |
5113 |
++ if (nested) |
5114 |
++ nested_vmx_set_vmcs_shadowing_bitmap(); |
5115 |
++ |
5116 |
++ if (cpu_has_vmx_msr_bitmap()) |
5117 |
++ vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); |
5118 |
++ |
5119 |
++ vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ |
5120 |
++ |
5121 |
++ /* Control */ |
5122 |
++ pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); |
5123 |
++ |
5124 |
++ exec_controls_set(vmx, vmx_exec_control(vmx)); |
5125 |
++ |
5126 |
++ if (cpu_has_secondary_exec_ctrls()) { |
5127 |
++ vmx_compute_secondary_exec_control(vmx); |
5128 |
++ secondary_exec_controls_set(vmx, vmx->secondary_exec_control); |
5129 |
++ } |
5130 |
++ |
5131 |
++ if (kvm_vcpu_apicv_active(&vmx->vcpu)) { |
5132 |
++ vmcs_write64(EOI_EXIT_BITMAP0, 0); |
5133 |
++ vmcs_write64(EOI_EXIT_BITMAP1, 0); |
5134 |
++ vmcs_write64(EOI_EXIT_BITMAP2, 0); |
5135 |
++ vmcs_write64(EOI_EXIT_BITMAP3, 0); |
5136 |
++ |
5137 |
++ vmcs_write16(GUEST_INTR_STATUS, 0); |
5138 |
++ |
5139 |
++ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); |
5140 |
++ vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); |
5141 |
++ } |
5142 |
++ |
5143 |
++ if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { |
5144 |
++ vmcs_write32(PLE_GAP, ple_gap); |
5145 |
++ vmx->ple_window = ple_window; |
5146 |
++ vmx->ple_window_dirty = true; |
5147 |
++ } |
5148 |
++ |
5149 |
++ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); |
5150 |
++ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); |
5151 |
++ vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ |
5152 |
++ |
5153 |
++ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ |
5154 |
++ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ |
5155 |
++ vmx_set_constant_host_state(vmx); |
5156 |
++ vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ |
5157 |
++ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ |
5158 |
++ |
5159 |
++ if (cpu_has_vmx_vmfunc()) |
5160 |
++ vmcs_write64(VM_FUNCTION_CONTROL, 0); |
5161 |
++ |
5162 |
++ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); |
5163 |
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); |
5164 |
++ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); |
5165 |
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); |
5166 |
++ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); |
5167 |
++ |
5168 |
++ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) |
5169 |
++ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); |
5170 |
++ |
5171 |
++ vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); |
5172 |
++ |
5173 |
++ /* 22.2.1, 20.8.1 */ |
5174 |
++ vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); |
5175 |
++ |
5176 |
++ vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; |
5177 |
++ vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); |
5178 |
++ |
5179 |
++ set_cr4_guest_host_mask(vmx); |
5180 |
++ |
5181 |
++ if (vmx->vpid != 0) |
5182 |
++ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); |
5183 |
++ |
5184 |
++ if (vmx_xsaves_supported()) |
5185 |
++ vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); |
5186 |
++ |
5187 |
++ if (enable_pml) { |
5188 |
++ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); |
5189 |
++ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); |
5190 |
++ } |
5191 |
++ |
5192 |
++ if (cpu_has_vmx_encls_vmexit()) |
5193 |
++ vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); |
5194 |
++ |
5195 |
++ if (pt_mode == PT_MODE_HOST_GUEST) { |
5196 |
++ memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); |
5197 |
++ /* Bit[6~0] are forced to 1, writes are ignored. */ |
5198 |
++ vmx->pt_desc.guest.output_mask = 0x7F; |
5199 |
++ vmcs_write64(GUEST_IA32_RTIT_CTL, 0); |
5200 |
++ } |
5201 |
++} |
5202 |
++ |
5203 |
++static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
5204 |
++{ |
5205 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
5206 |
++ struct msr_data apic_base_msr; |
5207 |
++ u64 cr0; |
5208 |
++ |
5209 |
++ vmx->rmode.vm86_active = 0; |
5210 |
++ vmx->spec_ctrl = 0; |
5211 |
++ |
5212 |
++ vmx->msr_ia32_umwait_control = 0; |
5213 |
++ |
5214 |
++ vcpu->arch.microcode_version = 0x100000000ULL; |
5215 |
++ vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
5216 |
++ vmx->hv_deadline_tsc = -1; |
5217 |
++ kvm_set_cr8(vcpu, 0); |
5218 |
++ |
5219 |
++ if (!init_event) { |
5220 |
++ apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | |
5221 |
++ MSR_IA32_APICBASE_ENABLE; |
5222 |
++ if (kvm_vcpu_is_reset_bsp(vcpu)) |
5223 |
++ apic_base_msr.data |= MSR_IA32_APICBASE_BSP; |
5224 |
++ apic_base_msr.host_initiated = true; |
5225 |
++ kvm_set_apic_base(vcpu, &apic_base_msr); |
5226 |
++ } |
5227 |
++ |
5228 |
++ vmx_segment_cache_clear(vmx); |
5229 |
++ |
5230 |
++ seg_setup(VCPU_SREG_CS); |
5231 |
++ vmcs_write16(GUEST_CS_SELECTOR, 0xf000); |
5232 |
++ vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); |
5233 |
++ |
5234 |
++ seg_setup(VCPU_SREG_DS); |
5235 |
++ seg_setup(VCPU_SREG_ES); |
5236 |
++ seg_setup(VCPU_SREG_FS); |
5237 |
++ seg_setup(VCPU_SREG_GS); |
5238 |
++ seg_setup(VCPU_SREG_SS); |
5239 |
++ |
5240 |
++ vmcs_write16(GUEST_TR_SELECTOR, 0); |
5241 |
++ vmcs_writel(GUEST_TR_BASE, 0); |
5242 |
++ vmcs_write32(GUEST_TR_LIMIT, 0xffff); |
5243 |
++ vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
5244 |
++ |
5245 |
++ vmcs_write16(GUEST_LDTR_SELECTOR, 0); |
5246 |
++ vmcs_writel(GUEST_LDTR_BASE, 0); |
5247 |
++ vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); |
5248 |
++ vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); |
5249 |
++ |
5250 |
++ if (!init_event) { |
5251 |
++ vmcs_write32(GUEST_SYSENTER_CS, 0); |
5252 |
++ vmcs_writel(GUEST_SYSENTER_ESP, 0); |
5253 |
++ vmcs_writel(GUEST_SYSENTER_EIP, 0); |
5254 |
++ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); |
5255 |
++ } |
5256 |
++ |
5257 |
++ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); |
5258 |
++ kvm_rip_write(vcpu, 0xfff0); |
5259 |
++ |
5260 |
++ vmcs_writel(GUEST_GDTR_BASE, 0); |
5261 |
++ vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); |
5262 |
++ |
5263 |
++ vmcs_writel(GUEST_IDTR_BASE, 0); |
5264 |
++ vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); |
5265 |
++ |
5266 |
++ vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); |
5267 |
++ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); |
5268 |
++ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); |
5269 |
++ if (kvm_mpx_supported()) |
5270 |
++ vmcs_write64(GUEST_BNDCFGS, 0); |
5271 |
++ |
5272 |
++ setup_msrs(vmx); |
5273 |
++ |
5274 |
++ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ |
5275 |
++ |
5276 |
++ if (cpu_has_vmx_tpr_shadow() && !init_event) { |
5277 |
++ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); |
5278 |
++ if (cpu_need_tpr_shadow(vcpu)) |
5279 |
++ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, |
5280 |
++ __pa(vcpu->arch.apic->regs)); |
5281 |
++ vmcs_write32(TPR_THRESHOLD, 0); |
5282 |
++ } |
5283 |
++ |
5284 |
++ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); |
5285 |
++ |
5286 |
++ cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; |
5287 |
++ vmx->vcpu.arch.cr0 = cr0; |
5288 |
++ vmx_set_cr0(vcpu, cr0); /* enter rmode */ |
5289 |
++ vmx_set_cr4(vcpu, 0); |
5290 |
++ vmx_set_efer(vcpu, 0); |
5291 |
++ |
5292 |
++ update_exception_bitmap(vcpu); |
5293 |
++ |
5294 |
++ vpid_sync_context(vmx->vpid); |
5295 |
++ if (init_event) |
5296 |
++ vmx_clear_hlt(vcpu); |
5297 |
++} |
5298 |
++ |
5299 |
++static void enable_irq_window(struct kvm_vcpu *vcpu) |
5300 |
++{ |
5301 |
++ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); |
5302 |
++} |
5303 |
++ |
5304 |
++static void enable_nmi_window(struct kvm_vcpu *vcpu) |
5305 |
++{ |
5306 |
++ if (!enable_vnmi || |
5307 |
++ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { |
5308 |
++ enable_irq_window(vcpu); |
5309 |
++ return; |
5310 |
++ } |
5311 |
++ |
5312 |
++ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); |
5313 |
++} |
5314 |
++ |
5315 |
++static void vmx_inject_irq(struct kvm_vcpu *vcpu) |
5316 |
++{ |
5317 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
5318 |
++ uint32_t intr; |
5319 |
++ int irq = vcpu->arch.interrupt.nr; |
5320 |
++ |
5321 |
++ trace_kvm_inj_virq(irq); |
5322 |
++ |
5323 |
++ ++vcpu->stat.irq_injections; |
5324 |
++ if (vmx->rmode.vm86_active) { |
5325 |
++ int inc_eip = 0; |
5326 |
++ if (vcpu->arch.interrupt.soft) |
5327 |
++ inc_eip = vcpu->arch.event_exit_inst_len; |
5328 |
++ kvm_inject_realmode_interrupt(vcpu, irq, inc_eip); |
5329 |
++ return; |
5330 |
++ } |
5331 |
++ intr = irq | INTR_INFO_VALID_MASK; |
5332 |
++ if (vcpu->arch.interrupt.soft) { |
5333 |
++ intr |= INTR_TYPE_SOFT_INTR; |
5334 |
++ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, |
5335 |
++ vmx->vcpu.arch.event_exit_inst_len); |
5336 |
++ } else |
5337 |
++ intr |= INTR_TYPE_EXT_INTR; |
5338 |
++ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); |
5339 |
++ |
5340 |
++ vmx_clear_hlt(vcpu); |
5341 |
++} |
5342 |
++ |
5343 |
++static void vmx_inject_nmi(struct kvm_vcpu *vcpu) |
5344 |
++{ |
5345 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
5346 |
++ |
5347 |
++ if (!enable_vnmi) { |
5348 |
++ /* |
5349 |
++ * Tracking the NMI-blocked state in software is built upon |
5350 |
++ * finding the next open IRQ window. This, in turn, depends on |
5351 |
++ * well-behaving guests: They have to keep IRQs disabled at |
5352 |
++ * least as long as the NMI handler runs. Otherwise we may |
5353 |
++ * cause NMI nesting, maybe breaking the guest. But as this is |
5354 |
++ * highly unlikely, we can live with the residual risk. |
5355 |
++ */ |
5356 |
++ vmx->loaded_vmcs->soft_vnmi_blocked = 1; |
5357 |
++ vmx->loaded_vmcs->vnmi_blocked_time = 0; |
5358 |
++ } |
5359 |
++ |
5360 |
++ ++vcpu->stat.nmi_injections; |
5361 |
++ vmx->loaded_vmcs->nmi_known_unmasked = false; |
5362 |
++ |
5363 |
++ if (vmx->rmode.vm86_active) { |
5364 |
++ kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0); |
5365 |
++ return; |
5366 |
++ } |
5367 |
++ |
5368 |
++ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
5369 |
++ INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); |
5370 |
++ |
5371 |
++ vmx_clear_hlt(vcpu); |
5372 |
++} |
5373 |
++ |
5374 |
++bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) |
5375 |
++{ |
5376 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
5377 |
++ bool masked; |
5378 |
++ |
5379 |
++ if (!enable_vnmi) |
5380 |
++ return vmx->loaded_vmcs->soft_vnmi_blocked; |
5381 |
++ if (vmx->loaded_vmcs->nmi_known_unmasked) |
5382 |
++ return false; |
5383 |
++ masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; |
5384 |
++ vmx->loaded_vmcs->nmi_known_unmasked = !masked; |
5385 |
++ return masked; |
5386 |
++} |
5387 |
++ |
5388 |
++void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) |
5389 |
++{ |
5390 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
5391 |
++ |
5392 |
++ if (!enable_vnmi) { |
5393 |
++ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { |
5394 |
++ vmx->loaded_vmcs->soft_vnmi_blocked = masked; |
5395 |
++ vmx->loaded_vmcs->vnmi_blocked_time = 0; |
5396 |
++ } |
5397 |
++ } else { |
5398 |
++ vmx->loaded_vmcs->nmi_known_unmasked = !masked; |
5399 |
++ if (masked) |
5400 |
++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
5401 |
++ GUEST_INTR_STATE_NMI); |
5402 |
++ else |
5403 |
++ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, |
5404 |
++ GUEST_INTR_STATE_NMI); |
5405 |
++ } |
5406 |
++} |
5407 |
++ |
5408 |
++static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) |
5409 |
++{ |
5410 |
++ if (to_vmx(vcpu)->nested.nested_run_pending) |
5411 |
++ return 0; |
5412 |
++ |
5413 |
++ if (!enable_vnmi && |
5414 |
++ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) |
5415 |
++ return 0; |
5416 |
++ |
5417 |
++ return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & |
5418 |
++ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5419 |
++ | GUEST_INTR_STATE_NMI)); |
5420 |
++} |
5421 |
++ |
5422 |
++static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) |
5423 |
++{ |
5424 |
++ return (!to_vmx(vcpu)->nested.nested_run_pending && |
5425 |
++ vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && |
5426 |
++ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & |
5427 |
++ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); |
5428 |
++} |
5429 |
++ |
5430 |
++static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) |
5431 |
++{ |
5432 |
++ int ret; |
5433 |
++ |
5434 |
++ if (enable_unrestricted_guest) |
5435 |
++ return 0; |
5436 |
++ |
5437 |
++ ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, |
5438 |
++ PAGE_SIZE * 3); |
5439 |
++ if (ret) |
5440 |
++ return ret; |
5441 |
++ to_kvm_vmx(kvm)->tss_addr = addr; |
5442 |
++ return init_rmode_tss(kvm); |
5443 |
++} |
5444 |
++ |
5445 |
++static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) |
5446 |
++{ |
5447 |
++ to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; |
5448 |
++ return 0; |
5449 |
++} |
5450 |
++ |
5451 |
++static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) |
5452 |
++{ |
5453 |
++ switch (vec) { |
5454 |
++ case BP_VECTOR: |
5455 |
++ /* |
5456 |
++ * Update instruction length as we may reinject the exception |
5457 |
++ * from user space while in guest debugging mode. |
5458 |
++ */ |
5459 |
++ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = |
5460 |
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
5461 |
++ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
5462 |
++ return false; |
5463 |
++ /* fall through */ |
5464 |
++ case DB_VECTOR: |
5465 |
++ if (vcpu->guest_debug & |
5466 |
++ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) |
5467 |
++ return false; |
5468 |
++ /* fall through */ |
5469 |
++ case DE_VECTOR: |
5470 |
++ case OF_VECTOR: |
5471 |
++ case BR_VECTOR: |
5472 |
++ case UD_VECTOR: |
5473 |
++ case DF_VECTOR: |
5474 |
++ case SS_VECTOR: |
5475 |
++ case GP_VECTOR: |
5476 |
++ case MF_VECTOR: |
5477 |
++ return true; |
5478 |
++ break; |
5479 |
++ } |
5480 |
++ return false; |
5481 |
++} |
5482 |
++ |
5483 |
++static int handle_rmode_exception(struct kvm_vcpu *vcpu, |
5484 |
++ int vec, u32 err_code) |
5485 |
++{ |
5486 |
++ /* |
5487 |
++ * Instruction with address size override prefix opcode 0x67 |
5488 |
++ * Cause the #SS fault with 0 error code in VM86 mode. |
5489 |
++ */ |
5490 |
++ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { |
5491 |
++ if (kvm_emulate_instruction(vcpu, 0)) { |
5492 |
++ if (vcpu->arch.halt_request) { |
5493 |
++ vcpu->arch.halt_request = 0; |
5494 |
++ return kvm_vcpu_halt(vcpu); |
5495 |
++ } |
5496 |
++ return 1; |
5497 |
++ } |
5498 |
++ return 0; |
5499 |
++ } |
5500 |
++ |
5501 |
++ /* |
5502 |
++ * Forward all other exceptions that are valid in real mode. |
5503 |
++ * FIXME: Breaks guest debugging in real mode, needs to be fixed with |
5504 |
++ * the required debugging infrastructure rework. |
5505 |
++ */ |
5506 |
++ kvm_queue_exception(vcpu, vec); |
5507 |
++ return 1; |
5508 |
++} |
5509 |
++ |
5510 |
++/* |
5511 |
++ * Trigger machine check on the host. We assume all the MSRs are already set up |
5512 |
++ * by the CPU and that we still run on the same CPU as the MCE occurred on. |
5513 |
++ * We pass a fake environment to the machine check handler because we want |
5514 |
++ * the guest to be always treated like user space, no matter what context |
5515 |
++ * it used internally. |
5516 |
++ */ |
5517 |
++static void kvm_machine_check(void) |
5518 |
++{ |
5519 |
++#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) |
5520 |
++ struct pt_regs regs = { |
5521 |
++ .cs = 3, /* Fake ring 3 no matter what the guest ran on */ |
5522 |
++ .flags = X86_EFLAGS_IF, |
5523 |
++ }; |
5524 |
++ |
5525 |
++ do_machine_check(®s, 0); |
5526 |
++#endif |
5527 |
++} |
5528 |
++ |
5529 |
++static int handle_machine_check(struct kvm_vcpu *vcpu) |
5530 |
++{ |
5531 |
++ /* handled by vmx_vcpu_run() */ |
5532 |
++ return 1; |
5533 |
++} |
5534 |
++ |
5535 |
++static int handle_exception_nmi(struct kvm_vcpu *vcpu) |
5536 |
++{ |
5537 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
5538 |
++ struct kvm_run *kvm_run = vcpu->run; |
5539 |
++ u32 intr_info, ex_no, error_code; |
5540 |
++ unsigned long cr2, rip, dr6; |
5541 |
++ u32 vect_info; |
5542 |
++ |
5543 |
++ vect_info = vmx->idt_vectoring_info; |
5544 |
++ intr_info = vmx->exit_intr_info; |
5545 |
++ |
5546 |
++ if (is_machine_check(intr_info) || is_nmi(intr_info)) |
5547 |
++ return 1; /* handled by handle_exception_nmi_irqoff() */ |
5548 |
++ |
5549 |
++ if (is_invalid_opcode(intr_info)) |
5550 |
++ return handle_ud(vcpu); |
5551 |
++ |
5552 |
++ error_code = 0; |
5553 |
++ if (intr_info & INTR_INFO_DELIVER_CODE_MASK) |
5554 |
++ error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); |
5555 |
++ |
5556 |
++ if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { |
5557 |
++ WARN_ON_ONCE(!enable_vmware_backdoor); |
5558 |
++ |
5559 |
++ /* |
5560 |
++ * VMware backdoor emulation on #GP interception only handles |
5561 |
++ * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero |
5562 |
++ * error code on #GP. |
5563 |
++ */ |
5564 |
++ if (error_code) { |
5565 |
++ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
5566 |
++ return 1; |
5567 |
++ } |
5568 |
++ return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); |
5569 |
++ } |
5570 |
++ |
5571 |
++ /* |
5572 |
++ * The #PF with PFEC.RSVD = 1 indicates the guest is accessing |
5573 |
++ * MMIO, it is better to report an internal error. |
5574 |
++ * See the comments in vmx_handle_exit. |
5575 |
++ */ |
5576 |
++ if ((vect_info & VECTORING_INFO_VALID_MASK) && |
5577 |
++ !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { |
5578 |
++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
5579 |
++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; |
5580 |
++ vcpu->run->internal.ndata = 3; |
5581 |
++ vcpu->run->internal.data[0] = vect_info; |
5582 |
++ vcpu->run->internal.data[1] = intr_info; |
5583 |
++ vcpu->run->internal.data[2] = error_code; |
5584 |
++ return 0; |
5585 |
++ } |
5586 |
++ |
5587 |
++ if (is_page_fault(intr_info)) { |
5588 |
++ cr2 = vmcs_readl(EXIT_QUALIFICATION); |
5589 |
++ /* EPT won't cause page fault directly */ |
5590 |
++ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); |
5591 |
++ return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); |
5592 |
++ } |
5593 |
++ |
5594 |
++ ex_no = intr_info & INTR_INFO_VECTOR_MASK; |
5595 |
++ |
5596 |
++ if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) |
5597 |
++ return handle_rmode_exception(vcpu, ex_no, error_code); |
5598 |
++ |
5599 |
++ switch (ex_no) { |
5600 |
++ case AC_VECTOR: |
5601 |
++ kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); |
5602 |
++ return 1; |
5603 |
++ case DB_VECTOR: |
5604 |
++ dr6 = vmcs_readl(EXIT_QUALIFICATION); |
5605 |
++ if (!(vcpu->guest_debug & |
5606 |
++ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { |
5607 |
++ vcpu->arch.dr6 &= ~DR_TRAP_BITS; |
5608 |
++ vcpu->arch.dr6 |= dr6 | DR6_RTM; |
5609 |
++ if (is_icebp(intr_info)) |
5610 |
++ WARN_ON(!skip_emulated_instruction(vcpu)); |
5611 |
++ |
5612 |
++ kvm_queue_exception(vcpu, DB_VECTOR); |
5613 |
++ return 1; |
5614 |
++ } |
5615 |
++ kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; |
5616 |
++ kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); |
5617 |
++ /* fall through */ |
5618 |
++ case BP_VECTOR: |
5619 |
++ /* |
5620 |
++ * Update instruction length as we may reinject #BP from |
5621 |
++ * user space while in guest debugging mode. Reading it for |
5622 |
++ * #DB as well causes no harm, it is not used in that case. |
5623 |
++ */ |
5624 |
++ vmx->vcpu.arch.event_exit_inst_len = |
5625 |
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
5626 |
++ kvm_run->exit_reason = KVM_EXIT_DEBUG; |
5627 |
++ rip = kvm_rip_read(vcpu); |
5628 |
++ kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; |
5629 |
++ kvm_run->debug.arch.exception = ex_no; |
5630 |
++ break; |
5631 |
++ default: |
5632 |
++ kvm_run->exit_reason = KVM_EXIT_EXCEPTION; |
5633 |
++ kvm_run->ex.exception = ex_no; |
5634 |
++ kvm_run->ex.error_code = error_code; |
5635 |
++ break; |
5636 |
++ } |
5637 |
++ return 0; |
5638 |
++} |
5639 |
++ |
5640 |
++static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu) |
5641 |
++{ |
5642 |
++ ++vcpu->stat.irq_exits; |
5643 |
++ return 1; |
5644 |
++} |
5645 |
++ |
5646 |
++static int handle_triple_fault(struct kvm_vcpu *vcpu) |
5647 |
++{ |
5648 |
++ vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; |
5649 |
++ vcpu->mmio_needed = 0; |
5650 |
++ return 0; |
5651 |
++} |
5652 |
++ |
5653 |
++static int handle_io(struct kvm_vcpu *vcpu) |
5654 |
++{ |
5655 |
++ unsigned long exit_qualification; |
5656 |
++ int size, in, string; |
5657 |
++ unsigned port; |
5658 |
++ |
5659 |
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
5660 |
++ string = (exit_qualification & 16) != 0; |
5661 |
++ |
5662 |
++ ++vcpu->stat.io_exits; |
5663 |
++ |
5664 |
++ if (string) |
5665 |
++ return kvm_emulate_instruction(vcpu, 0); |
5666 |
++ |
5667 |
++ port = exit_qualification >> 16; |
5668 |
++ size = (exit_qualification & 7) + 1; |
5669 |
++ in = (exit_qualification & 8) != 0; |
5670 |
++ |
5671 |
++ return kvm_fast_pio(vcpu, size, port, in); |
5672 |
++} |
5673 |
++ |
5674 |
++static void |
5675 |
++vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) |
5676 |
++{ |
5677 |
++ /* |
5678 |
++ * Patch in the VMCALL instruction: |
5679 |
++ */ |
5680 |
++ hypercall[0] = 0x0f; |
5681 |
++ hypercall[1] = 0x01; |
5682 |
++ hypercall[2] = 0xc1; |
5683 |
++} |
5684 |
++ |
5685 |
++/* called to set cr0 as appropriate for a mov-to-cr0 exit. */ |
5686 |
++static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) |
5687 |
++{ |
5688 |
++ if (is_guest_mode(vcpu)) { |
5689 |
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
5690 |
++ unsigned long orig_val = val; |
5691 |
++ |
5692 |
++ /* |
5693 |
++ * We get here when L2 changed cr0 in a way that did not change |
5694 |
++ * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), |
5695 |
++ * but did change L0 shadowed bits. So we first calculate the |
5696 |
++ * effective cr0 value that L1 would like to write into the |
5697 |
++ * hardware. It consists of the L2-owned bits from the new |
5698 |
++ * value combined with the L1-owned bits from L1's guest_cr0. |
5699 |
++ */ |
5700 |
++ val = (val & ~vmcs12->cr0_guest_host_mask) | |
5701 |
++ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); |
5702 |
++ |
5703 |
++ if (!nested_guest_cr0_valid(vcpu, val)) |
5704 |
++ return 1; |
5705 |
++ |
5706 |
++ if (kvm_set_cr0(vcpu, val)) |
5707 |
++ return 1; |
5708 |
++ vmcs_writel(CR0_READ_SHADOW, orig_val); |
5709 |
++ return 0; |
5710 |
++ } else { |
5711 |
++ if (to_vmx(vcpu)->nested.vmxon && |
5712 |
++ !nested_host_cr0_valid(vcpu, val)) |
5713 |
++ return 1; |
5714 |
++ |
5715 |
++ return kvm_set_cr0(vcpu, val); |
5716 |
++ } |
5717 |
++} |
5718 |
++ |
5719 |
++static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) |
5720 |
++{ |
5721 |
++ if (is_guest_mode(vcpu)) { |
5722 |
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
5723 |
++ unsigned long orig_val = val; |
5724 |
++ |
5725 |
++ /* analogously to handle_set_cr0 */ |
5726 |
++ val = (val & ~vmcs12->cr4_guest_host_mask) | |
5727 |
++ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); |
5728 |
++ if (kvm_set_cr4(vcpu, val)) |
5729 |
++ return 1; |
5730 |
++ vmcs_writel(CR4_READ_SHADOW, orig_val); |
5731 |
++ return 0; |
5732 |
++ } else |
5733 |
++ return kvm_set_cr4(vcpu, val); |
5734 |
++} |
5735 |
++ |
5736 |
++static int handle_desc(struct kvm_vcpu *vcpu) |
5737 |
++{ |
5738 |
++ WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); |
5739 |
++ return kvm_emulate_instruction(vcpu, 0); |
5740 |
++} |
5741 |
++ |
5742 |
++static int handle_cr(struct kvm_vcpu *vcpu) |
5743 |
++{ |
5744 |
++ unsigned long exit_qualification, val; |
5745 |
++ int cr; |
5746 |
++ int reg; |
5747 |
++ int err; |
5748 |
++ int ret; |
5749 |
++ |
5750 |
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
5751 |
++ cr = exit_qualification & 15; |
5752 |
++ reg = (exit_qualification >> 8) & 15; |
5753 |
++ switch ((exit_qualification >> 4) & 3) { |
5754 |
++ case 0: /* mov to cr */ |
5755 |
++ val = kvm_register_readl(vcpu, reg); |
5756 |
++ trace_kvm_cr_write(cr, val); |
5757 |
++ switch (cr) { |
5758 |
++ case 0: |
5759 |
++ err = handle_set_cr0(vcpu, val); |
5760 |
++ return kvm_complete_insn_gp(vcpu, err); |
5761 |
++ case 3: |
5762 |
++ WARN_ON_ONCE(enable_unrestricted_guest); |
5763 |
++ err = kvm_set_cr3(vcpu, val); |
5764 |
++ return kvm_complete_insn_gp(vcpu, err); |
5765 |
++ case 4: |
5766 |
++ err = handle_set_cr4(vcpu, val); |
5767 |
++ return kvm_complete_insn_gp(vcpu, err); |
5768 |
++ case 8: { |
5769 |
++ u8 cr8_prev = kvm_get_cr8(vcpu); |
5770 |
++ u8 cr8 = (u8)val; |
5771 |
++ err = kvm_set_cr8(vcpu, cr8); |
5772 |
++ ret = kvm_complete_insn_gp(vcpu, err); |
5773 |
++ if (lapic_in_kernel(vcpu)) |
5774 |
++ return ret; |
5775 |
++ if (cr8_prev <= cr8) |
5776 |
++ return ret; |
5777 |
++ /* |
5778 |
++ * TODO: we might be squashing a |
5779 |
++ * KVM_GUESTDBG_SINGLESTEP-triggered |
5780 |
++ * KVM_EXIT_DEBUG here. |
5781 |
++ */ |
5782 |
++ vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
5783 |
++ return 0; |
5784 |
++ } |
5785 |
++ } |
5786 |
++ break; |
5787 |
++ case 2: /* clts */ |
5788 |
++ WARN_ONCE(1, "Guest should always own CR0.TS"); |
5789 |
++ vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); |
5790 |
++ trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); |
5791 |
++ return kvm_skip_emulated_instruction(vcpu); |
5792 |
++ case 1: /*mov from cr*/ |
5793 |
++ switch (cr) { |
5794 |
++ case 3: |
5795 |
++ WARN_ON_ONCE(enable_unrestricted_guest); |
5796 |
++ val = kvm_read_cr3(vcpu); |
5797 |
++ kvm_register_write(vcpu, reg, val); |
5798 |
++ trace_kvm_cr_read(cr, val); |
5799 |
++ return kvm_skip_emulated_instruction(vcpu); |
5800 |
++ case 8: |
5801 |
++ val = kvm_get_cr8(vcpu); |
5802 |
++ kvm_register_write(vcpu, reg, val); |
5803 |
++ trace_kvm_cr_read(cr, val); |
5804 |
++ return kvm_skip_emulated_instruction(vcpu); |
5805 |
++ } |
5806 |
++ break; |
5807 |
++ case 3: /* lmsw */ |
5808 |
++ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; |
5809 |
++ trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); |
5810 |
++ kvm_lmsw(vcpu, val); |
5811 |
++ |
5812 |
++ return kvm_skip_emulated_instruction(vcpu); |
5813 |
++ default: |
5814 |
++ break; |
5815 |
++ } |
5816 |
++ vcpu->run->exit_reason = 0; |
5817 |
++ vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", |
5818 |
++ (int)(exit_qualification >> 4) & 3, cr); |
5819 |
++ return 0; |
5820 |
++} |
5821 |
++ |
5822 |
++static int handle_dr(struct kvm_vcpu *vcpu) |
5823 |
++{ |
5824 |
++ unsigned long exit_qualification; |
5825 |
++ int dr, dr7, reg; |
5826 |
++ |
5827 |
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
5828 |
++ dr = exit_qualification & DEBUG_REG_ACCESS_NUM; |
5829 |
++ |
5830 |
++ /* First, if DR does not exist, trigger UD */ |
5831 |
++ if (!kvm_require_dr(vcpu, dr)) |
5832 |
++ return 1; |
5833 |
++ |
5834 |
++ /* Do not handle if the CPL > 0, will trigger GP on re-entry */ |
5835 |
++ if (!kvm_require_cpl(vcpu, 0)) |
5836 |
++ return 1; |
5837 |
++ dr7 = vmcs_readl(GUEST_DR7); |
5838 |
++ if (dr7 & DR7_GD) { |
5839 |
++ /* |
5840 |
++ * As the vm-exit takes precedence over the debug trap, we |
5841 |
++ * need to emulate the latter, either for the host or the |
5842 |
++ * guest debugging itself. |
5843 |
++ */ |
5844 |
++ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { |
5845 |
++ vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; |
5846 |
++ vcpu->run->debug.arch.dr7 = dr7; |
5847 |
++ vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); |
5848 |
++ vcpu->run->debug.arch.exception = DB_VECTOR; |
5849 |
++ vcpu->run->exit_reason = KVM_EXIT_DEBUG; |
5850 |
++ return 0; |
5851 |
++ } else { |
5852 |
++ vcpu->arch.dr6 &= ~DR_TRAP_BITS; |
5853 |
++ vcpu->arch.dr6 |= DR6_BD | DR6_RTM; |
5854 |
++ kvm_queue_exception(vcpu, DB_VECTOR); |
5855 |
++ return 1; |
5856 |
++ } |
5857 |
++ } |
5858 |
++ |
5859 |
++ if (vcpu->guest_debug == 0) { |
5860 |
++ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); |
5861 |
++ |
5862 |
++ /* |
5863 |
++ * No more DR vmexits; force a reload of the debug registers |
5864 |
++ * and reenter on this instruction. The next vmexit will |
5865 |
++ * retrieve the full state of the debug registers. |
5866 |
++ */ |
5867 |
++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; |
5868 |
++ return 1; |
5869 |
++ } |
5870 |
++ |
5871 |
++ reg = DEBUG_REG_ACCESS_REG(exit_qualification); |
5872 |
++ if (exit_qualification & TYPE_MOV_FROM_DR) { |
5873 |
++ unsigned long val; |
5874 |
++ |
5875 |
++ if (kvm_get_dr(vcpu, dr, &val)) |
5876 |
++ return 1; |
5877 |
++ kvm_register_write(vcpu, reg, val); |
5878 |
++ } else |
5879 |
++ if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) |
5880 |
++ return 1; |
5881 |
++ |
5882 |
++ return kvm_skip_emulated_instruction(vcpu); |
5883 |
++} |
5884 |
++ |
5885 |
++static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) |
5886 |
++{ |
5887 |
++ return vcpu->arch.dr6; |
5888 |
++} |
5889 |
++ |
5890 |
++static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) |
5891 |
++{ |
5892 |
++} |
5893 |
++ |
5894 |
++static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) |
5895 |
++{ |
5896 |
++ get_debugreg(vcpu->arch.db[0], 0); |
5897 |
++ get_debugreg(vcpu->arch.db[1], 1); |
5898 |
++ get_debugreg(vcpu->arch.db[2], 2); |
5899 |
++ get_debugreg(vcpu->arch.db[3], 3); |
5900 |
++ get_debugreg(vcpu->arch.dr6, 6); |
5901 |
++ vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); |
5902 |
++ |
5903 |
++ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; |
5904 |
++ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); |
5905 |
++} |
5906 |
++ |
5907 |
++static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) |
5908 |
++{ |
5909 |
++ vmcs_writel(GUEST_DR7, val); |
5910 |
++} |
5911 |
++ |
5912 |
++static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) |
5913 |
++{ |
5914 |
++ kvm_apic_update_ppr(vcpu); |
5915 |
++ return 1; |
5916 |
++} |
5917 |
++ |
5918 |
++static int handle_interrupt_window(struct kvm_vcpu *vcpu) |
5919 |
++{ |
5920 |
++ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); |
5921 |
++ |
5922 |
++ kvm_make_request(KVM_REQ_EVENT, vcpu); |
5923 |
++ |
5924 |
++ ++vcpu->stat.irq_window_exits; |
5925 |
++ return 1; |
5926 |
++} |
5927 |
++ |
5928 |
++static int handle_vmcall(struct kvm_vcpu *vcpu) |
5929 |
++{ |
5930 |
++ return kvm_emulate_hypercall(vcpu); |
5931 |
++} |
5932 |
++ |
5933 |
++static int handle_invd(struct kvm_vcpu *vcpu) |
5934 |
++{ |
5935 |
++ return kvm_emulate_instruction(vcpu, 0); |
5936 |
++} |
5937 |
++ |
5938 |
++static int handle_invlpg(struct kvm_vcpu *vcpu) |
5939 |
++{ |
5940 |
++ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
5941 |
++ |
5942 |
++ kvm_mmu_invlpg(vcpu, exit_qualification); |
5943 |
++ return kvm_skip_emulated_instruction(vcpu); |
5944 |
++} |
5945 |
++ |
5946 |
++static int handle_rdpmc(struct kvm_vcpu *vcpu) |
5947 |
++{ |
5948 |
++ int err; |
5949 |
++ |
5950 |
++ err = kvm_rdpmc(vcpu); |
5951 |
++ return kvm_complete_insn_gp(vcpu, err); |
5952 |
++} |
5953 |
++ |
5954 |
++static int handle_wbinvd(struct kvm_vcpu *vcpu) |
5955 |
++{ |
5956 |
++ return kvm_emulate_wbinvd(vcpu); |
5957 |
++} |
5958 |
++ |
5959 |
++static int handle_xsetbv(struct kvm_vcpu *vcpu) |
5960 |
++{ |
5961 |
++ u64 new_bv = kvm_read_edx_eax(vcpu); |
5962 |
++ u32 index = kvm_rcx_read(vcpu); |
5963 |
++ |
5964 |
++ if (kvm_set_xcr(vcpu, index, new_bv) == 0) |
5965 |
++ return kvm_skip_emulated_instruction(vcpu); |
5966 |
++ return 1; |
5967 |
++} |
5968 |
++ |
5969 |
++static int handle_apic_access(struct kvm_vcpu *vcpu) |
5970 |
++{ |
5971 |
++ if (likely(fasteoi)) { |
5972 |
++ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
5973 |
++ int access_type, offset; |
5974 |
++ |
5975 |
++ access_type = exit_qualification & APIC_ACCESS_TYPE; |
5976 |
++ offset = exit_qualification & APIC_ACCESS_OFFSET; |
5977 |
++ /* |
5978 |
++ * Sane guest uses MOV to write EOI, with written value |
5979 |
++ * not cared. So make a short-circuit here by avoiding |
5980 |
++ * heavy instruction emulation. |
5981 |
++ */ |
5982 |
++ if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && |
5983 |
++ (offset == APIC_EOI)) { |
5984 |
++ kvm_lapic_set_eoi(vcpu); |
5985 |
++ return kvm_skip_emulated_instruction(vcpu); |
5986 |
++ } |
5987 |
++ } |
5988 |
++ return kvm_emulate_instruction(vcpu, 0); |
5989 |
++} |
5990 |
++ |
5991 |
++static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) |
5992 |
++{ |
5993 |
++ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
5994 |
++ int vector = exit_qualification & 0xff; |
5995 |
++ |
5996 |
++ /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ |
5997 |
++ kvm_apic_set_eoi_accelerated(vcpu, vector); |
5998 |
++ return 1; |
5999 |
++} |
6000 |
++ |
6001 |
++static int handle_apic_write(struct kvm_vcpu *vcpu) |
6002 |
++{ |
6003 |
++ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6004 |
++ u32 offset = exit_qualification & 0xfff; |
6005 |
++ |
6006 |
++ /* APIC-write VM exit is trap-like and thus no need to adjust IP */ |
6007 |
++ kvm_apic_write_nodecode(vcpu, offset); |
6008 |
++ return 1; |
6009 |
++} |
6010 |
++ |
6011 |
++static int handle_task_switch(struct kvm_vcpu *vcpu) |
6012 |
++{ |
6013 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6014 |
++ unsigned long exit_qualification; |
6015 |
++ bool has_error_code = false; |
6016 |
++ u32 error_code = 0; |
6017 |
++ u16 tss_selector; |
6018 |
++ int reason, type, idt_v, idt_index; |
6019 |
++ |
6020 |
++ idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); |
6021 |
++ idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); |
6022 |
++ type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); |
6023 |
++ |
6024 |
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6025 |
++ |
6026 |
++ reason = (u32)exit_qualification >> 30; |
6027 |
++ if (reason == TASK_SWITCH_GATE && idt_v) { |
6028 |
++ switch (type) { |
6029 |
++ case INTR_TYPE_NMI_INTR: |
6030 |
++ vcpu->arch.nmi_injected = false; |
6031 |
++ vmx_set_nmi_mask(vcpu, true); |
6032 |
++ break; |
6033 |
++ case INTR_TYPE_EXT_INTR: |
6034 |
++ case INTR_TYPE_SOFT_INTR: |
6035 |
++ kvm_clear_interrupt_queue(vcpu); |
6036 |
++ break; |
6037 |
++ case INTR_TYPE_HARD_EXCEPTION: |
6038 |
++ if (vmx->idt_vectoring_info & |
6039 |
++ VECTORING_INFO_DELIVER_CODE_MASK) { |
6040 |
++ has_error_code = true; |
6041 |
++ error_code = |
6042 |
++ vmcs_read32(IDT_VECTORING_ERROR_CODE); |
6043 |
++ } |
6044 |
++ /* fall through */ |
6045 |
++ case INTR_TYPE_SOFT_EXCEPTION: |
6046 |
++ kvm_clear_exception_queue(vcpu); |
6047 |
++ break; |
6048 |
++ default: |
6049 |
++ break; |
6050 |
++ } |
6051 |
++ } |
6052 |
++ tss_selector = exit_qualification; |
6053 |
++ |
6054 |
++ if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && |
6055 |
++ type != INTR_TYPE_EXT_INTR && |
6056 |
++ type != INTR_TYPE_NMI_INTR)) |
6057 |
++ WARN_ON(!skip_emulated_instruction(vcpu)); |
6058 |
++ |
6059 |
++ /* |
6060 |
++ * TODO: What about debug traps on tss switch? |
6061 |
++ * Are we supposed to inject them and update dr6? |
6062 |
++ */ |
6063 |
++ return kvm_task_switch(vcpu, tss_selector, |
6064 |
++ type == INTR_TYPE_SOFT_INTR ? idt_index : -1, |
6065 |
++ reason, has_error_code, error_code); |
6066 |
++} |
6067 |
++ |
6068 |
++static int handle_ept_violation(struct kvm_vcpu *vcpu) |
6069 |
++{ |
6070 |
++ unsigned long exit_qualification; |
6071 |
++ gpa_t gpa; |
6072 |
++ u64 error_code; |
6073 |
++ |
6074 |
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6075 |
++ |
6076 |
++ /* |
6077 |
++ * If the EPT violation happened while executing iret from NMI, the |
6078 |
++ * "blocked by NMI" bit has to be set before the next VM entry. |
6079 |
++ * There are errata that may cause this bit to not be set: |
6080 |
++ * AAK134, BY25. |
6081 |
++ */ |
6082 |
++ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && |
6083 |
++ enable_vnmi && |
6084 |
++ (exit_qualification & INTR_INFO_UNBLOCK_NMI)) |
6085 |
++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); |
6086 |
++ |
6087 |
++ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
6088 |
++ trace_kvm_page_fault(gpa, exit_qualification); |
6089 |
++ |
6090 |
++ /* Is it a read fault? */ |
6091 |
++ error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) |
6092 |
++ ? PFERR_USER_MASK : 0; |
6093 |
++ /* Is it a write fault? */ |
6094 |
++ error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) |
6095 |
++ ? PFERR_WRITE_MASK : 0; |
6096 |
++ /* Is it a fetch fault? */ |
6097 |
++ error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) |
6098 |
++ ? PFERR_FETCH_MASK : 0; |
6099 |
++ /* ept page table entry is present? */ |
6100 |
++ error_code |= (exit_qualification & |
6101 |
++ (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | |
6102 |
++ EPT_VIOLATION_EXECUTABLE)) |
6103 |
++ ? PFERR_PRESENT_MASK : 0; |
6104 |
++ |
6105 |
++ error_code |= (exit_qualification & 0x100) != 0 ? |
6106 |
++ PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; |
6107 |
++ |
6108 |
++ vcpu->arch.exit_qualification = exit_qualification; |
6109 |
++ return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); |
6110 |
++} |
6111 |
++ |
6112 |
++static int handle_ept_misconfig(struct kvm_vcpu *vcpu) |
6113 |
++{ |
6114 |
++ gpa_t gpa; |
6115 |
++ |
6116 |
++ /* |
6117 |
++ * A nested guest cannot optimize MMIO vmexits, because we have an |
6118 |
++ * nGPA here instead of the required GPA. |
6119 |
++ */ |
6120 |
++ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
6121 |
++ if (!is_guest_mode(vcpu) && |
6122 |
++ !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { |
6123 |
++ trace_kvm_fast_mmio(gpa); |
6124 |
++ return kvm_skip_emulated_instruction(vcpu); |
6125 |
++ } |
6126 |
++ |
6127 |
++ return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); |
6128 |
++} |
6129 |
++ |
6130 |
++static int handle_nmi_window(struct kvm_vcpu *vcpu) |
6131 |
++{ |
6132 |
++ WARN_ON_ONCE(!enable_vnmi); |
6133 |
++ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); |
6134 |
++ ++vcpu->stat.nmi_window_exits; |
6135 |
++ kvm_make_request(KVM_REQ_EVENT, vcpu); |
6136 |
++ |
6137 |
++ return 1; |
6138 |
++} |
6139 |
++ |
6140 |
++static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) |
6141 |
++{ |
6142 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6143 |
++ bool intr_window_requested; |
6144 |
++ unsigned count = 130; |
6145 |
++ |
6146 |
++ /* |
6147 |
++ * We should never reach the point where we are emulating L2 |
6148 |
++ * due to invalid guest state as that means we incorrectly |
6149 |
++ * allowed a nested VMEntry with an invalid vmcs12. |
6150 |
++ */ |
6151 |
++ WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending); |
6152 |
++ |
6153 |
++ intr_window_requested = exec_controls_get(vmx) & |
6154 |
++ CPU_BASED_INTR_WINDOW_EXITING; |
6155 |
++ |
6156 |
++ while (vmx->emulation_required && count-- != 0) { |
6157 |
++ if (intr_window_requested && vmx_interrupt_allowed(vcpu)) |
6158 |
++ return handle_interrupt_window(&vmx->vcpu); |
6159 |
++ |
6160 |
++ if (kvm_test_request(KVM_REQ_EVENT, vcpu)) |
6161 |
++ return 1; |
6162 |
++ |
6163 |
++ if (!kvm_emulate_instruction(vcpu, 0)) |
6164 |
++ return 0; |
6165 |
++ |
6166 |
++ if (vmx->emulation_required && !vmx->rmode.vm86_active && |
6167 |
++ vcpu->arch.exception.pending) { |
6168 |
++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
6169 |
++ vcpu->run->internal.suberror = |
6170 |
++ KVM_INTERNAL_ERROR_EMULATION; |
6171 |
++ vcpu->run->internal.ndata = 0; |
6172 |
++ return 0; |
6173 |
++ } |
6174 |
++ |
6175 |
++ if (vcpu->arch.halt_request) { |
6176 |
++ vcpu->arch.halt_request = 0; |
6177 |
++ return kvm_vcpu_halt(vcpu); |
6178 |
++ } |
6179 |
++ |
6180 |
++ /* |
6181 |
++ * Note, return 1 and not 0, vcpu_run() is responsible for |
6182 |
++ * morphing the pending signal into the proper return code. |
6183 |
++ */ |
6184 |
++ if (signal_pending(current)) |
6185 |
++ return 1; |
6186 |
++ |
6187 |
++ if (need_resched()) |
6188 |
++ schedule(); |
6189 |
++ } |
6190 |
++ |
6191 |
++ return 1; |
6192 |
++} |
6193 |
++ |
6194 |
++static void grow_ple_window(struct kvm_vcpu *vcpu) |
6195 |
++{ |
6196 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6197 |
++ unsigned int old = vmx->ple_window; |
6198 |
++ |
6199 |
++ vmx->ple_window = __grow_ple_window(old, ple_window, |
6200 |
++ ple_window_grow, |
6201 |
++ ple_window_max); |
6202 |
++ |
6203 |
++ if (vmx->ple_window != old) { |
6204 |
++ vmx->ple_window_dirty = true; |
6205 |
++ trace_kvm_ple_window_update(vcpu->vcpu_id, |
6206 |
++ vmx->ple_window, old); |
6207 |
++ } |
6208 |
++} |
6209 |
++ |
6210 |
++static void shrink_ple_window(struct kvm_vcpu *vcpu) |
6211 |
++{ |
6212 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6213 |
++ unsigned int old = vmx->ple_window; |
6214 |
++ |
6215 |
++ vmx->ple_window = __shrink_ple_window(old, ple_window, |
6216 |
++ ple_window_shrink, |
6217 |
++ ple_window); |
6218 |
++ |
6219 |
++ if (vmx->ple_window != old) { |
6220 |
++ vmx->ple_window_dirty = true; |
6221 |
++ trace_kvm_ple_window_update(vcpu->vcpu_id, |
6222 |
++ vmx->ple_window, old); |
6223 |
++ } |
6224 |
++} |
6225 |
++ |
6226 |
++/* |
6227 |
++ * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. |
6228 |
++ */ |
6229 |
++static void wakeup_handler(void) |
6230 |
++{ |
6231 |
++ struct kvm_vcpu *vcpu; |
6232 |
++ int cpu = smp_processor_id(); |
6233 |
++ |
6234 |
++ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); |
6235 |
++ list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), |
6236 |
++ blocked_vcpu_list) { |
6237 |
++ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
6238 |
++ |
6239 |
++ if (pi_test_on(pi_desc) == 1) |
6240 |
++ kvm_vcpu_kick(vcpu); |
6241 |
++ } |
6242 |
++ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); |
6243 |
++} |
6244 |
++ |
6245 |
++static void vmx_enable_tdp(void) |
6246 |
++{ |
6247 |
++ kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, |
6248 |
++ enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, |
6249 |
++ enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, |
6250 |
++ 0ull, VMX_EPT_EXECUTABLE_MASK, |
6251 |
++ cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, |
6252 |
++ VMX_EPT_RWX_MASK, 0ull); |
6253 |
++ |
6254 |
++ ept_set_mmio_spte_mask(); |
6255 |
++ kvm_enable_tdp(); |
6256 |
++} |
6257 |
++ |
6258 |
++/* |
6259 |
++ * Indicate a vcpu busy-waiting on a spinlock. We do not enable PAUSE |
6260 |
++ * exiting, so we only get here on a CPU with PAUSE-loop exiting. |
6261 |
++ */ |
6262 |
++static int handle_pause(struct kvm_vcpu *vcpu) |
6263 |
++{ |
6264 |
++ if (!kvm_pause_in_guest(vcpu->kvm)) |
6265 |
++ grow_ple_window(vcpu); |
6266 |
++ |
6267 |
++ /* |
6268 |
++ * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" |
6269 |
++ * VM-execution control is ignored if CPL > 0. OTOH, KVM |
6270 |
++ * never sets PAUSE_EXITING and just sets PLE if supported, |
6271 |
++ * so the vcpu must be CPL=0 if it gets a PAUSE exit. |
6272 |
++ */ |
6273 |
++ kvm_vcpu_on_spin(vcpu, true); |
6274 |
++ return kvm_skip_emulated_instruction(vcpu); |
6275 |
++} |
6276 |
++ |
6277 |
++static int handle_nop(struct kvm_vcpu *vcpu) |
6278 |
++{ |
6279 |
++ return kvm_skip_emulated_instruction(vcpu); |
6280 |
++} |
6281 |
++ |
6282 |
++static int handle_mwait(struct kvm_vcpu *vcpu) |
6283 |
++{ |
6284 |
++ printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); |
6285 |
++ return handle_nop(vcpu); |
6286 |
++} |
6287 |
++ |
6288 |
++static int handle_invalid_op(struct kvm_vcpu *vcpu) |
6289 |
++{ |
6290 |
++ kvm_queue_exception(vcpu, UD_VECTOR); |
6291 |
++ return 1; |
6292 |
++} |
6293 |
++ |
6294 |
++static int handle_monitor_trap(struct kvm_vcpu *vcpu) |
6295 |
++{ |
6296 |
++ return 1; |
6297 |
++} |
6298 |
++ |
6299 |
++static int handle_monitor(struct kvm_vcpu *vcpu) |
6300 |
++{ |
6301 |
++ printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); |
6302 |
++ return handle_nop(vcpu); |
6303 |
++} |
6304 |
++ |
6305 |
++static int handle_invpcid(struct kvm_vcpu *vcpu) |
6306 |
++{ |
6307 |
++ u32 vmx_instruction_info; |
6308 |
++ unsigned long type; |
6309 |
++ bool pcid_enabled; |
6310 |
++ gva_t gva; |
6311 |
++ struct x86_exception e; |
6312 |
++ unsigned i; |
6313 |
++ unsigned long roots_to_free = 0; |
6314 |
++ struct { |
6315 |
++ u64 pcid; |
6316 |
++ u64 gla; |
6317 |
++ } operand; |
6318 |
++ |
6319 |
++ if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { |
6320 |
++ kvm_queue_exception(vcpu, UD_VECTOR); |
6321 |
++ return 1; |
6322 |
++ } |
6323 |
++ |
6324 |
++ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); |
6325 |
++ type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); |
6326 |
++ |
6327 |
++ if (type > 3) { |
6328 |
++ kvm_inject_gp(vcpu, 0); |
6329 |
++ return 1; |
6330 |
++ } |
6331 |
++ |
6332 |
++ /* According to the Intel instruction reference, the memory operand |
6333 |
++ * is read even if it isn't needed (e.g., for type==all) |
6334 |
++ */ |
6335 |
++ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), |
6336 |
++ vmx_instruction_info, false, |
6337 |
++ sizeof(operand), &gva)) |
6338 |
++ return 1; |
6339 |
++ |
6340 |
++ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { |
6341 |
++ kvm_inject_page_fault(vcpu, &e); |
6342 |
++ return 1; |
6343 |
++ } |
6344 |
++ |
6345 |
++ if (operand.pcid >> 12 != 0) { |
6346 |
++ kvm_inject_gp(vcpu, 0); |
6347 |
++ return 1; |
6348 |
++ } |
6349 |
++ |
6350 |
++ pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); |
6351 |
++ |
6352 |
++ switch (type) { |
6353 |
++ case INVPCID_TYPE_INDIV_ADDR: |
6354 |
++ if ((!pcid_enabled && (operand.pcid != 0)) || |
6355 |
++ is_noncanonical_address(operand.gla, vcpu)) { |
6356 |
++ kvm_inject_gp(vcpu, 0); |
6357 |
++ return 1; |
6358 |
++ } |
6359 |
++ kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); |
6360 |
++ return kvm_skip_emulated_instruction(vcpu); |
6361 |
++ |
6362 |
++ case INVPCID_TYPE_SINGLE_CTXT: |
6363 |
++ if (!pcid_enabled && (operand.pcid != 0)) { |
6364 |
++ kvm_inject_gp(vcpu, 0); |
6365 |
++ return 1; |
6366 |
++ } |
6367 |
++ |
6368 |
++ if (kvm_get_active_pcid(vcpu) == operand.pcid) { |
6369 |
++ kvm_mmu_sync_roots(vcpu); |
6370 |
++ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
6371 |
++ } |
6372 |
++ |
6373 |
++ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
6374 |
++ if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) |
6375 |
++ == operand.pcid) |
6376 |
++ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); |
6377 |
++ |
6378 |
++ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); |
6379 |
++ /* |
6380 |
++ * If neither the current cr3 nor any of the prev_roots use the |
6381 |
++ * given PCID, then nothing needs to be done here because a |
6382 |
++ * resync will happen anyway before switching to any other CR3. |
6383 |
++ */ |
6384 |
++ |
6385 |
++ return kvm_skip_emulated_instruction(vcpu); |
6386 |
++ |
6387 |
++ case INVPCID_TYPE_ALL_NON_GLOBAL: |
6388 |
++ /* |
6389 |
++ * Currently, KVM doesn't mark global entries in the shadow |
6390 |
++ * page tables, so a non-global flush just degenerates to a |
6391 |
++ * global flush. If needed, we could optimize this later by |
6392 |
++ * keeping track of global entries in shadow page tables. |
6393 |
++ */ |
6394 |
++ |
6395 |
++ /* fall-through */ |
6396 |
++ case INVPCID_TYPE_ALL_INCL_GLOBAL: |
6397 |
++ kvm_mmu_unload(vcpu); |
6398 |
++ return kvm_skip_emulated_instruction(vcpu); |
6399 |
++ |
6400 |
++ default: |
6401 |
++ BUG(); /* We have already checked above that type <= 3 */ |
6402 |
++ } |
6403 |
++} |
6404 |
++ |
6405 |
++static int handle_pml_full(struct kvm_vcpu *vcpu) |
6406 |
++{ |
6407 |
++ unsigned long exit_qualification; |
6408 |
++ |
6409 |
++ trace_kvm_pml_full(vcpu->vcpu_id); |
6410 |
++ |
6411 |
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6412 |
++ |
6413 |
++ /* |
6414 |
++ * If the PML buffer FULL exit happened while executing iret from NMI, |
6416 |
++ * the "blocked by NMI" bit has to be set before the next VM entry. |
6416 |
++ */ |
6417 |
++ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && |
6418 |
++ enable_vnmi && |
6419 |
++ (exit_qualification & INTR_INFO_UNBLOCK_NMI)) |
6420 |
++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
6421 |
++ GUEST_INTR_STATE_NMI); |
6422 |
++ |
6423 |
++ /* |
6424 |
++ * PML buffer already flushed at beginning of VMEXIT. Nothing to do |
6425 |
++ * here, and there's no userspace involvement needed for PML. |
6426 |
++ */ |
6427 |
++ return 1; |
6428 |
++} |
6429 |
++ |
6430 |
++static int handle_preemption_timer(struct kvm_vcpu *vcpu) |
6431 |
++{ |
6432 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6433 |
++ |
6434 |
++ if (!vmx->req_immediate_exit && |
6435 |
++ !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) |
6436 |
++ kvm_lapic_expired_hv_timer(vcpu); |
6437 |
++ |
6438 |
++ return 1; |
6439 |
++} |
6440 |
++ |
6441 |
++/* |
6442 |
++ * When nested=0, all VMX instruction VM Exits are routed here. The handlers |
6443 |
++ * are overwritten by nested_vmx_setup() when nested=1. |
6444 |
++ */ |
6445 |
++static int handle_vmx_instruction(struct kvm_vcpu *vcpu) |
6446 |
++{ |
6447 |
++ kvm_queue_exception(vcpu, UD_VECTOR); |
6448 |
++ return 1; |
6449 |
++} |
6450 |
++ |
6451 |
++static int handle_encls(struct kvm_vcpu *vcpu) |
6452 |
++{ |
6453 |
++ /* |
6454 |
++ * SGX virtualization is not yet supported. There is no software |
6455 |
++ * enable bit for SGX, so we have to trap ENCLS and inject a #UD |
6456 |
++ * to prevent the guest from executing ENCLS. |
6457 |
++ */ |
6458 |
++ kvm_queue_exception(vcpu, UD_VECTOR); |
6459 |
++ return 1; |
6460 |
++} |
6461 |
++ |
6462 |
++/* |
6463 |
++ * The exit handlers return 1 if the exit was handled fully and guest execution |
6464 |
++ * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
6465 |
++ * to be done to userspace and return 0. |
6466 |
++ */ |
6467 |
++static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
6468 |
++ [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi, |
6469 |
++ [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
6470 |
++ [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, |
6471 |
++ [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, |
6472 |
++ [EXIT_REASON_IO_INSTRUCTION] = handle_io, |
6473 |
++ [EXIT_REASON_CR_ACCESS] = handle_cr, |
6474 |
++ [EXIT_REASON_DR_ACCESS] = handle_dr, |
6475 |
++ [EXIT_REASON_CPUID] = kvm_emulate_cpuid, |
6476 |
++ [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr, |
6477 |
++ [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, |
6478 |
++ [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window, |
6479 |
++ [EXIT_REASON_HLT] = kvm_emulate_halt, |
6480 |
++ [EXIT_REASON_INVD] = handle_invd, |
6481 |
++ [EXIT_REASON_INVLPG] = handle_invlpg, |
6482 |
++ [EXIT_REASON_RDPMC] = handle_rdpmc, |
6483 |
++ [EXIT_REASON_VMCALL] = handle_vmcall, |
6484 |
++ [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, |
6485 |
++ [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, |
6486 |
++ [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, |
6487 |
++ [EXIT_REASON_VMPTRST] = handle_vmx_instruction, |
6488 |
++ [EXIT_REASON_VMREAD] = handle_vmx_instruction, |
6489 |
++ [EXIT_REASON_VMRESUME] = handle_vmx_instruction, |
6490 |
++ [EXIT_REASON_VMWRITE] = handle_vmx_instruction, |
6491 |
++ [EXIT_REASON_VMOFF] = handle_vmx_instruction, |
6492 |
++ [EXIT_REASON_VMON] = handle_vmx_instruction, |
6493 |
++ [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, |
6494 |
++ [EXIT_REASON_APIC_ACCESS] = handle_apic_access, |
6495 |
++ [EXIT_REASON_APIC_WRITE] = handle_apic_write, |
6496 |
++ [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, |
6497 |
++ [EXIT_REASON_WBINVD] = handle_wbinvd, |
6498 |
++ [EXIT_REASON_XSETBV] = handle_xsetbv, |
6499 |
++ [EXIT_REASON_TASK_SWITCH] = handle_task_switch, |
6500 |
++ [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, |
6501 |
++ [EXIT_REASON_GDTR_IDTR] = handle_desc, |
6502 |
++ [EXIT_REASON_LDTR_TR] = handle_desc, |
6503 |
++ [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, |
6504 |
++ [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, |
6505 |
++ [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, |
6506 |
++ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, |
6507 |
++ [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, |
6508 |
++ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, |
6509 |
++ [EXIT_REASON_INVEPT] = handle_vmx_instruction, |
6510 |
++ [EXIT_REASON_INVVPID] = handle_vmx_instruction, |
6511 |
++ [EXIT_REASON_RDRAND] = handle_invalid_op, |
6512 |
++ [EXIT_REASON_RDSEED] = handle_invalid_op, |
6513 |
++ [EXIT_REASON_PML_FULL] = handle_pml_full, |
6514 |
++ [EXIT_REASON_INVPCID] = handle_invpcid, |
6515 |
++ [EXIT_REASON_VMFUNC] = handle_vmx_instruction, |
6516 |
++ [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, |
6517 |
++ [EXIT_REASON_ENCLS] = handle_encls, |
6518 |
++}; |
6519 |
++ |
6520 |
++static const int kvm_vmx_max_exit_handlers = |
6521 |
++ ARRAY_SIZE(kvm_vmx_exit_handlers); |
6522 |
++ |
6523 |
++static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) |
6524 |
++{ |
6525 |
++ *info1 = vmcs_readl(EXIT_QUALIFICATION); |
6526 |
++ *info2 = vmcs_read32(VM_EXIT_INTR_INFO); |
6527 |
++} |
6528 |
++ |
6529 |
++static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) |
6530 |
++{ |
6531 |
++ if (vmx->pml_pg) { |
6532 |
++ __free_page(vmx->pml_pg); |
6533 |
++ vmx->pml_pg = NULL; |
6534 |
++ } |
6535 |
++} |
6536 |
++ |
6537 |
++static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) |
6538 |
++{ |
6539 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6540 |
++ u64 *pml_buf; |
6541 |
++ u16 pml_idx; |
6542 |
++ |
6543 |
++ pml_idx = vmcs_read16(GUEST_PML_INDEX); |
6544 |
++ |
6545 |
++ /* Do nothing if PML buffer is empty */ |
6546 |
++ if (pml_idx == (PML_ENTITY_NUM - 1)) |
6547 |
++ return; |
6548 |
++ |
6549 |
++ /* PML index always points to next available PML buffer entity */ |
6550 |
++ if (pml_idx >= PML_ENTITY_NUM) |
6551 |
++ pml_idx = 0; |
6552 |
++ else |
6553 |
++ pml_idx++; |
6554 |
++ |
6555 |
++ pml_buf = page_address(vmx->pml_pg); |
6556 |
++ for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { |
6557 |
++ u64 gpa; |
6558 |
++ |
6559 |
++ gpa = pml_buf[pml_idx]; |
6560 |
++ WARN_ON(gpa & (PAGE_SIZE - 1)); |
6561 |
++ kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); |
6562 |
++ } |
6563 |
++ |
6564 |
++ /* reset PML index */ |
6565 |
++ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); |
6566 |
++} |
6567 |
++ |
6568 |
++/* |
6569 |
++ * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. |
6570 |
++ * Called before reporting dirty_bitmap to userspace. |
6571 |
++ */ |
6572 |
++static void kvm_flush_pml_buffers(struct kvm *kvm) |
6573 |
++{ |
6574 |
++ int i; |
6575 |
++ struct kvm_vcpu *vcpu; |
6576 |
++ /* |
6577 |
++ * We only need to kick vcpu out of guest mode here, as PML buffer |
6578 |
++ * is flushed at beginning of all VMEXITs, and it's obvious that only |
6579 |
++ * vcpus running in guest mode can have unflushed GPAs in the PML |
6580 |
++ * buffer. |
6581 |
++ */ |
6582 |
++ kvm_for_each_vcpu(i, vcpu, kvm) |
6583 |
++ kvm_vcpu_kick(vcpu); |
6584 |
++} |
6585 |
++ |
6586 |
++static void vmx_dump_sel(char *name, uint32_t sel) |
6587 |
++{ |
6588 |
++ pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", |
6589 |
++ name, vmcs_read16(sel), |
6590 |
++ vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), |
6591 |
++ vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), |
6592 |
++ vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); |
6593 |
++} |
6594 |
++ |
6595 |
++static void vmx_dump_dtsel(char *name, uint32_t limit) |
6596 |
++{ |
6597 |
++ pr_err("%s limit=0x%08x, base=0x%016lx\n", |
6598 |
++ name, vmcs_read32(limit), |
6599 |
++ vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); |
6600 |
++} |
6601 |
++ |
6602 |
++void dump_vmcs(void) |
6603 |
++{ |
6604 |
++ u32 vmentry_ctl, vmexit_ctl; |
6605 |
++ u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control; |
6606 |
++ unsigned long cr4; |
6607 |
++ u64 efer; |
6608 |
++ int i, n; |
6609 |
++ |
6610 |
++ if (!dump_invalid_vmcs) { |
6611 |
++ pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n"); |
6612 |
++ return; |
6613 |
++ } |
6614 |
++ |
6615 |
++ vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); |
6616 |
++ vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); |
6617 |
++ cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
6618 |
++ pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); |
6619 |
++ cr4 = vmcs_readl(GUEST_CR4); |
6620 |
++ efer = vmcs_read64(GUEST_IA32_EFER); |
6621 |
++ secondary_exec_control = 0; |
6622 |
++ if (cpu_has_secondary_exec_ctrls()) |
6623 |
++ secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); |
6624 |
++ |
6625 |
++ pr_err("*** Guest State ***\n"); |
6626 |
++ pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", |
6627 |
++ vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), |
6628 |
++ vmcs_readl(CR0_GUEST_HOST_MASK)); |
6629 |
++ pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", |
6630 |
++ cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); |
6631 |
++ pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); |
6632 |
++ if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && |
6633 |
++ (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) |
6634 |
++ { |
6635 |
++ pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", |
6636 |
++ vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); |
6637 |
++ pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", |
6638 |
++ vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); |
6639 |
++ } |
6640 |
++ pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", |
6641 |
++ vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); |
6642 |
++ pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", |
6643 |
++ vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); |
6644 |
++ pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", |
6645 |
++ vmcs_readl(GUEST_SYSENTER_ESP), |
6646 |
++ vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); |
6647 |
++ vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); |
6648 |
++ vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); |
6649 |
++ vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); |
6650 |
++ vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); |
6651 |
++ vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); |
6652 |
++ vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); |
6653 |
++ vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); |
6654 |
++ vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); |
6655 |
++ vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); |
6656 |
++ vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); |
6657 |
++ if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || |
6658 |
++ (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) |
6659 |
++ pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", |
6660 |
++ efer, vmcs_read64(GUEST_IA32_PAT)); |
6661 |
++ pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", |
6662 |
++ vmcs_read64(GUEST_IA32_DEBUGCTL), |
6663 |
++ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); |
6664 |
++ if (cpu_has_load_perf_global_ctrl() && |
6665 |
++ vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) |
6666 |
++ pr_err("PerfGlobCtl = 0x%016llx\n", |
6667 |
++ vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); |
6668 |
++ if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) |
6669 |
++ pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); |
6670 |
++ pr_err("Interruptibility = %08x ActivityState = %08x\n", |
6671 |
++ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), |
6672 |
++ vmcs_read32(GUEST_ACTIVITY_STATE)); |
6673 |
++ if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) |
6674 |
++ pr_err("InterruptStatus = %04x\n", |
6675 |
++ vmcs_read16(GUEST_INTR_STATUS)); |
6676 |
++ |
6677 |
++ pr_err("*** Host State ***\n"); |
6678 |
++ pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", |
6679 |
++ vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); |
6680 |
++ pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", |
6681 |
++ vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), |
6682 |
++ vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), |
6683 |
++ vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), |
6684 |
++ vmcs_read16(HOST_TR_SELECTOR)); |
6685 |
++ pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", |
6686 |
++ vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), |
6687 |
++ vmcs_readl(HOST_TR_BASE)); |
6688 |
++ pr_err("GDTBase=%016lx IDTBase=%016lx\n", |
6689 |
++ vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); |
6690 |
++ pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", |
6691 |
++ vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), |
6692 |
++ vmcs_readl(HOST_CR4)); |
6693 |
++ pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", |
6694 |
++ vmcs_readl(HOST_IA32_SYSENTER_ESP), |
6695 |
++ vmcs_read32(HOST_IA32_SYSENTER_CS), |
6696 |
++ vmcs_readl(HOST_IA32_SYSENTER_EIP)); |
6697 |
++ if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) |
6698 |
++ pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", |
6699 |
++ vmcs_read64(HOST_IA32_EFER), |
6700 |
++ vmcs_read64(HOST_IA32_PAT)); |
6701 |
++ if (cpu_has_load_perf_global_ctrl() && |
6702 |
++ vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) |
6703 |
++ pr_err("PerfGlobCtl = 0x%016llx\n", |
6704 |
++ vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); |
6705 |
++ |
6706 |
++ pr_err("*** Control State ***\n"); |
6707 |
++ pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", |
6708 |
++ pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); |
6709 |
++ pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); |
6710 |
++ pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", |
6711 |
++ vmcs_read32(EXCEPTION_BITMAP), |
6712 |
++ vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), |
6713 |
++ vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); |
6714 |
++ pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", |
6715 |
++ vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), |
6716 |
++ vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), |
6717 |
++ vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); |
6718 |
++ pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", |
6719 |
++ vmcs_read32(VM_EXIT_INTR_INFO), |
6720 |
++ vmcs_read32(VM_EXIT_INTR_ERROR_CODE), |
6721 |
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); |
6722 |
++ pr_err(" reason=%08x qualification=%016lx\n", |
6723 |
++ vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); |
6724 |
++ pr_err("IDTVectoring: info=%08x errcode=%08x\n", |
6725 |
++ vmcs_read32(IDT_VECTORING_INFO_FIELD), |
6726 |
++ vmcs_read32(IDT_VECTORING_ERROR_CODE)); |
6727 |
++ pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); |
6728 |
++ if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) |
6729 |
++ pr_err("TSC Multiplier = 0x%016llx\n", |
6730 |
++ vmcs_read64(TSC_MULTIPLIER)); |
6731 |
++ if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) { |
6732 |
++ if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { |
6733 |
++ u16 status = vmcs_read16(GUEST_INTR_STATUS); |
6734 |
++ pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff); |
6735 |
++ } |
6736 |
++ pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); |
6737 |
++ if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) |
6738 |
++ pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR)); |
6739 |
++ pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR)); |
6740 |
++ } |
6741 |
++ if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) |
6742 |
++ pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); |
6743 |
++ if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) |
6744 |
++ pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); |
6745 |
++ n = vmcs_read32(CR3_TARGET_COUNT); |
6746 |
++ for (i = 0; i + 1 < n; i += 4) |
6747 |
++ pr_err("CR3 target%u=%016lx target%u=%016lx\n", |
6748 |
++ i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2), |
6749 |
++ i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2)); |
6750 |
++ if (i < n) |
6751 |
++ pr_err("CR3 target%u=%016lx\n", |
6752 |
++ i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2)); |
6753 |
++ if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) |
6754 |
++ pr_err("PLE Gap=%08x Window=%08x\n", |
6755 |
++ vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); |
6756 |
++ if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) |
6757 |
++ pr_err("Virtual processor ID = 0x%04x\n", |
6758 |
++ vmcs_read16(VIRTUAL_PROCESSOR_ID)); |
6759 |
++} |
6760 |
++ |
6761 |
++/* |
6762 |
++ * The guest has exited. See if we can fix it or if we need userspace |
6763 |
++ * assistance. |
6764 |
++ */ |
6765 |
++static int vmx_handle_exit(struct kvm_vcpu *vcpu, |
6766 |
++ enum exit_fastpath_completion exit_fastpath) |
6767 |
++{ |
6768 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6769 |
++ u32 exit_reason = vmx->exit_reason; |
6770 |
++ u32 vectoring_info = vmx->idt_vectoring_info; |
6771 |
++ |
6772 |
++ trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); |
6773 |
++ |
6774 |
++ /* |
6775 |
++ * Flush the PML buffer of logged GPAs; this keeps dirty_bitmap more |
6776 |
++ * up to date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before |
6777 |
++ * querying dirty_bitmap, we only need to kick all vcpus out of guest |
6778 |
++ * mode, because if a vcpu is in root mode its PML buffer must have been |
6779 |
++ * flushed already. |
6780 |
++ */ |
6781 |
++ if (enable_pml) |
6782 |
++ vmx_flush_pml_buffer(vcpu); |
6783 |
++ |
6784 |
++ /* If guest state is invalid, start emulating */ |
6785 |
++ if (vmx->emulation_required) |
6786 |
++ return handle_invalid_guest_state(vcpu); |
6787 |
++ |
6788 |
++ if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason)) |
6789 |
++ return nested_vmx_reflect_vmexit(vcpu, exit_reason); |
6790 |
++ |
6791 |
++ if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { |
6792 |
++ dump_vmcs(); |
6793 |
++ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
6794 |
++ vcpu->run->fail_entry.hardware_entry_failure_reason |
6795 |
++ = exit_reason; |
6796 |
++ return 0; |
6797 |
++ } |
6798 |
++ |
6799 |
++ if (unlikely(vmx->fail)) { |
6800 |
++ dump_vmcs(); |
6801 |
++ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
6802 |
++ vcpu->run->fail_entry.hardware_entry_failure_reason |
6803 |
++ = vmcs_read32(VM_INSTRUCTION_ERROR); |
6804 |
++ return 0; |
6805 |
++ } |
6806 |
++ |
6807 |
++ /* |
6808 |
++ * Note: |
6809 |
++ * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a |
6810 |
++ * delivery event, since that indicates the guest is accessing MMIO. |
6811 |
++ * The vm-exit can be triggered again after returning to the guest, |
6812 |
++ * which will cause an infinite loop. |
6813 |
++ */ |
6814 |
++ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && |
6815 |
++ (exit_reason != EXIT_REASON_EXCEPTION_NMI && |
6816 |
++ exit_reason != EXIT_REASON_EPT_VIOLATION && |
6817 |
++ exit_reason != EXIT_REASON_PML_FULL && |
6818 |
++ exit_reason != EXIT_REASON_TASK_SWITCH)) { |
6819 |
++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
6820 |
++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; |
6821 |
++ vcpu->run->internal.ndata = 3; |
6822 |
++ vcpu->run->internal.data[0] = vectoring_info; |
6823 |
++ vcpu->run->internal.data[1] = exit_reason; |
6824 |
++ vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; |
6825 |
++ if (exit_reason == EXIT_REASON_EPT_MISCONFIG) { |
6826 |
++ vcpu->run->internal.ndata++; |
6827 |
++ vcpu->run->internal.data[3] = |
6828 |
++ vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
6829 |
++ } |
6830 |
++ return 0; |
6831 |
++ } |
6832 |
++ |
6833 |
++ if (unlikely(!enable_vnmi && |
6834 |
++ vmx->loaded_vmcs->soft_vnmi_blocked)) { |
6835 |
++ if (vmx_interrupt_allowed(vcpu)) { |
6836 |
++ vmx->loaded_vmcs->soft_vnmi_blocked = 0; |
6837 |
++ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && |
6838 |
++ vcpu->arch.nmi_pending) { |
6839 |
++ /* |
6840 |
++ * This CPU doesn't support us in finding the end of an |
6841 |
++ * NMI-blocked window if the guest runs with IRQs |
6842 |
++ * disabled. So we pull the trigger after 1 s of |
6843 |
++ * futile waiting, but inform the user about this. |
6844 |
++ */ |
6845 |
++ printk(KERN_WARNING "%s: Breaking out of NMI-blocked " |
6846 |
++ "state on VCPU %d after 1 s timeout\n", |
6847 |
++ __func__, vcpu->vcpu_id); |
6848 |
++ vmx->loaded_vmcs->soft_vnmi_blocked = 0; |
6849 |
++ } |
6850 |
++ } |
6851 |
++ |
6852 |
++ if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) { |
6853 |
++ kvm_skip_emulated_instruction(vcpu); |
6854 |
++ return 1; |
6855 |
++ } else if (exit_reason < kvm_vmx_max_exit_handlers |
6856 |
++ && kvm_vmx_exit_handlers[exit_reason]) { |
6857 |
++#ifdef CONFIG_RETPOLINE |
6858 |
++ if (exit_reason == EXIT_REASON_MSR_WRITE) |
6859 |
++ return kvm_emulate_wrmsr(vcpu); |
6860 |
++ else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER) |
6861 |
++ return handle_preemption_timer(vcpu); |
6862 |
++ else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW) |
6863 |
++ return handle_interrupt_window(vcpu); |
6864 |
++ else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) |
6865 |
++ return handle_external_interrupt(vcpu); |
6866 |
++ else if (exit_reason == EXIT_REASON_HLT) |
6867 |
++ return kvm_emulate_halt(vcpu); |
6868 |
++ else if (exit_reason == EXIT_REASON_EPT_MISCONFIG) |
6869 |
++ return handle_ept_misconfig(vcpu); |
6870 |
++#endif |
6871 |
++ return kvm_vmx_exit_handlers[exit_reason](vcpu); |
6872 |
++ } else { |
6873 |
++ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", |
6874 |
++ exit_reason); |
6875 |
++ dump_vmcs(); |
6876 |
++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
6877 |
++ vcpu->run->internal.suberror = |
6878 |
++ KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; |
6879 |
++ vcpu->run->internal.ndata = 1; |
6880 |
++ vcpu->run->internal.data[0] = exit_reason; |
6881 |
++ return 0; |
6882 |
++ } |
6883 |
++} |
6884 |
++ |
6885 |
++/* |
6886 |
++ * Software based L1D cache flush which is used when microcode providing |
6887 |
++ * the cache control MSR is not loaded. |
6888 |
++ * |
6889 |
++ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to |
6890 |
++ * flush it we need to read 64 KiB because the replacement algorithm |
6891 |
++ * is not exactly LRU. This could be sized at runtime via topology |
6892 |
++ * information but as all relevant affected CPUs have 32KiB L1D cache size |
6893 |
++ * there is no point in doing so. |
6894 |
++ */ |
6895 |
++static void vmx_l1d_flush(struct kvm_vcpu *vcpu) |
6896 |
++{ |
6897 |
++ int size = PAGE_SIZE << L1D_CACHE_ORDER; |
6898 |
++ |
6899 |
++ /* |
6900 |
++ * This code is only executed when the flush mode is 'cond' or |
6901 |
++ * 'always' |
6902 |
++ */ |
6903 |
++ if (static_branch_likely(&vmx_l1d_flush_cond)) { |
6904 |
++ bool flush_l1d; |
6905 |
++ |
6906 |
++ /* |
6907 |
++ * Clear the per-vcpu flush bit, it gets set again |
6908 |
++ * either from vcpu_run() or from one of the unsafe |
6909 |
++ * VMEXIT handlers. |
6910 |
++ */ |
6911 |
++ flush_l1d = vcpu->arch.l1tf_flush_l1d; |
6912 |
++ vcpu->arch.l1tf_flush_l1d = false; |
6913 |
++ |
6914 |
++ /* |
6915 |
++ * Clear the per-cpu flush bit, it gets set again from |
6916 |
++ * the interrupt handlers. |
6917 |
++ */ |
6918 |
++ flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); |
6919 |
++ kvm_clear_cpu_l1tf_flush_l1d(); |
6920 |
++ |
6921 |
++ if (!flush_l1d) |
6922 |
++ return; |
6923 |
++ } |
6924 |
++ |
6925 |
++ vcpu->stat.l1d_flush++; |
6926 |
++ |
6927 |
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { |
6928 |
++ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); |
6929 |
++ return; |
6930 |
++ } |
6931 |
++ |
6932 |
++ asm volatile( |
6933 |
++ /* First ensure the pages are in the TLB */ |
6934 |
++ "xorl %%eax, %%eax\n" |
6935 |
++ ".Lpopulate_tlb:\n\t" |
6936 |
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" |
6937 |
++ "addl $4096, %%eax\n\t" |
6938 |
++ "cmpl %%eax, %[size]\n\t" |
6939 |
++ "jne .Lpopulate_tlb\n\t" |
6940 |
++ "xorl %%eax, %%eax\n\t" |
6941 |
++ "cpuid\n\t" |
6942 |
++ /* Now fill the cache */ |
6943 |
++ "xorl %%eax, %%eax\n" |
6944 |
++ ".Lfill_cache:\n" |
6945 |
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" |
6946 |
++ "addl $64, %%eax\n\t" |
6947 |
++ "cmpl %%eax, %[size]\n\t" |
6948 |
++ "jne .Lfill_cache\n\t" |
6949 |
++ "lfence\n" |
6950 |
++ :: [flush_pages] "r" (vmx_l1d_flush_pages), |
6951 |
++ [size] "r" (size) |
6952 |
++ : "eax", "ebx", "ecx", "edx"); |
6953 |
++} |
6954 |
++ |
6955 |
++static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
6956 |
++{ |
6957 |
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
6958 |
++ int tpr_threshold; |
6959 |
++ |
6960 |
++ if (is_guest_mode(vcpu) && |
6961 |
++ nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) |
6962 |
++ return; |
6963 |
++ |
6964 |
++ tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; |
6965 |
++ if (is_guest_mode(vcpu)) |
6966 |
++ to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; |
6967 |
++ else |
6968 |
++ vmcs_write32(TPR_THRESHOLD, tpr_threshold); |
6969 |
++} |
6970 |
++ |
6971 |
++void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) |
6972 |
++{ |
6973 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
6974 |
++ u32 sec_exec_control; |
6975 |
++ |
6976 |
++ if (!lapic_in_kernel(vcpu)) |
6977 |
++ return; |
6978 |
++ |
6979 |
++ if (!flexpriority_enabled && |
6980 |
++ !cpu_has_vmx_virtualize_x2apic_mode()) |
6981 |
++ return; |
6982 |
++ |
6983 |
++ /* Postpone execution until vmcs01 is the current VMCS. */ |
6984 |
++ if (is_guest_mode(vcpu)) { |
6985 |
++ vmx->nested.change_vmcs01_virtual_apic_mode = true; |
6986 |
++ return; |
6987 |
++ } |
6988 |
++ |
6989 |
++ sec_exec_control = secondary_exec_controls_get(vmx); |
6990 |
++ sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
6991 |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); |
6992 |
++ |
6993 |
++ switch (kvm_get_apic_mode(vcpu)) { |
6994 |
++ case LAPIC_MODE_INVALID: |
6995 |
++ WARN_ONCE(true, "Invalid local APIC state"); |
6996 |
++ case LAPIC_MODE_DISABLED: |
6997 |
++ break; |
6998 |
++ case LAPIC_MODE_XAPIC: |
6999 |
++ if (flexpriority_enabled) { |
7000 |
++ sec_exec_control |= |
7001 |
++ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; |
7002 |
++ vmx_flush_tlb(vcpu, true); |
7003 |
++ } |
7004 |
++ break; |
7005 |
++ case LAPIC_MODE_X2APIC: |
7006 |
++ if (cpu_has_vmx_virtualize_x2apic_mode()) |
7007 |
++ sec_exec_control |= |
7008 |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; |
7009 |
++ break; |
7010 |
++ } |
7011 |
++ secondary_exec_controls_set(vmx, sec_exec_control); |
7012 |
++ |
7013 |
++ vmx_update_msr_bitmap(vcpu); |
7014 |
++} |
7015 |
++ |
7016 |
++static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) |
7017 |
++{ |
7018 |
++ if (!is_guest_mode(vcpu)) { |
7019 |
++ vmcs_write64(APIC_ACCESS_ADDR, hpa); |
7020 |
++ vmx_flush_tlb(vcpu, true); |
7021 |
++ } |
7022 |
++} |
7023 |
++ |
7024 |
++static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) |
7025 |
++{ |
7026 |
++ u16 status; |
7027 |
++ u8 old; |
7028 |
++ |
7029 |
++ if (max_isr == -1) |
7030 |
++ max_isr = 0; |
7031 |
++ |
7032 |
++ status = vmcs_read16(GUEST_INTR_STATUS); |
7033 |
++ old = status >> 8; |
7034 |
++ if (max_isr != old) { |
7035 |
++ status &= 0xff; |
7036 |
++ status |= max_isr << 8; |
7037 |
++ vmcs_write16(GUEST_INTR_STATUS, status); |
7038 |
++ } |
7039 |
++} |
7040 |
++ |
7041 |
++static void vmx_set_rvi(int vector) |
7042 |
++{ |
7043 |
++ u16 status; |
7044 |
++ u8 old; |
7045 |
++ |
7046 |
++ if (vector == -1) |
7047 |
++ vector = 0; |
7048 |
++ |
7049 |
++ status = vmcs_read16(GUEST_INTR_STATUS); |
7050 |
++ old = (u8)status & 0xff; |
7051 |
++ if ((u8)vector != old) { |
7052 |
++ status &= ~0xff; |
7053 |
++ status |= (u8)vector; |
7054 |
++ vmcs_write16(GUEST_INTR_STATUS, status); |
7055 |
++ } |
7056 |
++} |
7057 |
++ |
7058 |
++static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) |
7059 |
++{ |
7060 |
++ /* |
7061 |
++ * When running L2, updating RVI is only relevant when |
7062 |
++ * vmcs12 virtual-interrupt-delivery is enabled. |
7063 |
++ * However, it can be enabled only when L1 also |
7064 |
++ * intercepts external-interrupts and in that case |
7065 |
++ * we should not update vmcs02 RVI but instead intercept the |
7066 |
++ * interrupt. Therefore, do nothing when running L2. |
7067 |
++ */ |
7068 |
++ if (!is_guest_mode(vcpu)) |
7069 |
++ vmx_set_rvi(max_irr); |
7070 |
++} |
7071 |
++ |
7072 |
++static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) |
7073 |
++{ |
7074 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7075 |
++ int max_irr; |
7076 |
++ bool max_irr_updated; |
7077 |
++ |
7078 |
++ WARN_ON(!vcpu->arch.apicv_active); |
7079 |
++ if (pi_test_on(&vmx->pi_desc)) { |
7080 |
++ pi_clear_on(&vmx->pi_desc); |
7081 |
++ /* |
7082 |
++ * IOMMU can write to PID.ON, so the barrier matters even on UP. |
7083 |
++ * But on x86 this is just a compiler barrier anyway. |
7084 |
++ */ |
7085 |
++ smp_mb__after_atomic(); |
7086 |
++ max_irr_updated = |
7087 |
++ kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); |
7088 |
++ |
7089 |
++ /* |
7090 |
++ * If we are running L2 and L1 has a new pending interrupt |
7091 |
++ * which can be injected, we should re-evaluate |
7092 |
++ * what should be done with this new L1 interrupt. |
7093 |
++ * If L1 intercepts external-interrupts, we should |
7094 |
++ * exit from L2 to L1. Otherwise, interrupt should be |
7095 |
++ * delivered directly to L2. |
7096 |
++ */ |
7097 |
++ if (is_guest_mode(vcpu) && max_irr_updated) { |
7098 |
++ if (nested_exit_on_intr(vcpu)) |
7099 |
++ kvm_vcpu_exiting_guest_mode(vcpu); |
7100 |
++ else |
7101 |
++ kvm_make_request(KVM_REQ_EVENT, vcpu); |
7102 |
++ } |
7103 |
++ } else { |
7104 |
++ max_irr = kvm_lapic_find_highest_irr(vcpu); |
7105 |
++ } |
7106 |
++ vmx_hwapic_irr_update(vcpu, max_irr); |
7107 |
++ return max_irr; |
7108 |
++} |
7109 |
++ |
7110 |
++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) |
7111 |
++{ |
7112 |
++ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
7113 |
++ |
7114 |
++ return pi_test_on(pi_desc) || |
7115 |
++ (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc)); |
7116 |
++} |
7117 |
++ |
7118 |
++static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) |
7119 |
++{ |
7120 |
++ if (!kvm_vcpu_apicv_active(vcpu)) |
7121 |
++ return; |
7122 |
++ |
7123 |
++ vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); |
7124 |
++ vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); |
7125 |
++ vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); |
7126 |
++ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); |
7127 |
++} |
7128 |
++ |
7129 |
++static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) |
7130 |
++{ |
7131 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7132 |
++ |
7133 |
++ pi_clear_on(&vmx->pi_desc); |
7134 |
++ memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); |
7135 |
++} |
7136 |
++ |
7137 |
++static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) |
7138 |
++{ |
7139 |
++ vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
7140 |
++ |
7141 |
++ /* if exit due to PF check for async PF */ |
7142 |
++ if (is_page_fault(vmx->exit_intr_info)) |
7143 |
++ vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); |
7144 |
++ |
7145 |
++ /* Handle machine checks before interrupts are enabled */ |
7146 |
++ if (is_machine_check(vmx->exit_intr_info)) |
7147 |
++ kvm_machine_check(); |
7148 |
++ |
7149 |
++ /* We need to handle NMIs before interrupts are enabled */ |
7150 |
++ if (is_nmi(vmx->exit_intr_info)) { |
7151 |
++ kvm_before_interrupt(&vmx->vcpu); |
7152 |
++ asm("int $2"); |
7153 |
++ kvm_after_interrupt(&vmx->vcpu); |
7154 |
++ } |
7155 |
++} |
7156 |
++ |
7157 |
++static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) |
7158 |
++{ |
7159 |
++ unsigned int vector; |
7160 |
++ unsigned long entry; |
7161 |
++#ifdef CONFIG_X86_64 |
7162 |
++ unsigned long tmp; |
7163 |
++#endif |
7164 |
++ gate_desc *desc; |
7165 |
++ u32 intr_info; |
7166 |
++ |
7167 |
++ intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
7168 |
++ if (WARN_ONCE(!is_external_intr(intr_info), |
7169 |
++ "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) |
7170 |
++ return; |
7171 |
++ |
7172 |
++ vector = intr_info & INTR_INFO_VECTOR_MASK; |
7173 |
++ desc = (gate_desc *)host_idt_base + vector; |
7174 |
++ entry = gate_offset(desc); |
7175 |
++ |
7176 |
++ kvm_before_interrupt(vcpu); |
7177 |
++ |
7178 |
++ asm volatile( |
7179 |
++#ifdef CONFIG_X86_64 |
7180 |
++ "mov %%" _ASM_SP ", %[sp]\n\t" |
7181 |
++ "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" |
7182 |
++ "push $%c[ss]\n\t" |
7183 |
++ "push %[sp]\n\t" |
7184 |
++#endif |
7185 |
++ "pushf\n\t" |
7186 |
++ __ASM_SIZE(push) " $%c[cs]\n\t" |
7187 |
++ CALL_NOSPEC |
7188 |
++ : |
7189 |
++#ifdef CONFIG_X86_64 |
7190 |
++ [sp]"=&r"(tmp), |
7191 |
++#endif |
7192 |
++ ASM_CALL_CONSTRAINT |
7193 |
++ : |
7194 |
++ THUNK_TARGET(entry), |
7195 |
++ [ss]"i"(__KERNEL_DS), |
7196 |
++ [cs]"i"(__KERNEL_CS) |
7197 |
++ ); |
7198 |
++ |
7199 |
++ kvm_after_interrupt(vcpu); |
7200 |
++} |
7201 |
++STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff); |
7202 |
++ |
7203 |
++static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu, |
7204 |
++ enum exit_fastpath_completion *exit_fastpath) |
7205 |
++{ |
7206 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7207 |
++ |
7208 |
++ if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) |
7209 |
++ handle_external_interrupt_irqoff(vcpu); |
7210 |
++ else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI) |
7211 |
++ handle_exception_nmi_irqoff(vmx); |
7212 |
++ else if (!is_guest_mode(vcpu) && |
7213 |
++ vmx->exit_reason == EXIT_REASON_MSR_WRITE) |
7214 |
++ *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); |
7215 |
++} |
7216 |
++ |
7217 |
++static bool vmx_has_emulated_msr(int index) |
7218 |
++{ |
7219 |
++ switch (index) { |
7220 |
++ case MSR_IA32_SMBASE: |
7221 |
++ /* |
7222 |
++ * We cannot do SMM unless we can run the guest in big |
7223 |
++ * real mode. |
7224 |
++ */ |
7225 |
++ return enable_unrestricted_guest || emulate_invalid_guest_state; |
7226 |
++ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
7227 |
++ return nested; |
7228 |
++ case MSR_AMD64_VIRT_SPEC_CTRL: |
7229 |
++ /* This is AMD only. */ |
7230 |
++ return false; |
7231 |
++ default: |
7232 |
++ return true; |
7233 |
++ } |
7234 |
++} |
7235 |
++ |
7236 |
++static bool vmx_pt_supported(void) |
7237 |
++{ |
7238 |
++ return pt_mode == PT_MODE_HOST_GUEST; |
7239 |
++} |
7240 |
++ |
7241 |
++static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) |
7242 |
++{ |
7243 |
++ u32 exit_intr_info; |
7244 |
++ bool unblock_nmi; |
7245 |
++ u8 vector; |
7246 |
++ bool idtv_info_valid; |
7247 |
++ |
7248 |
++ idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; |
7249 |
++ |
7250 |
++ if (enable_vnmi) { |
7251 |
++ if (vmx->loaded_vmcs->nmi_known_unmasked) |
7252 |
++ return; |
7253 |
++ /* |
7254 |
++ * Can't use vmx->exit_intr_info since we're not sure what |
7255 |
++ * the exit reason is. |
7256 |
++ */ |
7257 |
++ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
7258 |
++ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; |
7259 |
++ vector = exit_intr_info & INTR_INFO_VECTOR_MASK; |
7260 |
++ /* |
7261 |
++ * SDM 3: 27.7.1.2 (September 2008) |
7262 |
++ * Re-set bit "block by NMI" before VM entry if vmexit caused by |
7263 |
++ * a guest IRET fault. |
7264 |
++ * SDM 3: 23.2.2 (September 2008) |
7265 |
++ * Bit 12 is undefined in any of the following cases: |
7266 |
++ * If the VM exit sets the valid bit in the IDT-vectoring |
7267 |
++ * information field. |
7268 |
++ * If the VM exit is due to a double fault. |
7269 |
++ */ |
7270 |
++ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && |
7271 |
++ vector != DF_VECTOR && !idtv_info_valid) |
7272 |
++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
7273 |
++ GUEST_INTR_STATE_NMI); |
7274 |
++ else |
7275 |
++ vmx->loaded_vmcs->nmi_known_unmasked = |
7276 |
++ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) |
7277 |
++ & GUEST_INTR_STATE_NMI); |
7278 |
++ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) |
7279 |
++ vmx->loaded_vmcs->vnmi_blocked_time += |
7280 |
++ ktime_to_ns(ktime_sub(ktime_get(), |
7281 |
++ vmx->loaded_vmcs->entry_time)); |
7282 |
++} |
7283 |
++ |
7284 |
++static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, |
7285 |
++ u32 idt_vectoring_info, |
7286 |
++ int instr_len_field, |
7287 |
++ int error_code_field) |
7288 |
++{ |
7289 |
++ u8 vector; |
7290 |
++ int type; |
7291 |
++ bool idtv_info_valid; |
7292 |
++ |
7293 |
++ idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; |
7294 |
++ |
7295 |
++ vcpu->arch.nmi_injected = false; |
7296 |
++ kvm_clear_exception_queue(vcpu); |
7297 |
++ kvm_clear_interrupt_queue(vcpu); |
7298 |
++ |
7299 |
++ if (!idtv_info_valid) |
7300 |
++ return; |
7301 |
++ |
7302 |
++ kvm_make_request(KVM_REQ_EVENT, vcpu); |
7303 |
++ |
7304 |
++ vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; |
7305 |
++ type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; |
7306 |
++ |
7307 |
++ switch (type) { |
7308 |
++ case INTR_TYPE_NMI_INTR: |
7309 |
++ vcpu->arch.nmi_injected = true; |
7310 |
++ /* |
7311 |
++ * SDM 3: 27.7.1.2 (September 2008) |
7312 |
++ * Clear bit "block by NMI" before VM entry if a NMI |
7313 |
++ * delivery faulted. |
7314 |
++ */ |
7315 |
++ vmx_set_nmi_mask(vcpu, false); |
7316 |
++ break; |
7317 |
++ case INTR_TYPE_SOFT_EXCEPTION: |
7318 |
++ vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); |
7319 |
++ /* fall through */ |
7320 |
++ case INTR_TYPE_HARD_EXCEPTION: |
7321 |
++ if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { |
7322 |
++ u32 err = vmcs_read32(error_code_field); |
7323 |
++ kvm_requeue_exception_e(vcpu, vector, err); |
7324 |
++ } else |
7325 |
++ kvm_requeue_exception(vcpu, vector); |
7326 |
++ break; |
7327 |
++ case INTR_TYPE_SOFT_INTR: |
7328 |
++ vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); |
7329 |
++ /* fall through */ |
7330 |
++ case INTR_TYPE_EXT_INTR: |
7331 |
++ kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); |
7332 |
++ break; |
7333 |
++ default: |
7334 |
++ break; |
7335 |
++ } |
7336 |
++} |
7337 |
++ |
7338 |
++static void vmx_complete_interrupts(struct vcpu_vmx *vmx) |
7339 |
++{ |
7340 |
++ __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, |
7341 |
++ VM_EXIT_INSTRUCTION_LEN, |
7342 |
++ IDT_VECTORING_ERROR_CODE); |
7343 |
++} |
7344 |
++ |
7345 |
++static void vmx_cancel_injection(struct kvm_vcpu *vcpu) |
7346 |
++{ |
7347 |
++ __vmx_complete_interrupts(vcpu, |
7348 |
++ vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), |
7349 |
++ VM_ENTRY_INSTRUCTION_LEN, |
7350 |
++ VM_ENTRY_EXCEPTION_ERROR_CODE); |
7351 |
++ |
7352 |
++ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); |
7353 |
++} |
7354 |
++ |
7355 |
++static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) |
7356 |
++{ |
7357 |
++ int i, nr_msrs; |
7358 |
++ struct perf_guest_switch_msr *msrs; |
7359 |
++ |
7360 |
++ msrs = perf_guest_get_msrs(&nr_msrs); |
7361 |
++ |
7362 |
++ if (!msrs) |
7363 |
++ return; |
7364 |
++ |
7365 |
++ for (i = 0; i < nr_msrs; i++) |
7366 |
++ if (msrs[i].host == msrs[i].guest) |
7367 |
++ clear_atomic_switch_msr(vmx, msrs[i].msr); |
7368 |
++ else |
7369 |
++ add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, |
7370 |
++ msrs[i].host, false); |
7371 |
++} |
7372 |
++ |
7373 |
++static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx) |
7374 |
++{ |
7375 |
++ u32 host_umwait_control; |
7376 |
++ |
7377 |
++ if (!vmx_has_waitpkg(vmx)) |
7378 |
++ return; |
7379 |
++ |
7380 |
++ host_umwait_control = get_umwait_control_msr(); |
7381 |
++ |
7382 |
++ if (vmx->msr_ia32_umwait_control != host_umwait_control) |
7383 |
++ add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL, |
7384 |
++ vmx->msr_ia32_umwait_control, |
7385 |
++ host_umwait_control, false); |
7386 |
++ else |
7387 |
++ clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL); |
7388 |
++} |
7389 |
++ |
7390 |
++static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) |
7391 |
++{ |
7392 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7393 |
++ u64 tscl; |
7394 |
++ u32 delta_tsc; |
7395 |
++ |
7396 |
++ if (vmx->req_immediate_exit) { |
7397 |
++ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); |
7398 |
++ vmx->loaded_vmcs->hv_timer_soft_disabled = false; |
7399 |
++ } else if (vmx->hv_deadline_tsc != -1) { |
7400 |
++ tscl = rdtsc(); |
7401 |
++ if (vmx->hv_deadline_tsc > tscl) |
7402 |
++ /* set_hv_timer ensures the delta fits in 32-bits */ |
7403 |
++ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> |
7404 |
++ cpu_preemption_timer_multi); |
7405 |
++ else |
7406 |
++ delta_tsc = 0; |
7407 |
++ |
7408 |
++ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); |
7409 |
++ vmx->loaded_vmcs->hv_timer_soft_disabled = false; |
7410 |
++ } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { |
7411 |
++ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); |
7412 |
++ vmx->loaded_vmcs->hv_timer_soft_disabled = true; |
7413 |
++ } |
7414 |
++} |
7415 |
++ |
7416 |
++void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) |
7417 |
++{ |
7418 |
++ if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { |
7419 |
++ vmx->loaded_vmcs->host_state.rsp = host_rsp; |
7420 |
++ vmcs_writel(HOST_RSP, host_rsp); |
7421 |
++ } |
7422 |
++} |
7423 |
++ |
7424 |
++bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); |
7425 |
++ |
7426 |
++static void vmx_vcpu_run(struct kvm_vcpu *vcpu) |
7427 |
++{ |
7428 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7429 |
++ unsigned long cr3, cr4; |
7430 |
++ |
7431 |
++ /* Record the guest's net vcpu time for enforced NMI injections. */ |
7432 |
++ if (unlikely(!enable_vnmi && |
7433 |
++ vmx->loaded_vmcs->soft_vnmi_blocked)) |
7434 |
++ vmx->loaded_vmcs->entry_time = ktime_get(); |
7435 |
++ |
7436 |
++ /* Don't enter VMX if guest state is invalid, let the exit handler |
7437 |
++ start emulation until we arrive back to a valid state */ |
7438 |
++ if (vmx->emulation_required) |
7439 |
++ return; |
7440 |
++ |
7441 |
++ if (vmx->ple_window_dirty) { |
7442 |
++ vmx->ple_window_dirty = false; |
7443 |
++ vmcs_write32(PLE_WINDOW, vmx->ple_window); |
7444 |
++ } |
7445 |
++ |
7446 |
++ if (vmx->nested.need_vmcs12_to_shadow_sync) |
7447 |
++ nested_sync_vmcs12_to_shadow(vcpu); |
7448 |
++ |
7449 |
++ if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) |
7450 |
++ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); |
7451 |
++ if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) |
7452 |
++ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); |
7453 |
++ |
7454 |
++ cr3 = __get_current_cr3_fast(); |
7455 |
++ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { |
7456 |
++ vmcs_writel(HOST_CR3, cr3); |
7457 |
++ vmx->loaded_vmcs->host_state.cr3 = cr3; |
7458 |
++ } |
7459 |
++ |
7460 |
++ cr4 = cr4_read_shadow(); |
7461 |
++ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { |
7462 |
++ vmcs_writel(HOST_CR4, cr4); |
7463 |
++ vmx->loaded_vmcs->host_state.cr4 = cr4; |
7464 |
++ } |
7465 |
++ |
7466 |
++ /* When single-stepping over STI and MOV SS, we must clear the |
7467 |
++ * corresponding interruptibility bits in the guest state. Otherwise |
7468 |
++ * vmentry fails as it then expects bit 14 (BS) in pending debug |
7469 |
++ * exceptions being set, but that's not correct for the guest debugging |
7470 |
++ * case. */ |
7471 |
++ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) |
7472 |
++ vmx_set_interrupt_shadow(vcpu, 0); |
7473 |
++ |
7474 |
++ kvm_load_guest_xsave_state(vcpu); |
7475 |
++ |
7476 |
++ if (static_cpu_has(X86_FEATURE_PKU) && |
7477 |
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && |
7478 |
++ vcpu->arch.pkru != vmx->host_pkru) |
7479 |
++ __write_pkru(vcpu->arch.pkru); |
7480 |
++ |
7481 |
++ pt_guest_enter(vmx); |
7482 |
++ |
7483 |
++ atomic_switch_perf_msrs(vmx); |
7484 |
++ atomic_switch_umwait_control_msr(vmx); |
7485 |
++ |
7486 |
++ if (enable_preemption_timer) |
7487 |
++ vmx_update_hv_timer(vcpu); |
7488 |
++ |
7489 |
++ if (lapic_in_kernel(vcpu) && |
7490 |
++ vcpu->arch.apic->lapic_timer.timer_advance_ns) |
7491 |
++ kvm_wait_lapic_expire(vcpu); |
7492 |
++ |
7493 |
++ /* |
7494 |
++ * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
7495 |
++ * it's non-zero. Since vmentry is serialising on affected CPUs, there |
7496 |
++ * is no need to worry about the conditional branch over the wrmsr |
7497 |
++ * being speculatively taken. |
7498 |
++ */ |
7499 |
++ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); |
7500 |
++ |
7501 |
++ /* L1D Flush includes CPU buffer clear to mitigate MDS */ |
7502 |
++ if (static_branch_unlikely(&vmx_l1d_should_flush)) |
7503 |
++ vmx_l1d_flush(vcpu); |
7504 |
++ else if (static_branch_unlikely(&mds_user_clear)) |
7505 |
++ mds_clear_cpu_buffers(); |
7506 |
++ |
7507 |
++ if (vcpu->arch.cr2 != read_cr2()) |
7508 |
++ write_cr2(vcpu->arch.cr2); |
7509 |
++ |
7510 |
++ vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, |
7511 |
++ vmx->loaded_vmcs->launched); |
7512 |
++ |
7513 |
++ vcpu->arch.cr2 = read_cr2(); |
7514 |
++ |
7515 |
++ /* |
7516 |
++ * We do not use IBRS in the kernel. If this vCPU has used the |
7517 |
++ * SPEC_CTRL MSR it may have left it on; save the value and |
7518 |
++ * turn it off. This is much more efficient than blindly adding |
7519 |
++ * it to the atomic save/restore list. Especially as the former |
7520 |
++ * (Saving guest MSRs on vmexit) doesn't even exist in KVM. |
7521 |
++ * |
7522 |
++ * For non-nested case: |
7523 |
++ * If the L01 MSR bitmap does not intercept the MSR, then we need to |
7524 |
++ * save it. |
7525 |
++ * |
7526 |
++ * For nested case: |
7527 |
++ * If the L02 MSR bitmap does not intercept the MSR, then we need to |
7528 |
++ * save it. |
7529 |
++ */ |
7530 |
++ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
7531 |
++ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
7532 |
++ |
7533 |
++ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); |
7534 |
++ |
7535 |
++ /* All fields are clean at this point */ |
7536 |
++ if (static_branch_unlikely(&enable_evmcs)) |
7537 |
++ current_evmcs->hv_clean_fields |= |
7538 |
++ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; |
7539 |
++ |
7540 |
++ if (static_branch_unlikely(&enable_evmcs)) |
7541 |
++ current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index; |
7542 |
++ |
7543 |
++ /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ |
7544 |
++ if (vmx->host_debugctlmsr) |
7545 |
++ update_debugctlmsr(vmx->host_debugctlmsr); |
7546 |
++ |
7547 |
++#ifndef CONFIG_X86_64 |
7548 |
++ /* |
7549 |
++ * The sysexit path does not restore ds/es, so we must set them to |
7550 |
++ * a reasonable value ourselves. |
7551 |
++ * |
7552 |
++ * We can't defer this to vmx_prepare_switch_to_host() since that |
7553 |
++	 * restores segments around it, nullifying its effect. |
7554 |
++ * restore segments around it, nullifying its effect. |
7555 |
++ */ |
7556 |
++ loadsegment(ds, __USER_DS); |
7557 |
++ loadsegment(es, __USER_DS); |
7558 |
++#endif |
7559 |
++ |
7560 |
++ vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) |
7561 |
++ | (1 << VCPU_EXREG_RFLAGS) |
7562 |
++ | (1 << VCPU_EXREG_PDPTR) |
7563 |
++ | (1 << VCPU_EXREG_SEGMENTS) |
7564 |
++ | (1 << VCPU_EXREG_CR3)); |
7565 |
++ vcpu->arch.regs_dirty = 0; |
7566 |
++ |
7567 |
++ pt_guest_exit(vmx); |
7568 |
++ |
7569 |
++ /* |
7570 |
++ * eager fpu is enabled if PKEY is supported and CR4 is switched |
7571 |
++ * back on host, so it is safe to read guest PKRU from current |
7572 |
++ * XSAVE. |
7573 |
++ */ |
7574 |
++ if (static_cpu_has(X86_FEATURE_PKU) && |
7575 |
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { |
7576 |
++ vcpu->arch.pkru = rdpkru(); |
7577 |
++ if (vcpu->arch.pkru != vmx->host_pkru) |
7578 |
++ __write_pkru(vmx->host_pkru); |
7579 |
++ } |
7580 |
++ |
7581 |
++ kvm_load_host_xsave_state(vcpu); |
7582 |
++ |
7583 |
++ vmx->nested.nested_run_pending = 0; |
7584 |
++ vmx->idt_vectoring_info = 0; |
7585 |
++ |
7586 |
++ vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); |
7587 |
++ if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) |
7588 |
++ kvm_machine_check(); |
7589 |
++ |
7590 |
++ if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) |
7591 |
++ return; |
7592 |
++ |
7593 |
++ vmx->loaded_vmcs->launched = 1; |
7594 |
++ vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); |
7595 |
++ |
7596 |
++ vmx_recover_nmi_blocking(vmx); |
7597 |
++ vmx_complete_interrupts(vmx); |
7598 |
++} |
7599 |
++ |
7600 |
++static struct kvm *vmx_vm_alloc(void) |
7601 |
++{ |
7602 |
++ struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx), |
7603 |
++ GFP_KERNEL_ACCOUNT | __GFP_ZERO, |
7604 |
++ PAGE_KERNEL); |
7605 |
++ return &kvm_vmx->kvm; |
7606 |
++} |
7607 |
++ |
7608 |
++static void vmx_vm_free(struct kvm *kvm) |
7609 |
++{ |
7610 |
++ kfree(kvm->arch.hyperv.hv_pa_pg); |
7611 |
++ vfree(to_kvm_vmx(kvm)); |
7612 |
++} |
7613 |
++ |
7614 |
++static void vmx_free_vcpu(struct kvm_vcpu *vcpu) |
7615 |
++{ |
7616 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7617 |
++ |
7618 |
++ if (enable_pml) |
7619 |
++ vmx_destroy_pml_buffer(vmx); |
7620 |
++ free_vpid(vmx->vpid); |
7621 |
++ nested_vmx_free_vcpu(vcpu); |
7622 |
++ free_loaded_vmcs(vmx->loaded_vmcs); |
7623 |
++ kvm_vcpu_uninit(vcpu); |
7624 |
++ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); |
7625 |
++ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); |
7626 |
++ kmem_cache_free(kvm_vcpu_cache, vmx); |
7627 |
++} |
7628 |
++ |
7629 |
++static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) |
7630 |
++{ |
7631 |
++ int err; |
7632 |
++ struct vcpu_vmx *vmx; |
7633 |
++ unsigned long *msr_bitmap; |
7634 |
++ int i, cpu; |
7635 |
++ |
7636 |
++ BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0, |
7637 |
++ "struct kvm_vcpu must be at offset 0 for arch usercopy region"); |
7638 |
++ |
7639 |
++ vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); |
7640 |
++ if (!vmx) |
7641 |
++ return ERR_PTR(-ENOMEM); |
7642 |
++ |
7643 |
++ vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, |
7644 |
++ GFP_KERNEL_ACCOUNT); |
7645 |
++ if (!vmx->vcpu.arch.user_fpu) { |
7646 |
++ printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n"); |
7647 |
++ err = -ENOMEM; |
7648 |
++ goto free_partial_vcpu; |
7649 |
++ } |
7650 |
++ |
7651 |
++ vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, |
7652 |
++ GFP_KERNEL_ACCOUNT); |
7653 |
++ if (!vmx->vcpu.arch.guest_fpu) { |
7654 |
++ printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); |
7655 |
++ err = -ENOMEM; |
7656 |
++ goto free_user_fpu; |
7657 |
++ } |
7658 |
++ |
7659 |
++ vmx->vpid = allocate_vpid(); |
7660 |
++ |
7661 |
++ err = kvm_vcpu_init(&vmx->vcpu, kvm, id); |
7662 |
++ if (err) |
7663 |
++ goto free_vcpu; |
7664 |
++ |
7665 |
++ err = -ENOMEM; |
7666 |
++ |
7667 |
++ /* |
7668 |
++ * If PML is turned on, failure on enabling PML just results in failure |
7669 |
++ * of creating the vcpu, therefore we can simplify PML logic (by |
7670 |
++ * avoiding dealing with cases, such as enabling PML partially on vcpus |
7671 |
++ * for the guest), etc. |
7672 |
++ */ |
7673 |
++ if (enable_pml) { |
7674 |
++ vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
7675 |
++ if (!vmx->pml_pg) |
7676 |
++ goto uninit_vcpu; |
7677 |
++ } |
7678 |
++ |
7679 |
++ BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS); |
7680 |
++ |
7681 |
++ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { |
7682 |
++ u32 index = vmx_msr_index[i]; |
7683 |
++ u32 data_low, data_high; |
7684 |
++ int j = vmx->nmsrs; |
7685 |
++ |
7686 |
++ if (rdmsr_safe(index, &data_low, &data_high) < 0) |
7687 |
++ continue; |
7688 |
++ if (wrmsr_safe(index, data_low, data_high) < 0) |
7689 |
++ continue; |
7690 |
++ |
7691 |
++ vmx->guest_msrs[j].index = i; |
7692 |
++ vmx->guest_msrs[j].data = 0; |
7693 |
++ switch (index) { |
7694 |
++ case MSR_IA32_TSX_CTRL: |
7695 |
++ /* |
7696 |
++ * No need to pass TSX_CTRL_CPUID_CLEAR through, so |
7697 |
++ * let's avoid changing CPUID bits under the host |
7698 |
++ * kernel's feet. |
7699 |
++ */ |
7700 |
++ vmx->guest_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; |
7701 |
++ break; |
7702 |
++ default: |
7703 |
++ vmx->guest_msrs[j].mask = -1ull; |
7704 |
++ break; |
7705 |
++ } |
7706 |
++ ++vmx->nmsrs; |
7707 |
++ } |
7708 |
++ |
7709 |
++ err = alloc_loaded_vmcs(&vmx->vmcs01); |
7710 |
++ if (err < 0) |
7711 |
++ goto free_pml; |
7712 |
++ |
7713 |
++ msr_bitmap = vmx->vmcs01.msr_bitmap; |
7714 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R); |
7715 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); |
7716 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); |
7717 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); |
7718 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); |
7719 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); |
7720 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); |
7721 |
++ if (kvm_cstate_in_guest(kvm)) { |
7722 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R); |
7723 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R); |
7724 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R); |
7725 |
++ vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R); |
7726 |
++ } |
7727 |
++ vmx->msr_bitmap_mode = 0; |
7728 |
++ |
7729 |
++ vmx->loaded_vmcs = &vmx->vmcs01; |
7730 |
++ cpu = get_cpu(); |
7731 |
++ vmx_vcpu_load(&vmx->vcpu, cpu); |
7732 |
++ vmx->vcpu.cpu = cpu; |
7733 |
++ init_vmcs(vmx); |
7734 |
++ vmx_vcpu_put(&vmx->vcpu); |
7735 |
++ put_cpu(); |
7736 |
++ if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { |
7737 |
++ err = alloc_apic_access_page(kvm); |
7738 |
++ if (err) |
7739 |
++ goto free_vmcs; |
7740 |
++ } |
7741 |
++ |
7742 |
++ if (enable_ept && !enable_unrestricted_guest) { |
7743 |
++ err = init_rmode_identity_map(kvm); |
7744 |
++ if (err) |
7745 |
++ goto free_vmcs; |
7746 |
++ } |
7747 |
++ |
7748 |
++ if (nested) |
7749 |
++ nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, |
7750 |
++ vmx_capability.ept, |
7751 |
++ kvm_vcpu_apicv_active(&vmx->vcpu)); |
7752 |
++ else |
7753 |
++ memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); |
7754 |
++ |
7755 |
++ vmx->nested.posted_intr_nv = -1; |
7756 |
++ vmx->nested.current_vmptr = -1ull; |
7757 |
++ |
7758 |
++ vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; |
7759 |
++ |
7760 |
++ /* |
7761 |
++ * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR |
7762 |
++ * or POSTED_INTR_WAKEUP_VECTOR. |
7763 |
++ */ |
7764 |
++ vmx->pi_desc.nv = POSTED_INTR_VECTOR; |
7765 |
++ vmx->pi_desc.sn = 1; |
7766 |
++ |
7767 |
++ vmx->ept_pointer = INVALID_PAGE; |
7768 |
++ |
7769 |
++ return &vmx->vcpu; |
7770 |
++ |
7771 |
++free_vmcs: |
7772 |
++ free_loaded_vmcs(vmx->loaded_vmcs); |
7773 |
++free_pml: |
7774 |
++ vmx_destroy_pml_buffer(vmx); |
7775 |
++uninit_vcpu: |
7776 |
++ kvm_vcpu_uninit(&vmx->vcpu); |
7777 |
++free_vcpu: |
7778 |
++ free_vpid(vmx->vpid); |
7779 |
++ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); |
7780 |
++free_user_fpu: |
7781 |
++ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); |
7782 |
++free_partial_vcpu: |
7783 |
++ kmem_cache_free(kvm_vcpu_cache, vmx); |
7784 |
++ return ERR_PTR(err); |
7785 |
++} |
7786 |
++ |
7787 |
++#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" |
7788 |
++#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" |
7789 |
++ |
7790 |
++static int vmx_vm_init(struct kvm *kvm) |
7791 |
++{ |
7792 |
++ spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); |
7793 |
++ |
7794 |
++ if (!ple_gap) |
7795 |
++ kvm->arch.pause_in_guest = true; |
7796 |
++ |
7797 |
++ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { |
7798 |
++ switch (l1tf_mitigation) { |
7799 |
++ case L1TF_MITIGATION_OFF: |
7800 |
++ case L1TF_MITIGATION_FLUSH_NOWARN: |
7801 |
++ /* 'I explicitly don't care' is set */ |
7802 |
++ break; |
7803 |
++ case L1TF_MITIGATION_FLUSH: |
7804 |
++ case L1TF_MITIGATION_FLUSH_NOSMT: |
7805 |
++ case L1TF_MITIGATION_FULL: |
7806 |
++ /* |
7807 |
++ * Warn upon starting the first VM in a potentially |
7808 |
++ * insecure environment. |
7809 |
++ */ |
7810 |
++ if (sched_smt_active()) |
7811 |
++ pr_warn_once(L1TF_MSG_SMT); |
7812 |
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) |
7813 |
++ pr_warn_once(L1TF_MSG_L1D); |
7814 |
++ break; |
7815 |
++ case L1TF_MITIGATION_FULL_FORCE: |
7816 |
++ /* Flush is enforced */ |
7817 |
++ break; |
7818 |
++ } |
7819 |
++ } |
7820 |
++ return 0; |
7821 |
++} |
7822 |
++ |
7823 |
++static int __init vmx_check_processor_compat(void) |
7824 |
++{ |
7825 |
++ struct vmcs_config vmcs_conf; |
7826 |
++ struct vmx_capability vmx_cap; |
7827 |
++ |
7828 |
++ if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) |
7829 |
++ return -EIO; |
7830 |
++ if (nested) |
7831 |
++ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept, |
7832 |
++ enable_apicv); |
7833 |
++ if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { |
7834 |
++ printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", |
7835 |
++ smp_processor_id()); |
7836 |
++ return -EIO; |
7837 |
++ } |
7838 |
++ return 0; |
7839 |
++} |
7840 |
++ |
7841 |
++static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) |
7842 |
++{ |
7843 |
++ u8 cache; |
7844 |
++ u64 ipat = 0; |
7845 |
++ |
7846 |
++ /* For VT-d and EPT combination |
7847 |
++ * 1. MMIO: always map as UC |
7848 |
++ * 2. EPT with VT-d: |
7849 |
++ * a. VT-d without snooping control feature: can't guarantee the |
7850 |
++ * result, try to trust guest. |
7851 |
++ * b. VT-d with snooping control feature: snooping control feature of |
7852 |
++ * VT-d engine can guarantee the cache correctness. Just set it |
7853 |
++ * to WB to keep consistent with host. So the same as item 3. |
7854 |
++ * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep |
7855 |
++ * consistent with host MTRR |
7856 |
++ */ |
7857 |
++ if (is_mmio) { |
7858 |
++ cache = MTRR_TYPE_UNCACHABLE; |
7859 |
++ goto exit; |
7860 |
++ } |
7861 |
++ |
7862 |
++ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { |
7863 |
++ ipat = VMX_EPT_IPAT_BIT; |
7864 |
++ cache = MTRR_TYPE_WRBACK; |
7865 |
++ goto exit; |
7866 |
++ } |
7867 |
++ |
7868 |
++ if (kvm_read_cr0(vcpu) & X86_CR0_CD) { |
7869 |
++ ipat = VMX_EPT_IPAT_BIT; |
7870 |
++ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
7871 |
++ cache = MTRR_TYPE_WRBACK; |
7872 |
++ else |
7873 |
++ cache = MTRR_TYPE_UNCACHABLE; |
7874 |
++ goto exit; |
7875 |
++ } |
7876 |
++ |
7877 |
++ cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); |
7878 |
++ |
7879 |
++exit: |
7880 |
++ return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; |
7881 |
++} |
7882 |
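As a worked example of the return value above, assuming the usual field layout where the EPT memory type occupies bits 5:3 (VMX_EPT_MT_EPTE_SHIFT == 3) and "ignore PAT" is bit 6 (VMX_EPT_IPAT_BIT): for ordinary RAM in a guest with no non-coherent DMA, cache is MTRR_TYPE_WRBACK (6) and ipat is set, so the function returns (6 << 3) | (1 << 6) = 0x70, i.e. a write-back EPT mapping that ignores the guest PAT.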
++ |
7883 |
++static int vmx_get_lpage_level(void) |
7884 |
++{ |
7885 |
++ if (enable_ept && !cpu_has_vmx_ept_1g_page()) |
7886 |
++ return PT_DIRECTORY_LEVEL; |
7887 |
++ else |
7888 |
++ /* For shadow and EPT supported 1GB page */ |
7889 |
++ return PT_PDPE_LEVEL; |
7890 |
++} |
7891 |
++ |
7892 |
++static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx) |
7893 |
++{ |
7894 |
++ /* |
7895 |
++ * These bits in the secondary execution controls field |
7896 |
++ * are dynamic, the others are mostly based on the hypervisor |
7897 |
++ * architecture and the guest's CPUID. Do not touch the |
7898 |
++ * dynamic bits. |
7899 |
++ */ |
7900 |
++ u32 mask = |
7901 |
++ SECONDARY_EXEC_SHADOW_VMCS | |
7902 |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | |
7903 |
++ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
7904 |
++ SECONDARY_EXEC_DESC; |
7905 |
++ |
7906 |
++ u32 new_ctl = vmx->secondary_exec_control; |
7907 |
++ u32 cur_ctl = secondary_exec_controls_get(vmx); |
7908 |
++ |
7909 |
++ secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask)); |
7910 |
++} |
7911 |
++ |
7912 |
++/* |
7913 |
++ * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits |
7914 |
++ * (indicating "allowed-1") if they are supported in the guest's CPUID. |
7915 |
++ */ |
7916 |
++static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) |
7917 |
++{ |
7918 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7919 |
++ struct kvm_cpuid_entry2 *entry; |
7920 |
++ |
7921 |
++ vmx->nested.msrs.cr0_fixed1 = 0xffffffff; |
7922 |
++ vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; |
7923 |
++ |
7924 |
++#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ |
7925 |
++ if (entry && (entry->_reg & (_cpuid_mask))) \ |
7926 |
++ vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ |
7927 |
++} while (0) |
7928 |
++ |
7929 |
++ entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); |
7930 |
++ cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); |
7931 |
++ cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); |
7932 |
++ cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); |
7933 |
++ cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); |
7934 |
++ cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); |
7935 |
++ cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); |
7936 |
++ cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); |
7937 |
++ cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); |
7938 |
++ cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); |
7939 |
++ cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); |
7940 |
++ cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); |
7941 |
++ cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); |
7942 |
++ cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); |
7943 |
++ cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); |
7944 |
++ |
7945 |
++ entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); |
7946 |
++ cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); |
7947 |
++ cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); |
7948 |
++ cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); |
7949 |
++ cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); |
7950 |
++ cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP)); |
7951 |
++ cr4_fixed1_update(X86_CR4_LA57, ecx, bit(X86_FEATURE_LA57)); |
7952 |
++ |
7953 |
++#undef cr4_fixed1_update |
7954 |
++} |
7955 |
++ |
7956 |
++static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) |
7957 |
++{ |
7958 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7959 |
++ |
7960 |
++ if (kvm_mpx_supported()) { |
7961 |
++ bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); |
7962 |
++ |
7963 |
++ if (mpx_enabled) { |
7964 |
++ vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; |
7965 |
++ vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; |
7966 |
++ } else { |
7967 |
++ vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; |
7968 |
++ vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; |
7969 |
++ } |
7970 |
++ } |
7971 |
++} |
7972 |
++ |
7973 |
++static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) |
7974 |
++{ |
7975 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
7976 |
++ struct kvm_cpuid_entry2 *best = NULL; |
7977 |
++ int i; |
7978 |
++ |
7979 |
++ for (i = 0; i < PT_CPUID_LEAVES; i++) { |
7980 |
++ best = kvm_find_cpuid_entry(vcpu, 0x14, i); |
7981 |
++ if (!best) |
7982 |
++ return; |
7983 |
++ vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; |
7984 |
++ vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; |
7985 |
++ vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; |
7986 |
++ vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; |
7987 |
++ } |
7988 |
++ |
7989 |
++ /* Get the number of configurable Address Ranges for filtering */ |
7990 |
++ vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, |
7991 |
++ PT_CAP_num_address_ranges); |
7992 |
++ |
7993 |
++ /* Initialize and clear the no dependency bits */ |
7994 |
++ vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | |
7995 |
++ RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC); |
7996 |
++ |
7997 |
++ /* |
7998 |
++ * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise |
7999 |
++ * will inject an #GP |
8000 |
++ */ |
8001 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) |
8002 |
++ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; |
8003 |
++ |
8004 |
++ /* |
8005 |
++ * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and |
8006 |
++ * PSBFreq can be set |
8007 |
++ */ |
8008 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) |
8009 |
++ vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | |
8010 |
++ RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); |
8011 |
++ |
8012 |
++ /* |
8013 |
++ * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn BranchEn and |
8014 |
++ * MTCFreq can be set |
8015 |
++ */ |
8016 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) |
8017 |
++ vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | |
8018 |
++ RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE); |
8019 |
++ |
8020 |
++ /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ |
8021 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) |
8022 |
++ vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | |
8023 |
++ RTIT_CTL_PTW_EN); |
8024 |
++ |
8025 |
++ /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ |
8026 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) |
8027 |
++ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; |
8028 |
++ |
8029 |
++ /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ |
8030 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) |
8031 |
++ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; |
8032 |
++ |
8033 |
++	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ |
8034 |
++ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) |
8035 |
++ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; |
8036 |
++ |
8037 |
++ /* unmask address range configure area */ |
8038 |
++ for (i = 0; i < vmx->pt_desc.addr_range; i++) |
8039 |
++ vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); |
8040 |
++} |
8041 |
++ |
8042 |
++static void vmx_cpuid_update(struct kvm_vcpu *vcpu) |
8043 |
++{ |
8044 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
8045 |
++ |
8046 |
++ /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ |
8047 |
++ vcpu->arch.xsaves_enabled = false; |
8048 |
++ |
8049 |
++ if (cpu_has_secondary_exec_ctrls()) { |
8050 |
++ vmx_compute_secondary_exec_control(vmx); |
8051 |
++ vmcs_set_secondary_exec_control(vmx); |
8052 |
++ } |
8053 |
++ |
8054 |
++ if (nested_vmx_allowed(vcpu)) |
8055 |
++ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= |
8056 |
++ FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX | |
8057 |
++ FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; |
8058 |
++ else |
8059 |
++ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= |
8060 |
++ ~(FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX | |
8061 |
++ FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX); |
8062 |
++ |
8063 |
++ if (nested_vmx_allowed(vcpu)) { |
8064 |
++ nested_vmx_cr_fixed1_bits_update(vcpu); |
8065 |
++ nested_vmx_entry_exit_ctls_update(vcpu); |
8066 |
++ } |
8067 |
++ |
8068 |
++ if (boot_cpu_has(X86_FEATURE_INTEL_PT) && |
8069 |
++ guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT)) |
8070 |
++ update_intel_pt_cfg(vcpu); |
8071 |
++ |
8072 |
++ if (boot_cpu_has(X86_FEATURE_RTM)) { |
8073 |
++ struct shared_msr_entry *msr; |
8074 |
++ msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL); |
8075 |
++ if (msr) { |
8076 |
++ bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM); |
8077 |
++ vmx_set_guest_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE); |
8078 |
++ } |
8079 |
++ } |
8080 |
++} |
8081 |
++ |
8082 |
++static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) |
8083 |
++{ |
8084 |
++ if (func == 1 && nested) |
8085 |
++ entry->ecx |= bit(X86_FEATURE_VMX); |
8086 |
++} |
8087 |
++ |
8088 |
++static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) |
8089 |
++{ |
8090 |
++ to_vmx(vcpu)->req_immediate_exit = true; |
8091 |
++} |
8092 |
++ |
8093 |
++static int vmx_check_intercept(struct kvm_vcpu *vcpu, |
8094 |
++ struct x86_instruction_info *info, |
8095 |
++ enum x86_intercept_stage stage) |
8096 |
++{ |
8097 |
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
8098 |
++ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; |
8099 |
++ |
8100 |
++ /* |
8101 |
++ * RDPID causes #UD if disabled through secondary execution controls. |
8102 |
++ * Because it is marked as EmulateOnUD, we need to intercept it here. |
8103 |
++ */ |
8104 |
++ if (info->intercept == x86_intercept_rdtscp && |
8105 |
++ !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { |
8106 |
++ ctxt->exception.vector = UD_VECTOR; |
8107 |
++ ctxt->exception.error_code_valid = false; |
8108 |
++ return X86EMUL_PROPAGATE_FAULT; |
8109 |
++ } |
8110 |
++ |
8111 |
++ /* TODO: check more intercepts... */ |
8112 |
++ return X86EMUL_CONTINUE; |
8113 |
++} |
8114 |
++ |
8115 |
++#ifdef CONFIG_X86_64 |
8116 |
++/* (a << shift) / divisor, return 1 if overflow otherwise 0 */ |
8117 |
++static inline int u64_shl_div_u64(u64 a, unsigned int shift, |
8118 |
++ u64 divisor, u64 *result) |
8119 |
++{ |
8120 |
++ u64 low = a << shift, high = a >> (64 - shift); |
8121 |
++ |
8122 |
++ /* To avoid the overflow on divq */ |
8123 |
++ if (high >= divisor) |
8124 |
++ return 1; |
8125 |
++ |
8126 |
++	/* Low holds the result, high holds the remainder, which is discarded */ |
8127 |
++ asm("divq %2\n\t" : "=a" (low), "=d" (high) : |
8128 |
++ "rm" (divisor), "0" (low), "1" (high)); |
8129 |
++ *result = low; |
8130 |
++ |
8131 |
++ return 0; |
8132 |
++} |
8133 |
++ |
8134 |
++static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, |
8135 |
++ bool *expired) |
8136 |
++{ |
8137 |
++ struct vcpu_vmx *vmx; |
8138 |
++ u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; |
8139 |
++ struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; |
8140 |
++ |
8141 |
++ if (kvm_mwait_in_guest(vcpu->kvm) || |
8142 |
++ kvm_can_post_timer_interrupt(vcpu)) |
8143 |
++ return -EOPNOTSUPP; |
8144 |
++ |
8145 |
++ vmx = to_vmx(vcpu); |
8146 |
++ tscl = rdtsc(); |
8147 |
++ guest_tscl = kvm_read_l1_tsc(vcpu, tscl); |
8148 |
++ delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; |
8149 |
++ lapic_timer_advance_cycles = nsec_to_cycles(vcpu, |
8150 |
++ ktimer->timer_advance_ns); |
8151 |
++ |
8152 |
++ if (delta_tsc > lapic_timer_advance_cycles) |
8153 |
++ delta_tsc -= lapic_timer_advance_cycles; |
8154 |
++ else |
8155 |
++ delta_tsc = 0; |
8156 |
++ |
8157 |
++ /* Convert to host delta tsc if tsc scaling is enabled */ |
8158 |
++ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && |
8159 |
++ delta_tsc && u64_shl_div_u64(delta_tsc, |
8160 |
++ kvm_tsc_scaling_ratio_frac_bits, |
8161 |
++ vcpu->arch.tsc_scaling_ratio, &delta_tsc)) |
8162 |
++ return -ERANGE; |
8163 |
++ |
8164 |
++ /* |
8165 |
++	 * If the delta tsc can't fit in 32 bits after the multiplier shift, |
8166 |
++ * we can't use the preemption timer. |
8167 |
++ * It's possible that it fits on later vmentries, but checking |
8168 |
++ * on every vmentry is costly so we just use an hrtimer. |
8169 |
++ */ |
8170 |
++ if (delta_tsc >> (cpu_preemption_timer_multi + 32)) |
8171 |
++ return -ERANGE; |
8172 |
++ |
8173 |
++ vmx->hv_deadline_tsc = tscl + delta_tsc; |
8174 |
++ *expired = !delta_tsc; |
8175 |
++ return 0; |
8176 |
++} |
8177 |
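A note on the final range check in vmx_set_hv_timer() above: the VMX preemption timer is a 32-bit counter that ticks once every 2^N TSC cycles, where N is the rate field from MSR_IA32_VMX_MISC (cpu_preemption_timer_multi here). The value that would eventually be programmed is delta_tsc >> N, so the deadline is only representable when no bits survive above bit 31 after that shift. A minimal illustrative sketch of the test, not part of the patch:

static inline bool preemption_timer_fits(u64 delta_tsc, unsigned int multi)
{
	/* Programmed value would be delta_tsc >> multi; it must fit in 32 bits. */
	return (delta_tsc >> (multi + 32)) == 0;
}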
++ |
8178 |
++static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) |
8179 |
++{ |
8180 |
++ to_vmx(vcpu)->hv_deadline_tsc = -1; |
8181 |
++} |
8182 |
++#endif |
8183 |
++ |
8184 |
++static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) |
8185 |
++{ |
8186 |
++ if (!kvm_pause_in_guest(vcpu->kvm)) |
8187 |
++ shrink_ple_window(vcpu); |
8188 |
++} |
8189 |
++ |
8190 |
++static void vmx_slot_enable_log_dirty(struct kvm *kvm, |
8191 |
++ struct kvm_memory_slot *slot) |
8192 |
++{ |
8193 |
++ kvm_mmu_slot_leaf_clear_dirty(kvm, slot); |
8194 |
++ kvm_mmu_slot_largepage_remove_write_access(kvm, slot); |
8195 |
++} |
8196 |
++ |
8197 |
++static void vmx_slot_disable_log_dirty(struct kvm *kvm, |
8198 |
++ struct kvm_memory_slot *slot) |
8199 |
++{ |
8200 |
++ kvm_mmu_slot_set_dirty(kvm, slot); |
8201 |
++} |
8202 |
++ |
8203 |
++static void vmx_flush_log_dirty(struct kvm *kvm) |
8204 |
++{ |
8205 |
++ kvm_flush_pml_buffers(kvm); |
8206 |
++} |
8207 |
++ |
8208 |
++static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) |
8209 |
++{ |
8210 |
++ struct vmcs12 *vmcs12; |
8211 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
8212 |
++ gpa_t gpa, dst; |
8213 |
++ |
8214 |
++ if (is_guest_mode(vcpu)) { |
8215 |
++ WARN_ON_ONCE(vmx->nested.pml_full); |
8216 |
++ |
8217 |
++ /* |
8218 |
++ * Check if PML is enabled for the nested guest. |
8219 |
++ * Whether eptp bit 6 is set is already checked |
8220 |
++ * as part of A/D emulation. |
8221 |
++ */ |
8222 |
++ vmcs12 = get_vmcs12(vcpu); |
8223 |
++ if (!nested_cpu_has_pml(vmcs12)) |
8224 |
++ return 0; |
8225 |
++ |
8226 |
++ if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { |
8227 |
++ vmx->nested.pml_full = true; |
8228 |
++ return 1; |
8229 |
++ } |
8230 |
++ |
8231 |
++ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; |
8232 |
++ dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; |
8233 |
++ |
8234 |
++ if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, |
8235 |
++ offset_in_page(dst), sizeof(gpa))) |
8236 |
++ return 0; |
8237 |
++ |
8238 |
++ vmcs12->guest_pml_index--; |
8239 |
++ } |
8240 |
++ |
8241 |
++ return 0; |
8242 |
++} |
8243 |
++ |
8244 |
++static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, |
8245 |
++ struct kvm_memory_slot *memslot, |
8246 |
++ gfn_t offset, unsigned long mask) |
8247 |
++{ |
8248 |
++ kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); |
8249 |
++} |
8250 |
++ |
8251 |
++static void __pi_post_block(struct kvm_vcpu *vcpu) |
8252 |
++{ |
8253 |
++ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
8254 |
++ struct pi_desc old, new; |
8255 |
++ unsigned int dest; |
8256 |
++ |
8257 |
++ do { |
8258 |
++ old.control = new.control = pi_desc->control; |
8259 |
++ WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, |
8260 |
++ "Wakeup handler not enabled while the VCPU is blocked\n"); |
8261 |
++ |
8262 |
++ dest = cpu_physical_id(vcpu->cpu); |
8263 |
++ |
8264 |
++ if (x2apic_enabled()) |
8265 |
++ new.ndst = dest; |
8266 |
++ else |
8267 |
++ new.ndst = (dest << 8) & 0xFF00; |
8268 |
++ |
8269 |
++ /* set 'NV' to 'notification vector' */ |
8270 |
++ new.nv = POSTED_INTR_VECTOR; |
8271 |
++ } while (cmpxchg64(&pi_desc->control, old.control, |
8272 |
++ new.control) != old.control); |
8273 |
++ |
8274 |
++ if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { |
8275 |
++ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); |
8276 |
++ list_del(&vcpu->blocked_vcpu_list); |
8277 |
++ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); |
8278 |
++ vcpu->pre_pcpu = -1; |
8279 |
++ } |
8280 |
++} |
8281 |
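The ndst update in __pi_post_block() above (and mirrored in pi_pre_block() below) follows the posted-interrupt descriptor format: in x2APIC mode the destination field holds the full 32-bit APIC ID, while in xAPIC mode the 8-bit ID has to sit in bits 15:8, which is what (dest << 8) & 0xFF00 produces. A small illustrative helper, not part of the patch:

static inline u32 pi_encode_ndst(u32 apic_id, bool x2apic)
{
	/* xAPIC: the 8-bit destination ID lives in bits 15:8 of NDST. */
	return x2apic ? apic_id : (apic_id << 8) & 0xFF00;
}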
++ |
8282 |
++/* |
8283 |
++ * This routine does the following things for a vCPU that is going |
8284 |
++ * to be blocked if VT-d PI is enabled. |
8285 |
++ * - Store the vCPU to the wakeup list, so when interrupts happen |
8286 |
++ * we can find the right vCPU to wake up. |
8287 |
++ * - Change the Posted-interrupt descriptor as below: |
8288 |
++ * 'NDST' <-- vcpu->pre_pcpu |
8289 |
++ * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR |
8290 |
++ * - If 'ON' is set during this process, at least one interrupt |
8291 |
++ *   was posted for this vCPU and we cannot block it; in that |
8292 |
++ *   case return 1, otherwise return 0. |
8293 |
++ * |
8294 |
++ */ |
8295 |
++static int pi_pre_block(struct kvm_vcpu *vcpu) |
8296 |
++{ |
8297 |
++ unsigned int dest; |
8298 |
++ struct pi_desc old, new; |
8299 |
++ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
8300 |
++ |
8301 |
++ if (!kvm_arch_has_assigned_device(vcpu->kvm) || |
8302 |
++ !irq_remapping_cap(IRQ_POSTING_CAP) || |
8303 |
++ !kvm_vcpu_apicv_active(vcpu)) |
8304 |
++ return 0; |
8305 |
++ |
8306 |
++ WARN_ON(irqs_disabled()); |
8307 |
++ local_irq_disable(); |
8308 |
++ if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { |
8309 |
++ vcpu->pre_pcpu = vcpu->cpu; |
8310 |
++ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); |
8311 |
++ list_add_tail(&vcpu->blocked_vcpu_list, |
8312 |
++ &per_cpu(blocked_vcpu_on_cpu, |
8313 |
++ vcpu->pre_pcpu)); |
8314 |
++ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); |
8315 |
++ } |
8316 |
++ |
8317 |
++ do { |
8318 |
++ old.control = new.control = pi_desc->control; |
8319 |
++ |
8320 |
++ WARN((pi_desc->sn == 1), |
8321 |
++ "Warning: SN field of posted-interrupts " |
8322 |
++ "is set before blocking\n"); |
8323 |
++ |
8324 |
++ /* |
8325 |
++		 * Since the vCPU can be preempted during this process, |
8326 |
++		 * vcpu->cpu could differ from pre_pcpu, so we need to |
8327 |
++		 * set pre_pcpu as the destination of the wakeup |
8328 |
++		 * notification event; the wakeup handler can then find |
8329 |
++		 * the right vCPU to wake up if interrupts arrive while |
8330 |
++		 * the vCPU is in the blocked state. |
8331 |
++ */ |
8332 |
++ dest = cpu_physical_id(vcpu->pre_pcpu); |
8333 |
++ |
8334 |
++ if (x2apic_enabled()) |
8335 |
++ new.ndst = dest; |
8336 |
++ else |
8337 |
++ new.ndst = (dest << 8) & 0xFF00; |
8338 |
++ |
8339 |
++ /* set 'NV' to 'wakeup vector' */ |
8340 |
++ new.nv = POSTED_INTR_WAKEUP_VECTOR; |
8341 |
++ } while (cmpxchg64(&pi_desc->control, old.control, |
8342 |
++ new.control) != old.control); |
8343 |
++ |
8344 |
++ /* We should not block the vCPU if an interrupt is posted for it. */ |
8345 |
++ if (pi_test_on(pi_desc) == 1) |
8346 |
++ __pi_post_block(vcpu); |
8347 |
++ |
8348 |
++ local_irq_enable(); |
8349 |
++ return (vcpu->pre_pcpu == -1); |
8350 |
++} |
8351 |
++ |
8352 |
++static int vmx_pre_block(struct kvm_vcpu *vcpu) |
8353 |
++{ |
8354 |
++ if (pi_pre_block(vcpu)) |
8355 |
++ return 1; |
8356 |
++ |
8357 |
++ if (kvm_lapic_hv_timer_in_use(vcpu)) |
8358 |
++ kvm_lapic_switch_to_sw_timer(vcpu); |
8359 |
++ |
8360 |
++ return 0; |
8361 |
++} |
8362 |
++ |
8363 |
++static void pi_post_block(struct kvm_vcpu *vcpu) |
8364 |
++{ |
8365 |
++ if (vcpu->pre_pcpu == -1) |
8366 |
++ return; |
8367 |
++ |
8368 |
++ WARN_ON(irqs_disabled()); |
8369 |
++ local_irq_disable(); |
8370 |
++ __pi_post_block(vcpu); |
8371 |
++ local_irq_enable(); |
8372 |
++} |
8373 |
++ |
8374 |
++static void vmx_post_block(struct kvm_vcpu *vcpu) |
8375 |
++{ |
8376 |
++ if (kvm_x86_ops->set_hv_timer) |
8377 |
++ kvm_lapic_switch_to_hv_timer(vcpu); |
8378 |
++ |
8379 |
++ pi_post_block(vcpu); |
8380 |
++} |
8381 |
++ |
8382 |
++/* |
8383 |
++ * vmx_update_pi_irte - set IRTE for Posted-Interrupts |
8384 |
++ * |
8385 |
++ * @kvm: kvm |
8386 |
++ * @host_irq: host irq of the interrupt |
8387 |
++ * @guest_irq: gsi of the interrupt |
8388 |
++ * @set: set or unset PI |
8389 |
++ * returns 0 on success, < 0 on failure |
8390 |
++ */ |
8391 |
++static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, |
8392 |
++ uint32_t guest_irq, bool set) |
8393 |
++{ |
8394 |
++ struct kvm_kernel_irq_routing_entry *e; |
8395 |
++ struct kvm_irq_routing_table *irq_rt; |
8396 |
++ struct kvm_lapic_irq irq; |
8397 |
++ struct kvm_vcpu *vcpu; |
8398 |
++ struct vcpu_data vcpu_info; |
8399 |
++ int idx, ret = 0; |
8400 |
++ |
8401 |
++ if (!kvm_arch_has_assigned_device(kvm) || |
8402 |
++ !irq_remapping_cap(IRQ_POSTING_CAP) || |
8403 |
++ !kvm_vcpu_apicv_active(kvm->vcpus[0])) |
8404 |
++ return 0; |
8405 |
++ |
8406 |
++ idx = srcu_read_lock(&kvm->irq_srcu); |
8407 |
++ irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); |
8408 |
++ if (guest_irq >= irq_rt->nr_rt_entries || |
8409 |
++ hlist_empty(&irq_rt->map[guest_irq])) { |
8410 |
++ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", |
8411 |
++ guest_irq, irq_rt->nr_rt_entries); |
8412 |
++ goto out; |
8413 |
++ } |
8414 |
++ |
8415 |
++ hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { |
8416 |
++ if (e->type != KVM_IRQ_ROUTING_MSI) |
8417 |
++ continue; |
8418 |
++ /* |
8419 |
++ * VT-d PI cannot support posting multicast/broadcast |
8420 |
++		 * interrupts to a vCPU, so we still use interrupt remapping |
8421 |
++		 * for these kinds of interrupts. |
8422 |
++ * |
8423 |
++ * For lowest-priority interrupts, we only support |
8424 |
++ * those with single CPU as the destination, e.g. user |
8425 |
++ * configures the interrupts via /proc/irq or uses |
8426 |
++ * irqbalance to make the interrupts single-CPU. |
8427 |
++ * |
8428 |
++ * We will support full lowest-priority interrupt later. |
8429 |
++ * |
8430 |
++ * In addition, we can only inject generic interrupts using |
8431 |
++ * the PI mechanism, refuse to route others through it. |
8432 |
++ */ |
8433 |
++ |
8434 |
++ kvm_set_msi_irq(kvm, e, &irq); |
8435 |
++ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || |
8436 |
++ !kvm_irq_is_postable(&irq)) { |
8437 |
++ /* |
8438 |
++ * Make sure the IRTE is in remapped mode if |
8439 |
++ * we don't handle it in posted mode. |
8440 |
++ */ |
8441 |
++ ret = irq_set_vcpu_affinity(host_irq, NULL); |
8442 |
++ if (ret < 0) { |
8443 |
++ printk(KERN_INFO |
8444 |
++ "failed to back to remapped mode, irq: %u\n", |
8445 |
++ host_irq); |
8446 |
++ goto out; |
8447 |
++ } |
8448 |
++ |
8449 |
++ continue; |
8450 |
++ } |
8451 |
++ |
8452 |
++ vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); |
8453 |
++ vcpu_info.vector = irq.vector; |
8454 |
++ |
8455 |
++ trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, |
8456 |
++ vcpu_info.vector, vcpu_info.pi_desc_addr, set); |
8457 |
++ |
8458 |
++ if (set) |
8459 |
++ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); |
8460 |
++ else |
8461 |
++ ret = irq_set_vcpu_affinity(host_irq, NULL); |
8462 |
++ |
8463 |
++ if (ret < 0) { |
8464 |
++ printk(KERN_INFO "%s: failed to update PI IRTE\n", |
8465 |
++ __func__); |
8466 |
++ goto out; |
8467 |
++ } |
8468 |
++ } |
8469 |
++ |
8470 |
++ ret = 0; |
8471 |
++out: |
8472 |
++ srcu_read_unlock(&kvm->irq_srcu, idx); |
8473 |
++ return ret; |
8474 |
++} |
8475 |
++ |
8476 |
++static void vmx_setup_mce(struct kvm_vcpu *vcpu) |
8477 |
++{ |
8478 |
++ if (vcpu->arch.mcg_cap & MCG_LMCE_P) |
8479 |
++ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= |
8480 |
++ FEATURE_CONTROL_LMCE; |
8481 |
++ else |
8482 |
++ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= |
8483 |
++ ~FEATURE_CONTROL_LMCE; |
8484 |
++} |
8485 |
++ |
8486 |
++static int vmx_smi_allowed(struct kvm_vcpu *vcpu) |
8487 |
++{ |
8488 |
++ /* we need a nested vmexit to enter SMM, postpone if run is pending */ |
8489 |
++ if (to_vmx(vcpu)->nested.nested_run_pending) |
8490 |
++ return 0; |
8491 |
++ return 1; |
8492 |
++} |
8493 |
++ |
8494 |
++static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) |
8495 |
++{ |
8496 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
8497 |
++ |
8498 |
++ vmx->nested.smm.guest_mode = is_guest_mode(vcpu); |
8499 |
++ if (vmx->nested.smm.guest_mode) |
8500 |
++ nested_vmx_vmexit(vcpu, -1, 0, 0); |
8501 |
++ |
8502 |
++ vmx->nested.smm.vmxon = vmx->nested.vmxon; |
8503 |
++ vmx->nested.vmxon = false; |
8504 |
++ vmx_clear_hlt(vcpu); |
8505 |
++ return 0; |
8506 |
++} |
8507 |
++ |
8508 |
++static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) |
8509 |
++{ |
8510 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
8511 |
++ int ret; |
8512 |
++ |
8513 |
++ if (vmx->nested.smm.vmxon) { |
8514 |
++ vmx->nested.vmxon = true; |
8515 |
++ vmx->nested.smm.vmxon = false; |
8516 |
++ } |
8517 |
++ |
8518 |
++ if (vmx->nested.smm.guest_mode) { |
8519 |
++ ret = nested_vmx_enter_non_root_mode(vcpu, false); |
8520 |
++ if (ret) |
8521 |
++ return ret; |
8522 |
++ |
8523 |
++ vmx->nested.smm.guest_mode = false; |
8524 |
++ } |
8525 |
++ return 0; |
8526 |
++} |
8527 |
++ |
8528 |
++static int enable_smi_window(struct kvm_vcpu *vcpu) |
8529 |
++{ |
8530 |
++ return 0; |
8531 |
++} |
8532 |
++ |
8533 |
++static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) |
8534 |
++{ |
8535 |
++ return false; |
8536 |
++} |
8537 |
++ |
8538 |
++static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) |
8539 |
++{ |
8540 |
++ return to_vmx(vcpu)->nested.vmxon; |
8541 |
++} |
8542 |
++ |
8543 |
++static __init int hardware_setup(void) |
8544 |
++{ |
8545 |
++ unsigned long host_bndcfgs; |
8546 |
++ struct desc_ptr dt; |
8547 |
++ int r, i; |
8548 |
++ |
8549 |
++ rdmsrl_safe(MSR_EFER, &host_efer); |
8550 |
++ |
8551 |
++ store_idt(&dt); |
8552 |
++ host_idt_base = dt.address; |
8553 |
++ |
8554 |
++ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) |
8555 |
++ kvm_define_shared_msr(i, vmx_msr_index[i]); |
8556 |
++ |
8557 |
++ if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) |
8558 |
++ return -EIO; |
8559 |
++ |
8560 |
++ if (boot_cpu_has(X86_FEATURE_NX)) |
8561 |
++ kvm_enable_efer_bits(EFER_NX); |
8562 |
++ |
8563 |
++ if (boot_cpu_has(X86_FEATURE_MPX)) { |
8564 |
++ rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); |
8565 |
++ WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); |
8566 |
++ } |
8567 |
++ |
8568 |
++ if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || |
8569 |
++ !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) |
8570 |
++ enable_vpid = 0; |
8571 |
++ |
8572 |
++ if (!cpu_has_vmx_ept() || |
8573 |
++ !cpu_has_vmx_ept_4levels() || |
8574 |
++ !cpu_has_vmx_ept_mt_wb() || |
8575 |
++ !cpu_has_vmx_invept_global()) |
8576 |
++ enable_ept = 0; |
8577 |
++ |
8578 |
++ if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) |
8579 |
++ enable_ept_ad_bits = 0; |
8580 |
++ |
8581 |
++ if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) |
8582 |
++ enable_unrestricted_guest = 0; |
8583 |
++ |
8584 |
++ if (!cpu_has_vmx_flexpriority()) |
8585 |
++ flexpriority_enabled = 0; |
8586 |
++ |
8587 |
++ if (!cpu_has_virtual_nmis()) |
8588 |
++ enable_vnmi = 0; |
8589 |
++ |
8590 |
++ /* |
8591 |
++ * set_apic_access_page_addr() is used to reload apic access |
8592 |
++ * page upon invalidation. No need to do anything if not |
8593 |
++ * using the APIC_ACCESS_ADDR VMCS field. |
8594 |
++ */ |
8595 |
++ if (!flexpriority_enabled) |
8596 |
++ kvm_x86_ops->set_apic_access_page_addr = NULL; |
8597 |
++ |
8598 |
++ if (!cpu_has_vmx_tpr_shadow()) |
8599 |
++ kvm_x86_ops->update_cr8_intercept = NULL; |
8600 |
++ |
8601 |
++ if (enable_ept && !cpu_has_vmx_ept_2m_page()) |
8602 |
++ kvm_disable_largepages(); |
8603 |
++ |
8604 |
++#if IS_ENABLED(CONFIG_HYPERV) |
8605 |
++ if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH |
8606 |
++ && enable_ept) { |
8607 |
++ kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb; |
8608 |
++ kvm_x86_ops->tlb_remote_flush_with_range = |
8609 |
++ hv_remote_flush_tlb_with_range; |
8610 |
++ } |
8611 |
++#endif |
8612 |
++ |
8613 |
++ if (!cpu_has_vmx_ple()) { |
8614 |
++ ple_gap = 0; |
8615 |
++ ple_window = 0; |
8616 |
++ ple_window_grow = 0; |
8617 |
++ ple_window_max = 0; |
8618 |
++ ple_window_shrink = 0; |
8619 |
++ } |
8620 |
++ |
8621 |
++ if (!cpu_has_vmx_apicv()) { |
8622 |
++ enable_apicv = 0; |
8623 |
++ kvm_x86_ops->sync_pir_to_irr = NULL; |
8624 |
++ } |
8625 |
++ |
8626 |
++ if (cpu_has_vmx_tsc_scaling()) { |
8627 |
++ kvm_has_tsc_control = true; |
8628 |
++ kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; |
8629 |
++ kvm_tsc_scaling_ratio_frac_bits = 48; |
8630 |
++ } |
8631 |
++ |
8632 |
++ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ |
8633 |
++ |
8634 |
++ if (enable_ept) |
8635 |
++ vmx_enable_tdp(); |
8636 |
++ else |
8637 |
++ kvm_disable_tdp(); |
8638 |
++ |
8639 |
++ /* |
8640 |
++ * Only enable PML when hardware supports PML feature, and both EPT |
8641 |
++ * and EPT A/D bit features are enabled -- PML depends on them to work. |
8642 |
++ */ |
8643 |
++ if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) |
8644 |
++ enable_pml = 0; |
8645 |
++ |
8646 |
++ if (!enable_pml) { |
8647 |
++ kvm_x86_ops->slot_enable_log_dirty = NULL; |
8648 |
++ kvm_x86_ops->slot_disable_log_dirty = NULL; |
8649 |
++ kvm_x86_ops->flush_log_dirty = NULL; |
8650 |
++ kvm_x86_ops->enable_log_dirty_pt_masked = NULL; |
8651 |
++ } |
8652 |
++ |
8653 |
++ if (!cpu_has_vmx_preemption_timer()) |
8654 |
++ enable_preemption_timer = false; |
8655 |
++ |
8656 |
++ if (enable_preemption_timer) { |
8657 |
++ u64 use_timer_freq = 5000ULL * 1000 * 1000; |
8658 |
++ u64 vmx_msr; |
8659 |
++ |
8660 |
++ rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); |
8661 |
++ cpu_preemption_timer_multi = |
8662 |
++ vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; |
8663 |
++ |
8664 |
++ if (tsc_khz) |
8665 |
++ use_timer_freq = (u64)tsc_khz * 1000; |
8666 |
++ use_timer_freq >>= cpu_preemption_timer_multi; |
8667 |
++ |
8668 |
++ /* |
8669 |
++ * KVM "disables" the preemption timer by setting it to its max |
8670 |
++ * value. Don't use the timer if it might cause spurious exits |
8671 |
++ * at a rate faster than 0.1 Hz (of uninterrupted guest time). |
8672 |
++ */ |
8673 |
++ if (use_timer_freq > 0xffffffffu / 10) |
8674 |
++ enable_preemption_timer = false; |
8675 |
++ } |
8676 |
++ |
8677 |
++ if (!enable_preemption_timer) { |
8678 |
++ kvm_x86_ops->set_hv_timer = NULL; |
8679 |
++ kvm_x86_ops->cancel_hv_timer = NULL; |
8680 |
++ kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; |
8681 |
++ } |
8682 |
++ |
8683 |
++ kvm_set_posted_intr_wakeup_handler(wakeup_handler); |
8684 |
++ |
8685 |
++ kvm_mce_cap_supported |= MCG_LMCE_P; |
8686 |
++ |
8687 |
++ if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) |
8688 |
++ return -EINVAL; |
8689 |
++ if (!enable_ept || !cpu_has_vmx_intel_pt()) |
8690 |
++ pt_mode = PT_MODE_SYSTEM; |
8691 |
++ |
8692 |
++ if (nested) { |
8693 |
++ nested_vmx_setup_ctls_msrs(&vmcs_config.nested, |
8694 |
++ vmx_capability.ept, enable_apicv); |
8695 |
++ |
8696 |
++ r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); |
8697 |
++ if (r) |
8698 |
++ return r; |
8699 |
++ } |
8700 |
++ |
8701 |
++ r = alloc_kvm_area(); |
8702 |
++ if (r) |
8703 |
++ nested_vmx_hardware_unsetup(); |
8704 |
++ return r; |
8705 |
++} |
8706 |
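The 0.1 Hz bound checked in hardware_setup() above follows from the timer width: KVM "disables" the preemption timer by programming its maximum value 0xffffffff, which at an effective tick rate of use_timer_freq wraps (and forces a spurious exit) after 0xffffffff / use_timer_freq seconds. Requiring that interval to be at least 10 seconds of uninterrupted guest time gives the use_timer_freq > 0xffffffffu / 10 cutoff, roughly 429 MHz.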
++ |
8707 |
++static __exit void hardware_unsetup(void) |
8708 |
++{ |
8709 |
++ if (nested) |
8710 |
++ nested_vmx_hardware_unsetup(); |
8711 |
++ |
8712 |
++ free_kvm_area(); |
8713 |
++} |
8714 |
++ |
8715 |
++static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { |
8716 |
++ .cpu_has_kvm_support = cpu_has_kvm_support, |
8717 |
++ .disabled_by_bios = vmx_disabled_by_bios, |
8718 |
++ .hardware_setup = hardware_setup, |
8719 |
++ .hardware_unsetup = hardware_unsetup, |
8720 |
++ .check_processor_compatibility = vmx_check_processor_compat, |
8721 |
++ .hardware_enable = hardware_enable, |
8722 |
++ .hardware_disable = hardware_disable, |
8723 |
++ .cpu_has_accelerated_tpr = report_flexpriority, |
8724 |
++ .has_emulated_msr = vmx_has_emulated_msr, |
8725 |
++ |
8726 |
++ .vm_init = vmx_vm_init, |
8727 |
++ .vm_alloc = vmx_vm_alloc, |
8728 |
++ .vm_free = vmx_vm_free, |
8729 |
++ |
8730 |
++ .vcpu_create = vmx_create_vcpu, |
8731 |
++ .vcpu_free = vmx_free_vcpu, |
8732 |
++ .vcpu_reset = vmx_vcpu_reset, |
8733 |
++ |
8734 |
++ .prepare_guest_switch = vmx_prepare_switch_to_guest, |
8735 |
++ .vcpu_load = vmx_vcpu_load, |
8736 |
++ .vcpu_put = vmx_vcpu_put, |
8737 |
++ |
8738 |
++ .update_bp_intercept = update_exception_bitmap, |
8739 |
++ .get_msr_feature = vmx_get_msr_feature, |
8740 |
++ .get_msr = vmx_get_msr, |
8741 |
++ .set_msr = vmx_set_msr, |
8742 |
++ .get_segment_base = vmx_get_segment_base, |
8743 |
++ .get_segment = vmx_get_segment, |
8744 |
++ .set_segment = vmx_set_segment, |
8745 |
++ .get_cpl = vmx_get_cpl, |
8746 |
++ .get_cs_db_l_bits = vmx_get_cs_db_l_bits, |
8747 |
++ .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, |
8748 |
++ .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, |
8749 |
++ .set_cr0 = vmx_set_cr0, |
8750 |
++ .set_cr3 = vmx_set_cr3, |
8751 |
++ .set_cr4 = vmx_set_cr4, |
8752 |
++ .set_efer = vmx_set_efer, |
8753 |
++ .get_idt = vmx_get_idt, |
8754 |
++ .set_idt = vmx_set_idt, |
8755 |
++ .get_gdt = vmx_get_gdt, |
8756 |
++ .set_gdt = vmx_set_gdt, |
8757 |
++ .get_dr6 = vmx_get_dr6, |
8758 |
++ .set_dr6 = vmx_set_dr6, |
8759 |
++ .set_dr7 = vmx_set_dr7, |
8760 |
++ .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, |
8761 |
++ .cache_reg = vmx_cache_reg, |
8762 |
++ .get_rflags = vmx_get_rflags, |
8763 |
++ .set_rflags = vmx_set_rflags, |
8764 |
++ |
8765 |
++ .tlb_flush = vmx_flush_tlb, |
8766 |
++ .tlb_flush_gva = vmx_flush_tlb_gva, |
8767 |
++ |
8768 |
++ .run = vmx_vcpu_run, |
8769 |
++ .handle_exit = vmx_handle_exit, |
8770 |
++ .skip_emulated_instruction = skip_emulated_instruction, |
8771 |
++ .set_interrupt_shadow = vmx_set_interrupt_shadow, |
8772 |
++ .get_interrupt_shadow = vmx_get_interrupt_shadow, |
8773 |
++ .patch_hypercall = vmx_patch_hypercall, |
8774 |
++ .set_irq = vmx_inject_irq, |
8775 |
++ .set_nmi = vmx_inject_nmi, |
8776 |
++ .queue_exception = vmx_queue_exception, |
8777 |
++ .cancel_injection = vmx_cancel_injection, |
8778 |
++ .interrupt_allowed = vmx_interrupt_allowed, |
8779 |
++ .nmi_allowed = vmx_nmi_allowed, |
8780 |
++ .get_nmi_mask = vmx_get_nmi_mask, |
8781 |
++ .set_nmi_mask = vmx_set_nmi_mask, |
8782 |
++ .enable_nmi_window = enable_nmi_window, |
8783 |
++ .enable_irq_window = enable_irq_window, |
8784 |
++ .update_cr8_intercept = update_cr8_intercept, |
8785 |
++ .set_virtual_apic_mode = vmx_set_virtual_apic_mode, |
8786 |
++ .set_apic_access_page_addr = vmx_set_apic_access_page_addr, |
8787 |
++ .get_enable_apicv = vmx_get_enable_apicv, |
8788 |
++ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, |
8789 |
++ .load_eoi_exitmap = vmx_load_eoi_exitmap, |
8790 |
++ .apicv_post_state_restore = vmx_apicv_post_state_restore, |
8791 |
++ .hwapic_irr_update = vmx_hwapic_irr_update, |
8792 |
++ .hwapic_isr_update = vmx_hwapic_isr_update, |
8793 |
++ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, |
8794 |
++ .sync_pir_to_irr = vmx_sync_pir_to_irr, |
8795 |
++ .deliver_posted_interrupt = vmx_deliver_posted_interrupt, |
8796 |
++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt, |
8797 |
++ |
8798 |
++ .set_tss_addr = vmx_set_tss_addr, |
8799 |
++ .set_identity_map_addr = vmx_set_identity_map_addr, |
8800 |
++ .get_tdp_level = get_ept_level, |
8801 |
++ .get_mt_mask = vmx_get_mt_mask, |
8802 |
++ |
8803 |
++ .get_exit_info = vmx_get_exit_info, |
8804 |
++ |
8805 |
++ .get_lpage_level = vmx_get_lpage_level, |
8806 |
++ |
8807 |
++ .cpuid_update = vmx_cpuid_update, |
8808 |
++ |
8809 |
++ .rdtscp_supported = vmx_rdtscp_supported, |
8810 |
++ .invpcid_supported = vmx_invpcid_supported, |
8811 |
++ |
8812 |
++ .set_supported_cpuid = vmx_set_supported_cpuid, |
8813 |
++ |
8814 |
++ .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, |
8815 |
++ |
8816 |
++ .read_l1_tsc_offset = vmx_read_l1_tsc_offset, |
8817 |
++ .write_l1_tsc_offset = vmx_write_l1_tsc_offset, |
8818 |
++ |
8819 |
++ .set_tdp_cr3 = vmx_set_cr3, |
8820 |
++ |
8821 |
++ .check_intercept = vmx_check_intercept, |
8822 |
++ .handle_exit_irqoff = vmx_handle_exit_irqoff, |
8823 |
++ .mpx_supported = vmx_mpx_supported, |
8824 |
++ .xsaves_supported = vmx_xsaves_supported, |
8825 |
++ .umip_emulated = vmx_umip_emulated, |
8826 |
++ .pt_supported = vmx_pt_supported, |
8827 |
++ |
8828 |
++ .request_immediate_exit = vmx_request_immediate_exit, |
8829 |
++ |
8830 |
++ .sched_in = vmx_sched_in, |
8831 |
++ |
8832 |
++ .slot_enable_log_dirty = vmx_slot_enable_log_dirty, |
8833 |
++ .slot_disable_log_dirty = vmx_slot_disable_log_dirty, |
8834 |
++ .flush_log_dirty = vmx_flush_log_dirty, |
8835 |
++ .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, |
8836 |
++ .write_log_dirty = vmx_write_pml_buffer, |
8837 |
++ |
8838 |
++ .pre_block = vmx_pre_block, |
8839 |
++ .post_block = vmx_post_block, |
8840 |
++ |
8841 |
++ .pmu_ops = &intel_pmu_ops, |
8842 |
++ |
8843 |
++ .update_pi_irte = vmx_update_pi_irte, |
8844 |
++ |
8845 |
++#ifdef CONFIG_X86_64 |
8846 |
++ .set_hv_timer = vmx_set_hv_timer, |
8847 |
++ .cancel_hv_timer = vmx_cancel_hv_timer, |
8848 |
++#endif |
8849 |
++ |
8850 |
++ .setup_mce = vmx_setup_mce, |
8851 |
++ |
8852 |
++ .smi_allowed = vmx_smi_allowed, |
8853 |
++ .pre_enter_smm = vmx_pre_enter_smm, |
8854 |
++ .pre_leave_smm = vmx_pre_leave_smm, |
8855 |
++ .enable_smi_window = enable_smi_window, |
8856 |
++ |
8857 |
++ .check_nested_events = NULL, |
8858 |
++ .get_nested_state = NULL, |
8859 |
++ .set_nested_state = NULL, |
8860 |
++ .get_vmcs12_pages = NULL, |
8861 |
++ .nested_enable_evmcs = NULL, |
8862 |
++ .nested_get_evmcs_version = NULL, |
8863 |
++ .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, |
8864 |
++ .apic_init_signal_blocked = vmx_apic_init_signal_blocked, |
8865 |
++}; |
8866 |
++ |
8867 |
++static void vmx_cleanup_l1d_flush(void) |
8868 |
++{ |
8869 |
++ if (vmx_l1d_flush_pages) { |
8870 |
++ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); |
8871 |
++ vmx_l1d_flush_pages = NULL; |
8872 |
++ } |
8873 |
++ /* Restore state so sysfs ignores VMX */ |
8874 |
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
8875 |
++} |
8876 |
++ |
8877 |
++static void vmx_exit(void) |
8878 |
++{ |
8879 |
++#ifdef CONFIG_KEXEC_CORE |
8880 |
++ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); |
8881 |
++ synchronize_rcu(); |
8882 |
++#endif |
8883 |
++ |
8884 |
++ kvm_exit(); |
8885 |
++ |
8886 |
++#if IS_ENABLED(CONFIG_HYPERV) |
8887 |
++ if (static_branch_unlikely(&enable_evmcs)) { |
8888 |
++ int cpu; |
8889 |
++ struct hv_vp_assist_page *vp_ap; |
8890 |
++ /* |
8891 |
++ * Reset everything to support using non-enlightened VMCS |
8892 |
++ * access later (e.g. when we reload the module with |
8893 |
++ * enlightened_vmcs=0) |
8894 |
++ */ |
8895 |
++ for_each_online_cpu(cpu) { |
8896 |
++ vp_ap = hv_get_vp_assist_page(cpu); |
8897 |
++ |
8898 |
++ if (!vp_ap) |
8899 |
++ continue; |
8900 |
++ |
8901 |
++ vp_ap->nested_control.features.directhypercall = 0; |
8902 |
++ vp_ap->current_nested_vmcs = 0; |
8903 |
++ vp_ap->enlighten_vmentry = 0; |
8904 |
++ } |
8905 |
++ |
8906 |
++ static_branch_disable(&enable_evmcs); |
8907 |
++ } |
8908 |
++#endif |
8909 |
++ vmx_cleanup_l1d_flush(); |
8910 |
++} |
8911 |
++module_exit(vmx_exit); |
8912 |
++ |
8913 |
++static int __init vmx_init(void) |
8914 |
++{ |
8915 |
++ int r; |
8916 |
++ |
8917 |
++#if IS_ENABLED(CONFIG_HYPERV) |
8918 |
++ /* |
8919 |
++ * Enlightened VMCS usage should be recommended and the host needs |
8920 |
++ * to support eVMCS v1 or above. We can also disable eVMCS support |
8921 |
++ * with module parameter. |
8922 |
++ */ |
8923 |
++ if (enlightened_vmcs && |
8924 |
++ ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED && |
8925 |
++ (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >= |
8926 |
++ KVM_EVMCS_VERSION) { |
8927 |
++ int cpu; |
8928 |
++ |
8929 |
++ /* Check that we have assist pages on all online CPUs */ |
8930 |
++ for_each_online_cpu(cpu) { |
8931 |
++ if (!hv_get_vp_assist_page(cpu)) { |
8932 |
++ enlightened_vmcs = false; |
8933 |
++ break; |
8934 |
++ } |
8935 |
++ } |
8936 |
++ |
8937 |
++ if (enlightened_vmcs) { |
8938 |
++ pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); |
8939 |
++ static_branch_enable(&enable_evmcs); |
8940 |
++ } |
8941 |
++ |
8942 |
++ if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) |
8943 |
++ vmx_x86_ops.enable_direct_tlbflush |
8944 |
++ = hv_enable_direct_tlbflush; |
8945 |
++ |
8946 |
++ } else { |
8947 |
++ enlightened_vmcs = false; |
8948 |
++ } |
8949 |
++#endif |
8950 |
++ |
8951 |
++ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), |
8952 |
++ __alignof__(struct vcpu_vmx), THIS_MODULE); |
8953 |
++ if (r) |
8954 |
++ return r; |
8955 |
++ |
8956 |
++ /* |
8957 |
++ * Must be called after kvm_init() so enable_ept is properly set |
8958 |
++	 * up. Hand in the mitigation parameter value that was stored by |
8959 |
++	 * the pre-module-init parser. If no parameter was given, it will |
8960 |
++ * contain 'auto' which will be turned into the default 'cond' |
8961 |
++ * mitigation mode. |
8962 |
++ */ |
8963 |
++ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); |
8964 |
++ if (r) { |
8965 |
++ vmx_exit(); |
8966 |
++ return r; |
8967 |
++ } |
8968 |
++ |
8969 |
++#ifdef CONFIG_KEXEC_CORE |
8970 |
++ rcu_assign_pointer(crash_vmclear_loaded_vmcss, |
8971 |
++ crash_vmclear_local_loaded_vmcss); |
8972 |
++#endif |
8973 |
++ vmx_check_vmcs12_offsets(); |
8974 |
++ |
8975 |
++ return 0; |
8976 |
++} |
8977 |
++module_init(vmx_init); |
8978 |
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
8979 |
+index df7ccee4e3fd..21fb707546b6 100644 |
8980 |
+--- a/arch/x86/kvm/x86.c |
8981 |
++++ b/arch/x86/kvm/x86.c |
8982 |
+@@ -53,6 +53,7 @@ |
8983 |
+ #include <linux/pvclock_gtod.h> |
8984 |
+ #include <linux/kvm_irqfd.h> |
8985 |
+ #include <linux/irqbypass.h> |
8986 |
++#include <linux/nospec.h> |
8987 |
+ #include <trace/events/kvm.h> |
8988 |
+ |
8989 |
+ #define CREATE_TRACE_POINTS |
8990 |
+@@ -873,9 +874,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) |
8991 |
+ |
8992 |
+ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) |
8993 |
+ { |
8994 |
++ size_t size = ARRAY_SIZE(vcpu->arch.db); |
8995 |
++ |
8996 |
+ switch (dr) { |
8997 |
+ case 0 ... 3: |
8998 |
+- vcpu->arch.db[dr] = val; |
8999 |
++ vcpu->arch.db[array_index_nospec(dr, size)] = val; |
9000 |
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) |
9001 |
+ vcpu->arch.eff_db[dr] = val; |
9002 |
+ break; |
9003 |
+@@ -912,9 +915,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr); |
9004 |
+ |
9005 |
+ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) |
9006 |
+ { |
9007 |
++ size_t size = ARRAY_SIZE(vcpu->arch.db); |
9008 |
++ |
9009 |
+ switch (dr) { |
9010 |
+ case 0 ... 3: |
9011 |
+- *val = vcpu->arch.db[dr]; |
9012 |
++ *val = vcpu->arch.db[array_index_nospec(dr, size)]; |
9013 |
+ break; |
9014 |
+ case 4: |
9015 |
+ /* fall through */ |
9016 |
+@@ -1989,7 +1994,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
9017 |
+ default: |
9018 |
+ if (msr >= MSR_IA32_MC0_CTL && |
9019 |
+ msr < MSR_IA32_MCx_CTL(bank_num)) { |
9020 |
+- u32 offset = msr - MSR_IA32_MC0_CTL; |
9021 |
++ u32 offset = array_index_nospec( |
9022 |
++ msr - MSR_IA32_MC0_CTL, |
9023 |
++ MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); |
9024 |
++ |
9025 |
+ /* only 0 or all 1s can be written to IA32_MCi_CTL |
9026 |
+ * some Linux kernels though clear bit 10 in bank 4 to |
9027 |
+ * workaround a BIOS/GART TBL issue on AMD K8s, ignore |
9028 |
+@@ -2350,7 +2358,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) |
9029 |
+ default: |
9030 |
+ if (msr >= MSR_IA32_MC0_CTL && |
9031 |
+ msr < MSR_IA32_MCx_CTL(bank_num)) { |
9032 |
+- u32 offset = msr - MSR_IA32_MC0_CTL; |
9033 |
++ u32 offset = array_index_nospec( |
9034 |
++ msr - MSR_IA32_MC0_CTL, |
9035 |
++ MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); |
9036 |
++ |
9037 |
+ data = vcpu->arch.mce_banks[offset]; |
9038 |
+ break; |
9039 |
+ } |
9040 |
+@@ -5874,14 +5885,12 @@ static void kvm_set_mmio_spte_mask(void) |
9041 |
+ /* Set the present bit. */ |
9042 |
+ mask |= 1ull; |
9043 |
+ |
9044 |
+-#ifdef CONFIG_X86_64 |
9045 |
+ /* |
9046 |
+ * If reserved bit is not supported, clear the present bit to disable |
9047 |
+ * mmio page fault. |
9048 |
+ */ |
9049 |
+ if (maxphyaddr == 52) |
9050 |
+ mask &= ~1ull; |
9051 |
+-#endif |
9052 |
+ |
9053 |
+ kvm_mmu_set_mmio_spte_mask(mask); |
9054 |
+ } |
9055 |
+@@ -7487,7 +7496,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
9056 |
+ kvm_mmu_unload(vcpu); |
9057 |
+ vcpu_put(vcpu); |
9058 |
+ |
9059 |
+- kvm_x86_ops->vcpu_free(vcpu); |
9060 |
++ kvm_arch_vcpu_free(vcpu); |
9061 |
+ } |
9062 |
+ |
9063 |
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
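
The arch/x86/kvm/x86.c hunks above all apply the same Spectre-v1 hardening: a guest-influenced index (the debug-register number, or the MCE MSR offset) is clamped with array_index_nospec() before it is used to address a fixed-size array. A minimal userspace sketch of the idea follows; clamp_index() is a hypothetical stand-in for the kernel helper, which reaches the same architectural result with a branchless mask so the bound also holds under speculative execution.

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for array_index_nospec(): returns idx when it is in
 * range and 0 otherwise (the real helper does this without a branch). */
static size_t clamp_index(size_t idx, size_t size)
{
	return idx < size ? idx : 0;
}

int main(void)
{
	unsigned long db[4] = { 10, 11, 12, 13 };	/* stands in for vcpu->arch.db */
	size_t dr = 7;					/* attacker-influenced index */

	/* Clamp before the array is addressed, as the hunks above do. */
	assert(db[clamp_index(dr, 4)] == db[0]);
	assert(db[clamp_index(2, 4)] == 12);
	return 0;
}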
9064 |
+diff --git a/crypto/algapi.c b/crypto/algapi.c |
9065 |
+index eb58b73ca925..9d26d0125cd2 100644 |
9066 |
+--- a/crypto/algapi.c |
9067 |
++++ b/crypto/algapi.c |
9068 |
+@@ -653,11 +653,9 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn); |
9069 |
+ |
9070 |
+ void crypto_drop_spawn(struct crypto_spawn *spawn) |
9071 |
+ { |
9072 |
+- if (!spawn->alg) |
9073 |
+- return; |
9074 |
+- |
9075 |
+ down_write(&crypto_alg_sem); |
9076 |
+- list_del(&spawn->list); |
9077 |
++ if (spawn->alg) |
9078 |
++ list_del(&spawn->list); |
9079 |
+ up_write(&crypto_alg_sem); |
9080 |
+ } |
9081 |
+ EXPORT_SYMBOL_GPL(crypto_drop_spawn); |
9082 |
+@@ -665,22 +663,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn); |
9083 |
+ static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) |
9084 |
+ { |
9085 |
+ struct crypto_alg *alg; |
9086 |
+- struct crypto_alg *alg2; |
9087 |
+ |
9088 |
+ down_read(&crypto_alg_sem); |
9089 |
+ alg = spawn->alg; |
9090 |
+- alg2 = alg; |
9091 |
+- if (alg2) |
9092 |
+- alg2 = crypto_mod_get(alg2); |
9093 |
+- up_read(&crypto_alg_sem); |
9094 |
+- |
9095 |
+- if (!alg2) { |
9096 |
+- if (alg) |
9097 |
+- crypto_shoot_alg(alg); |
9098 |
+- return ERR_PTR(-EAGAIN); |
9099 |
++ if (alg && !crypto_mod_get(alg)) { |
9100 |
++ alg->cra_flags |= CRYPTO_ALG_DYING; |
9101 |
++ alg = NULL; |
9102 |
+ } |
9103 |
++ up_read(&crypto_alg_sem); |
9104 |
+ |
9105 |
+- return alg; |
9106 |
++ return alg ?: ERR_PTR(-EAGAIN); |
9107 |
+ } |
9108 |
+ |
9109 |
+ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, |
9110 |
+diff --git a/crypto/api.c b/crypto/api.c |
9111 |
+index bbc147cb5dec..e108f9d466b0 100644 |
9112 |
+--- a/crypto/api.c |
9113 |
++++ b/crypto/api.c |
9114 |
+@@ -355,13 +355,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) |
9115 |
+ return len; |
9116 |
+ } |
9117 |
+ |
9118 |
+-void crypto_shoot_alg(struct crypto_alg *alg) |
9119 |
++static void crypto_shoot_alg(struct crypto_alg *alg) |
9120 |
+ { |
9121 |
+ down_write(&crypto_alg_sem); |
9122 |
+ alg->cra_flags |= CRYPTO_ALG_DYING; |
9123 |
+ up_write(&crypto_alg_sem); |
9124 |
+ } |
9125 |
+-EXPORT_SYMBOL_GPL(crypto_shoot_alg); |
9126 |
+ |
9127 |
+ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, |
9128 |
+ u32 mask) |
9129 |
+diff --git a/crypto/internal.h b/crypto/internal.h |
9130 |
+index 00e42a3ed814..657578d0ad45 100644 |
9131 |
+--- a/crypto/internal.h |
9132 |
++++ b/crypto/internal.h |
9133 |
+@@ -87,7 +87,6 @@ void crypto_alg_tested(const char *name, int err); |
9134 |
+ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, |
9135 |
+ struct crypto_alg *nalg); |
9136 |
+ void crypto_remove_final(struct list_head *list); |
9137 |
+-void crypto_shoot_alg(struct crypto_alg *alg); |
9138 |
+ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, |
9139 |
+ u32 mask); |
9140 |
+ void *crypto_create_tfm(struct crypto_alg *alg, |
9141 |
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c |
9142 |
+index 1348541da463..85082574c515 100644 |
9143 |
+--- a/crypto/pcrypt.c |
9144 |
++++ b/crypto/pcrypt.c |
9145 |
+@@ -130,7 +130,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err) |
9146 |
+ struct padata_priv *padata = pcrypt_request_padata(preq); |
9147 |
+ |
9148 |
+ padata->info = err; |
9149 |
+- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
9150 |
+ |
9151 |
+ padata_do_serial(padata); |
9152 |
+ } |
9153 |
+diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c |
9154 |
+index 615da961c4d8..02f61409770e 100644 |
9155 |
+--- a/drivers/crypto/picoxcell_crypto.c |
9156 |
++++ b/drivers/crypto/picoxcell_crypto.c |
9157 |
+@@ -1610,6 +1610,11 @@ static bool spacc_is_compatible(struct platform_device *pdev, |
9158 |
+ return false; |
9159 |
+ } |
9160 |
+ |
9161 |
++static void spacc_tasklet_kill(void *data) |
9162 |
++{ |
9163 |
++ tasklet_kill(data); |
9164 |
++} |
9165 |
++ |
9166 |
+ static int spacc_probe(struct platform_device *pdev) |
9167 |
+ { |
9168 |
+ int i, err, ret = -EINVAL; |
9169 |
+@@ -1652,6 +1657,14 @@ static int spacc_probe(struct platform_device *pdev) |
9170 |
+ return -ENXIO; |
9171 |
+ } |
9172 |
+ |
9173 |
++ tasklet_init(&engine->complete, spacc_spacc_complete, |
9174 |
++ (unsigned long)engine); |
9175 |
++ |
9176 |
++ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill, |
9177 |
++ &engine->complete); |
9178 |
++ if (ret) |
9179 |
++ return ret; |
9180 |
++ |
9181 |
+ if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, |
9182 |
+ engine->name, engine)) { |
9183 |
+ dev_err(engine->dev, "failed to request IRQ\n"); |
9184 |
+@@ -1714,8 +1727,6 @@ static int spacc_probe(struct platform_device *pdev) |
9185 |
+ INIT_LIST_HEAD(&engine->completed); |
9186 |
+ INIT_LIST_HEAD(&engine->in_progress); |
9187 |
+ engine->in_flight = 0; |
9188 |
+- tasklet_init(&engine->complete, spacc_spacc_complete, |
9189 |
+- (unsigned long)engine); |
9190 |
+ |
9191 |
+ platform_set_drvdata(pdev, engine); |
9192 |
+ |
9193 |
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c |
9194 |
+index 9f6e234e7029..eae9370225df 100644 |
9195 |
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c |
9196 |
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c |
9197 |
+@@ -63,7 +63,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) |
9198 |
+ struct videomode vm; |
9199 |
+ unsigned long prate; |
9200 |
+ unsigned int cfg; |
9201 |
+- int div; |
9202 |
++ int div, ret; |
9203 |
++ |
9204 |
++ ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk); |
9205 |
++ if (ret) |
9206 |
++ return; |
9207 |
+ |
9208 |
+ vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay; |
9209 |
+ vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end; |
9210 |
+@@ -119,6 +123,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) |
9211 |
+ ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO | |
9212 |
+ ATMEL_HLCDC_GUARDTIME_MASK, |
9213 |
+ cfg); |
9214 |
++ |
9215 |
++ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); |
9216 |
+ } |
9217 |
+ |
9218 |
+ static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc, |
9219 |
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
9220 |
+index c752c55f0bb2..c4d4cd38a58f 100644 |
9221 |
+--- a/drivers/md/dm.c |
9222 |
++++ b/drivers/md/dm.c |
9223 |
+@@ -2293,7 +2293,6 @@ static void dm_init_md_queue(struct mapped_device *md) |
9224 |
+ * - must do so here (in alloc_dev callchain) before queue is used |
9225 |
+ */ |
9226 |
+ md->queue->queuedata = md; |
9227 |
+- md->queue->backing_dev_info.congested_data = md; |
9228 |
+ } |
9229 |
+ |
9230 |
+ static void dm_init_old_md_queue(struct mapped_device *md) |
9231 |
+@@ -2304,6 +2303,7 @@ static void dm_init_old_md_queue(struct mapped_device *md) |
9232 |
+ /* |
9233 |
+ * Initialize aspects of queue that aren't relevant for blk-mq |
9234 |
+ */ |
9235 |
++ md->queue->backing_dev_info.congested_data = md; |
9236 |
+ md->queue->backing_dev_info.congested_fn = dm_any_congested; |
9237 |
+ blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); |
9238 |
+ } |
9239 |
+@@ -2386,6 +2386,12 @@ static struct mapped_device *alloc_dev(int minor) |
9240 |
+ goto bad; |
9241 |
+ |
9242 |
+ dm_init_md_queue(md); |
9243 |
++ /* |
9244 |
++ * default to bio-based required ->make_request_fn until DM |
9245 |
++ * table is loaded and md->type established. If request-based |
9246 |
++ * table is loaded: blk-mq will override accordingly. |
9247 |
++ */ |
9248 |
++ blk_queue_make_request(md->queue, dm_make_request); |
9249 |
+ |
9250 |
+ md->disk = alloc_disk(1); |
9251 |
+ if (!md->disk) |
9252 |
+@@ -2849,7 +2855,6 @@ int dm_setup_md_queue(struct mapped_device *md) |
9253 |
+ break; |
9254 |
+ case DM_TYPE_BIO_BASED: |
9255 |
+ dm_init_old_md_queue(md); |
9256 |
+- blk_queue_make_request(md->queue, dm_make_request); |
9257 |
+ /* |
9258 |
+ * DM handles splitting bios as needed. Free the bio_split bioset |
9259 |
+ * since it won't be used (saves 1 process per bio-based DM device). |
9260 |
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c |
9261 |
+index 306d2e4502c4..22729fd92a1b 100644 |
9262 |
+--- a/drivers/md/persistent-data/dm-space-map-common.c |
9263 |
++++ b/drivers/md/persistent-data/dm-space-map-common.c |
9264 |
+@@ -382,6 +382,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, |
9265 |
+ return -ENOSPC; |
9266 |
+ } |
9267 |
+ |
9268 |
++int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, |
9269 |
++ dm_block_t begin, dm_block_t end, dm_block_t *b) |
9270 |
++{ |
9271 |
++ int r; |
9272 |
++ uint32_t count; |
9273 |
++ |
9274 |
++ do { |
9275 |
++ r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b); |
9276 |
++ if (r) |
9277 |
++ break; |
9278 |
++ |
9279 |
++ /* double check this block wasn't used in the old transaction */ |
9280 |
++ if (*b >= old_ll->nr_blocks) |
9281 |
++ count = 0; |
9282 |
++ else { |
9283 |
++ r = sm_ll_lookup(old_ll, *b, &count); |
9284 |
++ if (r) |
9285 |
++ break; |
9286 |
++ |
9287 |
++ if (count) |
9288 |
++ begin = *b + 1; |
9289 |
++ } |
9290 |
++ } while (count); |
9291 |
++ |
9292 |
++ return r; |
9293 |
++} |
9294 |
++ |
9295 |
+ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, |
9296 |
+ int (*mutator)(void *context, uint32_t old, uint32_t *new), |
9297 |
+ void *context, enum allocation_event *ev) |
9298 |
+diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h |
9299 |
+index b3078d5eda0c..8de63ce39bdd 100644 |
9300 |
+--- a/drivers/md/persistent-data/dm-space-map-common.h |
9301 |
++++ b/drivers/md/persistent-data/dm-space-map-common.h |
9302 |
+@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result); |
9303 |
+ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result); |
9304 |
+ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, |
9305 |
+ dm_block_t end, dm_block_t *result); |
9306 |
++int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, |
9307 |
++ dm_block_t begin, dm_block_t end, dm_block_t *result); |
9308 |
+ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev); |
9309 |
+ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev); |
9310 |
+ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev); |
9311 |
+diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c |
9312 |
+index 32adf6b4a9c7..bf4c5e2ccb6f 100644 |
9313 |
+--- a/drivers/md/persistent-data/dm-space-map-disk.c |
9314 |
++++ b/drivers/md/persistent-data/dm-space-map-disk.c |
9315 |
+@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) |
9316 |
+ enum allocation_event ev; |
9317 |
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm); |
9318 |
+ |
9319 |
+- /* FIXME: we should loop round a couple of times */ |
9320 |
+- r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b); |
9321 |
++ /* |
9322 |
++ * Any block we allocate has to be free in both the old and current ll. |
9323 |
++ */ |
9324 |
++ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); |
9325 |
+ if (r) |
9326 |
+ return r; |
9327 |
+ |
9328 |
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c |
9329 |
+index 1d29771af380..967d8f2a731f 100644 |
9330 |
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c |
9331 |
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c |
9332 |
+@@ -447,7 +447,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) |
9333 |
+ enum allocation_event ev; |
9334 |
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); |
9335 |
+ |
9336 |
+- r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b); |
9337 |
++ /* |
9338 |
++ * Any block we allocate has to be free in both the old and current ll. |
9339 |
++ */ |
9340 |
++ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); |
9341 |
+ if (r) |
9342 |
+ return r; |
9343 |
+ |
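
The three persistent-data hunks above (common, disk and metadata space maps) change block allocation so a block is only handed out when it is free in both the current transaction and the previously committed one, which keeps the committed metadata recoverable if the machine crashes before the next commit. Below is a toy userspace model of that search, with a hypothetical find_common_free() standing in for sm_ll_find_common_free_block().

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* A block may only be returned if it is unused in the new bitmap and, when it
 * exists there at all, also unused in the old (committed) bitmap. */
static bool find_common_free(const bool *old_used, size_t old_nr,
			     const bool *new_used, size_t new_nr,
			     size_t begin, size_t *result)
{
	for (size_t b = begin; b < new_nr; b++) {
		if (new_used[b])
			continue;		/* busy in the current transaction */
		if (b < old_nr && old_used[b])
			continue;		/* still referenced by the committed tree */
		*result = b;
		return true;
	}
	return false;
}

int main(void)
{
	bool old_used[] = { true, true,  false, true,  false };
	bool new_used[] = { true, false, false, false, false, false };
	size_t b;

	assert(find_common_free(old_used, 5, new_used, 6, 0, &b) && b == 2);
	/* block 1 is skipped: free now, but still used by the committed tree */
	assert(find_common_free(old_used, 5, new_used, 6, 3, &b) && b == 4);
	return 0;
}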
9344 |
+diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c |
9345 |
+index cda4ce612dcf..782391507e3a 100644 |
9346 |
+--- a/drivers/media/rc/iguanair.c |
9347 |
++++ b/drivers/media/rc/iguanair.c |
9348 |
+@@ -430,7 +430,7 @@ static int iguanair_probe(struct usb_interface *intf, |
9349 |
+ int ret, pipein, pipeout; |
9350 |
+ struct usb_host_interface *idesc; |
9351 |
+ |
9352 |
+- idesc = intf->altsetting; |
9353 |
++ idesc = intf->cur_altsetting; |
9354 |
+ if (idesc->desc.bNumEndpoints < 2) |
9355 |
+ return -ENODEV; |
9356 |
+ |
9357 |
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c |
9358 |
+index ebd1b882556d..9cd0268b2767 100644 |
9359 |
+--- a/drivers/media/usb/uvc/uvc_driver.c |
9360 |
++++ b/drivers/media/usb/uvc/uvc_driver.c |
9361 |
+@@ -1411,6 +1411,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain, |
9362 |
+ break; |
9363 |
+ if (forward == prev) |
9364 |
+ continue; |
9365 |
++ if (forward->chain.next || forward->chain.prev) { |
9366 |
++ uvc_trace(UVC_TRACE_DESCR, "Found reference to " |
9367 |
++ "entity %d already in chain.\n", forward->id); |
9368 |
++ return -EINVAL; |
9369 |
++ } |
9370 |
+ |
9371 |
+ switch (UVC_ENTITY_TYPE(forward)) { |
9372 |
+ case UVC_VC_EXTENSION_UNIT: |
9373 |
+@@ -1492,6 +1497,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain, |
9374 |
+ return -1; |
9375 |
+ } |
9376 |
+ |
9377 |
++ if (term->chain.next || term->chain.prev) { |
9378 |
++ uvc_trace(UVC_TRACE_DESCR, "Found reference to " |
9379 |
++ "entity %d already in chain.\n", |
9380 |
++ term->id); |
9381 |
++ return -EINVAL; |
9382 |
++ } |
9383 |
++ |
9384 |
+ if (uvc_trace_param & UVC_TRACE_PROBE) |
9385 |
+ printk(" %d", term->id); |
9386 |
+ |
9387 |
+diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c |
9388 |
+index a9ad024ec6b0..16c6f07e045d 100644 |
9389 |
+--- a/drivers/mfd/da9062-core.c |
9390 |
++++ b/drivers/mfd/da9062-core.c |
9391 |
+@@ -142,7 +142,7 @@ static const struct mfd_cell da9062_devs[] = { |
9392 |
+ .name = "da9062-watchdog", |
9393 |
+ .num_resources = ARRAY_SIZE(da9062_wdt_resources), |
9394 |
+ .resources = da9062_wdt_resources, |
9395 |
+- .of_compatible = "dlg,da9062-wdt", |
9396 |
++ .of_compatible = "dlg,da9062-watchdog", |
9397 |
+ }, |
9398 |
+ { |
9399 |
+ .name = "da9062-thermal", |
9400 |
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c |
9401 |
+index 704e189ca162..95d0f2df0ad4 100644 |
9402 |
+--- a/drivers/mfd/dln2.c |
9403 |
++++ b/drivers/mfd/dln2.c |
9404 |
+@@ -729,6 +729,8 @@ static int dln2_probe(struct usb_interface *interface, |
9405 |
+ const struct usb_device_id *usb_id) |
9406 |
+ { |
9407 |
+ struct usb_host_interface *hostif = interface->cur_altsetting; |
9408 |
++ struct usb_endpoint_descriptor *epin; |
9409 |
++ struct usb_endpoint_descriptor *epout; |
9410 |
+ struct device *dev = &interface->dev; |
9411 |
+ struct dln2_dev *dln2; |
9412 |
+ int ret; |
9413 |
+@@ -738,12 +740,19 @@ static int dln2_probe(struct usb_interface *interface, |
9414 |
+ hostif->desc.bNumEndpoints < 2) |
9415 |
+ return -ENODEV; |
9416 |
+ |
9417 |
++ epin = &hostif->endpoint[0].desc; |
9418 |
++ epout = &hostif->endpoint[1].desc; |
9419 |
++ if (!usb_endpoint_is_bulk_out(epout)) |
9420 |
++ return -ENODEV; |
9421 |
++ if (!usb_endpoint_is_bulk_in(epin)) |
9422 |
++ return -ENODEV; |
9423 |
++ |
9424 |
+ dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL); |
9425 |
+ if (!dln2) |
9426 |
+ return -ENOMEM; |
9427 |
+ |
9428 |
+- dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress; |
9429 |
+- dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress; |
9430 |
++ dln2->ep_out = epout->bEndpointAddress; |
9431 |
++ dln2->ep_in = epin->bEndpointAddress; |
9432 |
+ dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface)); |
9433 |
+ dln2->interface = interface; |
9434 |
+ usb_set_intfdata(interface, dln2); |
9435 |
+diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c |
9436 |
+index 666857192dbe..b6db5e5cc3a6 100644 |
9437 |
+--- a/drivers/mfd/rn5t618.c |
9438 |
++++ b/drivers/mfd/rn5t618.c |
9439 |
+@@ -28,6 +28,7 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg) |
9440 |
+ case RN5T618_WATCHDOGCNT: |
9441 |
+ case RN5T618_DCIRQ: |
9442 |
+ case RN5T618_ILIMDATAH ... RN5T618_AIN0DATAL: |
9443 |
++ case RN5T618_ADCCNT3: |
9444 |
+ case RN5T618_IR_ADC1 ... RN5T618_IR_ADC3: |
9445 |
+ case RN5T618_IR_GPR: |
9446 |
+ case RN5T618_IR_GPF: |
9447 |
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c |
9448 |
+index 40a369c7005a..b52489a67097 100644 |
9449 |
+--- a/drivers/mmc/host/mmc_spi.c |
9450 |
++++ b/drivers/mmc/host/mmc_spi.c |
9451 |
+@@ -1153,17 +1153,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host) |
9452 |
+ * SPI protocol. Another is that when chipselect is released while |
9453 |
+ * the card returns BUSY status, the clock must issue several cycles |
9454 |
+ * with chipselect high before the card will stop driving its output. |
9455 |
++ * |
9456 |
++ * SPI_CS_HIGH means "asserted" here. In some cases like when using |
9457 |
++ * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically |
9458 |
++ * inverted by gpiolib, so if we want to ascertain to drive it high |
9459 |
++ * we should toggle the default with an XOR as we do here. |
9460 |
+ */ |
9461 |
+- host->spi->mode |= SPI_CS_HIGH; |
9462 |
++ host->spi->mode ^= SPI_CS_HIGH; |
9463 |
+ if (spi_setup(host->spi) != 0) { |
9464 |
+ /* Just warn; most cards work without it. */ |
9465 |
+ dev_warn(&host->spi->dev, |
9466 |
+ "can't change chip-select polarity\n"); |
9467 |
+- host->spi->mode &= ~SPI_CS_HIGH; |
9468 |
++ host->spi->mode ^= SPI_CS_HIGH; |
9469 |
+ } else { |
9470 |
+ mmc_spi_readbytes(host, 18); |
9471 |
+ |
9472 |
+- host->spi->mode &= ~SPI_CS_HIGH; |
9473 |
++ host->spi->mode ^= SPI_CS_HIGH; |
9474 |
+ if (spi_setup(host->spi) != 0) { |
9475 |
+ /* Wot, we can't get the same setup we had before? */ |
9476 |
+ dev_err(&host->spi->dev, |
9477 |
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c |
9478 |
+index 82d23bd3a742..0615522933dc 100644 |
9479 |
+--- a/drivers/net/bonding/bond_alb.c |
9480 |
++++ b/drivers/net/bonding/bond_alb.c |
9481 |
+@@ -1371,26 +1371,31 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) |
9482 |
+ bool do_tx_balance = true; |
9483 |
+ u32 hash_index = 0; |
9484 |
+ const u8 *hash_start = NULL; |
9485 |
+- struct ipv6hdr *ip6hdr; |
9486 |
+ |
9487 |
+ skb_reset_mac_header(skb); |
9488 |
+ eth_data = eth_hdr(skb); |
9489 |
+ |
9490 |
+ switch (ntohs(skb->protocol)) { |
9491 |
+ case ETH_P_IP: { |
9492 |
+- const struct iphdr *iph = ip_hdr(skb); |
9493 |
++ const struct iphdr *iph; |
9494 |
+ |
9495 |
+ if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) || |
9496 |
+- (iph->daddr == ip_bcast) || |
9497 |
+- (iph->protocol == IPPROTO_IGMP)) { |
9498 |
++ (!pskb_network_may_pull(skb, sizeof(*iph)))) { |
9499 |
++ do_tx_balance = false; |
9500 |
++ break; |
9501 |
++ } |
9502 |
++ iph = ip_hdr(skb); |
9503 |
++ if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { |
9504 |
+ do_tx_balance = false; |
9505 |
+ break; |
9506 |
+ } |
9507 |
+ hash_start = (char *)&(iph->daddr); |
9508 |
+ hash_size = sizeof(iph->daddr); |
9509 |
+- } |
9510 |
+ break; |
9511 |
+- case ETH_P_IPV6: |
9512 |
++ } |
9513 |
++ case ETH_P_IPV6: { |
9514 |
++ const struct ipv6hdr *ip6hdr; |
9515 |
++ |
9516 |
+ /* IPv6 doesn't really use broadcast mac address, but leave |
9517 |
+ * that here just in case. |
9518 |
+ */ |
9519 |
+@@ -1407,7 +1412,11 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) |
9520 |
+ break; |
9521 |
+ } |
9522 |
+ |
9523 |
+- /* Additianally, DAD probes should not be tx-balanced as that |
9524 |
++ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) { |
9525 |
++ do_tx_balance = false; |
9526 |
++ break; |
9527 |
++ } |
9528 |
++ /* Additionally, DAD probes should not be tx-balanced as that |
9529 |
+ * will lead to false positives for duplicate addresses and |
9530 |
+ * prevent address configuration from working. |
9531 |
+ */ |
9532 |
+@@ -1417,17 +1426,26 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) |
9533 |
+ break; |
9534 |
+ } |
9535 |
+ |
9536 |
+- hash_start = (char *)&(ipv6_hdr(skb)->daddr); |
9537 |
+- hash_size = sizeof(ipv6_hdr(skb)->daddr); |
9538 |
++ hash_start = (char *)&ip6hdr->daddr; |
9539 |
++ hash_size = sizeof(ip6hdr->daddr); |
9540 |
+ break; |
9541 |
+- case ETH_P_IPX: |
9542 |
+- if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { |
9543 |
++ } |
9544 |
++ case ETH_P_IPX: { |
9545 |
++ const struct ipxhdr *ipxhdr; |
9546 |
++ |
9547 |
++ if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) { |
9548 |
++ do_tx_balance = false; |
9549 |
++ break; |
9550 |
++ } |
9551 |
++ ipxhdr = (struct ipxhdr *)skb_network_header(skb); |
9552 |
++ |
9553 |
++ if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { |
9554 |
+ /* something is wrong with this packet */ |
9555 |
+ do_tx_balance = false; |
9556 |
+ break; |
9557 |
+ } |
9558 |
+ |
9559 |
+- if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) { |
9560 |
++ if (ipxhdr->ipx_type != IPX_TYPE_NCP) { |
9561 |
+ /* The only protocol worth balancing in |
9562 |
+ * this family since it has an "ARP" like |
9563 |
+ * mechanism |
9564 |
+@@ -1436,9 +1454,11 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) |
9565 |
+ break; |
9566 |
+ } |
9567 |
+ |
9568 |
++ eth_data = eth_hdr(skb); |
9569 |
+ hash_start = (char *)eth_data->h_dest; |
9570 |
+ hash_size = ETH_ALEN; |
9571 |
+ break; |
9572 |
++ } |
9573 |
+ case ETH_P_ARP: |
9574 |
+ do_tx_balance = false; |
9575 |
+ if (bond_info->rlb_enabled) |
9576 |
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c |
9577 |
+index 9530ee12726f..3cb99ce7325b 100644 |
9578 |
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c |
9579 |
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c |
9580 |
+@@ -1997,6 +1997,9 @@ static int bcm_sysport_resume(struct device *d) |
9581 |
+ |
9582 |
+ umac_reset(priv); |
9583 |
+ |
9584 |
++ /* Disable the UniMAC RX/TX */ |
9585 |
++ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); |
9586 |
++ |
9587 |
+ /* We may have been suspended and never received a WOL event that |
9588 |
+ * would turn off MPD detection, take care of that now |
9589 |
+ */ |
9590 |
+diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c |
9591 |
+index afd8e78e024e..ff5cae052c40 100644 |
9592 |
+--- a/drivers/net/ethernet/dec/tulip/dmfe.c |
9593 |
++++ b/drivers/net/ethernet/dec/tulip/dmfe.c |
9594 |
+@@ -2228,15 +2228,16 @@ static int __init dmfe_init_module(void) |
9595 |
+ if (cr6set) |
9596 |
+ dmfe_cr6_user_set = cr6set; |
9597 |
+ |
9598 |
+- switch(mode) { |
9599 |
+- case DMFE_10MHF: |
9600 |
++ switch (mode) { |
9601 |
++ case DMFE_10MHF: |
9602 |
+ case DMFE_100MHF: |
9603 |
+ case DMFE_10MFD: |
9604 |
+ case DMFE_100MFD: |
9605 |
+ case DMFE_1M_HPNA: |
9606 |
+ dmfe_media_mode = mode; |
9607 |
+ break; |
9608 |
+- default:dmfe_media_mode = DMFE_AUTO; |
9609 |
++ default: |
9610 |
++ dmfe_media_mode = DMFE_AUTO; |
9611 |
+ break; |
9612 |
+ } |
9613 |
+ |
9614 |
+diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c |
9615 |
+index 447d09272ab7..7e0e36a3270c 100644 |
9616 |
+--- a/drivers/net/ethernet/dec/tulip/uli526x.c |
9617 |
++++ b/drivers/net/ethernet/dec/tulip/uli526x.c |
9618 |
+@@ -1813,8 +1813,8 @@ static int __init uli526x_init_module(void) |
9619 |
+ if (cr6set) |
9620 |
+ uli526x_cr6_user_set = cr6set; |
9621 |
+ |
9622 |
+- switch (mode) { |
9623 |
+- case ULI526X_10MHF: |
9624 |
++ switch (mode) { |
9625 |
++ case ULI526X_10MHF: |
9626 |
+ case ULI526X_100MHF: |
9627 |
+ case ULI526X_10MFD: |
9628 |
+ case ULI526X_100MFD: |
9629 |
+diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c |
9630 |
+index 37fb6dfc1087..0be9c74238fd 100644 |
9631 |
+--- a/drivers/net/ethernet/smsc/smc911x.c |
9632 |
++++ b/drivers/net/ethernet/smsc/smc911x.c |
9633 |
+@@ -945,7 +945,7 @@ static void smc911x_phy_configure(struct work_struct *work) |
9634 |
+ if (lp->ctl_rspeed != 100) |
9635 |
+ my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); |
9636 |
+ |
9637 |
+- if (!lp->ctl_rfduplx) |
9638 |
++ if (!lp->ctl_rfduplx) |
9639 |
+ my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); |
9640 |
+ |
9641 |
+ /* Update our Auto-Neg Advertisement Register */ |
9642 |
+diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c |
9643 |
+index 9c889e0303dd..cef40de1bd05 100644 |
9644 |
+--- a/drivers/net/ppp/ppp_async.c |
9645 |
++++ b/drivers/net/ppp/ppp_async.c |
9646 |
+@@ -878,15 +878,15 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf, |
9647 |
+ skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2); |
9648 |
+ if (!skb) |
9649 |
+ goto nomem; |
9650 |
+- ap->rpkt = skb; |
9651 |
+- } |
9652 |
+- if (skb->len == 0) { |
9653 |
+- /* Try to get the payload 4-byte aligned. |
9654 |
+- * This should match the |
9655 |
+- * PPP_ALLSTATIONS/PPP_UI/compressed tests in |
9656 |
+- * process_input_packet, but we do not have |
9657 |
+- * enough chars here to test buf[1] and buf[2]. |
9658 |
+- */ |
9659 |
++ ap->rpkt = skb; |
9660 |
++ } |
9661 |
++ if (skb->len == 0) { |
9662 |
++ /* Try to get the payload 4-byte aligned. |
9663 |
++ * This should match the |
9664 |
++ * PPP_ALLSTATIONS/PPP_UI/compressed tests in |
9665 |
++ * process_input_packet, but we do not have |
9666 |
++ * enough chars here to test buf[1] and buf[2]. |
9667 |
++ */ |
9668 |
+ if (buf[0] != PPP_ALLSTATIONS) |
9669 |
+ skb_reserve(skb, 2 + (buf[0] & 1)); |
9670 |
+ } |
9671 |
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c |
9672 |
+index b9bfa592bcab..2cb3f12dccbd 100644 |
9673 |
+--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c |
9674 |
++++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c |
9675 |
+@@ -426,6 +426,7 @@ fail: |
9676 |
+ usb_free_urb(req->urb); |
9677 |
+ list_del(q->next); |
9678 |
+ } |
9679 |
++ kfree(reqs); |
9680 |
+ return NULL; |
9681 |
+ |
9682 |
+ } |
9683 |
+diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c |
9684 |
+index 0824697c3dca..7d55de21b190 100644 |
9685 |
+--- a/drivers/net/wireless/libertas/cfg.c |
9686 |
++++ b/drivers/net/wireless/libertas/cfg.c |
9687 |
+@@ -1853,6 +1853,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, |
9688 |
+ rates_max = rates_eid[1]; |
9689 |
+ if (rates_max > MAX_RATES) { |
9690 |
+ lbs_deb_join("invalid rates"); |
9691 |
++ rcu_read_unlock(); |
9692 |
++ ret = -EINVAL; |
9693 |
+ goto out; |
9694 |
+ } |
9695 |
+ rates = cmd.bss.rates; |
9696 |
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c |
9697 |
+index 39b78dc1bd92..e7c8972431d3 100644 |
9698 |
+--- a/drivers/net/wireless/mwifiex/scan.c |
9699 |
++++ b/drivers/net/wireless/mwifiex/scan.c |
9700 |
+@@ -2568,6 +2568,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, |
9701 |
+ vs_param_set->header.len = |
9702 |
+ cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) |
9703 |
+ & 0x00FF) + 2); |
9704 |
++ if (le16_to_cpu(vs_param_set->header.len) > |
9705 |
++ MWIFIEX_MAX_VSIE_LEN) { |
9706 |
++ mwifiex_dbg(priv->adapter, ERROR, |
9707 |
++ "Invalid param length!\n"); |
9708 |
++ break; |
9709 |
++ } |
9710 |
++ |
9711 |
+ memcpy(vs_param_set->ie, priv->vs_ie[id].ie, |
9712 |
+ le16_to_cpu(vs_param_set->header.len)); |
9713 |
+ *buffer += le16_to_cpu(vs_param_set->header.len) + |
9714 |
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c |
9715 |
+index a13c6f1712b3..a1c376c5dab9 100644 |
9716 |
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c |
9717 |
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c |
9718 |
+@@ -232,6 +232,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, |
9719 |
+ |
9720 |
+ if (country_ie_len > |
9721 |
+ (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { |
9722 |
++ rcu_read_unlock(); |
9723 |
+ mwifiex_dbg(priv->adapter, ERROR, |
9724 |
+ "11D: country_ie_len overflow!, deauth AP\n"); |
9725 |
+ return -EINVAL; |
9726 |
+diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c |
9727 |
+index 7015dfab49cf..3a2ecb6cf1c3 100644 |
9728 |
+--- a/drivers/net/wireless/mwifiex/wmm.c |
9729 |
++++ b/drivers/net/wireless/mwifiex/wmm.c |
9730 |
+@@ -978,6 +978,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, |
9731 |
+ "WMM Parameter Set Count: %d\n", |
9732 |
+ wmm_param_ie->qos_info_bitmap & mask); |
9733 |
+ |
9734 |
++ if (wmm_param_ie->vend_hdr.len + 2 > |
9735 |
++ sizeof(struct ieee_types_wmm_parameter)) |
9736 |
++ break; |
9737 |
++ |
9738 |
+ memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. |
9739 |
+ wmm_ie, wmm_param_ie, |
9740 |
+ wmm_param_ie->vend_hdr.len + 2); |
9741 |
+diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c |
9742 |
+index 12e819ddf17a..3afc53ff7369 100644 |
9743 |
+--- a/drivers/nfc/pn544/pn544.c |
9744 |
++++ b/drivers/nfc/pn544/pn544.c |
9745 |
+@@ -704,7 +704,7 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, |
9746 |
+ target->nfcid1_len != 10) |
9747 |
+ return -EOPNOTSUPP; |
9748 |
+ |
9749 |
+- return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, |
9750 |
++ return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, |
9751 |
+ PN544_RF_READER_CMD_ACTIVATE_NEXT, |
9752 |
+ target->nfcid1, target->nfcid1_len, NULL); |
9753 |
+ } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK | |
9754 |
+diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig |
9755 |
+index e2a48415d969..4ba54ffb3a4b 100644 |
9756 |
+--- a/drivers/of/Kconfig |
9757 |
++++ b/drivers/of/Kconfig |
9758 |
+@@ -112,4 +112,8 @@ config OF_OVERLAY |
9759 |
+ While this option is selected automatically when needed, you can |
9760 |
+ enable it manually to improve device tree unit test coverage. |
9761 |
+ |
9762 |
++config OF_DMA_DEFAULT_COHERENT |
9763 |
++ # arches should select this if DMA is coherent by default for OF devices |
9764 |
++ bool |
9765 |
++ |
9766 |
+ endif # OF |
9767 |
+diff --git a/drivers/of/address.c b/drivers/of/address.c |
9768 |
+index 4fe5fe21cd49..b3bf8762f4e8 100644 |
9769 |
+--- a/drivers/of/address.c |
9770 |
++++ b/drivers/of/address.c |
9771 |
+@@ -1009,12 +1009,16 @@ EXPORT_SYMBOL_GPL(of_dma_get_range); |
9772 |
+ * @np: device node |
9773 |
+ * |
9774 |
+ * It returns true if "dma-coherent" property was found |
9775 |
+- * for this device in DT. |
9776 |
++ * for this device in the DT, or if DMA is coherent by |
9777 |
++ * default for OF devices on the current platform. |
9778 |
+ */ |
9779 |
+ bool of_dma_is_coherent(struct device_node *np) |
9780 |
+ { |
9781 |
+ struct device_node *node = of_node_get(np); |
9782 |
+ |
9783 |
++ if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT)) |
9784 |
++ return true; |
9785 |
++ |
9786 |
+ while (node) { |
9787 |
+ if (of_property_read_bool(node, "dma-coherent")) { |
9788 |
+ of_node_put(node); |
9789 |
+diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c |
9790 |
+index 6153853ca9c3..988e7e7350c7 100644 |
9791 |
+--- a/drivers/pci/host/pci-keystone-dw.c |
9792 |
++++ b/drivers/pci/host/pci-keystone-dw.c |
9793 |
+@@ -450,7 +450,7 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) |
9794 |
+ /* Disable Link training */ |
9795 |
+ val = readl(ks_pcie->va_app_base + CMD_STATUS); |
9796 |
+ val &= ~LTSSM_EN_VAL; |
9797 |
+- writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS); |
9798 |
++ writel(val, ks_pcie->va_app_base + CMD_STATUS); |
9799 |
+ |
9800 |
+ /* Initiate Link Training */ |
9801 |
+ val = readl(ks_pcie->va_app_base + CMD_STATUS); |
9802 |
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c |
9803 |
+index bbd35dc1a0c4..4d0ef5e9e9d8 100644 |
9804 |
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c |
9805 |
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c |
9806 |
+@@ -2324,7 +2324,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { |
9807 |
+ FN_ATAG0_A, 0, FN_REMOCON_B, 0, |
9808 |
+ /* IP0_11_8 [4] */ |
9809 |
+ FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS, |
9810 |
+- FN_ATADIR0_A, 0, FN_SDSELF_B, 0, |
9811 |
++ FN_ATADIR0_A, 0, FN_SDSELF_A, 0, |
9812 |
+ FN_PWM4_B, 0, 0, 0, |
9813 |
+ 0, 0, 0, 0, |
9814 |
+ /* IP0_7_5 [3] */ |
9815 |
+@@ -2366,7 +2366,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { |
9816 |
+ FN_TS_SDAT0_A, 0, 0, 0, |
9817 |
+ 0, 0, 0, 0, |
9818 |
+ /* IP1_10_8 [3] */ |
9819 |
+- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24, |
9820 |
++ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24, |
9821 |
+ FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A, |
9822 |
+ /* IP1_7_5 [3] */ |
9823 |
+ FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A, |
9824 |
+diff --git a/drivers/power/ltc2941-battery-gauge.c b/drivers/power/ltc2941-battery-gauge.c |
9825 |
+index 4adf2ba021ce..043de9d039d5 100644 |
9826 |
+--- a/drivers/power/ltc2941-battery-gauge.c |
9827 |
++++ b/drivers/power/ltc2941-battery-gauge.c |
9828 |
+@@ -364,7 +364,7 @@ static int ltc294x_i2c_remove(struct i2c_client *client) |
9829 |
+ { |
9830 |
+ struct ltc294x_info *info = i2c_get_clientdata(client); |
9831 |
+ |
9832 |
+- cancel_delayed_work(&info->work); |
9833 |
++ cancel_delayed_work_sync(&info->work); |
9834 |
+ power_supply_unregister(info->supply); |
9835 |
+ return 0; |
9836 |
+ } |
9837 |
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c |
9838 |
+index b1b4746a0eab..dbec596494eb 100644 |
9839 |
+--- a/drivers/rtc/rtc-hym8563.c |
9840 |
++++ b/drivers/rtc/rtc-hym8563.c |
9841 |
+@@ -105,7 +105,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm) |
9842 |
+ |
9843 |
+ if (!hym8563->valid) { |
9844 |
+ dev_warn(&client->dev, "no valid clock/calendar values available\n"); |
9845 |
+- return -EPERM; |
9846 |
++ return -EINVAL; |
9847 |
+ } |
9848 |
+ |
9849 |
+ ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf); |
9850 |
+diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c |
9851 |
+index ddbdaade654d..11db61d3b966 100644 |
9852 |
+--- a/drivers/scsi/csiostor/csio_scsi.c |
9853 |
++++ b/drivers/scsi/csiostor/csio_scsi.c |
9854 |
+@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev, |
9855 |
+ return -EINVAL; |
9856 |
+ |
9857 |
+ /* Delete NPIV lnodes */ |
9858 |
+- csio_lnodes_exit(hw, 1); |
9859 |
++ csio_lnodes_exit(hw, 1); |
9860 |
+ |
9861 |
+ /* Block upper IOs */ |
9862 |
+ csio_lnodes_block_request(hw); |
9863 |
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c |
9864 |
+index 87059a6786f4..03d466c07513 100644 |
9865 |
+--- a/drivers/scsi/qla2xxx/qla_mbx.c |
9866 |
++++ b/drivers/scsi/qla2xxx/qla_mbx.c |
9867 |
+@@ -5455,9 +5455,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, |
9868 |
+ mcp->mb[7] = LSW(MSD(req_dma)); |
9869 |
+ mcp->mb[8] = MSW(addr); |
9870 |
+ /* Setting RAM ID to valid */ |
9871 |
+- mcp->mb[10] |= BIT_7; |
9872 |
+ /* For MCTP RAM ID is 0x40 */ |
9873 |
+- mcp->mb[10] |= 0x40; |
9874 |
++ mcp->mb[10] = BIT_7 | 0x40; |
9875 |
+ |
9876 |
+ mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| |
9877 |
+ MBX_0; |
9878 |
+diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c |
9879 |
+index b6b4cfdd7620..65f8d2d94159 100644 |
9880 |
+--- a/drivers/scsi/qla2xxx/qla_nx.c |
9881 |
++++ b/drivers/scsi/qla2xxx/qla_nx.c |
9882 |
+@@ -10,6 +10,7 @@ |
9883 |
+ #include <linux/ratelimit.h> |
9884 |
+ #include <linux/vmalloc.h> |
9885 |
+ #include <scsi/scsi_tcq.h> |
9886 |
++#include <asm/unaligned.h> |
9887 |
+ |
9888 |
+ #define MASK(n) ((1ULL<<(n))-1) |
9889 |
+ #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ |
9890 |
+@@ -1600,8 +1601,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha) |
9891 |
+ return (u8 *)&ha->hablob->fw->data[offset]; |
9892 |
+ } |
9893 |
+ |
9894 |
+-static __le32 |
9895 |
+-qla82xx_get_fw_size(struct qla_hw_data *ha) |
9896 |
++static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) |
9897 |
+ { |
9898 |
+ struct qla82xx_uri_data_desc *uri_desc = NULL; |
9899 |
+ |
9900 |
+@@ -1612,7 +1612,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha) |
9901 |
+ return cpu_to_le32(uri_desc->size); |
9902 |
+ } |
9903 |
+ |
9904 |
+- return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); |
9905 |
++ return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); |
9906 |
+ } |
9907 |
+ |
9908 |
+ static u8 * |
9909 |
+@@ -1803,7 +1803,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha) |
9910 |
+ } |
9911 |
+ |
9912 |
+ flashaddr = FLASH_ADDR_START; |
9913 |
+- size = (__force u32)qla82xx_get_fw_size(ha) / 8; |
9914 |
++ size = qla82xx_get_fw_size(ha) / 8; |
9915 |
+ ptr64 = (u64 *)qla82xx_get_fw_offs(ha); |
9916 |
+ |
9917 |
+ for (i = 0; i < size; i++) { |
9918 |
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c |
9919 |
+index f714d5f917d1..3fda5836aac6 100644 |
9920 |
+--- a/drivers/scsi/qla4xxx/ql4_os.c |
9921 |
++++ b/drivers/scsi/qla4xxx/ql4_os.c |
9922 |
+@@ -4150,7 +4150,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) |
9923 |
+ dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, |
9924 |
+ ha->queues_dma); |
9925 |
+ |
9926 |
+- if (ha->fw_dump) |
9927 |
++ if (ha->fw_dump) |
9928 |
+ vfree(ha->fw_dump); |
9929 |
+ |
9930 |
+ ha->queues_len = 0; |
9931 |
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c |
9932 |
+index fcf5141bf950..19f82069c68a 100644 |
9933 |
+--- a/drivers/scsi/ufs/ufshcd.c |
9934 |
++++ b/drivers/scsi/ufs/ufshcd.c |
9935 |
+@@ -4324,7 +4324,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) |
9936 |
+ ufshcd_init_icc_levels(hba); |
9937 |
+ |
9938 |
+ /* Add required well known logical units to scsi mid layer */ |
9939 |
+- if (ufshcd_scsi_add_wlus(hba)) |
9940 |
++ ret = ufshcd_scsi_add_wlus(hba); |
9941 |
++ if (ret) |
9942 |
+ goto out; |
9943 |
+ |
9944 |
+ scsi_scan_host(hba->host); |
9945 |
+diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c |
9946 |
+index 4ce19b860289..79b52dce6b9a 100644 |
9947 |
+--- a/drivers/usb/gadget/function/f_ecm.c |
9948 |
++++ b/drivers/usb/gadget/function/f_ecm.c |
9949 |
+@@ -56,6 +56,7 @@ struct f_ecm { |
9950 |
+ struct usb_ep *notify; |
9951 |
+ struct usb_request *notify_req; |
9952 |
+ u8 notify_state; |
9953 |
++ atomic_t notify_count; |
9954 |
+ bool is_open; |
9955 |
+ |
9956 |
+ /* FIXME is_open needs some irq-ish locking |
9957 |
+@@ -384,7 +385,7 @@ static void ecm_do_notify(struct f_ecm *ecm) |
9958 |
+ int status; |
9959 |
+ |
9960 |
+ /* notification already in flight? */ |
9961 |
+- if (!req) |
9962 |
++ if (atomic_read(&ecm->notify_count)) |
9963 |
+ return; |
9964 |
+ |
9965 |
+ event = req->buf; |
9966 |
+@@ -424,10 +425,10 @@ static void ecm_do_notify(struct f_ecm *ecm) |
9967 |
+ event->bmRequestType = 0xA1; |
9968 |
+ event->wIndex = cpu_to_le16(ecm->ctrl_id); |
9969 |
+ |
9970 |
+- ecm->notify_req = NULL; |
9971 |
++ atomic_inc(&ecm->notify_count); |
9972 |
+ status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC); |
9973 |
+ if (status < 0) { |
9974 |
+- ecm->notify_req = req; |
9975 |
++ atomic_dec(&ecm->notify_count); |
9976 |
+ DBG(cdev, "notify --> %d\n", status); |
9977 |
+ } |
9978 |
+ } |
9979 |
+@@ -452,17 +453,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req) |
9980 |
+ switch (req->status) { |
9981 |
+ case 0: |
9982 |
+ /* no fault */ |
9983 |
++ atomic_dec(&ecm->notify_count); |
9984 |
+ break; |
9985 |
+ case -ECONNRESET: |
9986 |
+ case -ESHUTDOWN: |
9987 |
++ atomic_set(&ecm->notify_count, 0); |
9988 |
+ ecm->notify_state = ECM_NOTIFY_NONE; |
9989 |
+ break; |
9990 |
+ default: |
9991 |
+ DBG(cdev, "event %02x --> %d\n", |
9992 |
+ event->bNotificationType, req->status); |
9993 |
++ atomic_dec(&ecm->notify_count); |
9994 |
+ break; |
9995 |
+ } |
9996 |
+- ecm->notify_req = req; |
9997 |
+ ecm_do_notify(ecm); |
9998 |
+ } |
9999 |
+ |
10000 |
+@@ -909,6 +912,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f) |
10001 |
+ |
10002 |
+ usb_free_all_descriptors(f); |
10003 |
+ |
10004 |
++ if (atomic_read(&ecm->notify_count)) { |
10005 |
++ usb_ep_dequeue(ecm->notify, ecm->notify_req); |
10006 |
++ atomic_set(&ecm->notify_count, 0); |
10007 |
++ } |
10008 |
++ |
10009 |
+ kfree(ecm->notify_req->buf); |
10010 |
+ usb_ep_free_request(ecm->notify, ecm->notify_req); |
10011 |
+ } |
10012 |
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c |
10013 |
+index 7ad798ace1e5..16908737bff1 100644 |
10014 |
+--- a/drivers/usb/gadget/function/f_ncm.c |
10015 |
++++ b/drivers/usb/gadget/function/f_ncm.c |
10016 |
+@@ -57,6 +57,7 @@ struct f_ncm { |
10017 |
+ struct usb_ep *notify; |
10018 |
+ struct usb_request *notify_req; |
10019 |
+ u8 notify_state; |
10020 |
++ atomic_t notify_count; |
10021 |
+ bool is_open; |
10022 |
+ |
10023 |
+ const struct ndp_parser_opts *parser_opts; |
10024 |
+@@ -480,7 +481,7 @@ static void ncm_do_notify(struct f_ncm *ncm) |
10025 |
+ int status; |
10026 |
+ |
10027 |
+ /* notification already in flight? */ |
10028 |
+- if (!req) |
10029 |
++ if (atomic_read(&ncm->notify_count)) |
10030 |
+ return; |
10031 |
+ |
10032 |
+ event = req->buf; |
10033 |
+@@ -520,7 +521,8 @@ static void ncm_do_notify(struct f_ncm *ncm) |
10034 |
+ event->bmRequestType = 0xA1; |
10035 |
+ event->wIndex = cpu_to_le16(ncm->ctrl_id); |
10036 |
+ |
10037 |
+- ncm->notify_req = NULL; |
10038 |
++ atomic_inc(&ncm->notify_count); |
10039 |
++ |
10040 |
+ /* |
10041 |
+ * In double buffering if there is a space in FIFO, |
10042 |
+ * completion callback can be called right after the call, |
10043 |
+@@ -530,7 +532,7 @@ static void ncm_do_notify(struct f_ncm *ncm) |
10044 |
+ status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC); |
10045 |
+ spin_lock(&ncm->lock); |
10046 |
+ if (status < 0) { |
10047 |
+- ncm->notify_req = req; |
10048 |
++ atomic_dec(&ncm->notify_count); |
10049 |
+ DBG(cdev, "notify --> %d\n", status); |
10050 |
+ } |
10051 |
+ } |
10052 |
+@@ -565,17 +567,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req) |
10053 |
+ case 0: |
10054 |
+ VDBG(cdev, "Notification %02x sent\n", |
10055 |
+ event->bNotificationType); |
10056 |
++ atomic_dec(&ncm->notify_count); |
10057 |
+ break; |
10058 |
+ case -ECONNRESET: |
10059 |
+ case -ESHUTDOWN: |
10060 |
++ atomic_set(&ncm->notify_count, 0); |
10061 |
+ ncm->notify_state = NCM_NOTIFY_NONE; |
10062 |
+ break; |
10063 |
+ default: |
10064 |
+ DBG(cdev, "event %02x --> %d\n", |
10065 |
+ event->bNotificationType, req->status); |
10066 |
++ atomic_dec(&ncm->notify_count); |
10067 |
+ break; |
10068 |
+ } |
10069 |
+- ncm->notify_req = req; |
10070 |
+ ncm_do_notify(ncm); |
10071 |
+ spin_unlock(&ncm->lock); |
10072 |
+ } |
10073 |
+@@ -1559,6 +1563,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) |
10074 |
+ ncm_string_defs[0].id = 0; |
10075 |
+ usb_free_all_descriptors(f); |
10076 |
+ |
10077 |
++ if (atomic_read(&ncm->notify_count)) { |
10078 |
++ usb_ep_dequeue(ncm->notify, ncm->notify_req); |
10079 |
++ atomic_set(&ncm->notify_count, 0); |
10080 |
++ } |
10081 |
++ |
10082 |
+ kfree(ncm->notify_req->buf); |
10083 |
+ usb_ep_free_request(ncm->notify, ncm->notify_req); |
10084 |
+ } |
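
The f_ecm and f_ncm hunks above replace the "notify_req == NULL means busy" convention with an atomic notify_count, so unbind can tell whether a notification is still queued on the endpoint and dequeue it before the request is freed. A reduced userspace skeleton of that accounting follows; queue_notify() and notify_complete() are hypothetical helpers standing in for the gadget code paths.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* inflight > 0 means a notification request is still queued. */
struct notify_state {
	atomic_int inflight;
};

static bool queue_notify(struct notify_state *ns, bool queue_fails)
{
	if (atomic_load(&ns->inflight))
		return false;			/* notification already in flight */

	atomic_fetch_add(&ns->inflight, 1);
	if (queue_fails) {			/* models usb_ep_queue() failing */
		atomic_fetch_sub(&ns->inflight, 1);
		return false;
	}
	return true;
}

static void notify_complete(struct notify_state *ns, bool shutdown)
{
	if (shutdown)				/* models -ECONNRESET / -ESHUTDOWN */
		atomic_store(&ns->inflight, 0);
	else
		atomic_fetch_sub(&ns->inflight, 1);
}

int main(void)
{
	struct notify_state ns = { .inflight = 0 };

	assert(queue_notify(&ns, false));	/* first request goes out */
	assert(!queue_notify(&ns, false));	/* second is suppressed while busy */
	notify_complete(&ns, false);		/* normal completion drops the count */
	assert(queue_notify(&ns, false));
	notify_complete(&ns, true);		/* shutdown clears it entirely */
	assert(!queue_notify(&ns, true));	/* failed queue drops the count again */
	assert(atomic_load(&ns.inflight) == 0);
	return 0;
}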
10085 |
+diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c |
10086 |
+index ecd8c8d62f2e..d70e7d43241a 100644 |
10087 |
+--- a/drivers/usb/gadget/legacy/cdc2.c |
10088 |
++++ b/drivers/usb/gadget/legacy/cdc2.c |
10089 |
+@@ -229,7 +229,7 @@ static struct usb_composite_driver cdc_driver = { |
10090 |
+ .name = "g_cdc", |
10091 |
+ .dev = &device_desc, |
10092 |
+ .strings = dev_strings, |
10093 |
+- .max_speed = USB_SPEED_HIGH, |
10094 |
++ .max_speed = USB_SPEED_SUPER, |
10095 |
+ .bind = cdc_bind, |
10096 |
+ .unbind = cdc_unbind, |
10097 |
+ }; |
10098 |
+diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c |
10099 |
+index 320a81b2baa6..c0dccc65db3a 100644 |
10100 |
+--- a/drivers/usb/gadget/legacy/g_ffs.c |
10101 |
++++ b/drivers/usb/gadget/legacy/g_ffs.c |
10102 |
+@@ -153,7 +153,7 @@ static struct usb_composite_driver gfs_driver = { |
10103 |
+ .name = DRIVER_NAME, |
10104 |
+ .dev = &gfs_dev_desc, |
10105 |
+ .strings = gfs_dev_strings, |
10106 |
+- .max_speed = USB_SPEED_HIGH, |
10107 |
++ .max_speed = USB_SPEED_SUPER, |
10108 |
+ .bind = gfs_bind, |
10109 |
+ .unbind = gfs_unbind, |
10110 |
+ }; |
10111 |
+diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c |
10112 |
+index 09c7c28f32f7..612c1608fccf 100644 |
10113 |
+--- a/drivers/usb/gadget/legacy/multi.c |
10114 |
++++ b/drivers/usb/gadget/legacy/multi.c |
10115 |
+@@ -486,7 +486,7 @@ static struct usb_composite_driver multi_driver = { |
10116 |
+ .name = "g_multi", |
10117 |
+ .dev = &device_desc, |
10118 |
+ .strings = dev_strings, |
10119 |
+- .max_speed = USB_SPEED_HIGH, |
10120 |
++ .max_speed = USB_SPEED_SUPER, |
10121 |
+ .bind = multi_bind, |
10122 |
+ .unbind = multi_unbind, |
10123 |
+ .needs_serial = 1, |
10124 |
+diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c |
10125 |
+index 2bae4381332d..cc3ffacbade1 100644 |
10126 |
+--- a/drivers/usb/gadget/legacy/ncm.c |
10127 |
++++ b/drivers/usb/gadget/legacy/ncm.c |
10128 |
+@@ -203,7 +203,7 @@ static struct usb_composite_driver ncm_driver = { |
10129 |
+ .name = "g_ncm", |
10130 |
+ .dev = &device_desc, |
10131 |
+ .strings = dev_strings, |
10132 |
+- .max_speed = USB_SPEED_HIGH, |
10133 |
++ .max_speed = USB_SPEED_SUPER, |
10134 |
+ .bind = gncm_bind, |
10135 |
+ .unbind = gncm_unbind, |
10136 |
+ }; |
10137 |
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c |
10138 |
+index 62caf3bcadf8..8eac5f75bca3 100644 |
10139 |
+--- a/fs/btrfs/ctree.c |
10140 |
++++ b/fs/btrfs/ctree.c |
10141 |
+@@ -332,26 +332,6 @@ struct tree_mod_elem { |
10142 |
+ struct tree_mod_root old_root; |
10143 |
+ }; |
10144 |
+ |
10145 |
+-static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info) |
10146 |
+-{ |
10147 |
+- read_lock(&fs_info->tree_mod_log_lock); |
10148 |
+-} |
10149 |
+- |
10150 |
+-static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info) |
10151 |
+-{ |
10152 |
+- read_unlock(&fs_info->tree_mod_log_lock); |
10153 |
+-} |
10154 |
+- |
10155 |
+-static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info) |
10156 |
+-{ |
10157 |
+- write_lock(&fs_info->tree_mod_log_lock); |
10158 |
+-} |
10159 |
+- |
10160 |
+-static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info) |
10161 |
+-{ |
10162 |
+- write_unlock(&fs_info->tree_mod_log_lock); |
10163 |
+-} |
10164 |
+- |
10165 |
+ /* |
10166 |
+ * Pull a new tree mod seq number for our operation. |
10167 |
+ */ |
10168 |
+@@ -371,14 +351,12 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info) |
10169 |
+ u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, |
10170 |
+ struct seq_list *elem) |
10171 |
+ { |
10172 |
+- tree_mod_log_write_lock(fs_info); |
10173 |
+- spin_lock(&fs_info->tree_mod_seq_lock); |
10174 |
++ write_lock(&fs_info->tree_mod_log_lock); |
10175 |
+ if (!elem->seq) { |
10176 |
+ elem->seq = btrfs_inc_tree_mod_seq(fs_info); |
10177 |
+ list_add_tail(&elem->list, &fs_info->tree_mod_seq_list); |
10178 |
+ } |
10179 |
+- spin_unlock(&fs_info->tree_mod_seq_lock); |
10180 |
+- tree_mod_log_write_unlock(fs_info); |
10181 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10182 |
+ |
10183 |
+ return elem->seq; |
10184 |
+ } |
10185 |
+@@ -397,7 +375,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, |
10186 |
+ if (!seq_putting) |
10187 |
+ return; |
10188 |
+ |
10189 |
+- spin_lock(&fs_info->tree_mod_seq_lock); |
10190 |
++ write_lock(&fs_info->tree_mod_log_lock); |
10191 |
+ list_del(&elem->list); |
10192 |
+ elem->seq = 0; |
10193 |
+ |
10194 |
+@@ -408,19 +386,17 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, |
10195 |
+ * blocker with lower sequence number exists, we |
10196 |
+ * cannot remove anything from the log |
10197 |
+ */ |
10198 |
+- spin_unlock(&fs_info->tree_mod_seq_lock); |
10199 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10200 |
+ return; |
10201 |
+ } |
10202 |
+ min_seq = cur_elem->seq; |
10203 |
+ } |
10204 |
+ } |
10205 |
+- spin_unlock(&fs_info->tree_mod_seq_lock); |
10206 |
+ |
10207 |
+ /* |
10208 |
+ * anything that's lower than the lowest existing (read: blocked) |
10209 |
+ * sequence number can be removed from the tree. |
10210 |
+ */ |
10211 |
+- tree_mod_log_write_lock(fs_info); |
10212 |
+ tm_root = &fs_info->tree_mod_log; |
10213 |
+ for (node = rb_first(tm_root); node; node = next) { |
10214 |
+ next = rb_next(node); |
10215 |
+@@ -430,7 +406,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, |
10216 |
+ rb_erase(node, tm_root); |
10217 |
+ kfree(tm); |
10218 |
+ } |
10219 |
+- tree_mod_log_write_unlock(fs_info); |
10220 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10221 |
+ } |
10222 |
+ |
10223 |
+ /* |
10224 |
+@@ -441,7 +417,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, |
10225 |
+ * operations, or the shifted logical of the affected block for all other |
10226 |
+ * operations. |
10227 |
+ * |
10228 |
+- * Note: must be called with write lock (tree_mod_log_write_lock). |
10229 |
++ * Note: must be called with write lock for fs_info::tree_mod_log_lock. |
10230 |
+ */ |
10231 |
+ static noinline int |
10232 |
+ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) |
10233 |
+@@ -481,7 +457,7 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) |
10234 |
+ * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it |
10235 |
+ * returns zero with the tree_mod_log_lock acquired. The caller must hold |
10236 |
+ * this until all tree mod log insertions are recorded in the rb tree and then |
10237 |
+- * call tree_mod_log_write_unlock() to release. |
10238 |
++ * write unlock fs_info::tree_mod_log_lock. |
10239 |
+ */ |
10240 |
+ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, |
10241 |
+ struct extent_buffer *eb) { |
10242 |
+@@ -491,9 +467,9 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, |
10243 |
+ if (eb && btrfs_header_level(eb) == 0) |
10244 |
+ return 1; |
10245 |
+ |
10246 |
+- tree_mod_log_write_lock(fs_info); |
10247 |
++ write_lock(&fs_info->tree_mod_log_lock); |
10248 |
+ if (list_empty(&(fs_info)->tree_mod_seq_list)) { |
10249 |
+- tree_mod_log_write_unlock(fs_info); |
10250 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10251 |
+ return 1; |
10252 |
+ } |
10253 |
+ |
10254 |
+@@ -557,7 +533,7 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, |
10255 |
+ } |
10256 |
+ |
10257 |
+ ret = __tree_mod_log_insert(fs_info, tm); |
10258 |
+- tree_mod_log_write_unlock(fs_info); |
10259 |
++ write_unlock(&eb->fs_info->tree_mod_log_lock); |
10260 |
+ if (ret) |
10261 |
+ kfree(tm); |
10262 |
+ |
10263 |
+@@ -621,7 +597,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, |
10264 |
+ ret = __tree_mod_log_insert(fs_info, tm); |
10265 |
+ if (ret) |
10266 |
+ goto free_tms; |
10267 |
+- tree_mod_log_write_unlock(fs_info); |
10268 |
++ write_unlock(&eb->fs_info->tree_mod_log_lock); |
10269 |
+ kfree(tm_list); |
10270 |
+ |
10271 |
+ return 0; |
10272 |
+@@ -632,7 +608,7 @@ free_tms: |
10273 |
+ kfree(tm_list[i]); |
10274 |
+ } |
10275 |
+ if (locked) |
10276 |
+- tree_mod_log_write_unlock(fs_info); |
10277 |
++ write_unlock(&eb->fs_info->tree_mod_log_lock); |
10278 |
+ kfree(tm_list); |
10279 |
+ kfree(tm); |
10280 |
+ |
10281 |
+@@ -713,7 +689,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, |
10282 |
+ if (!ret) |
10283 |
+ ret = __tree_mod_log_insert(fs_info, tm); |
10284 |
+ |
10285 |
+- tree_mod_log_write_unlock(fs_info); |
10286 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10287 |
+ if (ret) |
10288 |
+ goto free_tms; |
10289 |
+ kfree(tm_list); |
10290 |
+@@ -741,7 +717,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, |
10291 |
+ struct tree_mod_elem *found = NULL; |
10292 |
+ u64 index = start >> PAGE_CACHE_SHIFT; |
10293 |
+ |
10294 |
+- tree_mod_log_read_lock(fs_info); |
10295 |
++ read_lock(&fs_info->tree_mod_log_lock); |
10296 |
+ tm_root = &fs_info->tree_mod_log; |
10297 |
+ node = tm_root->rb_node; |
10298 |
+ while (node) { |
10299 |
+@@ -769,7 +745,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, |
10300 |
+ break; |
10301 |
+ } |
10302 |
+ } |
10303 |
+- tree_mod_log_read_unlock(fs_info); |
10304 |
++ read_unlock(&fs_info->tree_mod_log_lock); |
10305 |
+ |
10306 |
+ return found; |
10307 |
+ } |
10308 |
+@@ -850,7 +826,7 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, |
10309 |
+ goto free_tms; |
10310 |
+ } |
10311 |
+ |
10312 |
+- tree_mod_log_write_unlock(fs_info); |
10313 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10314 |
+ kfree(tm_list); |
10315 |
+ |
10316 |
+ return 0; |
10317 |
+@@ -862,7 +838,7 @@ free_tms: |
10318 |
+ kfree(tm_list[i]); |
10319 |
+ } |
10320 |
+ if (locked) |
10321 |
+- tree_mod_log_write_unlock(fs_info); |
10322 |
++ write_unlock(&fs_info->tree_mod_log_lock); |
10323 |
+ kfree(tm_list); |
10324 |
+ |
10325 |
+ return ret; |
10326 |
+@@ -922,7 +898,7 @@ tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) |
10327 |
+ goto free_tms; |
10328 |
+ |
10329 |
+ ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems); |
10330 |
+- tree_mod_log_write_unlock(fs_info); |
10331 |
++ write_unlock(&eb->fs_info->tree_mod_log_lock); |
10332 |
+ if (ret) |
10333 |
+ goto free_tms; |
10334 |
+ kfree(tm_list); |
10335 |
+@@ -1284,7 +1260,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, |
10336 |
+ unsigned long p_size = sizeof(struct btrfs_key_ptr); |
10337 |
+ |
10338 |
+ n = btrfs_header_nritems(eb); |
10339 |
+- tree_mod_log_read_lock(fs_info); |
10340 |
++ read_lock(&fs_info->tree_mod_log_lock); |
10341 |
+ while (tm && tm->seq >= time_seq) { |
10342 |
+ /* |
10343 |
+ * all the operations are recorded with the operator used for |
10344 |
+@@ -1339,7 +1315,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, |
10345 |
+ if (tm->index != first_tm->index) |
10346 |
+ break; |
10347 |
+ } |
10348 |
+- tree_mod_log_read_unlock(fs_info); |
10349 |
++ read_unlock(&fs_info->tree_mod_log_lock); |
10350 |
+ btrfs_set_header_nritems(eb, n); |
10351 |
+ } |
10352 |
+ |
10353 |
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h |
10354 |
+index 4a91d3119e59..0b06d4942da7 100644 |
10355 |
+--- a/fs/btrfs/ctree.h |
10356 |
++++ b/fs/btrfs/ctree.h |
10357 |
+@@ -1576,14 +1576,12 @@ struct btrfs_fs_info { |
10358 |
+ struct list_head delayed_iputs; |
10359 |
+ struct mutex cleaner_delayed_iput_mutex; |
10360 |
+ |
10361 |
+- /* this protects tree_mod_seq_list */ |
10362 |
+- spinlock_t tree_mod_seq_lock; |
10363 |
+ atomic64_t tree_mod_seq; |
10364 |
+- struct list_head tree_mod_seq_list; |
10365 |
+ |
10366 |
+- /* this protects tree_mod_log */ |
10367 |
++ /* this protects tree_mod_log and tree_mod_seq_list */ |
10368 |
+ rwlock_t tree_mod_log_lock; |
10369 |
+ struct rb_root tree_mod_log; |
10370 |
++ struct list_head tree_mod_seq_list; |
10371 |
+ |
10372 |
+ atomic_t nr_async_submits; |
10373 |
+ atomic_t async_submit_draining; |
10374 |
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c |
10375 |
+index a2f165029ee6..bb1e32f77b69 100644 |
10376 |
+--- a/fs/btrfs/delayed-ref.c |
10377 |
++++ b/fs/btrfs/delayed-ref.c |
10378 |
+@@ -279,7 +279,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, |
10379 |
+ if (head->is_data) |
10380 |
+ return; |
10381 |
+ |
10382 |
+- spin_lock(&fs_info->tree_mod_seq_lock); |
10383 |
++ read_lock(&fs_info->tree_mod_log_lock); |
10384 |
+ if (!list_empty(&fs_info->tree_mod_seq_list)) { |
10385 |
+ struct seq_list *elem; |
10386 |
+ |
10387 |
+@@ -287,7 +287,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, |
10388 |
+ struct seq_list, list); |
10389 |
+ seq = elem->seq; |
10390 |
+ } |
10391 |
+- spin_unlock(&fs_info->tree_mod_seq_lock); |
10392 |
++ read_unlock(&fs_info->tree_mod_log_lock); |
10393 |
+ |
10394 |
+ ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node, |
10395 |
+ list); |
10396 |
+@@ -315,7 +315,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, |
10397 |
+ struct seq_list *elem; |
10398 |
+ int ret = 0; |
10399 |
+ |
10400 |
+- spin_lock(&fs_info->tree_mod_seq_lock); |
10401 |
++ read_lock(&fs_info->tree_mod_log_lock); |
10402 |
+ if (!list_empty(&fs_info->tree_mod_seq_list)) { |
10403 |
+ elem = list_first_entry(&fs_info->tree_mod_seq_list, |
10404 |
+ struct seq_list, list); |
10405 |
+@@ -328,7 +328,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, |
10406 |
+ } |
10407 |
+ } |
10408 |
+ |
10409 |
+- spin_unlock(&fs_info->tree_mod_seq_lock); |
10410 |
++ read_unlock(&fs_info->tree_mod_log_lock); |
10411 |
+ return ret; |
10412 |
+ } |
10413 |
+ |
10414 |
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
10415 |
+index d50fc503f73b..2fb533233e8e 100644 |
10416 |
+--- a/fs/btrfs/disk-io.c |
10417 |
++++ b/fs/btrfs/disk-io.c |
10418 |
+@@ -2481,7 +2481,6 @@ int open_ctree(struct super_block *sb, |
10419 |
+ spin_lock_init(&fs_info->delayed_iput_lock); |
10420 |
+ spin_lock_init(&fs_info->defrag_inodes_lock); |
10421 |
+ spin_lock_init(&fs_info->free_chunk_lock); |
10422 |
+- spin_lock_init(&fs_info->tree_mod_seq_lock); |
10423 |
+ spin_lock_init(&fs_info->super_lock); |
10424 |
+ spin_lock_init(&fs_info->qgroup_op_lock); |
10425 |
+ spin_lock_init(&fs_info->buffer_lock); |
10426 |
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c |
10427 |
+index 6f5563ca70c1..2c86c472f670 100644 |
10428 |
+--- a/fs/btrfs/extent_io.c |
10429 |
++++ b/fs/btrfs/extent_io.c |
10430 |
+@@ -4164,6 +4164,14 @@ retry: |
10431 |
+ */ |
10432 |
+ scanned = 1; |
10433 |
+ index = 0; |
10434 |
++ |
10435 |
++ /* |
10436 |
++ * If we're looping we could run into a page that is locked by a |
10437 |
++ * writer and that writer could be waiting on writeback for a |
10438 |
++ * page in our current bio, and thus deadlock, so flush the |
10439 |
++ * write bio here. |
10440 |
++ */ |
10441 |
++ flush_write_bio(data); |
10442 |
+ goto retry; |
10443 |
+ } |
10444 |
+ btrfs_add_delayed_iput(inode); |
10445 |
+diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c |
10446 |
+index 9626252ee6b4..69255148f0c8 100644 |
10447 |
+--- a/fs/btrfs/tests/btrfs-tests.c |
10448 |
++++ b/fs/btrfs/tests/btrfs-tests.c |
10449 |
+@@ -109,7 +109,6 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void) |
10450 |
+ spin_lock_init(&fs_info->qgroup_op_lock); |
10451 |
+ spin_lock_init(&fs_info->super_lock); |
10452 |
+ spin_lock_init(&fs_info->fs_roots_radix_lock); |
10453 |
+- spin_lock_init(&fs_info->tree_mod_seq_lock); |
10454 |
+ mutex_init(&fs_info->qgroup_ioctl_lock); |
10455 |
+ mutex_init(&fs_info->qgroup_rescan_lock); |
10456 |
+ rwlock_init(&fs_info->tree_mod_log_lock); |
10457 |
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c |
10458 |
+index 098016338f98..64e449eb2ecd 100644 |
10459 |
+--- a/fs/btrfs/transaction.c |
10460 |
++++ b/fs/btrfs/transaction.c |
10461 |
+@@ -1814,6 +1814,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
10462 |
+ struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode); |
10463 |
+ int ret; |
10464 |
+ |
10465 |
++ /* |
10466 |
++ * Some places just start a transaction to commit it. We need to make |
10467 |
++ * sure that if this commit fails that the abort code actually marks the |
10468 |
++ * transaction as failed, so set trans->dirty to make the abort code do |
10469 |
++ * the right thing. |
10470 |
++ */ |
10471 |
++ trans->dirty = true; |
10472 |
++ |
10473 |
+ /* Stop the commit early if ->aborted is set */ |
10474 |
+ if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { |
10475 |
+ ret = cur_trans->aborted; |
10476 |
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c |
10477 |
+index f9c3907bf159..4320f346b0b9 100644 |
10478 |
+--- a/fs/btrfs/tree-log.c |
10479 |
++++ b/fs/btrfs/tree-log.c |
10480 |
+@@ -4404,13 +4404,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, |
10481 |
+ struct btrfs_file_extent_item); |
10482 |
+ |
10483 |
+ if (btrfs_file_extent_type(leaf, extent) == |
10484 |
+- BTRFS_FILE_EXTENT_INLINE) { |
10485 |
+- len = btrfs_file_extent_inline_len(leaf, |
10486 |
+- path->slots[0], |
10487 |
+- extent); |
10488 |
+- ASSERT(len == i_size); |
10489 |
++ BTRFS_FILE_EXTENT_INLINE) |
10490 |
+ return 0; |
10491 |
+- } |
10492 |
+ |
10493 |
+ len = btrfs_file_extent_num_bytes(leaf, extent); |
10494 |
+ /* Last extent goes beyond i_size, no need to log a hole. */ |
10495 |
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
10496 |
+index 84e60b3a5c7c..d4472a494758 100644 |
10497 |
+--- a/fs/cifs/smb2pdu.c |
10498 |
++++ b/fs/cifs/smb2pdu.c |
10499 |
+@@ -250,9 +250,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) |
10500 |
+ */ |
10501 |
+ mutex_lock(&tcon->ses->session_mutex); |
10502 |
+ rc = cifs_negotiate_protocol(0, tcon->ses); |
10503 |
+- if (!rc && tcon->ses->need_reconnect) |
10504 |
++ if (!rc && tcon->ses->need_reconnect) { |
10505 |
+ rc = cifs_setup_session(0, tcon->ses, nls_codepage); |
10506 |
+- |
10507 |
++ if ((rc == -EACCES) && !tcon->retry) { |
10508 |
++ rc = -EHOSTDOWN; |
10509 |
++ mutex_unlock(&tcon->ses->session_mutex); |
10510 |
++ goto failed; |
10511 |
++ } |
10512 |
++ } |
10513 |
+ if (rc || !tcon->need_reconnect) { |
10514 |
+ mutex_unlock(&tcon->ses->session_mutex); |
10515 |
+ goto out; |
10516 |
+@@ -286,6 +291,7 @@ out: |
10517 |
+ case SMB2_SET_INFO: |
10518 |
+ rc = -EAGAIN; |
10519 |
+ } |
10520 |
++failed: |
10521 |
+ unload_nls(nls_codepage); |
10522 |
+ return rc; |
10523 |
+ } |
10524 |
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c |
10525 |
+index 860024392969..0d44f7ef3c5d 100644 |
10526 |
+--- a/fs/ext2/super.c |
10527 |
++++ b/fs/ext2/super.c |
10528 |
+@@ -1051,9 +1051,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) |
10529 |
+ |
10530 |
+ if (EXT2_BLOCKS_PER_GROUP(sb) == 0) |
10531 |
+ goto cantfind_ext2; |
10532 |
+- sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - |
10533 |
+- le32_to_cpu(es->s_first_data_block) - 1) |
10534 |
+- / EXT2_BLOCKS_PER_GROUP(sb)) + 1; |
10535 |
++ sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - |
10536 |
++ le32_to_cpu(es->s_first_data_block) - 1) |
10537 |
++ / EXT2_BLOCKS_PER_GROUP(sb)) + 1; |
10538 |
+ db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / |
10539 |
+ EXT2_DESC_PER_BLOCK(sb); |
10540 |
+ sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL); |
10541 |
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c |
10542 |
+index 807eb6ef4f91..6f4f68967c31 100644 |
10543 |
+--- a/fs/nfs/callback_proc.c |
10544 |
++++ b/fs/nfs/callback_proc.c |
10545 |
+@@ -368,7 +368,7 @@ static bool referring_call_exists(struct nfs_client *clp, |
10546 |
+ uint32_t nrclists, |
10547 |
+ struct referring_call_list *rclists) |
10548 |
+ { |
10549 |
+- bool status = 0; |
10550 |
++ bool status = false; |
10551 |
+ int i, j; |
10552 |
+ struct nfs4_session *session; |
10553 |
+ struct nfs4_slot_table *tbl; |
10554 |
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c |
10555 |
+index c690a1c0c4e5..2ac3d2527ad2 100644 |
10556 |
+--- a/fs/nfs/dir.c |
10557 |
++++ b/fs/nfs/dir.c |
10558 |
+@@ -169,6 +169,17 @@ typedef struct { |
10559 |
+ unsigned int eof:1; |
10560 |
+ } nfs_readdir_descriptor_t; |
10561 |
+ |
10562 |
++static |
10563 |
++void nfs_readdir_init_array(struct page *page) |
10564 |
++{ |
10565 |
++ struct nfs_cache_array *array; |
10566 |
++ |
10567 |
++ array = kmap_atomic(page); |
10568 |
++ memset(array, 0, sizeof(struct nfs_cache_array)); |
10569 |
++ array->eof_index = -1; |
10570 |
++ kunmap_atomic(array); |
10571 |
++} |
10572 |
++ |
10573 |
+ /* |
10574 |
+ * The caller is responsible for calling nfs_readdir_release_array(page) |
10575 |
+ */ |
10576 |
+@@ -202,6 +213,7 @@ void nfs_readdir_clear_array(struct page *page) |
10577 |
+ array = kmap_atomic(page); |
10578 |
+ for (i = 0; i < array->size; i++) |
10579 |
+ kfree(array->array[i].string.name); |
10580 |
++ array->size = 0; |
10581 |
+ kunmap_atomic(array); |
10582 |
+ } |
10583 |
+ |
10584 |
+@@ -277,7 +289,7 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri |
10585 |
+ desc->cache_entry_index = index; |
10586 |
+ return 0; |
10587 |
+ out_eof: |
10588 |
+- desc->eof = 1; |
10589 |
++ desc->eof = true; |
10590 |
+ return -EBADCOOKIE; |
10591 |
+ } |
10592 |
+ |
10593 |
+@@ -331,7 +343,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des |
10594 |
+ if (array->eof_index >= 0) { |
10595 |
+ status = -EBADCOOKIE; |
10596 |
+ if (*desc->dir_cookie == array->last_cookie) |
10597 |
+- desc->eof = 1; |
10598 |
++ desc->eof = true; |
10599 |
+ } |
10600 |
+ out: |
10601 |
+ return status; |
10602 |
+@@ -622,6 +634,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, |
10603 |
+ int status = -ENOMEM; |
10604 |
+ unsigned int array_size = ARRAY_SIZE(pages); |
10605 |
+ |
10606 |
++ nfs_readdir_init_array(page); |
10607 |
++ |
10608 |
+ entry.prev_cookie = 0; |
10609 |
+ entry.cookie = desc->last_cookie; |
10610 |
+ entry.eof = 0; |
10611 |
+@@ -642,8 +656,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, |
10612 |
+ status = PTR_ERR(array); |
10613 |
+ goto out_label_free; |
10614 |
+ } |
10615 |
+- memset(array, 0, sizeof(struct nfs_cache_array)); |
10616 |
+- array->eof_index = -1; |
10617 |
++ |
10618 |
++ array = kmap(page); |
10619 |
+ |
10620 |
+ status = nfs_readdir_alloc_pages(pages, array_size); |
10621 |
+ if (status < 0) |
10622 |
+@@ -698,6 +712,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) |
10623 |
+ unlock_page(page); |
10624 |
+ return 0; |
10625 |
+ error: |
10626 |
++ nfs_readdir_clear_array(page); |
10627 |
+ unlock_page(page); |
10628 |
+ return ret; |
10629 |
+ } |
10630 |
+@@ -705,8 +720,6 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) |
10631 |
+ static |
10632 |
+ void cache_page_release(nfs_readdir_descriptor_t *desc) |
10633 |
+ { |
10634 |
+- if (!desc->page->mapping) |
10635 |
+- nfs_readdir_clear_array(desc->page); |
10636 |
+ page_cache_release(desc->page); |
10637 |
+ desc->page = NULL; |
10638 |
+ } |
10639 |
+@@ -720,19 +733,28 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc) |
10640 |
+ |
10641 |
+ /* |
10642 |
+ * Returns 0 if desc->dir_cookie was found on page desc->page_index |
10643 |
++ * and locks the page to prevent removal from the page cache. |
10644 |
+ */ |
10645 |
+ static |
10646 |
+-int find_cache_page(nfs_readdir_descriptor_t *desc) |
10647 |
++int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc) |
10648 |
+ { |
10649 |
+ int res; |
10650 |
+ |
10651 |
+ desc->page = get_cache_page(desc); |
10652 |
+ if (IS_ERR(desc->page)) |
10653 |
+ return PTR_ERR(desc->page); |
10654 |
+- |
10655 |
+- res = nfs_readdir_search_array(desc); |
10656 |
++ res = lock_page_killable(desc->page); |
10657 |
+ if (res != 0) |
10658 |
+- cache_page_release(desc); |
10659 |
++ goto error; |
10660 |
++ res = -EAGAIN; |
10661 |
++ if (desc->page->mapping != NULL) { |
10662 |
++ res = nfs_readdir_search_array(desc); |
10663 |
++ if (res == 0) |
10664 |
++ return 0; |
10665 |
++ } |
10666 |
++ unlock_page(desc->page); |
10667 |
++error: |
10668 |
++ cache_page_release(desc); |
10669 |
+ return res; |
10670 |
+ } |
10671 |
+ |
10672 |
+@@ -747,7 +769,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) |
10673 |
+ desc->last_cookie = 0; |
10674 |
+ } |
10675 |
+ do { |
10676 |
+- res = find_cache_page(desc); |
10677 |
++ res = find_and_lock_cache_page(desc); |
10678 |
+ } while (res == -EAGAIN); |
10679 |
+ return res; |
10680 |
+ } |
10681 |
+@@ -776,7 +798,7 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc) |
10682 |
+ ent = &array->array[i]; |
10683 |
+ if (!dir_emit(desc->ctx, ent->string.name, ent->string.len, |
10684 |
+ nfs_compat_user_ino64(ent->ino), ent->d_type)) { |
10685 |
+- desc->eof = 1; |
10686 |
++ desc->eof = true; |
10687 |
+ break; |
10688 |
+ } |
10689 |
+ desc->ctx->pos++; |
10690 |
+@@ -788,11 +810,10 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc) |
10691 |
+ ctx->duped = 1; |
10692 |
+ } |
10693 |
+ if (array->eof_index >= 0) |
10694 |
+- desc->eof = 1; |
10695 |
++ desc->eof = true; |
10696 |
+ |
10697 |
+ nfs_readdir_release_array(desc->page); |
10698 |
+ out: |
10699 |
+- cache_page_release(desc); |
10700 |
+ dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", |
10701 |
+ (unsigned long long)*desc->dir_cookie, res); |
10702 |
+ return res; |
10703 |
+@@ -838,13 +859,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc) |
10704 |
+ |
10705 |
+ status = nfs_do_filldir(desc); |
10706 |
+ |
10707 |
++ out_release: |
10708 |
++ nfs_readdir_clear_array(desc->page); |
10709 |
++ cache_page_release(desc); |
10710 |
+ out: |
10711 |
+ dfprintk(DIRCACHE, "NFS: %s: returns %d\n", |
10712 |
+ __func__, status); |
10713 |
+ return status; |
10714 |
+- out_release: |
10715 |
+- cache_page_release(desc); |
10716 |
+- goto out; |
10717 |
+ } |
10718 |
+ |
10719 |
+ /* The file offset position represents the dirent entry number. A |
10720 |
+@@ -890,7 +911,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) |
10721 |
+ if (res == -EBADCOOKIE) { |
10722 |
+ res = 0; |
10723 |
+ /* This means either end of directory */ |
10724 |
+- if (*desc->dir_cookie && desc->eof == 0) { |
10725 |
++ if (*desc->dir_cookie && !desc->eof) { |
10726 |
+ /* Or that the server has 'lost' a cookie */ |
10727 |
+ res = uncached_readdir(desc); |
10728 |
+ if (res == 0) |
10729 |
+@@ -910,6 +931,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) |
10730 |
+ break; |
10731 |
+ |
10732 |
+ res = nfs_do_filldir(desc); |
10733 |
++ unlock_page(desc->page); |
10734 |
++ cache_page_release(desc); |
10735 |
+ if (res < 0) |
10736 |
+ break; |
10737 |
+ } while (!desc->eof); |
10738 |
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c |
10739 |
+index dac20f31f01f..92895f41d9a0 100644 |
10740 |
+--- a/fs/nfs/nfs4client.c |
10741 |
++++ b/fs/nfs/nfs4client.c |
10742 |
+@@ -751,7 +751,7 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr, |
10743 |
+ |
10744 |
+ spin_lock(&nn->nfs_client_lock); |
10745 |
+ list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { |
10746 |
+- if (nfs4_cb_match_client(addr, clp, minorversion) == false) |
10747 |
++ if (!nfs4_cb_match_client(addr, clp, minorversion)) |
10748 |
+ continue; |
10749 |
+ |
10750 |
+ if (!nfs4_has_session(clp)) |
10751 |
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c |
10752 |
+index 060482e349ef..013d27dc6f58 100644 |
10753 |
+--- a/fs/overlayfs/inode.c |
10754 |
++++ b/fs/overlayfs/inode.c |
10755 |
+@@ -9,7 +9,6 @@ |
10756 |
+ |
10757 |
+ #include <linux/fs.h> |
10758 |
+ #include <linux/slab.h> |
10759 |
+-#include <linux/cred.h> |
10760 |
+ #include <linux/xattr.h> |
10761 |
+ #include "overlayfs.h" |
10762 |
+ |
10763 |
+@@ -92,7 +91,6 @@ int ovl_permission(struct inode *inode, int mask) |
10764 |
+ struct ovl_entry *oe; |
10765 |
+ struct dentry *alias = NULL; |
10766 |
+ struct inode *realinode; |
10767 |
+- const struct cred *old_cred; |
10768 |
+ struct dentry *realdentry; |
10769 |
+ bool is_upper; |
10770 |
+ int err; |
10771 |
+@@ -145,18 +143,7 @@ int ovl_permission(struct inode *inode, int mask) |
10772 |
+ goto out_dput; |
10773 |
+ } |
10774 |
+ |
10775 |
+- /* |
10776 |
+- * Check overlay inode with the creds of task and underlying inode |
10777 |
+- * with creds of mounter |
10778 |
+- */ |
10779 |
+- err = generic_permission(inode, mask); |
10780 |
+- if (err) |
10781 |
+- goto out_dput; |
10782 |
+- |
10783 |
+- old_cred = ovl_override_creds(inode->i_sb); |
10784 |
+ err = __inode_permission(realinode, mask); |
10785 |
+- revert_creds(old_cred); |
10786 |
+- |
10787 |
+ out_dput: |
10788 |
+ dput(alias); |
10789 |
+ return err; |
10790 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
10791 |
+index a7014f854e67..203384a71fee 100644 |
10792 |
+--- a/kernel/events/core.c |
10793 |
++++ b/kernel/events/core.c |
10794 |
+@@ -4887,7 +4887,15 @@ accounting: |
10795 |
+ */ |
10796 |
+ user_lock_limit *= num_online_cpus(); |
10797 |
+ |
10798 |
+- user_locked = atomic_long_read(&user->locked_vm) + user_extra; |
10799 |
++ user_locked = atomic_long_read(&user->locked_vm); |
10800 |
++ |
10801 |
++ /* |
10802 |
++ * sysctl_perf_event_mlock may have changed, so that |
10803 |
++ * user->locked_vm > user_lock_limit |
10804 |
++ */ |
10805 |
++ if (user_locked > user_lock_limit) |
10806 |
++ user_locked = user_lock_limit; |
10807 |
++ user_locked += user_extra; |
10808 |
+ |
10809 |
+ if (user_locked > user_lock_limit) |
10810 |
+ extra = user_locked - user_lock_limit; |
10811 |
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c |
10812 |
+index b98810d2f3b4..a20368e1a720 100644 |
10813 |
+--- a/kernel/time/clocksource.c |
10814 |
++++ b/kernel/time/clocksource.c |
10815 |
+@@ -272,8 +272,15 @@ static void clocksource_watchdog(unsigned long data) |
10816 |
+ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); |
10817 |
+ if (next_cpu >= nr_cpu_ids) |
10818 |
+ next_cpu = cpumask_first(cpu_online_mask); |
10819 |
+- watchdog_timer.expires += WATCHDOG_INTERVAL; |
10820 |
+- add_timer_on(&watchdog_timer, next_cpu); |
10821 |
++ |
10822 |
++ /* |
10823 |
++ * Arm timer if not already pending: could race with concurrent |
10824 |
++ * pair clocksource_stop_watchdog() clocksource_start_watchdog(). |
10825 |
++ */ |
10826 |
++ if (!timer_pending(&watchdog_timer)) { |
10827 |
++ watchdog_timer.expires += WATCHDOG_INTERVAL; |
10828 |
++ add_timer_on(&watchdog_timer, next_cpu); |
10829 |
++ } |
10830 |
+ out: |
10831 |
+ spin_unlock(&watchdog_lock); |
10832 |
+ } |
10833 |
+diff --git a/lib/test_kasan.c b/lib/test_kasan.c |
10834 |
+index c32f3b0048dc..275ff0b5162e 100644 |
10835 |
+--- a/lib/test_kasan.c |
10836 |
++++ b/lib/test_kasan.c |
10837 |
+@@ -93,6 +93,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void) |
10838 |
+ if (!ptr1 || !ptr2) { |
10839 |
+ pr_err("Allocation failed\n"); |
10840 |
+ kfree(ptr1); |
10841 |
++ kfree(ptr2); |
10842 |
+ return; |
10843 |
+ } |
10844 |
+ |
10845 |
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c |
10846 |
+index 7d37366cc695..7992c533e6f7 100644 |
10847 |
+--- a/net/hsr/hsr_slave.c |
10848 |
++++ b/net/hsr/hsr_slave.c |
10849 |
+@@ -30,6 +30,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) |
10850 |
+ |
10851 |
+ rcu_read_lock(); /* hsr->node_db, hsr->ports */ |
10852 |
+ port = hsr_port_get_rcu(skb->dev); |
10853 |
++ if (!port) |
10854 |
++ goto finish_pass; |
10855 |
+ |
10856 |
+ if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) { |
10857 |
+ /* Directly kill frames sent by ourselves */ |
10858 |
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
10859 |
+index 8e303cd7e2de..2ceda7ddaed5 100644 |
10860 |
+--- a/net/ipv4/tcp.c |
10861 |
++++ b/net/ipv4/tcp.c |
10862 |
+@@ -2261,6 +2261,7 @@ int tcp_disconnect(struct sock *sk, int flags) |
10863 |
+ tp->window_clamp = 0; |
10864 |
+ tcp_set_ca_state(sk, TCP_CA_Open); |
10865 |
+ tcp_clear_retrans(tp); |
10866 |
++ tp->total_retrans = 0; |
10867 |
+ inet_csk_delack_init(sk); |
10868 |
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 |
10869 |
+ * issue in __tcp_select_window() |
10870 |
+@@ -2272,6 +2273,8 @@ int tcp_disconnect(struct sock *sk, int flags) |
10871 |
+ dst_release(sk->sk_rx_dst); |
10872 |
+ sk->sk_rx_dst = NULL; |
10873 |
+ tcp_saved_syn_free(tp); |
10874 |
++ tp->segs_in = 0; |
10875 |
++ tp->segs_out = 0; |
10876 |
+ tp->bytes_acked = 0; |
10877 |
+ tp->bytes_received = 0; |
10878 |
+ |
10879 |
+diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h |
10880 |
+index 9992dfac6938..7317a64fdb79 100644 |
10881 |
+--- a/net/sched/cls_rsvp.h |
10882 |
++++ b/net/sched/cls_rsvp.h |
10883 |
+@@ -455,10 +455,8 @@ static u32 gen_tunnel(struct rsvp_head *data) |
10884 |
+ |
10885 |
+ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = { |
10886 |
+ [TCA_RSVP_CLASSID] = { .type = NLA_U32 }, |
10887 |
+- [TCA_RSVP_DST] = { .type = NLA_BINARY, |
10888 |
+- .len = RSVP_DST_LEN * sizeof(u32) }, |
10889 |
+- [TCA_RSVP_SRC] = { .type = NLA_BINARY, |
10890 |
+- .len = RSVP_DST_LEN * sizeof(u32) }, |
10891 |
++ [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) }, |
10892 |
++ [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) }, |
10893 |
+ [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, |
10894 |
+ }; |
10895 |
+ |
10896 |
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c |
10897 |
+index 040d853f48b9..3086df21a1c1 100644 |
10898 |
+--- a/net/sched/cls_tcindex.c |
10899 |
++++ b/net/sched/cls_tcindex.c |
10900 |
+@@ -267,6 +267,25 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
10901 |
+ cp->fall_through = p->fall_through; |
10902 |
+ cp->tp = tp; |
10903 |
+ |
10904 |
++ if (tb[TCA_TCINDEX_HASH]) |
10905 |
++ cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
10906 |
++ |
10907 |
++ if (tb[TCA_TCINDEX_MASK]) |
10908 |
++ cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); |
10909 |
++ |
10910 |
++ if (tb[TCA_TCINDEX_SHIFT]) |
10911 |
++ cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); |
10912 |
++ |
10913 |
++ if (!cp->hash) { |
10914 |
++ /* Hash not specified, use perfect hash if the upper limit |
10915 |
++ * of the hashing index is below the threshold. |
10916 |
++ */ |
10917 |
++ if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) |
10918 |
++ cp->hash = (cp->mask >> cp->shift) + 1; |
10919 |
++ else |
10920 |
++ cp->hash = DEFAULT_HASH_SIZE; |
10921 |
++ } |
10922 |
++ |
10923 |
+ if (p->perfect) { |
10924 |
+ int i; |
10925 |
+ |
10926 |
+@@ -274,7 +293,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
10927 |
+ sizeof(*r) * cp->hash, GFP_KERNEL); |
10928 |
+ if (!cp->perfect) |
10929 |
+ goto errout; |
10930 |
+- for (i = 0; i < cp->hash; i++) |
10931 |
++ for (i = 0; i < min(cp->hash, p->hash); i++) |
10932 |
+ tcf_exts_init(&cp->perfect[i].exts, |
10933 |
+ TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
10934 |
+ balloc = 1; |
10935 |
+@@ -286,15 +305,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
10936 |
+ if (old_r) |
10937 |
+ cr.res = r->res; |
10938 |
+ |
10939 |
+- if (tb[TCA_TCINDEX_HASH]) |
10940 |
+- cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
10941 |
+- |
10942 |
+- if (tb[TCA_TCINDEX_MASK]) |
10943 |
+- cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); |
10944 |
+- |
10945 |
+- if (tb[TCA_TCINDEX_SHIFT]) |
10946 |
+- cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); |
10947 |
+- |
10948 |
+ err = -EBUSY; |
10949 |
+ |
10950 |
+ /* Hash already allocated, make sure that we still meet the |
10951 |
+@@ -312,16 +322,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, |
10952 |
+ if (tb[TCA_TCINDEX_FALL_THROUGH]) |
10953 |
+ cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); |
10954 |
+ |
10955 |
+- if (!cp->hash) { |
10956 |
+- /* Hash not specified, use perfect hash if the upper limit |
10957 |
+- * of the hashing index is below the threshold. |
10958 |
+- */ |
10959 |
+- if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) |
10960 |
+- cp->hash = (cp->mask >> cp->shift) + 1; |
10961 |
+- else |
10962 |
+- cp->hash = DEFAULT_HASH_SIZE; |
10963 |
+- } |
10964 |
+- |
10965 |
+ if (!cp->perfect && !cp->h) |
10966 |
+ cp->alloc_hash = cp->hash; |
10967 |
+ |
10968 |
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c |
10969 |
+index b5291ea54a3d..c1d1abde7072 100644 |
10970 |
+--- a/net/sunrpc/auth_gss/svcauth_gss.c |
10971 |
++++ b/net/sunrpc/auth_gss/svcauth_gss.c |
10972 |
+@@ -1173,6 +1173,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd, |
10973 |
+ dprintk("RPC: No creds found!\n"); |
10974 |
+ goto out; |
10975 |
+ } else { |
10976 |
++ struct timespec64 boot; |
10977 |
+ |
10978 |
+ /* steal creds */ |
10979 |
+ rsci.cred = ud->creds; |
10980 |
+@@ -1193,6 +1194,9 @@ static int gss_proxy_save_rsc(struct cache_detail *cd, |
10981 |
+ &expiry, GFP_KERNEL); |
10982 |
+ if (status) |
10983 |
+ goto out; |
10984 |
++ |
10985 |
++ getboottime64(&boot); |
10986 |
++ expiry -= boot.tv_sec; |
10987 |
+ } |
10988 |
+ |
10989 |
+ rsci.h.expiry_time = expiry; |
10990 |
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c |
10991 |
+index 67628616506e..e7dd0800965a 100644 |
10992 |
+--- a/sound/drivers/dummy.c |
10993 |
++++ b/sound/drivers/dummy.c |
10994 |
+@@ -925,7 +925,7 @@ static void print_formats(struct snd_dummy *dummy, |
10995 |
+ { |
10996 |
+ int i; |
10997 |
+ |
10998 |
+- for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) { |
10999 |
++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) { |
11000 |
+ if (dummy->pcm_hw.formats & (1ULL << i)) |
11001 |
+ snd_iprintf(buffer, " %s", snd_pcm_format_name(i)); |
11002 |
+ } |
11003 |
+diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c |
11004 |
+index 886f2027e671..f2c71bcd06fa 100644 |
11005 |
+--- a/sound/soc/qcom/apq8016_sbc.c |
11006 |
++++ b/sound/soc/qcom/apq8016_sbc.c |
11007 |
+@@ -112,7 +112,8 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card) |
11008 |
+ link->codec_of_node = of_parse_phandle(codec, "sound-dai", 0); |
11009 |
+ if (!link->codec_of_node) { |
11010 |
+ dev_err(card->dev, "error getting codec phandle\n"); |
11011 |
+- return ERR_PTR(-EINVAL); |
11012 |
++ ret = -EINVAL; |
11013 |
++ goto error; |
11014 |
+ } |
11015 |
+ |
11016 |
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name); |
11017 |
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c |
11018 |
+index dbdea1975f90..81bedd9bb922 100644 |
11019 |
+--- a/sound/soc/soc-pcm.c |
11020 |
++++ b/sound/soc/soc-pcm.c |
11021 |
+@@ -2026,42 +2026,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, |
11022 |
+ } |
11023 |
+ EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); |
11024 |
+ |
11025 |
++static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream, |
11026 |
++ int cmd, bool fe_first) |
11027 |
++{ |
11028 |
++ struct snd_soc_pcm_runtime *fe = substream->private_data; |
11029 |
++ int ret; |
11030 |
++ |
11031 |
++ /* call trigger on the frontend before the backend. */ |
11032 |
++ if (fe_first) { |
11033 |
++ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n", |
11034 |
++ fe->dai_link->name, cmd); |
11035 |
++ |
11036 |
++ ret = soc_pcm_trigger(substream, cmd); |
11037 |
++ if (ret < 0) |
11038 |
++ return ret; |
11039 |
++ |
11040 |
++ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); |
11041 |
++ return ret; |
11042 |
++ } |
11043 |
++ |
11044 |
++ /* call trigger on the frontend after the backend. */ |
11045 |
++ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); |
11046 |
++ if (ret < 0) |
11047 |
++ return ret; |
11048 |
++ |
11049 |
++ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n", |
11050 |
++ fe->dai_link->name, cmd); |
11051 |
++ |
11052 |
++ ret = soc_pcm_trigger(substream, cmd); |
11053 |
++ |
11054 |
++ return ret; |
11055 |
++} |
11056 |
++ |
11057 |
+ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) |
11058 |
+ { |
11059 |
+ struct snd_soc_pcm_runtime *fe = substream->private_data; |
11060 |
+- int stream = substream->stream, ret; |
11061 |
++ int stream = substream->stream; |
11062 |
++ int ret = 0; |
11063 |
+ enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; |
11064 |
+ |
11065 |
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; |
11066 |
+ |
11067 |
+ switch (trigger) { |
11068 |
+ case SND_SOC_DPCM_TRIGGER_PRE: |
11069 |
+- /* call trigger on the frontend before the backend. */ |
11070 |
+- |
11071 |
+- dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n", |
11072 |
+- fe->dai_link->name, cmd); |
11073 |
+- |
11074 |
+- ret = soc_pcm_trigger(substream, cmd); |
11075 |
+- if (ret < 0) { |
11076 |
+- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret); |
11077 |
+- goto out; |
11078 |
++ switch (cmd) { |
11079 |
++ case SNDRV_PCM_TRIGGER_START: |
11080 |
++ case SNDRV_PCM_TRIGGER_RESUME: |
11081 |
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
11082 |
++ ret = dpcm_dai_trigger_fe_be(substream, cmd, true); |
11083 |
++ break; |
11084 |
++ case SNDRV_PCM_TRIGGER_STOP: |
11085 |
++ case SNDRV_PCM_TRIGGER_SUSPEND: |
11086 |
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
11087 |
++ ret = dpcm_dai_trigger_fe_be(substream, cmd, false); |
11088 |
++ break; |
11089 |
++ default: |
11090 |
++ ret = -EINVAL; |
11091 |
++ break; |
11092 |
+ } |
11093 |
+- |
11094 |
+- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); |
11095 |
+ break; |
11096 |
+ case SND_SOC_DPCM_TRIGGER_POST: |
11097 |
+- /* call trigger on the frontend after the backend. */ |
11098 |
+- |
11099 |
+- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); |
11100 |
+- if (ret < 0) { |
11101 |
+- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret); |
11102 |
+- goto out; |
11103 |
++ switch (cmd) { |
11104 |
++ case SNDRV_PCM_TRIGGER_START: |
11105 |
++ case SNDRV_PCM_TRIGGER_RESUME: |
11106 |
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
11107 |
++ ret = dpcm_dai_trigger_fe_be(substream, cmd, false); |
11108 |
++ break; |
11109 |
++ case SNDRV_PCM_TRIGGER_STOP: |
11110 |
++ case SNDRV_PCM_TRIGGER_SUSPEND: |
11111 |
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
11112 |
++ ret = dpcm_dai_trigger_fe_be(substream, cmd, true); |
11113 |
++ break; |
11114 |
++ default: |
11115 |
++ ret = -EINVAL; |
11116 |
++ break; |
11117 |
+ } |
11118 |
+- |
11119 |
+- dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n", |
11120 |
+- fe->dai_link->name, cmd); |
11121 |
+- |
11122 |
+- ret = soc_pcm_trigger(substream, cmd); |
11123 |
+ break; |
11124 |
+ case SND_SOC_DPCM_TRIGGER_BESPOKE: |
11125 |
+ /* bespoke trigger() - handles both FE and BEs */ |
11126 |
+@@ -2070,10 +2109,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) |
11127 |
+ fe->dai_link->name, cmd); |
11128 |
+ |
11129 |
+ ret = soc_pcm_bespoke_trigger(substream, cmd); |
11130 |
+- if (ret < 0) { |
11131 |
+- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret); |
11132 |
+- goto out; |
11133 |
+- } |
11134 |
+ break; |
11135 |
+ default: |
11136 |
+ dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd, |
11137 |
+@@ -2082,6 +2117,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) |
11138 |
+ goto out; |
11139 |
+ } |
11140 |
+ |
11141 |
++ if (ret < 0) { |
11142 |
++ dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n", |
11143 |
++ cmd, ret); |
11144 |
++ goto out; |
11145 |
++ } |
11146 |
++ |
11147 |
+ switch (cmd) { |
11148 |
+ case SNDRV_PCM_TRIGGER_START: |
11149 |
+ case SNDRV_PCM_TRIGGER_RESUME: |