idella4    13/10/02 17:22:28

  Added:   xen-CVE-2013-4355-XSA-63.patch
           xen-CVE-2013-4361-XSA-66.patch
           xen-CVE-2013-1442-XSA-62.patch
           xen-CVE-2013-4356-XSA-64.patch
  Log:
  Adding security patches to 4.3.0 from Bug #486354; 4.2.2 excluded (for now) due to one security patch failing.

  (Portage version: 2.2.0/cvs/Linux x86_64, signed Manifest commit with key 0xB8072B0D)

Revision  Changes    Path
1.1                  app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch?rev=1.1&content-type=text/plain

Index: xen-CVE-2013-4355-XSA-63.patch
===================================================================
x86: properly handle hvm_copy_from_guest_{phys,virt}() errors

Ignoring them generally implies using uninitialized data and, in all
cases dealt with here, potentially leaking hypervisor stack contents to
guests.

This is XSA-63.

Signed-off-by: Jan Beulich <jbeulich@××××.com>
Reviewed-by: Tim Deegan <tim@×××.org>
Reviewed-by: Andrew Cooper <andrew.cooper3@××××××.com>

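As a standalone illustration (not part of the upstream patch, and not Xen code), the following sketch models the pattern the hunks below adopt: every copy result is classified instead of being ignored, transient conditions become a retry, a bad frame yields all-ones data, and anything else is reported as unhandleable rather than silently using whatever happened to be on the stack. The enum values and helper names are stand-ins with made-up behaviour.

/*
 * Standalone sketch, not Xen code.  copy_from_guest() and handle_write()
 * are illustrative stand-ins for hvm_copy_from_guest_phys() and its
 * callers in intercept.c/io.c.
 */
#include <stdio.h>

enum hvmcopy { HVMCOPY_okay, HVMCOPY_bad_gva_to_gfn, HVMCOPY_bad_gfn_to_mfn,
               HVMCOPY_gfn_paged_out, HVMCOPY_gfn_shared };
enum x86emul { X86EMUL_OKAY, X86EMUL_RETRY, X86EMUL_UNHANDLEABLE };

/* Stand-in copy routine: only writes *data when the copy succeeds. */
static enum hvmcopy copy_from_guest(unsigned long *data, enum hvmcopy outcome)
{
    if ( outcome == HVMCOPY_okay )
        *data = 0x1234;
    return outcome;
}

/* Classify every outcome, as the patched code does. */
static enum x86emul handle_write(enum hvmcopy outcome)
{
    unsigned long data;            /* uninitialized until the copy succeeds */

    switch ( copy_from_guest(&data, outcome) )
    {
    case HVMCOPY_okay:
        break;
    case HVMCOPY_gfn_paged_out:
    case HVMCOPY_gfn_shared:
        return X86EMUL_RETRY;      /* transient condition: retry later */
    case HVMCOPY_bad_gfn_to_mfn:
        data = ~0UL;               /* treat like a read from a hole: all-ones */
        break;
    default:                       /* bad_gva_to_gfn cannot happen for phys copies */
        return X86EMUL_UNHANDLEABLE;
    }
    printf("data forwarded to handler: %#lx\n", data);
    return X86EMUL_OKAY;
}

int main(void)
{
    handle_write(HVMCOPY_okay);
    handle_write(HVMCOPY_bad_gfn_to_mfn);
    return handle_write(HVMCOPY_gfn_paged_out) == X86EMUL_RETRY ? 0 : 1;
}
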
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2308,11 +2308,7 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_virt(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    if ( rc == HVMCOPY_gfn_shared )
+    if ( rc != HVMCOPY_okay )
         goto out;
 
     eflags = regs->eflags;
@@ -2357,13 +2353,11 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_virt(
         &tss, tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    /* Note: this could be optimised, if the callee functions knew we want RO
-     * access */
-    if ( rc == HVMCOPY_gfn_shared )
+    /*
+     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
+     * functions knew we want RO access.
+     */
+    if ( rc != HVMCOPY_okay )
        goto out;
 

--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *
     {
         for ( i = 0; i < p->count; i++ )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) ||
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
             {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 rc = X86EMUL_RETRY;
                 break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
             }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
                                data);
             if ( rc != X86EMUL_OKAY )
@@ -165,8 +176,28 @@ static int process_portio_intercept(port
         for ( i = 0; i < p->count; i++ )
         {
             data = 0;
-            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
-                                           p->size);
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
+                rc = X86EMUL_RETRY;
+                break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
+            }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = action(IOREQ_WRITE, p->addr, p->size, &data);
             if ( rc != X86EMUL_OKAY )
                 break;
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mp
         data = p->data;
         if ( p->data_is_ptr )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) &&
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 return X86EMUL_RETRY;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                return X86EMUL_UNHANDLEABLE;
+            }
         }
 
         switch ( p->size )
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -39,7 +39,9 @@ static void realmode_deliver_exception(
 
  again:
     last_byte = (vector * 4) + 3;
-    if ( idtr->limit < last_byte )
+    if ( idtr->limit < last_byte ||
+         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
+         HVMCOPY_okay )
     {
         /* Software interrupt? */
         if ( insn_len != 0 )
@@ -64,8 +66,6 @@ static void realmode_deliver_exception(
         }
     }
 
-    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
-
     frame[0] = regs->eip + insn_len;
     frame[1] = csr->sel;
     frame[2] = regs->eflags & ~X86_EFLAGS_RF;


1.1                  app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch?rev=1.1&content-type=text/plain

Index: xen-CVE-2013-4361-XSA-66.patch
===================================================================
x86: properly set up fbld emulation operand address

This is CVE-2013-4361 / XSA-66.

Signed-off-by: Jan Beulich <jbeulich@××××.com>
Acked-by: Ian Jackson <ian.jackson@×××××××××.com>

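A standalone sketch (not part of the upstream patch, and not the real emulator types) of why the one-word change below matters: fbld only loads from memory, so the decoded effective address must be installed in the source operand, because the subsequent read is issued through src.mem/src.bytes; with the old "dst = ea" assignment, src was left with stale contents from earlier decoding. All names below are illustrative.

/* Standalone sketch; 'operand', 'read_mem' and the field names are made up. */
#include <stdio.h>

struct operand {
    unsigned long mem_off;     /* effective address of a memory operand */
    unsigned int  bytes;       /* operand size in bytes */
    unsigned long val;         /* value fetched from memory */
};

/* Stand-in for the emulator's memory-read callback. */
static int read_mem(unsigned long off, unsigned int bytes, unsigned long *val)
{
    *val = 0xB0D0 + off + bytes;   /* fake guest-memory contents */
    return 0;
}

int main(void)
{
    struct operand ea  = { .mem_off = 0x100, .bytes = 10 };  /* m80dec operand */
    struct operand src = { 0 };

    src = ea;                                  /* the fix: memory is the source */
    if ( read_mem(src.mem_off, src.bytes, &src.val) != 0 )
        return 1;
    printf("fbld would convert the %u-byte BCD value at %#lx (got %#lx)\n",
           src.bytes, src.mem_off, src.val);
    return 0;
}
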
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3156,11 +3156,11 @@ x86_emulate(
             break;
         case 4: /* fbld m80dec */
             ea.bytes = 10;
-            dst = ea;
+            src = ea;
             if ( (rc = ops->read(src.mem.seg, src.mem.off,
                                  &src.val, src.bytes, ctxt)) != 0 )
                 goto done;
-            emulate_fpu_insn_memdst("fbld", src.val);
+            emulate_fpu_insn_memsrc("fbld", src.val);
             break;
         case 5: /* fild m64i */
             ea.bytes = 8;


1.1                  app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch?rev=1.1&content-type=text/plain

Index: xen-CVE-2013-1442-XSA-62.patch
===================================================================
x86/xsave: initialize extended register state when guests enable it

Till now, when setting previously unset bits in XCR0 we wouldn't touch
the active register state, thus leaving in the newly enabled registers
whatever a prior user of it left there, i.e. potentially leaking
information between guests.

This is CVE-2013-1442 / XSA-62.

Signed-off-by: Jan Beulich <jbeulich@××××.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@××××××.com>

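A standalone sketch (not part of the upstream patch, and not Xen code) of the mask computation the hunk below adds to handle_xsetbv(): only feature bits that are being enabled for the first time on this vCPU, i.e. set in the new XCR0 value but never accumulated before, need their register state reinitialized before the guest can observe it. Bit positions follow the architectural XCR0 layout; variable names mirror the patch, the concrete values are made up.

/* Standalone sketch of the "newly enabled XCR0 bits" computation. */
#include <stdio.h>
#include <stdint.h>

#define XSTATE_FP   (1ULL << 0)    /* x87 state */
#define XSTATE_SSE  (1ULL << 1)    /* SSE state */
#define XSTATE_YMM  (1ULL << 2)    /* AVX state */

int main(void)
{
    uint64_t xcr0_accum = XSTATE_FP | XSTATE_SSE;              /* ever enabled so far */
    uint64_t new_bv     = XSTATE_FP | XSTATE_SSE | XSTATE_YMM; /* guest's XSETBV value */

    /* Same expression as the patch: bits turned on for the first time. */
    uint64_t mask = new_bv & ~xcr0_accum;

    printf("components to scrub before use: %#llx\n",
           (unsigned long long)mask);                          /* 0x4 = YMM */
    return 0;
}

The real fix then narrows that mask (state that will be reloaded lazily on next FPU use does not need immediate scrubbing) and uses xrstor() with it so the newly enabled components come up in their init state instead of whatever a previous user left in the hardware registers.
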
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -307,6 +307,7 @@ int validate_xstate(u64 xcr0, u64 xcr0_a
 int handle_xsetbv(u32 index, u64 new_bv)
 {
     struct vcpu *curr = current;
+    u64 mask;
 
     if ( index != XCR_XFEATURE_ENABLED_MASK )
         return -EOPNOTSUPP;
@@ -320,9 +321,23 @@ int handle_xsetbv(u32 index, u64 new_bv)
     if ( !set_xcr0(new_bv) )
         return -EFAULT;
 
+    mask = new_bv & ~curr->arch.xcr0_accum;
     curr->arch.xcr0 = new_bv;
     curr->arch.xcr0_accum |= new_bv;
 
+    mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
+    if ( mask )
+    {
+        unsigned long cr0 = read_cr0();
+
+        clts();
+        if ( curr->fpu_dirtied )
+            asm ( "stmxcsr %0" : "=m" (curr->arch.xsave_area->fpu_sse.mxcsr) );
+        xrstor(curr, mask);
+        if ( cr0 & X86_CR0_TS )
+            write_cr0(cr0);
+    }
+
     return 0;
 }


1.1                  app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch?rev=1.1&content-type=text/plain

Index: xen-CVE-2013-4356-XSA-64.patch
===================================================================
commit 95a0770282ea2a03f7bc48c6656d5fc79bae0599
Author: Tim Deegan <tim@×××.org>
Date:   Thu Sep 12 14:16:28 2013 +0100

    x86/mm/shadow: Fix initialization of PV shadow L4 tables.

    Shadowed PV L4 tables must have the same Xen mappings as their
    unshadowed equivalent.  This is done by copying the Xen entries
    verbatim from the idle pagetable, and then using guest_l4_slot()
    in the SHADOW_FOREACH_L4E() iterator to avoid touching those entries.

    adc5afbf1c70ef55c260fb93e4b8ce5ccb918706 (x86: support up to 16Tb)
    changed the definition of ROOT_PAGETABLE_XEN_SLOTS to extend right to
    the top of the address space, which causes the shadow code to
    copy Xen mappings into guest-kernel-address slots too.

    In the common case, all those slots are zero in the idle pagetable,
    and no harm is done.  But if any slot above #271 is non-zero, Xen will
    crash when that slot is later cleared (it attempts to drop
    shadow-pagetable refcounts on its own L4 pagetables).

    Fix by using the new ROOT_PAGETABLE_PV_XEN_SLOTS when appropriate.
    Monitor pagetables need the full Xen mappings, so they keep using the
    old name (with its new semantics).

    This is XSA-64.

    Signed-off-by: Tim Deegan <tim@×××.org>
    Reviewed-by: Jan Beulich <jbeulich@××××.com>

Xen 4.3.x and xen-unstable are vulnerable.

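A standalone sketch (not part of the upstream patch, and not Xen code) of the slot-count selection the hunk below introduces. The constant values here are illustrative stand-ins for Xen's memory-layout headers; the point is that a PV guest's shadow L4 only copies the slots genuinely reserved for Xen, while external (HVM) monitor tables keep copying the full, newly widened range.

/* Standalone sketch; the #define values are illustrative, not Xen's. */
#include <stdio.h>
#include <stdbool.h>

#define ROOT_PAGETABLE_XEN_SLOTS     256   /* widened by the "up to 16Tb" change */
#define ROOT_PAGETABLE_PV_XEN_SLOTS   16   /* roughly L4 slots 256..271 only */

static unsigned int xen_slots_to_copy(bool shadow_mode_external)
{
    /* Same shape as the patched sh_install_xen_entries_in_l4(). */
    return shadow_mode_external ? ROOT_PAGETABLE_XEN_SLOTS
                                : ROOT_PAGETABLE_PV_XEN_SLOTS;
}

int main(void)
{
    printf("monitor table (HVM): copy %u L4 slots\n", xen_slots_to_copy(true));
    printf("PV shadow L4:        copy %u L4 slots\n", xen_slots_to_copy(false));
    return 0;
}
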
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 4c4c2ba..3fed0b6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1433,15 +1433,19 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
 {
     struct domain *d = v->domain;
     shadow_l4e_t *sl4e;
+    unsigned int slots;
 
     sl4e = sh_map_domain_page(sl4mfn);
     ASSERT(sl4e != NULL);
     ASSERT(sizeof (l4_pgentry_t) == sizeof (shadow_l4e_t));
 
     /* Copy the common Xen mappings from the idle domain */
+    slots = (shadow_mode_external(d)
+             ? ROOT_PAGETABLE_XEN_SLOTS
+             : ROOT_PAGETABLE_PV_XEN_SLOTS);
     memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
+           slots * sizeof(l4_pgentry_t));
 
     /* Install the per-domain mappings for this domain */
     sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =