mirror of
https://gitlab.alpinelinux.org/alpine/aports.git
synced 2026-01-02 23:31:34 +01:00
- XSA-198 CVE-2016-9379 CVE-2016-9380 delimiter injection vulnerabilities in pygrub - XSA-197 CVE-2016-9381 qemu incautious about shared ring processing - XSA-196 CVE-2016-9377 CVE-2016-9378 x86 software interrupt injection mis-handled - XSA-195 CVE-2016-9383 x86 64-bit bit test instruction emulation broken - XSA-194 CVE-2016-9384 guest 32-bit ELF symbol table load leaking host data - XSA-193 CVE-2016-9385 x86 segment base write emulation lacking canonical address checks - XSA-192 CVE-2016-9382 x86 task switch to VM86 mode mis-handled - XSA-191 CVE-2016-9386 x86 null segments not always treated as unusable fixes #6495
65 lines
2.3 KiB
Diff
65 lines
2.3 KiB
Diff
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch

Just like TR, LDTR is purely a protected mode facility and hence needs
to be loaded accordingly. Also move its loading to where it
architecturally belongs.

This is XSA-192.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2728,17 +2728,16 @@ static void hvm_unmap_entry(void *p)
 }
 
 static int hvm_load_segment_selector(
-    enum x86_segment seg, uint16_t sel)
+    enum x86_segment seg, uint16_t sel, unsigned int eflags)
 {
     struct segment_register desctab, cs, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
     bool_t writable;
     int fault_type = TRAP_invalid_tss;
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct vcpu *v = current;
 
-    if ( regs->eflags & X86_EFLAGS_VM )
+    if ( eflags & X86_EFLAGS_VM )
     {
         segr.sel = sel;
         segr.base = (uint32_t)sel << 4;
@@ -2986,6 +2985,8 @@ void hvm_task_switch(
     if ( rc != HVMCOPY_okay )
         goto out;
 
+    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
+        goto out;
 
     if ( hvm_set_cr3(tss.cr3, 1) )
         goto out;
@@ -3008,13 +3009,12 @@ void hvm_task_switch(
     }
 
     exn_raised = 0;
-    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
-         hvm_load_segment_selector(x86_seg_es, tss.es) ||
-         hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
-         hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
-         hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
-         hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
-         hvm_load_segment_selector(x86_seg_gs, tss.gs) )
+    if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
         exn_raised = 1;
 
     rc = hvm_copy_to_guest_virt(